From d379f1fc904ca463604a78872c04e9b0e951ce86 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Fri, 19 Feb 2021 13:52:44 -0800 Subject: [PATCH 001/215] trim maps --- ledger/acctupdates.go | 9 ++++++++- ledger/ledgercore/statedelta_test.go | 9 +++++++++ ledger/txtail.go | 8 +++++++- 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index e7ae52566e..10fffbd2f8 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1422,9 +1422,16 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S if rnd != au.latest()+1 { au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas)) } + + // Trim size of creatableDeltas map + creatableDeltas := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable, len(delta.Creatables)) + for k, v := range delta.Creatables { + creatableDeltas[k] = v + } + au.deltas = append(au.deltas, delta.Accts) au.protos = append(au.protos, proto) - au.creatableDeltas = append(au.creatableDeltas, delta.Creatables) + au.creatableDeltas = append(au.creatableDeltas, creatableDeltas) au.roundDigest = append(au.roundDigest, blk.Digest()) au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1]) diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go index b7be6eafc7..5b0d941ad9 100644 --- a/ledger/ledgercore/statedelta_test.go +++ b/ledger/ledgercore/statedelta_test.go @@ -92,3 +92,12 @@ func TestAccountDeltas(t *testing.T) { a.Equal(addr1, address) a.Equal(sample1, data) } + +func BenchmarkMakeStateDelta(b *testing.B) { + hint := 23000 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + MakeStateDelta(nil, 0, hint) + } +} diff --git a/ledger/txtail.go b/ledger/txtail.go index 6f6c08c917..03e6798b22 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -98,9 +98,15 @@ func (t *txTail) newBlock(blk 
bookkeeping.Block, delta ledgercore.StateDelta) { return } + // Trim size of txLeases map + txLeases := make(map[ledgercore.Txlease]basics.Round, len(delta.Txleases)) + for k, v := range delta.Txleases { + txLeases[k] = v + } + t.recent[rnd] = roundTxMembers{ txids: delta.Txids, - txleases: delta.Txleases, + txleases: txLeases, proto: config.Consensus[blk.CurrentProtocol], } From 86f0a1c28b634d5b03d5dfafa5bb57cae4f3d3f2 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Wed, 24 Feb 2021 09:23:29 -0800 Subject: [PATCH 002/215] move compression to finalValidation --- ledger/acctupdates.go | 8 +------- ledger/eval.go | 1 + ledger/ledgercore/statedelta.go | 31 +++++++++++++++++++++++++++++++ ledger/txtail.go | 8 +------- 4 files changed, 34 insertions(+), 14 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 10fffbd2f8..1ff28f1270 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1423,15 +1423,9 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas)) } - // Trim size of creatableDeltas map - creatableDeltas := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable, len(delta.Creatables)) - for k, v := range delta.Creatables { - creatableDeltas[k] = v - } - au.deltas = append(au.deltas, delta.Accts) au.protos = append(au.protos, proto) - au.creatableDeltas = append(au.creatableDeltas, creatableDeltas) + au.creatableDeltas = append(au.creatableDeltas, delta.Creatables) au.roundDigest = append(au.roundDigest, blk.Digest()) au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1]) diff --git a/ledger/eval.go b/ledger/eval.go index a63b09eb02..3484209ee1 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1009,6 +1009,7 @@ func (eval *BlockEvaluator) endOfBlock() error { // FinalValidation does the validation that must happen after the 
block is built and all state updates are computed func (eval *BlockEvaluator) finalValidation() error { + eval.state.mods.Compress() if eval.validate { // check commitments txnRoot, err := eval.block.PaysetCommit() diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index ad2f698bbf..055a84f781 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -68,6 +68,9 @@ type StateDelta struct { // previous block timestamp PrevTimestamp int64 + + // initial hint for allocating data structures for StateDelta + initialTransactionsCount int } // AccountDeltas stores ordered accounts and allows fast lookup by address @@ -149,3 +152,31 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { } ad.acctsCache[addr] = last } + +func (sd *StateDelta) Compress() { + sd.Accts.accts = sd.Accts.accts[:len(sd.Accts.accts)] + + if len(sd.Accts.acctsCache) < sd.initialTransactionsCount / 2 { + acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) + for k, v := range sd.Accts.acctsCache { + acctsCache[k] = v + } + sd.Accts.acctsCache = acctsCache + } + + if len(sd.Txleases) < sd.initialTransactionsCount / 2 { + txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) + for k, v := range sd.Txleases { + txLeases[k] = v + } + sd.Txleases = txLeases + } + + if len(sd.Creatables) < sd.initialTransactionsCount / 2 { + creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) + for k, v := range sd.Creatables { + creatableDeltas[k] = v + } + sd.Creatables = creatableDeltas + } +} diff --git a/ledger/txtail.go b/ledger/txtail.go index 03e6798b22..6f6c08c917 100644 --- a/ledger/txtail.go +++ b/ledger/txtail.go @@ -98,15 +98,9 @@ func (t *txTail) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) { return } - // Trim size of txLeases map - txLeases := make(map[ledgercore.Txlease]basics.Round, len(delta.Txleases)) - for k, v := range delta.Txleases { - txLeases[k] = v - } - 
t.recent[rnd] = roundTxMembers{ txids: delta.Txids, - txleases: txLeases, + txleases: delta.Txleases, proto: config.Consensus[blk.CurrentProtocol], } From 93b492f9192789e95d2b43828a93cabfcb994219 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Wed, 24 Feb 2021 10:18:38 -0800 Subject: [PATCH 003/215] address comments --- ledger/eval.go | 2 +- ledger/ledgercore/statedelta.go | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index 3484209ee1..5d4d501d38 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1009,7 +1009,7 @@ func (eval *BlockEvaluator) endOfBlock() error { // FinalValidation does the validation that must happen after the block is built and all state updates are computed func (eval *BlockEvaluator) finalValidation() error { - eval.state.mods.Compress() + eval.state.mods.OptimizeAllocatedMemory() if eval.validate { // check commitments txnRoot, err := eval.block.PaysetCommit() diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 055a84f781..359b727a10 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -153,10 +153,15 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { ad.acctsCache[addr] = last } -func (sd *StateDelta) Compress() { - sd.Accts.accts = sd.Accts.accts[:len(sd.Accts.accts)] +// OptimizeAllocatedMemory by reallocating maps to needed capacity +func (sd *StateDelta) OptimizeAllocatedMemory() { + if len(sd.Accts.accts) < sd.initialTransactionsCount/2 { + accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) + copy(accts, sd.Accts.accts) + sd.Accts.accts = accts + } - if len(sd.Accts.acctsCache) < sd.initialTransactionsCount / 2 { + if len(sd.Accts.acctsCache) < sd.initialTransactionsCount/2 { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -164,7 +169,7 @@ func (sd *StateDelta) Compress() { sd.Accts.acctsCache = 
acctsCache } - if len(sd.Txleases) < sd.initialTransactionsCount / 2 { + if len(sd.Txleases) < sd.initialTransactionsCount/2 { txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -172,7 +177,7 @@ func (sd *StateDelta) Compress() { sd.Txleases = txLeases } - if len(sd.Creatables) < sd.initialTransactionsCount / 2 { + if len(sd.Creatables) < sd.initialTransactionsCount/2 { creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v From fe46619d437dcd03ed31925cb7e2e29d9472b035 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Wed, 24 Feb 2021 10:57:35 -0800 Subject: [PATCH 004/215] fix bug --- ledger/ledgercore/statedelta.go | 1 + 1 file changed, 1 insertion(+) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 359b727a10..93bdb1d42e 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -93,6 +93,7 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int) Creatables: make(map[basics.CreatableIndex]ModifiedCreatable, hint), Hdr: hdr, PrevTimestamp: prevTimestamp, + initialTransactionsCount: hint, } } From e5307a8a7a5f6454f60af5cb9f3c471f5cb5ae9e Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Wed, 24 Feb 2021 13:41:44 -0800 Subject: [PATCH 005/215] change threshold --- ledger/ledgercore/statedelta.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 93bdb1d42e..2ef99cae08 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -88,11 +88,11 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int) accts: make([]basics.BalanceRecord, 0, hint*2), acctsCache: make(map[basics.Address]int, hint*2), }, - Txids: make(map[transactions.Txid]basics.Round, hint), - Txleases: 
make(map[Txlease]basics.Round, hint), - Creatables: make(map[basics.CreatableIndex]ModifiedCreatable, hint), - Hdr: hdr, - PrevTimestamp: prevTimestamp, + Txids: make(map[transactions.Txid]basics.Round, hint), + Txleases: make(map[Txlease]basics.Round, hint), + Creatables: make(map[basics.CreatableIndex]ModifiedCreatable, hint), + Hdr: hdr, + PrevTimestamp: prevTimestamp, initialTransactionsCount: hint, } } @@ -156,13 +156,13 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { // OptimizeAllocatedMemory by reallocating maps to needed capacity func (sd *StateDelta) OptimizeAllocatedMemory() { - if len(sd.Accts.accts) < sd.initialTransactionsCount/2 { + if len(sd.Accts.accts) < sd.initialTransactionsCount-500 { accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) copy(accts, sd.Accts.accts) sd.Accts.accts = accts } - if len(sd.Accts.acctsCache) < sd.initialTransactionsCount/2 { + if len(sd.Accts.acctsCache) < sd.initialTransactionsCount-500 { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -170,7 +170,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { sd.Accts.acctsCache = acctsCache } - if len(sd.Txleases) < sd.initialTransactionsCount/2 { + if len(sd.Txleases) < sd.initialTransactionsCount-1000 { txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -178,7 +178,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { sd.Txleases = txLeases } - if len(sd.Creatables) < sd.initialTransactionsCount/2 { + if len(sd.Creatables) < sd.initialTransactionsCount-1000 { creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v From 364e512d3566a0a59de5adec159bd73ec4c67665 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 24 Feb 2021 22:23:19 -0500 Subject: [PATCH 006/215] try to preload all account data before evaluation. 
--- ledger/eval.go | 117 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 82 insertions(+), 35 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index a63b09eb02..9cb68985c7 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1143,17 +1143,19 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali wg.Wait() }() - // If validationCtx or underlying ctx are Done, end prefetch - if usePrefetch { - wg.Add(1) - go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg) - } - // Next, transactions paysetgroups, err := blk.DecodePaysetGroups() if err != nil { return ledgercore.StateDelta{}, err } + + // If validationCtx or underlying ctx are Done, end prefetch + if usePrefetch { + //wg.Add(1) + //go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg) + } + paysetgroupsCh := loadAccounts(l, blk.Round()-1, eval.state, paysetgroups) + var txvalidator evalTxValidator if validate { _, ok := config.Consensus[blk.CurrentProtocol] @@ -1171,8 +1173,17 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali } - for _, txgroup := range paysetgroups { + for { select { + case txgroup, ok := <-paysetgroupsCh: + if !ok { + break + } + err = eval.TransactionGroup(txgroup) + if err != nil { + return ledgercore.StateDelta{}, err + } + continue case <-ctx.Done(): return ledgercore.StateDelta{}, ctx.Err() case err, open := <-txvalidator.done: @@ -1180,13 +1191,9 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali if open && err != nil { return ledgercore.StateDelta{}, err } - default: - } - - err = eval.TransactionGroup(txgroup) - if err != nil { - return ledgercore.StateDelta{}, err + continue } + break } // Finally, procees any pending end-of-block state changes @@ -1218,31 +1225,71 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali return eval.state.deltas(), nil } -func prefetchThread(ctx context.Context, 
state roundCowParent, payset []transactions.SignedTxnInBlock, wg *sync.WaitGroup) { - defer wg.Done() - maybelookup := func(addr basics.Address) { - if addr.IsZero() { - return +func loadAccounts(l ledgerForEvaluator, rnd basics.Round, state *roundCowState, groups [][]transactions.SignedTxnWithAD) chan []transactions.SignedTxnWithAD { + outChan := make(chan []transactions.SignedTxnWithAD, len(groups)) + go func() { + type addrTask struct { + addr basics.Address + waitGroups []*sync.WaitGroup } - state.lookup(addr) - } - for _, stxn := range payset { - select { - case <-ctx.Done(): - return - default: + accountsChannels := make(map[basics.Address]*addrTask) + addressesCh := make(chan *addrTask, len(groups)*16*10) + initAccount := func(addr basics.Address, wg *sync.WaitGroup) { + if addr.IsZero() { + return + } + if _, have := accountsChannels[addr]; !have { + task := &addrTask{ + addr: addr, + waitGroups: []*sync.WaitGroup{wg}, + } + accountsChannels[addr] = task + addressesCh <- task + } else { + task := accountsChannels[addr] + task.waitGroups = append(task.waitGroups, wg) + } + wg.Add(1) + } + groupsReady := make([]*sync.WaitGroup, len(groups)) + for i, group := range groups { + groupWg := &sync.WaitGroup{} + groupsReady[i] = groupWg + for _, stxn := range group { + initAccount(stxn.Txn.Sender, groupWg) + initAccount(stxn.Txn.Receiver, groupWg) + initAccount(stxn.Txn.CloseRemainderTo, groupWg) + initAccount(stxn.Txn.AssetSender, groupWg) + initAccount(stxn.Txn.AssetReceiver, groupWg) + initAccount(stxn.Txn.AssetCloseTo, groupWg) + initAccount(stxn.Txn.FreezeAccount, groupWg) + for _, xa := range stxn.Txn.Accounts { + initAccount(xa, groupWg) + } + } } - state.lookup(stxn.Txn.Sender) - maybelookup(stxn.Txn.Receiver) - maybelookup(stxn.Txn.CloseRemainderTo) - maybelookup(stxn.Txn.AssetSender) - maybelookup(stxn.Txn.AssetReceiver) - maybelookup(stxn.Txn.AssetCloseTo) - maybelookup(stxn.Txn.FreezeAccount) - for _, xa := range stxn.Txn.Accounts { - maybelookup(xa) + 
close(addressesCh) + + base := state.lookupParent.(*roundCowBase) + + for i := 0; i < 4; i++ { + go func() { + for task := range addressesCh { + // load the address + base.lookup(task.addr) + for _, wg := range task.waitGroups { + wg.Done() + } + } + }() } - } + for i, wg := range groupsReady { + wg.Wait() + outChan <- groups[i] + } + close(outChan) + }() + return outChan } // Validate uses the ledger to validate block blk as a candidate next block. From 14b2d6fbaaf5988fc4dd624c6c65702ed8f5b39f Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 25 Feb 2021 00:39:03 -0500 Subject: [PATCH 007/215] update --- ledger/eval.go | 92 +++++++++++++++++++++++++++++++-------------- ledger/eval_test.go | 4 ++ 2 files changed, 67 insertions(+), 29 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index 9cb68985c7..b0127e6275 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -22,8 +22,6 @@ import ( "fmt" "sync" - "github.com/algorand/go-deadlock" - "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/crypto/compactcert" @@ -73,9 +71,6 @@ type roundCowBase struct { // are beyond the scope of this cache. // The account data store here is always the account data without the rewards. accounts map[basics.Address]basics.AccountData - - // accountsMu is the accounts read-write mutex, used to syncronize the access ot the accounts map. - accountsMu deadlock.RWMutex } func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) { @@ -86,18 +81,13 @@ func (x *roundCowBase) getCreator(cidx basics.CreatableIndex, ctype basics.Creat // first, and if it cannot find it there, it would defer to the underlaying implementation. // note that errors in accounts data retrivals are not cached as these typically cause the transaction evaluation to fail. 
func (x *roundCowBase) lookup(addr basics.Address) (basics.AccountData, error) { - x.accountsMu.RLock() if accountData, found := x.accounts[addr]; found { - x.accountsMu.RUnlock() return accountData, nil } - x.accountsMu.RUnlock() accountData, _, err := x.l.LookupWithoutRewards(x.rnd, addr) if err == nil { - x.accountsMu.Lock() x.accounts[addr] = accountData - x.accountsMu.Unlock() } return accountData, err } @@ -1154,7 +1144,7 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali //wg.Add(1) //go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg) } - paysetgroupsCh := loadAccounts(l, blk.Round()-1, eval.state, paysetgroups) + paysetgroupsCh := loadAccounts(l, blk.Round()-1, paysetgroups) var txvalidator evalTxValidator if validate { @@ -1173,13 +1163,18 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali } + base := eval.state.lookupParent.(*roundCowBase) + for { select { case txgroup, ok := <-paysetgroupsCh: if !ok { break } - err = eval.TransactionGroup(txgroup) + for _, br := range txgroup.balances { + base.accounts[br.Addr] = br.AccountData + } + err = eval.TransactionGroup(txgroup.group) if err != nil { return ledgercore.StateDelta{}, err } @@ -1225,35 +1220,53 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali return eval.state.deltas(), nil } -func loadAccounts(l ledgerForEvaluator, rnd basics.Round, state *roundCowState, groups [][]transactions.SignedTxnWithAD) chan []transactions.SignedTxnWithAD { - outChan := make(chan []transactions.SignedTxnWithAD, len(groups)) +type loadedTransactionGroup struct { + group []transactions.SignedTxnWithAD + balances []basics.BalanceRecord +} + +func loadAccounts(l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD) chan loadedTransactionGroup { + outChan := make(chan loadedTransactionGroup, len(groups)) go func() { + type groupTask struct { + balances []basics.BalanceRecord + 
balancesCount int + done chan struct{} + } type addrTask struct { - addr basics.Address - waitGroups []*sync.WaitGroup + addr basics.Address + groups []*groupTask + groupIndices []int } + accountsChannels := make(map[basics.Address]*addrTask) addressesCh := make(chan *addrTask, len(groups)*16*10) - initAccount := func(addr basics.Address, wg *sync.WaitGroup) { + totalBalances := 0 + initAccount := func(addr basics.Address, wg *groupTask) { if addr.IsZero() { return } if _, have := accountsChannels[addr]; !have { task := &addrTask{ - addr: addr, - waitGroups: []*sync.WaitGroup{wg}, + addr: addr, + groups: make([]*groupTask, 1, 4), + groupIndices: make([]int, 1, 4), } + task.groups[0] = wg + task.groupIndices[0] = wg.balancesCount accountsChannels[addr] = task addressesCh <- task } else { task := accountsChannels[addr] - task.waitGroups = append(task.waitGroups, wg) + task.groups = append(task.groups, wg) + task.groupIndices = append(task.groupIndices, wg.balancesCount) } - wg.Add(1) + wg.balancesCount++ + totalBalances++ } - groupsReady := make([]*sync.WaitGroup, len(groups)) + groupsReady := make([]*groupTask, len(groups)) for i, group := range groups { - groupWg := &sync.WaitGroup{} + groupWg := &groupTask{} groupsReady[i] = groupWg for _, stxn := range group { initAccount(stxn.Txn.Sender, groupWg) @@ -1270,22 +1283,43 @@ func loadAccounts(l ledgerForEvaluator, rnd basics.Round, state *roundCowState, } close(addressesCh) - base := state.lookupParent.(*roundCowBase) + allBalances := make([]basics.BalanceRecord, totalBalances) + usedBalances := 0 + for _, gr := range groupsReady { + gr.balances = allBalances[usedBalances : usedBalances+gr.balancesCount] + gr.done = make(chan struct{}, gr.balancesCount) + usedBalances += gr.balancesCount + } for i := 0; i < 4; i++ { go func() { for task := range addressesCh { // load the address - base.lookup(task.addr) - for _, wg := range task.waitGroups { - wg.Done() + acctData, _, err := l.LookupWithoutRewards(rnd, task.addr) + 
br := basics.BalanceRecord{ + Addr: task.addr, + AccountData: acctData, + } + if err == nil { + for i, wg := range task.groups { + wg.balances[task.groupIndices[i]] = br + wg.done <- struct{}{} + } + } else { + // todo - handle error cases. + panic(err) } } }() } for i, wg := range groupsReady { - wg.Wait() - outChan <- groups[i] + for j := 0; j < wg.balancesCount; j++ { + <-wg.done + } + outChan <- loadedTransactionGroup{ + group: groups[i], + balances: wg.balances, + } } close(outChan) }() diff --git a/ledger/eval_test.go b/ledger/eval_test.go index 5142ffe14b..0bec4e8439 100644 --- a/ledger/eval_test.go +++ b/ledger/eval_test.go @@ -514,6 +514,10 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool) { for i := 0; i < numTxns; i++ { sender := i % len(addrs) receiver := (i + 1) % len(addrs) + // The following would create more random selection of accounts, and prevent a cache of half of the accounts.. + // iDigest := crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24)}) + // sender := (uint64(iDigest[0]) + uint64(iDigest[1])*256 + uint64(iDigest[2])*256*256) % uint64(len(addrs)) + // receiver := (uint64(iDigest[4]) + uint64(iDigest[5])*256 + uint64(iDigest[6])*256*256) % uint64(len(addrs)) txn := transactions.Transaction{ Type: protocol.PaymentTx, Header: transactions.Header{ From 1db17a85327e6178b92f2cc9b0d954c27a838a2d Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 25 Feb 2021 09:44:55 -0500 Subject: [PATCH 008/215] some more refactoring and error handling --- ledger/eval.go | 68 +++++++++++++++++++++++++++++++++++--------------- 1 file changed, 48 insertions(+), 20 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index b0127e6275..f9a6b802cf 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1144,7 +1144,7 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali //wg.Add(1) //go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg) } - paysetgroupsCh := 
loadAccounts(l, blk.Round()-1, paysetgroups) + paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups) var txvalidator evalTxValidator if validate { @@ -1170,7 +1170,10 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali case txgroup, ok := <-paysetgroupsCh: if !ok { break + } else if txgroup.err != nil { + return ledgercore.StateDelta{}, err } + for _, br := range txgroup.balances { base.accounts[br.Addr] = br.AccountData } @@ -1223,21 +1226,23 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali type loadedTransactionGroup struct { group []transactions.SignedTxnWithAD balances []basics.BalanceRecord + err error } -func loadAccounts(l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD) chan loadedTransactionGroup { +func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD) chan loadedTransactionGroup { outChan := make(chan loadedTransactionGroup, len(groups)) go func() { type groupTask struct { balances []basics.BalanceRecord balancesCount int - done chan struct{} + done chan error } type addrTask struct { addr basics.Address groups []*groupTask groupIndices []int } + defer close(outChan) accountsChannels := make(map[basics.Address]*addrTask) addressesCh := make(chan *addrTask, len(groups)*16*10) @@ -1287,41 +1292,64 @@ func loadAccounts(l ledgerForEvaluator, rnd basics.Round, groups [][]transaction usedBalances := 0 for _, gr := range groupsReady { gr.balances = allBalances[usedBalances : usedBalances+gr.balancesCount] - gr.done = make(chan struct{}, gr.balancesCount) + gr.done = make(chan error, gr.balancesCount) usedBalances += gr.balancesCount } for i := 0; i < 4; i++ { go func() { - for task := range addressesCh { - // load the address - acctData, _, err := l.LookupWithoutRewards(rnd, task.addr) - br := basics.BalanceRecord{ - Addr: task.addr, - AccountData: acctData, - } - if err == nil { - for i, 
wg := range task.groups { - wg.balances[task.groupIndices[i]] = br - wg.done <- struct{}{} + for { + select { + case task, ok := <-addressesCh: + // load the address + if !ok { + return } - } else { - // todo - handle error cases. - panic(err) + acctData, _, err := l.LookupWithoutRewards(rnd, task.addr) + br := basics.BalanceRecord{ + Addr: task.addr, + AccountData: acctData, + } + if err == nil { + for i, wg := range task.groups { + wg.balances[task.groupIndices[i]] = br + wg.done <- nil + } + } else { + for i, wg := range task.groups { + wg.balances[task.groupIndices[i]] = br + wg.done <- err + } + } + case <-ctx.Done(): + return } + } }() } + for i, wg := range groupsReady { for j := 0; j < wg.balancesCount; j++ { - <-wg.done + select { + case err := <-wg.done: + if err != nil { + outChan <- loadedTransactionGroup{ + group: groups[i], + err: err, + } + return + } + case <-ctx.Done(): + return + } + } outChan <- loadedTransactionGroup{ group: groups[i], balances: wg.balances, } } - close(outChan) }() return outChan } From 7c065ba43a37d8f6c81f76e872fef5d2eaeb386f Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 25 Feb 2021 11:04:12 -0500 Subject: [PATCH 009/215] add fee sink --- ledger/eval.go | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index f9a6b802cf..bec8bb2ce8 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1144,7 +1144,7 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali //wg.Add(1) //go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg) } - paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups) + paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups, blk.BlockHeader.FeeSink) var txvalidator evalTxValidator if validate { @@ -1229,7 +1229,7 @@ type loadedTransactionGroup struct { err error } -func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups 
[][]transactions.SignedTxnWithAD) chan loadedTransactionGroup { +func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address) chan loadedTransactionGroup { outChan := make(chan loadedTransactionGroup, len(groups)) go func() { type groupTask struct { @@ -1269,6 +1269,13 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g wg.balancesCount++ totalBalances++ } + if len(groups) > 0 { + task := &addrTask{ + addr: feeSinkAddr, + } + addressesCh <- task + accountsChannels[feeSinkAddr] = task + } groupsReady := make([]*groupTask, len(groups)) for i, group := range groups { groupWg := &groupTask{} @@ -1286,6 +1293,11 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g } } } + + // Add fee sink to the first group + if len(groupsReady) > 0 { + initAccount(feeSinkAddr, groupsReady[0]) + } close(addressesCh) allBalances := make([]basics.BalanceRecord, totalBalances) From 854aa0cce6684311df3ff2520321b2712ad6b4eb Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Fri, 26 Feb 2021 12:59:33 -0800 Subject: [PATCH 010/215] reallocate based on memory savings --- ledger/ledgercore/statedelta.go | 13 +++++--- ledger/ledgercore/statedelta_test.go | 48 ++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+), 4 deletions(-) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 2ef99cae08..e9980979c7 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -155,14 +155,17 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { } // OptimizeAllocatedMemory by reallocating maps to needed capacity +// For each data structure, reallocate if it would save us at least 50MB aggregate func (sd *StateDelta) OptimizeAllocatedMemory() { - if len(sd.Accts.accts) < sd.initialTransactionsCount-500 { + // accts takes up 232 bytes per entry, and is saved for 320 rounds + if (2 * 
sd.initialTransactionsCount - len(sd.Accts.accts)) * 232 * 320 < 50000000 { accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) copy(accts, sd.Accts.accts) sd.Accts.accts = accts } - if len(sd.Accts.acctsCache) < sd.initialTransactionsCount-500 { + // acctsCache takes up 64 bytes per entry, and is saved for 320 rounds + if (2 * sd.initialTransactionsCount - len(sd.Accts.acctsCache)) * 64 * 320 < 50000000 { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -170,7 +173,8 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { sd.Accts.acctsCache = acctsCache } - if len(sd.Txleases) < sd.initialTransactionsCount-1000 { + // TxLeases takes up 112 bytes per entry, and is saved for 1000 rounds + if (sd.initialTransactionsCount - len(sd.Txleases)) * 112 * 1000 < 50000000 { txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -178,7 +182,8 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { sd.Txleases = txLeases } - if len(sd.Creatables) < sd.initialTransactionsCount-1000 { + // Creatables takes up 100 bytes per entry, and is saved for 320 rounds + if (sd.initialTransactionsCount - len(sd.Creatables)) * 100 * 320 < 50000000 { creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go index 5b0d941ad9..5d779cc98a 100644 --- a/ledger/ledgercore/statedelta_test.go +++ b/ledger/ledgercore/statedelta_test.go @@ -101,3 +101,51 @@ func BenchmarkMakeStateDelta(b *testing.B) { MakeStateDelta(nil, 0, hint) } } + +func BenchmarkBalanceRecord(b *testing.B) { + hint := 23000 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + x := make([]basics.BalanceRecord, 0, hint*2) + if len(x) > 0 { + return + } + } +} + +func BenchmarkAcctCache(b *testing.B) { + 
hint := 23000 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + x := make(map[basics.Address]int, hint*2) + if len(x) > 0 { + return + } + } +} + +func BenchmarkCreatables(b *testing.B) { + hint := 23000 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + x := make(map[basics.CreatableIndex]ModifiedCreatable, hint) + if len(x) > 0 { + return + } + } +} + +func BenchmarkTxLeases(b *testing.B) { + hint := 23000 + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + x := make(map[Txlease]basics.Round, hint) + if len(x) > 0 { + return + } + } +} \ No newline at end of file From 7c56299554a7c157e8a5a42a1df6278c7b6a9d7a Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Fri, 26 Feb 2021 13:05:02 -0800 Subject: [PATCH 011/215] format --- ledger/acctupdates.go | 1 - ledger/ledgercore/statedelta.go | 8 ++++---- ledger/ledgercore/statedelta_test.go | 2 +- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 1ff28f1270..e7ae52566e 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1422,7 +1422,6 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S if rnd != au.latest()+1 { au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas)) } - au.deltas = append(au.deltas, delta.Accts) au.protos = append(au.protos, proto) au.creatableDeltas = append(au.creatableDeltas, delta.Creatables) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index e9980979c7..9c2e69e7f2 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -158,14 +158,14 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { // For each data structure, reallocate if it would save us at least 50MB aggregate func (sd *StateDelta) OptimizeAllocatedMemory() { // accts takes up 232 bytes per entry, and is saved for 320 rounds - if (2 * 
sd.initialTransactionsCount - len(sd.Accts.accts)) * 232 * 320 < 50000000 { + if (2*sd.initialTransactionsCount-len(sd.Accts.accts))*232*320 < 50000000 { accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) copy(accts, sd.Accts.accts) sd.Accts.accts = accts } // acctsCache takes up 64 bytes per entry, and is saved for 320 rounds - if (2 * sd.initialTransactionsCount - len(sd.Accts.acctsCache)) * 64 * 320 < 50000000 { + if (2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*64*320 < 50000000 { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -174,7 +174,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { } // TxLeases takes up 112 bytes per entry, and is saved for 1000 rounds - if (sd.initialTransactionsCount - len(sd.Txleases)) * 112 * 1000 < 50000000 { + if (sd.initialTransactionsCount-len(sd.Txleases))*112*1000 < 50000000 { txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -183,7 +183,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { } // Creatables takes up 100 bytes per entry, and is saved for 320 rounds - if (sd.initialTransactionsCount - len(sd.Creatables)) * 100 * 320 < 50000000 { + if (sd.initialTransactionsCount-len(sd.Creatables))*100*320 < 50000000 { creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go index 5d779cc98a..222c53feb0 100644 --- a/ledger/ledgercore/statedelta_test.go +++ b/ledger/ledgercore/statedelta_test.go @@ -148,4 +148,4 @@ func BenchmarkTxLeases(b *testing.B) { return } } -} \ No newline at end of file +} From e55a7d66b69f97a3a776b4c2a18d07512f844db1 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 26 Feb 2021 16:53:27 -0500 Subject: [PATCH 012/215] changes per reviewer feedback --- 
ledger/eval.go | 79 +++++++++++++++++++++++++++----------- ledger/eval_test.go | 2 +- ledger/ledger.go | 4 +- ledger/ledger_perf_test.go | 2 +- 4 files changed, 60 insertions(+), 27 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index bec8bb2ce8..ec40a4bbe8 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -46,6 +46,11 @@ var ErrNoSpace = errors.New("block does not have space for transaction") // many transactions in a block. const maxPaysetHint = 20000 +// asyncAccountLoadingThreadCount controls how many go routines would be used +// to load the account data before the eval() start processing individual +// transaction group. +const asyncAccountLoadingThreadCount = 4 + type roundCowBase struct { l ledgerForCowBase @@ -1120,7 +1125,7 @@ func (validator *evalTxValidator) run() { // Validate: eval(ctx, l, blk, true, txcache, executionPool, true) // AddBlock: eval(context.Background(), l, blk, false, txcache, nil, true) // tracker: eval(context.Background(), l, blk, false, txcache, nil, false) -func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool, usePrefetch bool) (ledgercore.StateDelta, error) { +func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, validate bool, txcache verify.VerifiedTransactionCache, executionPool execpool.BacklogPool) (ledgercore.StateDelta, error) { eval, err := startEvaluator(l, blk.BlockHeader, len(blk.Payset), validate, false) if err != nil { return ledgercore.StateDelta{}, err @@ -1139,11 +1144,6 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali return ledgercore.StateDelta{}, err } - // If validationCtx or underlying ctx are Done, end prefetch - if usePrefetch { - //wg.Add(1) - //go prefetchThread(validationCtx, eval.state.lookupParent, blk.Payset, &wg) - } paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups, blk.BlockHeader.FeeSink) var 
txvalidator evalTxValidator @@ -1223,37 +1223,53 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali return eval.state.deltas(), nil } +// loadedTransactionGroup is a helper struct to allow asyncronious loading of the account data needed by the transaction groups type loadedTransactionGroup struct { - group []transactions.SignedTxnWithAD + // group is the transaction group + group []transactions.SignedTxnWithAD + // balances is a list of all the balances that the transaction group refer to and are needed. balances []basics.BalanceRecord - err error + // err indicates whether any of the balances in this structure have failed to load. In case of an error, at least + // one of the entries in the balances would be uninitialized. + err error } +// loadAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and add it to the first returned transaction group. +// The order of the transaction groups returned by the channel is identical to the one in the input array. func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address) chan loadedTransactionGroup { outChan := make(chan loadedTransactionGroup, len(groups)) go func() { + // groupTask helps to organize the account loading for each transaction group. type groupTask struct { - balances []basics.BalanceRecord + // balances contains the loaded balances each transaction group have + balances []basics.BalanceRecord + // balancesCount is the number of balances that nees to be loaded per transaction group balancesCount int - done chan error + // done is a waiting channel for all the account data for the transaction group to be loaded + done chan error } + // addrTask manage the loading of a single account address. 
type addrTask struct { - addr basics.Address - groups []*groupTask + // account address to fetch + address basics.Address + // a list of transaction group tasks that depends on this address + groups []*groupTask + // a list of indices into the groupTask.balances where the address would be stored groupIndices []int } defer close(outChan) accountsChannels := make(map[basics.Address]*addrTask) addressesCh := make(chan *addrTask, len(groups)*16*10) + // totalBalances counts the total number of balances over all the transaction groups totalBalances := 0 initAccount := func(addr basics.Address, wg *groupTask) { if addr.IsZero() { return } - if _, have := accountsChannels[addr]; !have { + if task, have := accountsChannels[addr]; !have { task := &addrTask{ - addr: addr, + address: addr, groups: make([]*groupTask, 1, 4), groupIndices: make([]int, 1, 4), } @@ -1262,20 +1278,22 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g accountsChannels[addr] = task addressesCh <- task } else { - task := accountsChannels[addr] task.groups = append(task.groups, wg) task.groupIndices = append(task.groupIndices, wg.balancesCount) } wg.balancesCount++ totalBalances++ } + // add the fee sink address to the accountsChannels/addressesCh so that it will be loaded first. if len(groups) > 0 { task := &addrTask{ - addr: feeSinkAddr, + address: feeSinkAddr, } addressesCh <- task accountsChannels[feeSinkAddr] = task } + + // iterate over the transaction groups and add all their account addresses to the list groupsReady := make([]*groupTask, len(groups)) for i, group := range groups { groupWg := &groupTask{} @@ -1300,6 +1318,9 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g } close(addressesCh) + // updata all the groups task : + // allocate the correct number of balances, as well as + // enough space on the "done" channel. 
allBalances := make([]basics.BalanceRecord, totalBalances) usedBalances := 0 for _, gr := range groupsReady { @@ -1308,32 +1329,40 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g usedBalances += gr.balancesCount } - for i := 0; i < 4; i++ { + // create few go-routines to load asyncroniously the account data. + for i := 0; i < asyncAccountLoadingThreadCount; i++ { go func() { for { select { case task, ok := <-addressesCh: // load the address if !ok { + // the channel got closed, which mean we're done. return } - acctData, _, err := l.LookupWithoutRewards(rnd, task.addr) + // lookup the account data directly from the ledger. + acctData, _, err := l.LookupWithoutRewards(rnd, task.address) br := basics.BalanceRecord{ - Addr: task.addr, + Addr: task.address, AccountData: acctData, } + // if there is no error.. if err == nil { + // update all the group tasks with the new acquired balance. for i, wg := range task.groups { wg.balances[task.groupIndices[i]] = br + // write a nil to indicate that we're loaded one entry. wg.done <- nil } } else { - for i, wg := range task.groups { - wg.balances[task.groupIndices[i]] = br + // there was an error loading that entry. + for _, wg := range task.groups { + // notify the channel of the error. wg.done <- err } } case <-ctx.Done(): + // if the context was canceled, abort right away. return } @@ -1341,11 +1370,14 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g }() } + // iterate on the transaction groups tasks. This array retains the original order. for i, wg := range groupsReady { + // Wait to receive wg.balancesCount nil error messages, one for each address referenced in this txn group. for j := 0; j < wg.balancesCount; j++ { select { case err := <-wg.done: if err != nil { + // if there is an error, report the error to the output channel. 
outChan <- loadedTransactionGroup{ group: groups[i], err: err, @@ -1355,8 +1387,9 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g case <-ctx.Done(): return } - } + // if we had no error, write the result to the output channel. + // this write will not block since we preallocated enough space on the channel. outChan <- loadedTransactionGroup{ group: groups[i], balances: wg.balances, @@ -1371,7 +1404,7 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g // not a valid block (e.g., it has duplicate transactions, overspends some // account, etc). func (l *Ledger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ValidatedBlock, error) { - delta, err := eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool, true) + delta, err := eval(ctx, l, blk, true, l.verifiedTxnCache, executionPool) if err != nil { return nil, err } diff --git a/ledger/eval_test.go b/ledger/eval_test.go index 0bec4e8439..cc27fa0836 100644 --- a/ledger/eval_test.go +++ b/ledger/eval_test.go @@ -561,7 +561,7 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool) { if withCrypto { _, err = l2.Validate(context.Background(), validatedBlock.blk, backlogPool) } else { - _, err = eval(context.Background(), l2, validatedBlock.blk, false, nil, nil, true) + _, err = eval(context.Background(), l2, validatedBlock.blk, false, nil, nil) } require.NoError(b, err) } diff --git a/ledger/ledger.go b/ledger/ledger.go index 1faf3a13c1..f264052ad4 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -545,7 +545,7 @@ func (l *Ledger) BlockCert(rnd basics.Round) (blk bookkeeping.Block, cert agreem func (l *Ledger) AddBlock(blk bookkeeping.Block, cert agreement.Certificate) error { // passing nil as the executionPool is ok since we've asking the evaluator to skip verification. 
- updates, err := eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil, true) + updates, err := eval(context.Background(), l, blk, false, l.verifiedTxnCache, nil) if err != nil { return err } @@ -647,7 +647,7 @@ func (l *Ledger) trackerLog() logging.Logger { // evaluator to shortcut the "main" ledger ( i.e. this struct ) and avoid taking the trackers lock a second time. func (l *Ledger) trackerEvalVerified(blk bookkeeping.Block, accUpdatesLedger ledgerForEvaluator) (ledgercore.StateDelta, error) { // passing nil as the executionPool is ok since we've asking the evaluator to skip verification. - return eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil, false) + return eval(context.Background(), accUpdatesLedger, blk, false, l.verifiedTxnCache, nil) } // IsWritingCatchpointFile returns true when a catchpoint file is being generated. The function is used by the catchup service diff --git a/ledger/ledger_perf_test.go b/ledger/ledger_perf_test.go index cea9aea430..b1b2244b40 100644 --- a/ledger/ledger_perf_test.go +++ b/ledger/ledger_perf_test.go @@ -319,7 +319,7 @@ func benchmarkFullBlocks(params testParams, b *testing.B) { vc := verify.GetMockedCache(true) b.ResetTimer() for _, blk := range blocks { - _, err = eval(context.Background(), l1, blk, true, vc, nil, true) + _, err = eval(context.Background(), l1, blk, true, vc, nil) require.NoError(b, err) err = l1.AddBlock(blk, cert) require.NoError(b, err) From 9fb630e269512c87ff3980e1788a935b6761a2df Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 27 Feb 2021 13:59:12 -0500 Subject: [PATCH 013/215] update per reviewer's requests. 
--- ledger/eval.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index ec40a4bbe8..be236f6ebe 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1165,11 +1165,12 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali base := eval.state.lookupParent.(*roundCowBase) +transactionGroupLoop: for { select { case txgroup, ok := <-paysetgroupsCh: if !ok { - break + break transactionGroupLoop } else if txgroup.err != nil { return ledgercore.StateDelta{}, err } @@ -1181,7 +1182,6 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali if err != nil { return ledgercore.StateDelta{}, err } - continue case <-ctx.Done(): return ledgercore.StateDelta{}, ctx.Err() case err, open := <-txvalidator.done: @@ -1189,9 +1189,7 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali if open && err != nil { return ledgercore.StateDelta{}, err } - continue } - break } // Finally, procees any pending end-of-block state changes From 331c3f74a332ef53300626ad27836b97803ba58a Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 1 Mar 2021 13:27:25 -0500 Subject: [PATCH 014/215] Address some of the reviewer's comments. 
--- ledger/eval.go | 39 +++++++++++++++++++++------------------ 1 file changed, 21 insertions(+), 18 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index be236f6ebe..21caf8f486 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1144,7 +1144,7 @@ func eval(ctx context.Context, l ledgerForEvaluator, blk bookkeeping.Block, vali return ledgercore.StateDelta{}, err } - paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups, blk.BlockHeader.FeeSink) + paysetgroupsCh := loadAccounts(ctx, l, blk.Round()-1, paysetgroups, blk.BlockHeader.FeeSink, blk.ConsensusProtocol()) var txvalidator evalTxValidator if validate { @@ -1234,7 +1234,7 @@ type loadedTransactionGroup struct { // loadAccounts loads the account data for the provided transaction group list. It also loads the feeSink account and add it to the first returned transaction group. // The order of the transaction groups returned by the channel is identical to the one in the input array. -func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address) chan loadedTransactionGroup { +func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, groups [][]transactions.SignedTxnWithAD, feeSinkAddr basics.Address, consensusParams config.ConsensusParams) chan loadedTransactionGroup { outChan := make(chan loadedTransactionGroup, len(groups)) go func() { // groupTask helps to organize the account loading for each transaction group. 
@@ -1257,15 +1257,17 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g } defer close(outChan) - accountsChannels := make(map[basics.Address]*addrTask) - addressesCh := make(chan *addrTask, len(groups)*16*10) + accountTasks := make(map[basics.Address]*addrTask) + maxAddressesPerTransaction := 7 + consensusParams.MaxAppTxnAccounts + addressesCh := make(chan *addrTask, len(groups)*consensusParams.MaxTxGroupSize*maxAddressesPerTransaction) // totalBalances counts the total number of balances over all the transaction groups totalBalances := 0 + initAccount := func(addr basics.Address, wg *groupTask) { if addr.IsZero() { return } - if task, have := accountsChannels[addr]; !have { + if task, have := accountTasks[addr]; !have { task := &addrTask{ address: addr, groups: make([]*groupTask, 1, 4), @@ -1273,7 +1275,8 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g } task.groups[0] = wg task.groupIndices[0] = wg.balancesCount - accountsChannels[addr] = task + + accountTasks[addr] = task addressesCh <- task } else { task.groups = append(task.groups, wg) @@ -1282,30 +1285,30 @@ func loadAccounts(ctx context.Context, l ledgerForEvaluator, rnd basics.Round, g wg.balancesCount++ totalBalances++ } - // add the fee sink address to the accountsChannels/addressesCh so that it will be loaded first. + // add the fee sink address to the accountTasks/addressesCh so that it will be loaded first. 
if len(groups) > 0 { task := &addrTask{ address: feeSinkAddr, } addressesCh <- task - accountsChannels[feeSinkAddr] = task + accountTasks[feeSinkAddr] = task } // iterate over the transaction groups and add all their account addresses to the list groupsReady := make([]*groupTask, len(groups)) for i, group := range groups { - groupWg := &groupTask{} - groupsReady[i] = groupWg + task := &groupTask{} + groupsReady[i] = task for _, stxn := range group { - initAccount(stxn.Txn.Sender, groupWg) - initAccount(stxn.Txn.Receiver, groupWg) - initAccount(stxn.Txn.CloseRemainderTo, groupWg) - initAccount(stxn.Txn.AssetSender, groupWg) - initAccount(stxn.Txn.AssetReceiver, groupWg) - initAccount(stxn.Txn.AssetCloseTo, groupWg) - initAccount(stxn.Txn.FreezeAccount, groupWg) + initAccount(stxn.Txn.Sender, task) + initAccount(stxn.Txn.Receiver, task) + initAccount(stxn.Txn.CloseRemainderTo, task) + initAccount(stxn.Txn.AssetSender, task) + initAccount(stxn.Txn.AssetReceiver, task) + initAccount(stxn.Txn.AssetCloseTo, task) + initAccount(stxn.Txn.FreezeAccount, task) for _, xa := range stxn.Txn.Accounts { - initAccount(xa, groupWg) + initAccount(xa, task) } } } From 55305451d298e78b9f6217ccf19d9e5a80206389 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Mon, 1 Mar 2021 11:24:58 -0800 Subject: [PATCH 015/215] use constants --- ledger/eval.go | 2 +- ledger/ledgercore/statedelta.go | 19 ++++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index 5d4d501d38..9feb8493c5 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -1009,7 +1009,7 @@ func (eval *BlockEvaluator) endOfBlock() error { // FinalValidation does the validation that must happen after the block is built and all state updates are computed func (eval *BlockEvaluator) finalValidation() error { - eval.state.mods.OptimizeAllocatedMemory() + eval.state.mods.OptimizeAllocatedMemory(eval.proto) if eval.validate { // check commitments txnRoot, err := 
eval.block.PaysetCommit() diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 9c2e69e7f2..f109809edd 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -17,11 +17,20 @@ package ledgercore import ( + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" ) +const ( + accountArrayEntrySize = uint64(232) // Measured by BenchmarkBalanceRecord + accountMapCacheEntrySize = uint64(64) // Measured by BenchmarkAcctCache + txleasesEntrySize = uint64(112) // Measured by BenchmarkTxLeases + creatablesEntrySize = uint64(100) // Measured by BenchmarkCreatables + stateDeltaTargetOptimizationThreshold = uint64(50000000) +) + // ModifiedCreatable defines the changes to a single single creatable state type ModifiedCreatable struct { // Type of the creatable: app or asset @@ -156,16 +165,16 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { // OptimizeAllocatedMemory by reallocating maps to needed capacity // For each data structure, reallocate if it would save us at least 50MB aggregate -func (sd *StateDelta) OptimizeAllocatedMemory() { +func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { // accts takes up 232 bytes per entry, and is saved for 320 rounds - if (2*sd.initialTransactionsCount-len(sd.Accts.accts))*232*320 < 50000000 { + if uint64(2*sd.initialTransactionsCount-len(sd.Accts.accts))*accountArrayEntrySize*proto.MaxBalLookback < stateDeltaTargetOptimizationThreshold { accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) copy(accts, sd.Accts.accts) sd.Accts.accts = accts } // acctsCache takes up 64 bytes per entry, and is saved for 320 rounds - if (2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*64*320 < 50000000 { + if 
uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*proto.MaxBalLookback < stateDeltaTargetOptimizationThreshold { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -174,7 +183,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { } // TxLeases takes up 112 bytes per entry, and is saved for 1000 rounds - if (sd.initialTransactionsCount-len(sd.Txleases))*112*1000 < 50000000 { + if uint64(sd.initialTransactionsCount-len(sd.Txleases))*txleasesEntrySize*proto.MaxTxnLife < stateDeltaTargetOptimizationThreshold { txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -183,7 +192,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory() { } // Creatables takes up 100 bytes per entry, and is saved for 320 rounds - if (sd.initialTransactionsCount-len(sd.Creatables))*100*320 < 50000000 { + if uint64(sd.initialTransactionsCount-len(sd.Creatables))*creatablesEntrySize*proto.MaxBalLookback < stateDeltaTargetOptimizationThreshold { creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v From f8da08827b06ac3e8c8a47c7745ac089f3a04f65 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Tue, 2 Mar 2021 09:45:59 -0800 Subject: [PATCH 016/215] bug fix --- ledger/ledgercore/statedelta.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index f109809edd..650f3a547a 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -167,14 +167,14 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { // For each data structure, reallocate if it would save us at least 50MB aggregate func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { // accts takes up 232 bytes per entry, and is saved for 320 
rounds - if uint64(2*sd.initialTransactionsCount-len(sd.Accts.accts))*accountArrayEntrySize*proto.MaxBalLookback < stateDeltaTargetOptimizationThreshold { + if uint64(2*sd.initialTransactionsCount-len(sd.Accts.accts))*accountArrayEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) copy(accts, sd.Accts.accts) sd.Accts.accts = accts } // acctsCache takes up 64 bytes per entry, and is saved for 320 rounds - if uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*proto.MaxBalLookback < stateDeltaTargetOptimizationThreshold { + if uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -183,7 +183,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { } // TxLeases takes up 112 bytes per entry, and is saved for 1000 rounds - if uint64(sd.initialTransactionsCount-len(sd.Txleases))*txleasesEntrySize*proto.MaxTxnLife < stateDeltaTargetOptimizationThreshold { + if uint64(sd.initialTransactionsCount-len(sd.Txleases))*txleasesEntrySize*proto.MaxTxnLife > stateDeltaTargetOptimizationThreshold { txLeases := make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -192,7 +192,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { } // Creatables takes up 100 bytes per entry, and is saved for 320 rounds - if uint64(sd.initialTransactionsCount-len(sd.Creatables))*creatablesEntrySize*proto.MaxBalLookback < stateDeltaTargetOptimizationThreshold { + if uint64(sd.initialTransactionsCount-len(sd.Creatables))*creatablesEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { creatableDeltas := 
make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v From b325f01ad11db9baa5abd1345baadb6151c44c14 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 3 Mar 2021 23:06:20 -0500 Subject: [PATCH 017/215] initial version --- ledger/acctupdates.go | 117 +++++++++++++++++++++++++++++++++++++++--- 1 file changed, 109 insertions(+), 8 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 099d43724b..99d46a237e 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -79,6 +79,20 @@ const baseAccountsPendingAccountsBufferSize = 100000 // is being flushed into the main base account cache. const baseAccountsPendingAccountsWarnThreshold = 85000 +// initializeCachesReadaheadBlocksStream defines how many block we're going to attempt to queue for the +// initializeCaches method before it can process and store the account changes to disk. +const initializeCachesReadaheadBlocksStream = 4 + +// initializeCachesRoundFlushInterval defines the number of rounds between every to consecutive +// attempts to flush the memory account data to disk. Setting this value too high would increase +// memory utilization. Setting this too low, would increase disk i/o. +const initializeCachesRoundFlushInterval = 1000 + +// initializingAccountCachesMessageTimeout controls the amount of time passes before we +// log "initializingAccount initializing.." message to the log file. This is primarily for +// nodes with slower disk access, where a feedback that the node is functioning correctly is needed. 
+const initializingAccountCachesMessageTimeout = 3 * time.Second + var trieMemoryConfig = merkletrie.MemoryConfig{ NodesCountPerPage: merkleCommitterNodesPerPage, CachedNodesCount: trieCachedNodesCount, @@ -881,7 +895,9 @@ func (au *accountUpdates) totalsImpl(rnd basics.Round) (totals ledgercore.Accoun return } -// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks +// initializeCaches fills up the accountUpdates cache with the most recent ~320 blocks ( on normal execution ). +// the method also support balances recovery in cases where the difference between the lastBalancesRound and the lastestBlockRound +// is far greater than 320; in these cases, it would flush to disk periodically in order to avoid high memory consumption. func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, writingCatchpointRound basics.Round) (catchpointBlockDigest crypto.Digest, err error) { var blk bookkeeping.Block var delta ledgercore.StateDelta @@ -896,26 +912,111 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, } } - for lastBalancesRound < lastestBlockRound { - next := lastBalancesRound + 1 + skipAccountCacheMessage := make(chan struct{}, 1) + writeAccountCacheMessageCompleted := make(chan struct{}) + defer func() { + close(skipAccountCacheMessage) + select { + case <-writeAccountCacheMessageCompleted: + au.log.Infof("initializeCaches completed initializing account data caches") + default: + } + }() + go func() { + select { + case <-time.After(initializingAccountCachesMessageTimeout): + au.log.Infof("initializeCaches is initializing account data caches") + close(writeAccountCacheMessageCompleted) + case <-skipAccountCacheMessage: + } + }() - blk, err = au.ledger.Block(next) - if err != nil { - return + blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream) + blockEvalFailed := make(chan struct{}, 1) + go func() { + defer close(blocksStream) + for roundNumber := 
lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ { + blk, err = au.ledger.Block(roundNumber) + if err != nil { + return + } + select { + case blocksStream <- blk: + case <-blockEvalFailed: + return + } } + }() + + lastFlushedRound := lastBalancesRound + const accountsCacheLoadingMessageInterval = 5 * time.Second + lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2) + for blk := range blocksStream { delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval) if err != nil { + close(blockEvalFailed) return } au.newBlockImpl(blk, delta) - lastBalancesRound = next - if next == basics.Round(writingCatchpointRound) { + if blk.Round() == basics.Round(writingCatchpointRound) { catchpointBlockDigest = blk.Digest() } + // flush to disk if any of the following applies: + // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk + // 2. if we completed the loading and we loaded up more than 320 rounds. + if blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval || + (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound) { + // adjust the last flush time, so that we would not hold off the flushing due to "working too fast" + au.lastFlushTime = time.Now().Add(-balancesFlushInterval) + + // The unlocking/relocking here isn't very elegant, but it does get the work done : + // this method is called on either startup or when fast catchup is complete. In the former usecase, the + // locking here is not really needed since the system is only starting up, and there are no other + // consumers for the accounts update. On the latter usecase, the function would always have exactly 320 rounds, + // and therefore this wouldn't be an issue. + // However, to make sure we're not missing any other future codepath, unlocking here and re-locking later on is a pretty + // safe bet. 
+ au.accountsMu.Unlock() + + // flush the account data + au.committedUpTo(blk.Round()) + + // wait for the writing to complete. + au.waitAccountsWriting() + + // The au.dbRound after writing should be ~320 behind the block round. + roundsBehind := blk.Round() - au.dbRound + + au.accountsMu.Lock() + + if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) { + // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes. + close(blockEvalFailed) + au.log.Errorf("initializeCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", au.dbRound, blk.Round()) + err = fmt.Errorf("initializeCaches failed to initialize the account data caches") + return + } + + // and once we flushed it to disk, update the lastFlushedRound + lastFlushedRound = blk.Round() + } + + // if enough time have passed since the last time we wrote a message to the log file then give the user an update about the progess. + if time.Now().Sub(lastProgressMessage) > accountsCacheLoadingMessageInterval { + // drop the initial message if we're got to this point so that we'll have this message instead. + select { + case skipAccountCacheMessage <- struct{}{}: + default: + } + au.log.Infof("initializeCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound) + lastProgressMessage = time.Now() + } + + // prepare for the next iteration. 
accLedgerEval.prevHeader = *delta.Hdr } return From 56b2cfa7ea71de67cbacde19caa1a42332eb9a35 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 4 Mar 2021 09:36:15 -0500 Subject: [PATCH 018/215] fix datarace --- ledger/acctupdates.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 99d46a237e..22647fb2d1 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -933,11 +933,12 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, blocksStream := make(chan bookkeeping.Block, initializeCachesReadaheadBlocksStream) blockEvalFailed := make(chan struct{}, 1) + var blockRetrievalError error go func() { defer close(blocksStream) for roundNumber := lastBalancesRound + 1; roundNumber <= lastestBlockRound; roundNumber++ { - blk, err = au.ledger.Block(roundNumber) - if err != nil { + blk, blockRetrievalError = au.ledger.Block(roundNumber) + if blockRetrievalError != nil { return } select { @@ -1019,6 +1020,10 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, // prepare for the next iteration. accLedgerEval.prevHeader = *delta.Hdr } + + if blockRetrievalError != nil { + err = blockRetrievalError + } return } From 8ced9b0fbdb0b466cd58cbd0049d30f228fea16d Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Thu, 4 Mar 2021 14:57:14 -0300 Subject: [PATCH 019/215] Uncolorize mnemonic output on Windows --- cmd/goal/messages.go | 1 + cmd/goal/wallet.go | 7 ++++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go index a6796c8111..e0864c24d4 100644 --- a/cmd/goal/messages.go +++ b/cmd/goal/messages.go @@ -165,6 +165,7 @@ const ( infoBackupExplanation = "Your new wallet has a backup phrase that can be used for recovery.\nKeeping this backup phrase safe is extremely important.\nWould you like to see it now? 
(Y/n): " infoPrintedBackupPhrase = "Your backup phrase is printed below.\nKeep this information safe -- never share it with anyone!" infoBackupPhrase = "\n\x1B[32m%s\033[0m" + infoBackupPhraseWindows = "\n%s" infoNoWallets = "No wallets found. You can create a wallet with `goal wallet new`" errorCouldntCreateWallet = "Couldn't create wallet: %s" errorCouldntInitializeWallet = "Couldn't initialize wallet: %s" diff --git a/cmd/goal/wallet.go b/cmd/goal/wallet.go index 3d62a0d5b4..7b315364e0 100644 --- a/cmd/goal/wallet.go +++ b/cmd/goal/wallet.go @@ -21,6 +21,7 @@ import ( "bytes" "fmt" "os" + "runtime" "strings" "github.com/spf13/cobra" @@ -166,7 +167,11 @@ var newWalletCmd = &cobra.Command{ // Display the mnemonic to the user reportInfoln(infoPrintedBackupPhrase) - reportInfof(infoBackupPhrase, mnemonic) + if runtime.GOOS == "windows" { + reportInfof(infoBackupPhraseWindows, mnemonic) + } else { + reportInfof(infoBackupPhrase, mnemonic) + } } } From 8a6dcb88ede62e621101c12c96cc93dc1436c2ba Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Thu, 4 Mar 2021 14:57:41 -0300 Subject: [PATCH 020/215] Changed KMD directory safety check on Windows --- nodecontrol/kmdControl.go | 2 +- nodecontrol/kmdControl_non_windows.go | 29 +++++++++++++++++++++++++++ nodecontrol/kmdControl_windows.go | 25 +++++++++++++++++++++++ 3 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 nodecontrol/kmdControl_non_windows.go create mode 100644 nodecontrol/kmdControl_windows.go diff --git a/nodecontrol/kmdControl.go b/nodecontrol/kmdControl.go index b23e34d176..b30523a7e3 100644 --- a/nodecontrol/kmdControl.go +++ b/nodecontrol/kmdControl.go @@ -200,7 +200,7 @@ func (kc *KMDController) StartKMD(args KMDStartArgs) (alreadyRunning bool, err e logging.Base().Errorf("%s: kmd data dir exists but is not a directory", kc.kmdDataDir) return false, errors.New("bad kmd data dir") } - if (dataDirStat.Mode() & 0077) != 0 { + if !isDirectorySafe(dataDirStat) { logging.Base().Errorf("%s: 
kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777, DefaultKMDDataDirPerms) return false, errors.New("kmd data dir not secure") } diff --git a/nodecontrol/kmdControl_non_windows.go b/nodecontrol/kmdControl_non_windows.go new file mode 100644 index 0000000000..8d6fcdaf90 --- /dev/null +++ b/nodecontrol/kmdControl_non_windows.go @@ -0,0 +1,29 @@ +// +build !windows +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package nodecontrol + +import ( + "os" +) + +func isDirectorySafe(dirStats os.FileInfo) bool { + if (dirStats.Mode() & 0077) != 0 { + return false + } + return true +} diff --git a/nodecontrol/kmdControl_windows.go b/nodecontrol/kmdControl_windows.go new file mode 100644 index 0000000000..7232093332 --- /dev/null +++ b/nodecontrol/kmdControl_windows.go @@ -0,0 +1,25 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package nodecontrol + +import ( + "os" +) + +func isDirectorySafe(_ os.FileInfo) bool { + return true +} \ No newline at end of file From 48d1ad664acb0e42e1b1a47cb429aa0a3255b8f7 Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Thu, 4 Mar 2021 14:58:00 -0300 Subject: [PATCH 021/215] Improved process kill/existence check on Windows --- nodecontrol/NodeController.go | 3 +- util/process_common.go | 6 +++ util/process_windows.go | 90 +++++++++++++++++++++++++++++++---- 3 files changed, 89 insertions(+), 10 deletions(-) diff --git a/nodecontrol/NodeController.go b/nodecontrol/NodeController.go index 202772f70d..0673dce9ad 100644 --- a/nodecontrol/NodeController.go +++ b/nodecontrol/NodeController.go @@ -17,7 +17,6 @@ package nodecontrol import ( - "os" "path/filepath" "syscall" "time" @@ -113,7 +112,7 @@ func (nc NodeController) stopProcesses() (kmdAlreadyStopped bool, err error) { } func killPID(pid int) error { - process, err := os.FindProcess(pid) + process, err := util.FindProcess(pid) if process == nil || err != nil { return err } diff --git a/util/process_common.go b/util/process_common.go index ffefbdb349..bb706b308e 100644 --- a/util/process_common.go +++ b/util/process_common.go @@ -19,9 +19,15 @@ package util import ( + "os" "syscall" ) +// FindProcess looks for a running process by its pid +func FindProcess(pid int) (*os.Process, error) { + return os.FindProcess(pid) +} + // KillProcess kills a running OS process func KillProcess(pid int, sig syscall.Signal) error { return syscall.Kill(pid, sig) diff --git a/util/process_windows.go b/util/process_windows.go index 
c86abdc289..6677345720 100644 --- a/util/process_windows.go +++ b/util/process_windows.go @@ -19,23 +19,88 @@ package util import ( + "errors" "os" + "syscall" "unsafe" "golang.org/x/sys/windows" ) -// KillProcess kills a running OS process -func KillProcess(pid int, _ os.Signal) error { +const ( + ERROR_INVALID_PARAMETER = syscall.Errno(87) + + processTerminateWaitInMs = 1000 +) + +// FindProcess looks for a running process by its pid +func FindProcess(pid int) (*os.Process, error) { + var h syscall.Handle + + process, err := os.FindProcess(pid) + if err != nil { + // NOTE: Unlike Unix, Windows tries to open the target process in order to kill it. + // ERROR_INVALID_PARAMETER is returned if the process does not exists. + // To mimic other OS behavior, if the process does not exist, don't return an error + var syscallError syscall.Errno + + if errors.As(err, &syscallError) { + if syscallError == ERROR_INVALID_PARAMETER { + return nil, nil + } + } + return nil, err + } - p, err := os.FindProcess(pid) + // If we have a process, check if it is terminated + h, err = syscall.OpenProcess(syscall.SYNCHRONIZE, false, uint32(pid)) if err == nil { + defer func() { + _ = syscall.CloseHandle(h) + }() - for _, v := range getChildrenProcesses(pid) { - _ = v.Kill() + ret, e2 := syscall.WaitForSingleObject(h, 0) + if e2 == nil && ret == syscall.WAIT_OBJECT_0 { + return nil, nil } + } - err = p.Kill() + return process, nil +} + +// KillProcess kills a running OS process +func KillProcess(pid int, signal os.Signal) error { + p, err := FindProcess(pid) + if err == nil { + if p != nil { + if signal != syscall.Signal(0) { + for _, v := range getChildrenProcesses(pid) { + err2 := v.Kill() + if err2 == nil { + waitUntilProcessEnds(v.Pid) + } + } + + err = p.Kill() + waitUntilProcessEnds(p.Pid) + } + } else { + // Signal(0) only checks if we have access to kill a process and if it is really dead + if p != nil { + var h syscall.Handle + + h, err = 
syscall.OpenProcess(syscall.SYNCHRONIZE|syscall.PROCESS_TERMINATE, false, uint32(pid)) + if err == nil { + ret, e2 := syscall.WaitForSingleObject(h, 0) + if e2 == nil && ret == syscall.WAIT_OBJECT_0 { + err = errors.New("os: process already finished") + } + _ = syscall.CloseHandle(h) + } + } else { + err = errors.New("os: process already finished") + } + } } return err } @@ -52,8 +117,8 @@ func getChildrenProcesses(parentPid int) []*os.Process { if err := windows.Process32First(snap, &pe32); err == nil { for { if pe32.ParentProcessID == uint32(parentPid) { - p, err := os.FindProcess(int(pe32.ProcessID)) - if err == nil { + p, err := FindProcess(int(pe32.ProcessID)) + if err == nil && p != nil { out = append(out, p) } } @@ -65,3 +130,12 @@ func getChildrenProcesses(parentPid int) []*os.Process { } return out } + +func waitUntilProcessEnds(pid int) { + h, err := syscall.OpenProcess(syscall.SYNCHRONIZE, false, uint32(pid)) + if err == nil { + _, _ = syscall.WaitForSingleObject(h, processTerminateWaitInMs) + + _ = syscall.CloseHandle(h) + } +} \ No newline at end of file From 45d7dae212e8277a459c48e26820b275f8c85d9f Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 4 Mar 2021 14:26:38 -0500 Subject: [PATCH 022/215] Add unit test --- ledger/acctupdates.go | 10 +++-- ledger/acctupdates_test.go | 92 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 3 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 22647fb2d1..58caf5184e 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -912,7 +912,7 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, } } - skipAccountCacheMessage := make(chan struct{}, 1) + skipAccountCacheMessage := make(chan struct{}) writeAccountCacheMessageCompleted := make(chan struct{}) defer func() { close(skipAccountCacheMessage) @@ -994,8 +994,10 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, au.accountsMu.Lock() + // 
are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit ) if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) { - // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes. + // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any futher changes + // would just accumulate in memory. close(blockEvalFailed) au.log.Errorf("initializeCaches was unable to fill up the account caches accounts round = %d, block round = %d. See above error for more details.", au.dbRound, blk.Round()) err = fmt.Errorf("initializeCaches failed to initialize the account data caches") @@ -1008,9 +1010,11 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, // if enough time have passed since the last time we wrote a message to the log file then give the user an update about the progess. if time.Now().Sub(lastProgressMessage) > accountsCacheLoadingMessageInterval { - // drop the initial message if we're got to this point so that we'll have this message instead. + // drop the initial message if we're got to this point since a message saying "still initializing" that comes after "is initializing" doesn't seems to be right. select { case skipAccountCacheMessage <- struct{}{}: + // if we got to this point, we should be able to close the writeAccountCacheMessageCompleted channel to have the "completed initializing" message written. 
+ close(writeAccountCacheMessageCompleted) default: } au.log.Infof("initializeCaches is still initializing account data caches, %d rounds loaded out of %d rounds", blk.Round()-lastBalancesRound, lastestBlockRound-lastBalancesRound) diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 5d4028f8c9..9475771f8e 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -1545,3 +1545,95 @@ func TestReproducibleCatchpointLabels(t *testing.T) { } } } + +// TestCachesInitialization test the functionality of the initializeCaches cache. +func TestCachesInitialization(t *testing.T) { + protocolVersion := protocol.ConsensusCurrentVersion + proto := config.Consensus[protocolVersion] + + initialRounds := uint64(1) + + ml := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion) + ml.log.SetLevel(logging.Warn) + defer ml.Close() + + accountsCount := 5 + accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)} + rewardsLevels := []uint64{0} + + pooldata := basics.AccountData{} + pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 * 1000 * 1000 + pooldata.Status = basics.NotParticipating + accts[0][testPoolAddr] = pooldata + + sinkdata := basics.AccountData{} + sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 + sinkdata.Status = basics.NotParticipating + accts[0][testSinkAddr] = sinkdata + + au := &accountUpdates{} + au.initialize(config.GetDefaultLocal(), ".", proto, accts[0]) + err := au.loadFromDisk(ml) + require.NoError(t, err) + defer au.close() + + // cover initialRounds genesis blocks + rewardLevel := uint64(0) + for i := 1; i < int(initialRounds); i++ { + accts = append(accts, accts[0]) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + recoveredLedgerRound := basics.Round(initializeCachesRoundFlushInterval * 3) + + for i := basics.Round(initialRounds); i <= recoveredLedgerRound; i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + 
accountChanges := 2 + + updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) + prevTotals, err := au.Totals(basics.Round(i - 1)) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = protocolVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta.Accts.MergeAccounts(updates) + ml.addMockBlock(blockEntry{block: blk}, delta) + au.newBlock(blk, delta) + au.committedUpTo(basics.Round(i)) + au.waitAccountsWriting() + accts = append(accts, totals) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + au.close() + + // create another mocked ledger, but this time with a fresh new tracker database. + ml2 := makeMockLedgerForTracker(t, true, int(initialRounds), protocolVersion) + ml2.log.SetLevel(logging.Warn) + defer ml2.Close() + + // and "fix" it to contain the blocks and deltas from before. + ml2.blocks = ml.blocks + ml2.deltas = ml.deltas + + au = &accountUpdates{} + au.initialize(config.GetDefaultLocal(), ".", proto, accts[0]) + err = au.loadFromDisk(ml2) + require.NoError(t, err) + + // make sure the deltas array end up containing only the most recent 320 rounds. 
+ require.Equal(t, int(proto.MaxBalLookback), len(au.deltas)) + require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.dbRound) +} From 92b87c7eaf51392ed4bc5a4d5d0cf9ddac32131f Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Thu, 4 Mar 2021 19:21:50 -0300 Subject: [PATCH 023/215] Renamed file --- .../{kmdControl_non_windows.go => kmdControl_common.go} | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) rename nodecontrol/{kmdControl_non_windows.go => kmdControl_common.go} (99%) diff --git a/nodecontrol/kmdControl_non_windows.go b/nodecontrol/kmdControl_common.go similarity index 99% rename from nodecontrol/kmdControl_non_windows.go rename to nodecontrol/kmdControl_common.go index 8d6fcdaf90..8f0827bcab 100644 --- a/nodecontrol/kmdControl_non_windows.go +++ b/nodecontrol/kmdControl_common.go @@ -1,4 +1,3 @@ -// +build !windows // Copyright (C) 2019-2021 Algorand, Inc. // This file is part of go-algorand // @@ -15,6 +14,8 @@ // You should have received a copy of the GNU Affero General Public License // along with go-algorand. If not, see . +// +build !windows + package nodecontrol import ( From 022b0cf73c82a97d9a430ab2836c4dc8343c0bd4 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 4 Mar 2021 18:06:17 -0500 Subject: [PATCH 024/215] Avoid database timeout warning message + improved syncronous writings --- ledger/acctupdates.go | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 58caf5184e..54a1243ce7 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -93,6 +93,12 @@ const initializeCachesRoundFlushInterval = 1000 // nodes with slower disk access, where a feedback that the node is functioning correctly is needed. const initializingAccountCachesMessageTimeout = 3 * time.Second +// accountsUpdatePerRoundHighWatermark is the warning watermark for updating accounts data that takes +// longer then expected. 
We set it up here for one second per round, so that if we're bulk updating +// four rounds, we would allow up to 4 seconds. This becomes important when supporting balances recovery +// where we end up batching up to 1000 rounds in a single update. +const accountsUpdatePerRoundHighWatermark = 1 * time.Second + var trieMemoryConfig = merkletrie.MemoryConfig{ NodesCountPerPage: merkleCommitterNodesPerPage, CachedNodesCount: trieCachedNodesCount, @@ -236,6 +242,12 @@ type accountUpdates struct { // baseAccounts stores the most recently used accounts, at exactly dbRound baseAccounts lruAccounts + + // the synchronous mode that would be used for the account database. + synchronousMode db.SynchronousMode + + // the synchronous mode that would be used while the accounts database is being rebuilt. + accountsRebuildSynchronousMode db.SynchronousMode } type deferredCommit struct { @@ -309,6 +321,8 @@ func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, gene au.commitSyncerClosed = make(chan struct{}) close(au.commitSyncerClosed) au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker()) + au.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode) + au.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode) } // loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional @@ -974,6 +988,9 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, // adjust the last flush time, so that we would not hold off the flushing due to "working too fast" au.lastFlushTime = time.Now().Add(-balancesFlushInterval) + // switch to rebuild synchronous mode to improve performance + au.dbs.Wdb.SetSynchronousMode(context.Background(), au.accountsRebuildSynchronousMode, au.accountsRebuildSynchronousMode >= db.SynchronousModeFull) + // The unlocking/relocking here isn't very elegant, but it does get the work done : // this method is called on either startup or when 
fast catchup is complete. In the former usecase, the // locking here is not really needed since the system is only starting up, and there are no other @@ -994,6 +1011,9 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, au.accountsMu.Lock() + // restore default synchronous mode + au.dbs.Wdb.SetSynchronousMode(context.Background(), au.synchronousMode, au.synchronousMode >= db.SynchronousModeFull) + // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit ) if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) { // we're unable to persist changes. This is unexpected, but there is no point in keep trying batching additional changes since any futher changes @@ -1999,6 +2019,8 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb treeTargetRound = dbRound + basics.Round(offset) } + db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset))) + err = compactDeltas.accountsLoadOld(tx) if err != nil { return err From e1cd3216524c5969d3b127bb688229f59a9c38ff Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 5 Mar 2021 10:01:32 -0500 Subject: [PATCH 025/215] better implementation & faster test --- ledger/acctupdates.go | 22 +++++++++++++++++----- ledger/acctupdates_test.go | 4 ++-- 2 files changed, 19 insertions(+), 7 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 54a1243ce7..ba89f66c32 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -967,6 +967,16 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, const accountsCacheLoadingMessageInterval = 5 * time.Second lastProgressMessage := time.Now().Add(-accountsCacheLoadingMessageInterval / 2) + // rollbackSynchronousMode ensures that we switch to "fast writing mode" when we start flushing out rounds to disk, and that + // we exit 
this mode when we're done. + rollbackSynchronousMode := false + defer func() { + if rollbackSynchronousMode { + // restore default synchronous mode + au.dbs.Wdb.SetSynchronousMode(context.Background(), au.synchronousMode, au.synchronousMode >= db.SynchronousModeFull) + } + }() + for blk := range blocksStream { delta, err = au.ledger.trackerEvalVerified(blk, &accLedgerEval) if err != nil { @@ -988,8 +998,13 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, // adjust the last flush time, so that we would not hold off the flushing due to "working too fast" au.lastFlushTime = time.Now().Add(-balancesFlushInterval) - // switch to rebuild synchronous mode to improve performance - au.dbs.Wdb.SetSynchronousMode(context.Background(), au.accountsRebuildSynchronousMode, au.accountsRebuildSynchronousMode >= db.SynchronousModeFull) + if !rollbackSynchronousMode { + // switch to rebuild synchronous mode to improve performance + au.dbs.Wdb.SetSynchronousMode(context.Background(), au.accountsRebuildSynchronousMode, au.accountsRebuildSynchronousMode >= db.SynchronousModeFull) + + // flip the switch to rollback the synchronous mode once we're done. + rollbackSynchronousMode = true + } // The unlocking/relocking here isn't very elegant, but it does get the work done : // this method is called on either startup or when fast catchup is complete. In the former usecase, the @@ -1011,9 +1026,6 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, au.accountsMu.Lock() - // restore default synchronous mode - au.dbs.Wdb.SetSynchronousMode(context.Background(), au.synchronousMode, au.synchronousMode >= db.SynchronousModeFull) - // are we too far behind ? ( taking into consideration the catchpoint writing, which can stall the writing for quite a bit ) if roundsBehind > initializeCachesRoundFlushInterval+basics.Round(au.catchpointInterval) { // we're unable to persist changes. 
This is unexpected, but there is no point in keep trying batching additional changes since any futher changes diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 9475771f8e..4731cc6614 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -1575,7 +1575,6 @@ func TestCachesInitialization(t *testing.T) { au.initialize(config.GetDefaultLocal(), ".", proto, accts[0]) err := au.loadFromDisk(ml) require.NoError(t, err) - defer au.close() // cover initialRounds genesis blocks rewardLevel := uint64(0) @@ -1584,7 +1583,7 @@ func TestCachesInitialization(t *testing.T) { rewardsLevels = append(rewardsLevels, rewardLevel) } - recoveredLedgerRound := basics.Round(initializeCachesRoundFlushInterval * 3) + recoveredLedgerRound := basics.Round(initialRounds + initializeCachesRoundFlushInterval + proto.MaxBalLookback + 1) for i := basics.Round(initialRounds); i <= recoveredLedgerRound; i++ { rewardLevelDelta := crypto.RandUint64() % 5 @@ -1632,6 +1631,7 @@ func TestCachesInitialization(t *testing.T) { au.initialize(config.GetDefaultLocal(), ".", proto, accts[0]) err = au.loadFromDisk(ml2) require.NoError(t, err) + defer au.close() // make sure the deltas array end up containing only the most recent 320 rounds. 
require.Equal(t, int(proto.MaxBalLookback), len(au.deltas)) From 09284a22237b5a41dfd48a9884df11e6691f78c5 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 01:25:27 -0500 Subject: [PATCH 026/215] Fix parameter check in abort procedure --- test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index 7a14e84f64..c752d125d0 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -46,7 +46,7 @@ package require Tcl 8.0 proc ::AlgorandGoal::Abort { ERROR } { puts "Aborting with Error: $ERROR" - if { "$::GLOBAL_TEST_ALGO_DIR" != "" && "$::GLOBAL_TEST_ROOT_DIR" != "" } { + if { [info exists ::GLOBAL_TEST_ALGO_DIR] && [info exists ::GLOBAL_TEST_ROOT_DIR] } { # terminate child algod processes, if there are active child processes the test will hang on a test failure puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR" puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR" From 1e1b82b79e0ee682fba1a03674f1a8cbd287c106 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Tue, 9 Mar 2021 13:49:57 -0500 Subject: [PATCH 027/215] Net tag stats (#1938) * TagCounter for recording stats about network tags --- network/wsPeer.go | 9 ++ util/metrics/tagcounter.go | 146 ++++++++++++++++++++++++++++++++ util/metrics/tagcounter_test.go | 133 +++++++++++++++++++++++++++++ 3 files changed, 288 insertions(+) create mode 100644 util/metrics/tagcounter.go create mode 100644 util/metrics/tagcounter_test.go diff --git a/network/wsPeer.go b/network/wsPeer.go index 99a96d1351..ed51ec442c 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -48,10 +48,15 @@ const averageMessageLength = 2 * 1024 // Most of the messages are smaller tha const msgsInReadBufferPerPeer = 10 var networkSentBytesTotal = metrics.MakeCounter(metrics.NetworkSentBytesTotal) +var 
networkSentBytesByTag = metrics.NewTagCounter("algod_network_sent_bytes_{TAG}", "Number of bytes that were sent over the network per message tag") var networkReceivedBytesTotal = metrics.MakeCounter(metrics.NetworkReceivedBytesTotal) +var networkReceivedBytesByTag = metrics.NewTagCounter("algod_network_received_bytes_{TAG}", "Number of bytes that were received from the network per message tag") var networkMessageReceivedTotal = metrics.MakeCounter(metrics.NetworkMessageReceivedTotal) +var networkMessageReceivedByTag = metrics.NewTagCounter("algod_network_message_received_{TAG}", "Number of complete messages that were received from the network per message tag") var networkMessageSentTotal = metrics.MakeCounter(metrics.NetworkMessageSentTotal) +var networkMessageSentByTag = metrics.NewTagCounter("algod_network_message_sent_{TAG}", "Number of complete messages that were sent to the network per message tag") + var networkConnectionsDroppedTotal = metrics.MakeCounter(metrics.NetworkConnectionsDroppedTotal) var networkMessageQueueMicrosTotal = metrics.MakeCounter(metrics.MetricName{Name: "algod_network_message_sent_queue_micros_total", Description: "Total microseconds message spent waiting in queue to be sent"}) @@ -401,6 +406,8 @@ func (wp *wsPeer) readLoop() { atomic.StoreInt64(&wp.lastPacketTime, msg.Received) networkReceivedBytesTotal.AddUint64(uint64(len(msg.Data)+2), nil) networkMessageReceivedTotal.AddUint64(1, nil) + networkReceivedBytesByTag.Add(string(tag[:]), uint64(len(msg.Data)+2)) + networkMessageReceivedByTag.Add(string(tag[:]), 1) msg.Sender = wp // for outgoing connections, we want to notify the connection monitor that we've received @@ -569,7 +576,9 @@ func (wp *wsPeer) writeLoopSend(msg sendMessage) disconnectReason { } atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano()) networkSentBytesTotal.AddUint64(uint64(len(msg.data)), nil) + networkSentBytesByTag.Add(string(tag), uint64(len(msg.data))) networkMessageSentTotal.AddUint64(1, nil) + 
networkMessageSentByTag.Add(string(tag), 1) networkMessageQueueMicrosTotal.AddUint64(uint64(time.Now().Sub(msg.peerEnqueued).Nanoseconds()/1000), nil) return disconnectReasonNone } diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go new file mode 100644 index 0000000000..5d08480394 --- /dev/null +++ b/util/metrics/tagcounter.go @@ -0,0 +1,146 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package metrics + +import ( + "strconv" + "strings" + "sync/atomic" + + "github.com/algorand/go-deadlock" +) + +// NewTagCounter makes a set of metrics under rootName for tagged counting. +// "{TAG}" in rootName is replaced by the tag, otherwise "_{TAG}" is appended. 
+func NewTagCounter(rootName, desc string) *TagCounter { + tc := &TagCounter{Name: rootName, Description: desc} + DefaultRegistry().Register(tc) + return tc +} + +// TagCounter holds a set of counters +type TagCounter struct { + Name string + Description string + + // a read only race-free reference to tags + tagptr atomic.Value + + tags map[string]*uint64 + + storage [][]uint64 + storagePos int + + tagLock deadlock.Mutex +} + +// Add t[tag] += val, fast and multithread safe +func (tc *TagCounter) Add(tag string, val uint64) { + for { + var tags map[string]*uint64 + tagptr := tc.tagptr.Load() + if tagptr != nil { + tags = tagptr.(map[string]*uint64) + } + + count, ok := tags[tag] + if ok { + atomic.AddUint64(count, val) + return + } + tc.tagLock.Lock() + if _, ok = tc.tags[tag]; !ok { + // Still need to add a new tag. + // Make a new map so there's never any race. + newtags := make(map[string]*uint64, len(tc.tags)+1) + for k, v := range tc.tags { + newtags[k] = v + } + var st []uint64 + if len(tc.storage) > 0 { + st = tc.storage[len(tc.storage)-1] + //fmt.Printf("new tag %v, old block\n", tag) + } + if tc.storagePos > (len(st) - 1) { + //fmt.Printf("new tag %v, new block\n", tag) + st = make([]uint64, 16) + tc.storagePos = 0 + tc.storage = append(tc.storage, st) + } + newtags[tag] = &(st[tc.storagePos]) + //fmt.Printf("tag %v = %p\n", tag, newtags[tag]) + tc.storagePos++ + tc.tags = newtags + tc.tagptr.Store(newtags) + } + tc.tagLock.Unlock() + } +} + +// WriteMetric is part of the Metric interface +func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) { + // TODO: what to do with "parentLabels"? obsolete part of interface? 
+ buf.WriteString("# ") + buf.WriteString(tc.Name) + buf.WriteString(" ") + buf.WriteString(tc.Description) + buf.WriteString("\n") + isTemplate := strings.Contains(tc.Name, "{TAG}") + tags := tc.tagptr.Load().(map[string]*uint64) + for tag, tagcount := range tags { + if tagcount == nil { + continue + } + if isTemplate { + name := strings.ReplaceAll(tc.Name, "{TAG}", tag) + buf.WriteString(name) + buf.WriteRune(' ') + buf.WriteString(strconv.FormatUint(*tagcount, 10)) + buf.WriteRune('\n') + } else { + buf.WriteString(tc.Name) + buf.WriteRune('_') + buf.WriteString(tag) + buf.WriteRune(' ') + buf.WriteString(strconv.FormatUint(*tagcount, 10)) + buf.WriteRune('\n') + } + } +} + +// AddMetric is part of the Metric interface +// Copy the values in this TagCounter out into the string-string map. +func (tc *TagCounter) AddMetric(values map[string]string) { + tagp := tc.tagptr.Load() + if tagp == nil { + return + } + isTemplate := strings.Contains(tc.Name, "{TAG}") + tags := tagp.(map[string]*uint64) + for tag, tagcount := range tags { + if tagcount == nil { + continue + } + var name string + if isTemplate { + name = strings.ReplaceAll(tc.Name, "{TAG}", tag) + } else { + name = tc.Name + "_" + tag + } + values[name] = strconv.FormatUint(*tagcount, 10) + } +} diff --git a/util/metrics/tagcounter_test.go b/util/metrics/tagcounter_test.go new file mode 100644 index 0000000000..21215eb3d7 --- /dev/null +++ b/util/metrics/tagcounter_test.go @@ -0,0 +1,133 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package metrics + +import ( + "fmt" + "sync" + "testing" +) + +func TestTagCounter(t *testing.T) { + tags := make([]string, 17) + for i := range tags { + tags[i] = fmt.Sprintf("A%c", 'A'+i) + } + //t.Logf("tags %v", tags) + countsIn := make([]uint64, len(tags)) + for i := range countsIn { + countsIn[i] = uint64(10 * (i + 1)) + } + + tc := NewTagCounter("tc", "wat") + var wg sync.WaitGroup + wg.Add(len(tags)) + + runf := func(tag string, count uint64) { + for i := 0; i < int(count); i++ { + tc.Add(tag, 1) + } + wg.Done() + } + + for i, tag := range tags { + go runf(tag, countsIn[i]) + } + wg.Wait() + + endtags := tc.tagptr.Load().(map[string]*uint64) + for i, tag := range tags { + countin := countsIn[i] + endcountp := endtags[tag] + if endcountp == nil { + t.Errorf("tag[%d] %s nil counter", i, tag) + continue + } + endcount := *endcountp + if endcount != countin { + t.Errorf("tag[%d] %v wanted %d got %d", i, tag, countin, endcount) + } + } +} + +func BenchmarkTagCounter(b *testing.B) { + b.Logf("b.N = %d", b.N) + t := b + tags := make([]string, 17) + for i := range tags { + tags[i] = fmt.Sprintf("A%c", 'A'+i) + } + //t.Logf("tags %v", tags) + triangle := make([]int, len(tags)) + tsum := 0 + for i := range triangle { + triangle[i] = i + 1 + tsum += i + 1 + } + wholeN := b.N / tsum + remainder := b.N - (tsum * wholeN) + rchunk := (remainder / len(tags)) + 1 + countsIn := make([]uint64, len(tags)) + csum := uint64(0) + for i := range countsIn { + rcc := rchunk + if remainder < rcc { + rcc = remainder + remainder = 0 + } else { + remainder -= rchunk + } + countsIn[i] = uint64((triangle[i] * 
wholeN) + rcc) + csum += countsIn[i] + } + if csum != uint64(b.N) { + b.Errorf("b.N = %d, but total = %d", b.N, csum) + } + + tc := NewTagCounter("tc", "wat") + //var wg sync.WaitGroup + //wg.Add(len(tags)) + + runf := func(tag string, count uint64) { + for i := 0; i < int(count); i++ { + tc.Add(tag, 1) + } + //wg.Done() + } + + for i, tag := range tags { + // don't run in threads so that we can benchmark time + runf(tag, countsIn[i]) + } + //wg.Wait() + + endtags := tc.tagptr.Load().(map[string]*uint64) + for i, tag := range tags { + countin := countsIn[i] + endcount := uint64(0) + endcountp := endtags[tag] + if endcountp != nil { + endcount = *endcountp + //t.Errorf("tag[%d] %s nil counter", i, tag) + //continue + } + //endcount := *endcountp + if endcount != countin { + t.Errorf("tag[%d] %v wanted %d got %d", i, tag, countin, endcount) + } + } +} From 0c44a0faf07c365b28d64844e56656832094adf2 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 19:02:45 -0500 Subject: [PATCH 028/215] breaking up the block per review comment --- test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index c752d125d0..d8fe5d22c8 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -46,13 +46,18 @@ package require Tcl 8.0 proc ::AlgorandGoal::Abort { ERROR } { puts "Aborting with Error: $ERROR" - if { [info exists ::GLOBAL_TEST_ALGO_DIR] && [info exists ::GLOBAL_TEST_ROOT_DIR] } { + if { [info exists ::GLOBAL_TEST_ROOT_DIR] } { # terminate child algod processes, if there are active child processes the test will hang on a test failure - puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR" puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR" puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME" ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME 
$::GLOBAL_TEST_ROOT_DIR } + + if { [info exists ::GLOBAL_TEST_ALGO_DIR] } { + puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR" + ::AlgorandGoal::StopNode $::GLOBAL_TEST_ROOT_DIR + } + exit 1 } From 51e808df0d6a4cd1315d7510e0ed99afccc5d415 Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Wed, 10 Mar 2021 14:30:19 -0300 Subject: [PATCH 029/215] Split messages for ANSI and non-ANSI consoles --- cmd/goal/messages.go | 2 -- cmd/goal/messages_common.go | 24 ++++++++++++++++++++++++ cmd/goal/messages_windows.go | 22 ++++++++++++++++++++++ cmd/goal/wallet.go | 7 +------ 4 files changed, 47 insertions(+), 8 deletions(-) create mode 100644 cmd/goal/messages_common.go create mode 100644 cmd/goal/messages_windows.go diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go index e0864c24d4..e503f3aa2b 100644 --- a/cmd/goal/messages.go +++ b/cmd/goal/messages.go @@ -164,8 +164,6 @@ const ( infoCreatedWallet = "Created wallet '%s'" infoBackupExplanation = "Your new wallet has a backup phrase that can be used for recovery.\nKeeping this backup phrase safe is extremely important.\nWould you like to see it now? (Y/n): " infoPrintedBackupPhrase = "Your backup phrase is printed below.\nKeep this information safe -- never share it with anyone!" - infoBackupPhrase = "\n\x1B[32m%s\033[0m" - infoBackupPhraseWindows = "\n%s" infoNoWallets = "No wallets found. You can create a wallet with `goal wallet new`" errorCouldntCreateWallet = "Couldn't create wallet: %s" errorCouldntInitializeWallet = "Couldn't initialize wallet: %s" diff --git a/cmd/goal/messages_common.go b/cmd/goal/messages_common.go new file mode 100644 index 0000000000..39b2ae636f --- /dev/null +++ b/cmd/goal/messages_common.go @@ -0,0 +1,24 @@ +// Copyright (C) 2019-2021 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// +build !windows + +package main + +const ( + // Wallet + infoBackupPhrase = "\n\x1B[32m%s\033[0m" +) diff --git a/cmd/goal/messages_windows.go b/cmd/goal/messages_windows.go new file mode 100644 index 0000000000..3d06815cd0 --- /dev/null +++ b/cmd/goal/messages_windows.go @@ -0,0 +1,22 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package main + +const ( + // Wallet + infoBackupPhrase = "\n%s" +) diff --git a/cmd/goal/wallet.go b/cmd/goal/wallet.go index 7b315364e0..3d62a0d5b4 100644 --- a/cmd/goal/wallet.go +++ b/cmd/goal/wallet.go @@ -21,7 +21,6 @@ import ( "bytes" "fmt" "os" - "runtime" "strings" "github.com/spf13/cobra" @@ -167,11 +166,7 @@ var newWalletCmd = &cobra.Command{ // Display the mnemonic to the user reportInfoln(infoPrintedBackupPhrase) - if runtime.GOOS == "windows" { - reportInfof(infoBackupPhraseWindows, mnemonic) - } else { - reportInfof(infoBackupPhrase, mnemonic) - } + reportInfof(infoBackupPhrase, mnemonic) } } From 4135b816a196f1c8338ae617212ed40684a2960e Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Wed, 10 Mar 2021 14:32:11 -0300 Subject: [PATCH 030/215] Moved dir safety check logging to per-OS code --- nodecontrol/kmdControl.go | 3 +-- nodecontrol/kmdControl_common.go | 5 ++++- nodecontrol/kmdControl_windows.go | 4 ++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/nodecontrol/kmdControl.go b/nodecontrol/kmdControl.go index b30523a7e3..a608cac160 100644 --- a/nodecontrol/kmdControl.go +++ b/nodecontrol/kmdControl.go @@ -200,8 +200,7 @@ func (kc *KMDController) StartKMD(args KMDStartArgs) (alreadyRunning bool, err e logging.Base().Errorf("%s: kmd data dir exists but is not a directory", kc.kmdDataDir) return false, errors.New("bad kmd data dir") } - if !isDirectorySafe(dataDirStat) { - logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777, DefaultKMDDataDirPerms) + if !kc.isDirectorySafe(dataDirStat) { return false, errors.New("kmd data dir not secure") } } else { diff --git a/nodecontrol/kmdControl_common.go b/nodecontrol/kmdControl_common.go index 8f0827bcab..4404bffb86 100644 --- a/nodecontrol/kmdControl_common.go +++ b/nodecontrol/kmdControl_common.go @@ -20,10 +20,13 @@ package nodecontrol import ( "os" + + "github.com/algorand/go-algorand/logging" ) -func 
isDirectorySafe(dirStats os.FileInfo) bool { +func (kc *KMDController) isDirectorySafe(dirStats os.FileInfo) bool { if (dirStats.Mode() & 0077) != 0 { + logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777, DefaultKMDDataDirPerms) return false } return true diff --git a/nodecontrol/kmdControl_windows.go b/nodecontrol/kmdControl_windows.go index 7232093332..601cba07f3 100644 --- a/nodecontrol/kmdControl_windows.go +++ b/nodecontrol/kmdControl_windows.go @@ -20,6 +20,6 @@ import ( "os" ) -func isDirectorySafe(_ os.FileInfo) bool { +func (kc *KMDController) isDirectorySafe(_ os.FileInfo) bool { return true -} \ No newline at end of file +} From a42275aac0156b6ddf2c4c81c31a0e8d14d92d01 Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Wed, 10 Mar 2021 14:32:45 -0300 Subject: [PATCH 031/215] Improved process tree kill in Windows --- util/process_windows.go | 164 +++++++++++++++++++++++++--------------- 1 file changed, 101 insertions(+), 63 deletions(-) diff --git a/util/process_windows.go b/util/process_windows.go index 6677345720..7f21e08c62 100644 --- a/util/process_windows.go +++ b/util/process_windows.go @@ -30,7 +30,15 @@ import ( const ( ERROR_INVALID_PARAMETER = syscall.Errno(87) + STATUS_CANCELLED = uint32(0xC0000120) + processTerminateWaitInMs = 1000 + + killChildsPassCount = 4 +) + +var ( + errFinishedProcess = errors.New("os: process already finished") ) // FindProcess looks for a running process by its pid @@ -39,15 +47,8 @@ func FindProcess(pid int) (*os.Process, error) { process, err := os.FindProcess(pid) if err != nil { - // NOTE: Unlike Unix, Windows tries to open the target process in order to kill it. - // ERROR_INVALID_PARAMETER is returned if the process does not exists. 
- // To mimic other OS behavior, if the process does not exist, don't return an error - var syscallError syscall.Errno - - if errors.As(err, &syscallError) { - if syscallError == ERROR_INVALID_PARAMETER { - return nil, nil - } + if isInvalidParameterError(err) { // NOTE: See function definition for details + return nil, nil } return nil, err } @@ -63,6 +64,10 @@ func FindProcess(pid int) (*os.Process, error) { if e2 == nil && ret == syscall.WAIT_OBJECT_0 { return nil, nil } + } else { + if isInvalidParameterError(err) { // NOTE: See function definition for details + return nil, nil + } } return process, nil @@ -70,72 +75,105 @@ func FindProcess(pid int) (*os.Process, error) { // KillProcess kills a running OS process func KillProcess(pid int, signal os.Signal) error { - p, err := FindProcess(pid) - if err == nil { - if p != nil { - if signal != syscall.Signal(0) { - for _, v := range getChildrenProcesses(pid) { - err2 := v.Kill() - if err2 == nil { - waitUntilProcessEnds(v.Pid) - } - } - - err = p.Kill() - waitUntilProcessEnds(p.Pid) - } - } else { - // Signal(0) only checks if we have access to kill a process and if it is really dead - if p != nil { - var h syscall.Handle - - h, err = syscall.OpenProcess(syscall.SYNCHRONIZE|syscall.PROCESS_TERMINATE, false, uint32(pid)) - if err == nil { - ret, e2 := syscall.WaitForSingleObject(h, 0) - if e2 == nil && ret == syscall.WAIT_OBJECT_0 { - err = errors.New("os: process already finished") - } - _ = syscall.CloseHandle(h) - } - } else { - err = errors.New("os: process already finished") - } + // Signal(0) only checks if we have access to kill a process and if it is really dead + if signal == syscall.Signal(0) { + return isProcessAlive(pid) + } + + return killProcessTree(pid) +} + +func isProcessAlive(pid int) error { + var ret uint32 + + h, err := syscall.OpenProcess(syscall.SYNCHRONIZE|syscall.PROCESS_TERMINATE, false, uint32(pid)) + if err != nil { + if isInvalidParameterError(err) { // NOTE: See function definition for 
details + return errFinishedProcess } + return err } + ret, err = syscall.WaitForSingleObject(h, 0) + if err == nil && ret == syscall.WAIT_OBJECT_0 { + err = errFinishedProcess + } + + _ = syscall.CloseHandle(h) return err } -func getChildrenProcesses(parentPid int) []*os.Process { - out := []*os.Process{} +func killProcessTree(pid int) error { + err := killProcess(pid) + if err != nil { + return err + } + + // We do several passes just in case the process being killed spawns a new one + for pass := 1; pass <= killChildsPassCount; pass++ { + childProcessList := getChildProcesses(pid) + if len(childProcessList) == 0 { + break + } + for _, childPid := range childProcessList { + killProcessTree(childPid) + } + } + + return nil +} + +func getChildProcesses(pid int) []int { + var pe32 windows.ProcessEntry32 + + out := make([]int, 0) + snap, err := windows.CreateToolhelp32Snapshot(windows.TH32CS_SNAPPROCESS, uint32(0)) - if err == nil { - var pe32 windows.ProcessEntry32 - - defer windows.CloseHandle(snap) - - pe32.Size = uint32(unsafe.Sizeof(pe32)) - if err := windows.Process32First(snap, &pe32); err == nil { - for { - if pe32.ParentProcessID == uint32(parentPid) { - p, err := FindProcess(int(pe32.ProcessID)) - if err == nil && p != nil { - out = append(out, p) - } - } - if err = windows.Process32Next(snap, &pe32); err != nil { - break - } - } + if err != nil { + return out + } + + defer func() { + _ = windows.CloseHandle(snap) + }() + + pe32.Size = uint32(unsafe.Sizeof(pe32)) + err = windows.Process32First(snap, &pe32) + for err != nil { + if pe32.ParentProcessID == uint32(pid) { + // Add to list + out = append(out, int(pe32.ProcessID)) } + + err = windows.Process32Next(snap, &pe32) } + return out } -func waitUntilProcessEnds(pid int) { - h, err := syscall.OpenProcess(syscall.SYNCHRONIZE, false, uint32(pid)) +func killProcess(pid int) error { + h, err := syscall.OpenProcess(syscall.SYNCHRONIZE | syscall.PROCESS_TERMINATE, false, uint32(pid)) if err == nil { - _, _ = 
syscall.WaitForSingleObject(h, processTerminateWaitInMs) + err = syscall.TerminateProcess(h, STATUS_CANCELLED) + if err == nil { + _, _ = syscall.WaitForSingleObject(h, processTerminateWaitInMs) + } _ = syscall.CloseHandle(h) } -} \ No newline at end of file + + return err +} + +// NOTE: Unlike Unix, Windows tries to open the target process in order to kill it. +// ERROR_INVALID_PARAMETER is returned if the process does not exists. +// To mimic other OS behavior, if the process does not exist, don't return an error +func isInvalidParameterError(err error) bool { + var syscallError syscall.Errno + + if errors.As(err, &syscallError) { + if syscallError == ERROR_INVALID_PARAMETER { + return true + } + } + return false +} From 2bbf81577b447e51385d4c8b51e8282d15cb33f2 Mon Sep 17 00:00:00 2001 From: Mauro Leggieri Date: Wed, 10 Mar 2021 15:50:11 -0300 Subject: [PATCH 032/215] Fixed wrong variable name --- nodecontrol/kmdControl_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nodecontrol/kmdControl_common.go b/nodecontrol/kmdControl_common.go index 4404bffb86..947ccd80a4 100644 --- a/nodecontrol/kmdControl_common.go +++ b/nodecontrol/kmdControl_common.go @@ -26,7 +26,7 @@ import ( func (kc *KMDController) isDirectorySafe(dirStats os.FileInfo) bool { if (dirStats.Mode() & 0077) != 0 { - logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dataDirStat.Mode()&0777, DefaultKMDDataDirPerms) + logging.Base().Errorf("%s: kmd data dir exists but is too permissive (%o), change to (%o)", kc.kmdDataDir, dirStats.Mode()&0777, DefaultKMDDataDirPerms) return false } return true From 86fa0e0b2c3ca57211efe46f3043aba1e0e9b2f8 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 10 Mar 2021 15:52:29 -0500 Subject: [PATCH 033/215] Check several assembly/disassembly across versions --- data/transactions/logic/assembler_test.go | 110 ++++++++++++---------- 1 file changed, 58 insertions(+), 52 deletions(-) 
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 762335193a..6600bfb2c8 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -1103,75 +1103,81 @@ int 1 func TestAssembleAsset(t *testing.T) { t.Parallel() - - testLine(t, "asset_holding_get ABC 1", AssemblerMaxVersion, "asset_holding_get expects one argument") - testLine(t, "asset_holding_get ABC", AssemblerMaxVersion, "asset_holding_get unknown arg: ABC") - testLine(t, "asset_params_get ABC 1", AssemblerMaxVersion, "asset_params_get expects one argument") - testLine(t, "asset_params_get ABC", AssemblerMaxVersion, "asset_params_get unknown arg: ABC") + introduction := OpsByName[LogicVersion]["asset_holding_get"].Version + for v := introduction; v <= AssemblerMaxVersion; v++ { + testLine(t, "asset_holding_get ABC 1", v, "asset_holding_get expects one argument") + testLine(t, "asset_holding_get ABC", v, "asset_holding_get unknown arg: ABC") + testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects one argument") + testLine(t, "asset_params_get ABC", v, "asset_params_get unknown arg: ABC") + } } func TestDisassembleSingleOp(t *testing.T) { t.Parallel() - // test ensures no double arg_0 entries in disassembly listing - sample := fmt.Sprintf("// version %d\narg_0\n", AssemblerMaxVersion) - ops, err := AssembleStringWithVersion(sample, AssemblerMaxVersion) - require.NoError(t, err) - require.Equal(t, 2, len(ops.Program)) - disassembled, err := Disassemble(ops.Program) - require.NoError(t, err) - require.Equal(t, sample, disassembled) + for v := uint64(1); v <= AssemblerMaxVersion; v++ { + // test ensures no double arg_0 entries in disassembly listing + sample := fmt.Sprintf("// version %d\narg_0\n", v) + ops, err := AssembleStringWithVersion(sample, v) + require.NoError(t, err) + require.Equal(t, 2, len(ops.Program)) + disassembled, err := Disassemble(ops.Program) + require.NoError(t, err) + 
require.Equal(t, sample, disassembled) + } } func TestDisassembleTxna(t *testing.T) { t.Parallel() - // check txn and txna are properly disassembled - txnSample := fmt.Sprintf("// version %d\ntxn Sender\n", AssemblerMaxVersion) - ops, err := AssembleStringWithVersion(txnSample, AssemblerMaxVersion) - require.NoError(t, err) - disassembled, err := Disassemble(ops.Program) - require.NoError(t, err) - require.Equal(t, txnSample, disassembled) + // txn was 1, but this tests both + introduction := OpsByName[LogicVersion]["gtxna"].Version + for v := introduction; v <= AssemblerMaxVersion; v++ { + // check txn and txna are properly disassembled + txnSample := fmt.Sprintf("// version %d\ntxn Sender\n", v) + ops := testProg(t, txnSample, v) + disassembled, err := Disassemble(ops.Program) + require.NoError(t, err) + require.Equal(t, txnSample, disassembled) - txnaSample := fmt.Sprintf("// version %d\ntxna Accounts 0\n", AssemblerMaxVersion) - ops, err = AssembleStringWithVersion(txnaSample, AssemblerMaxVersion) - require.NoError(t, err) - disassembled, err = Disassemble(ops.Program) - require.NoError(t, err) - require.Equal(t, txnaSample, disassembled) + txnaSample := fmt.Sprintf("// version %d\ntxna Accounts 0\n", v) + ops = testProg(t, txnaSample, v) + disassembled, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Equal(t, txnaSample, disassembled) - txnSample2 := fmt.Sprintf("// version %d\ntxn Accounts 0\n", AssemblerMaxVersion) - ops, err = AssembleStringWithVersion(txnSample2, AssemblerMaxVersion) - require.NoError(t, err) - disassembled, err = Disassemble(ops.Program) - require.NoError(t, err) - // compare with txnaSample, not txnSample2 - require.Equal(t, txnaSample, disassembled) + txnSample2 := fmt.Sprintf("// version %d\ntxn Accounts 0\n", v) + ops = testProg(t, txnSample2, v) + disassembled, err = Disassemble(ops.Program) + require.NoError(t, err) + // compare with txnaSample, not txnSample2 + require.Equal(t, txnaSample, disassembled) + } } 
func TestDisassembleGtxna(t *testing.T) { t.Parallel() // check gtxn and gtxna are properly disassembled - gtxnSample := fmt.Sprintf("// version %d\ngtxn 0 Sender\n", AssemblerMaxVersion) - ops, err := AssembleStringWithVersion(gtxnSample, AssemblerMaxVersion) - require.NoError(t, err) - disassembled, err := Disassemble(ops.Program) - require.NoError(t, err) - require.Equal(t, gtxnSample, disassembled) - gtxnaSample := fmt.Sprintf("// version %d\ngtxna 0 Accounts 0\n", AssemblerMaxVersion) - ops, err = AssembleStringWithVersion(gtxnaSample, AssemblerMaxVersion) - require.NoError(t, err) - disassembled, err = Disassemble(ops.Program) - require.NoError(t, err) - require.Equal(t, gtxnaSample, disassembled) + introduction := OpsByName[LogicVersion]["gtxna"].Version + for v := introduction; v <= AssemblerMaxVersion; v++ { + gtxnSample := fmt.Sprintf("// version %d\ngtxn 0 Sender\n", v) + ops := testProg(t, gtxnSample, v) + disassembled, err := Disassemble(ops.Program) + require.NoError(t, err) + require.Equal(t, gtxnSample, disassembled) - gtxnSample2 := fmt.Sprintf("// version %d\ngtxn 0 Accounts 0\n", AssemblerMaxVersion) - ops, err = AssembleStringWithVersion(gtxnSample2, AssemblerMaxVersion) - require.NoError(t, err) - disassembled, err = Disassemble(ops.Program) - require.NoError(t, err) - // comapre with gtxnaSample, not gtxnSample2 - require.Equal(t, gtxnaSample, disassembled) + gtxnaSample := fmt.Sprintf("// version %d\ngtxna 0 Accounts 0\n", v) + ops = testProg(t, gtxnaSample, v) + disassembled, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Equal(t, gtxnaSample, disassembled) + + gtxnSample2 := fmt.Sprintf("// version %d\ngtxn 0 Accounts 0\n", v) + ops = testProg(t, gtxnSample2, v) + disassembled, err = Disassemble(ops.Program) + require.NoError(t, err) + // compare with gtxnaSample, not gtxnSample2 + require.Equal(t, gtxnaSample, disassembled) + } } func TestDisassembleLastLabel(t *testing.T) { From c8e8b513206f0eea6f18f8996f4010fae22ceb5a 
Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 10 Mar 2021 15:55:04 -0500 Subject: [PATCH 034/215] Thinko, caught by Pavel --- data/transactions/logic/opcodes.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 8748b42256..d0bf31f20d 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -51,7 +51,7 @@ func costly(cost int) opDetails { func immediates(name string, rest ...string) opDetails { num := 1 + len(rest) - immediates := make([]immediate, num, len(rest)+1) + immediates := make([]immediate, num, num) immediates[0] = immediate{name, immByte} for i, n := range rest { immediates[i+1] = immediate{n, immByte} From 9da41f44d73522c70bd075865693be75de759841 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 10 Mar 2021 17:11:11 -0500 Subject: [PATCH 035/215] Call ops.checkArgs() in assembly loop for consistency. --- data/transactions/logic/assembler.go | 72 +++++++++++------------ data/transactions/logic/assembler_test.go | 18 ++++-- data/transactions/logic/eval.go | 2 +- data/transactions/logic/eval_test.go | 4 +- data/transactions/logic/opcodes.go | 3 +- 5 files changed, 53 insertions(+), 46 deletions(-) diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 99e2d28e3e..345121f834 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -115,8 +115,14 @@ func (ops *OpStream) ReferToLabel(pc int, label string) { ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label}) } -func (ops *OpStream) tpush(argType StackType) { - ops.typeStack = append(ops.typeStack, argType) +// returns allows opcodes like `txn` to be specific about their return +// value types, based on the field requested, rather than use Any as +// specified by opSpec. 
+func (ops *OpStream) returns(argTypes ...StackType) { + for range argTypes { + ops.tpop() + } + ops.tpusha(argTypes) } func (ops *OpStream) tpusha(argType []StackType) { @@ -249,82 +255,87 @@ func (ops *OpStream) Txn(val uint64) { } ops.pending.WriteByte(0x31) ops.pending.WriteByte(uint8(val)) - ops.tpush(TxnFieldTypes[val]) + ops.returns(TxnFieldTypes[val]) } // Txna writes opcodes for loading array field from the current transaction func (ops *OpStream) Txna(fieldNum uint64, arrayFieldIdx uint64) { if fieldNum >= uint64(len(TxnFieldNames)) { ops.errorf("invalid txn field: %d", fieldNum) - fieldNum = 0 // avoid further error in tpush as we forge ahead + return } if arrayFieldIdx > 255 { ops.errorf("txna array index beyond 255: %d", arrayFieldIdx) + return } ops.pending.WriteByte(0x36) ops.pending.WriteByte(uint8(fieldNum)) ops.pending.WriteByte(uint8(arrayFieldIdx)) - ops.tpush(TxnFieldTypes[fieldNum]) + ops.returns(TxnFieldTypes[fieldNum]) } // Gtxn writes opcodes for loading a field from the current transaction func (ops *OpStream) Gtxn(gid, val uint64) { if val >= uint64(len(TxnFieldNames)) { ops.errorf("invalid gtxn field: %d", val) - val = 0 // avoid further error in tpush as we forge ahead + return } if gid > 255 { ops.errorf("gtxn transaction index beyond 255: %d", gid) + return } ops.pending.WriteByte(0x33) ops.pending.WriteByte(uint8(gid)) ops.pending.WriteByte(uint8(val)) - ops.tpush(TxnFieldTypes[val]) + ops.returns(TxnFieldTypes[val]) } // Gtxna writes opcodes for loading an array field from the current transaction func (ops *OpStream) Gtxna(gid, fieldNum uint64, arrayFieldIdx uint64) { if fieldNum >= uint64(len(TxnFieldNames)) { ops.errorf("invalid txn field: %d", fieldNum) - fieldNum = 0 // avoid further error in tpush as we forge ahead + return } if gid > 255 { ops.errorf("gtxna group index beyond 255: %d", gid) + return } if arrayFieldIdx > 255 { ops.errorf("gtxna array index beyond 255: %d", arrayFieldIdx) + return } ops.pending.WriteByte(0x37) 
ops.pending.WriteByte(uint8(gid)) ops.pending.WriteByte(uint8(fieldNum)) ops.pending.WriteByte(uint8(arrayFieldIdx)) - ops.tpush(TxnFieldTypes[fieldNum]) + ops.returns(TxnFieldTypes[fieldNum]) } // Gtxns writes opcodes for loading a field from the current transaction func (ops *OpStream) Gtxns(fieldNum uint64) { if fieldNum >= uint64(len(TxnFieldNames)) { ops.errorf("invalid gtxns field: %d", fieldNum) - fieldNum = 0 // avoid further error in tpush as we forge ahead + return } ops.pending.WriteByte(0x38) ops.pending.WriteByte(uint8(fieldNum)) - ops.tpush(TxnFieldTypes[fieldNum]) + ops.returns(TxnFieldTypes[fieldNum]) } // Gtxnsa writes opcodes for loading an array field from the current transaction func (ops *OpStream) Gtxnsa(fieldNum uint64, arrayFieldIdx uint64) { if fieldNum >= uint64(len(TxnFieldNames)) { ops.errorf("invalid gtxnsa field: %d", fieldNum) - fieldNum = 0 // avoid further error in tpush as we forge ahead + return } if arrayFieldIdx > 255 { ops.errorf("gtxnsa array index beyond 255: %d", arrayFieldIdx) + return } ops.pending.WriteByte(0x39) ops.pending.WriteByte(uint8(fieldNum)) ops.pending.WriteByte(uint8(arrayFieldIdx)) - ops.tpush(TxnFieldTypes[fieldNum]) + ops.returns(TxnFieldTypes[fieldNum]) } // Global writes opcodes for loading an evaluator-global field @@ -332,35 +343,32 @@ func (ops *OpStream) Global(val GlobalField) { ops.pending.WriteByte(0x32) ops.pending.WriteByte(uint8(val)) ops.trace("%s (%s)", GlobalFieldNames[val], GlobalFieldTypes[val].String()) - ops.tpush(GlobalFieldTypes[val]) + ops.returns(GlobalFieldTypes[val]) } // AssetHolding writes opcodes for accessing data from AssetHolding func (ops *OpStream) AssetHolding(val uint64) { if val >= uint64(len(AssetHoldingFieldNames)) { ops.errorf("invalid asset holding field: %d", val) - val = 0 // avoid further error in tpush as we forge ahead + return } ops.pending.WriteByte(OpsByName[ops.Version]["asset_holding_get"].Opcode) ops.pending.WriteByte(uint8(val)) - 
ops.tpush(AssetHoldingFieldTypes[val]) - ops.tpush(StackUint64) + ops.returns(AssetHoldingFieldTypes[val], StackUint64) } // AssetParams writes opcodes for accessing data from AssetParams func (ops *OpStream) AssetParams(val uint64) { if val >= uint64(len(AssetParamsFieldNames)) { ops.errorf("invalid asset params field: %d", val) - val = 0 // avoid further error in tpush as we forge ahead + return } ops.pending.WriteByte(OpsByName[ops.Version]["asset_params_get"].Opcode) ops.pending.WriteByte(uint8(val)) - ops.tpush(AssetParamsFieldTypes[val]) - ops.tpush(StackUint64) + ops.returns(AssetParamsFieldTypes[val], StackUint64) } func assembleInt(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { return ops.error("int needs one argument") } @@ -392,7 +400,6 @@ func assembleInt(ops *OpStream, spec *OpSpec, args []string) error { // Explicit invocation of const lookup and push func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { return ops.error("intc operation needs one argument") } @@ -404,7 +411,6 @@ func assembleIntC(ops *OpStream, spec *OpSpec, args []string) error { return nil } func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { ops.error("bytec operation needs one argument") } @@ -417,7 +423,6 @@ func assembleByteC(ops *OpStream, spec *OpSpec, args []string) error { } func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { ops.errorf("%s needs one argument", spec.Name) } @@ -432,7 +437,6 @@ func asmPushInt(ops *OpStream, spec *OpSpec, args []string) error { return nil } func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { ops.errorf("%s needs one argument", spec.Name) } @@ -596,7 +600,6 @@ func parseStringLiteral(input string) (result []byte, err error) { // byte 0x.... 
// byte "this is a string\n" func assembleByte(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) == 0 { return ops.error("byte operation needs byte literal argument") } @@ -660,7 +663,6 @@ func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error { // addr A1EU... // parses base32-with-checksum account address strings into a byte literal func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { return ops.error("addr operation needs one argument") } @@ -673,7 +675,6 @@ func assembleAddr(ops *OpStream, spec *OpSpec, args []string) error { } func assembleArg(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { return ops.error("arg operation needs one argument") } @@ -686,7 +687,6 @@ func assembleArg(ops *OpStream, spec *OpSpec, args []string) error { } func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != 1 { return ops.error("branch operation needs label argument") } @@ -893,13 +893,11 @@ func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error { func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { - ops.error("asset_holding_get expects one argument") - args = []string{AssetHoldingFieldNames[0]} + return ops.error("asset_holding_get expects one argument") } val, ok := assetHoldingFields[args[0]] if !ok { - ops.errorf("asset_holding_get unknown arg: %v", args[0]) - val = 0 + return ops.errorf("asset_holding_get unknown arg: %v", args[0]) } ops.AssetHolding(val) return nil @@ -907,13 +905,11 @@ func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error { func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { - ops.error("asset_params_get expects one argument") - args = []string{AssetParamsFieldNames[0]} + return ops.error("asset_params_get expects 
one argument") } val, ok := assetParamsFields[args[0]] if !ok { - ops.errorf("asset_params_get unknown arg: %v", args[0]) - val = 0 + return ops.errorf("asset_params_get unknown arg: %v", args[0]) } ops.AssetParams(val) return nil @@ -923,7 +919,6 @@ type assembleFunc func(*OpStream, *OpSpec, []string) error // Basic assembly. Any extra bytes of opcode are encoded as byte immediates. func asmDefault(ops *OpStream, spec *OpSpec, args []string) error { - ops.checkArgs(*spec) if len(args) != spec.Details.Size-1 { ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size) } @@ -1133,6 +1128,7 @@ func (ops *OpStream) assemble(fin io.Reader) error { if ok { ops.trace("%3d: %s\t", ops.sourceLine, opstring) ops.RecordSourceLine() + ops.checkArgs(spec) spec.asm(ops, &spec, fields[1:]) ops.trace("\n") continue diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 6600bfb2c8..2aeb33c33a 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -337,7 +337,7 @@ func testMatch(t *testing.T, actual, expected string) { } else if strings.HasSuffix(expected, "...") { require.Contains(t, "^"+actual, "^"+expected[:len(expected)-3]) } else { - require.Equal(t, actual, expected) + require.Equal(t, expected, actual) } } @@ -371,7 +371,7 @@ func testProg(t *testing.T, source string, ver uint64, expected ...expect) *OpSt break } } - require.NotNil(t, found) + require.NotNil(t, found, "No error on line %d", exp.l) msg := found.Unwrap().Error() testMatch(t, msg, exp.s) } @@ -1105,8 +1105,18 @@ func TestAssembleAsset(t *testing.T) { t.Parallel() introduction := OpsByName[LogicVersion]["asset_holding_get"].Version for v := introduction; v <= AssemblerMaxVersion; v++ { - testLine(t, "asset_holding_get ABC 1", v, "asset_holding_get expects one argument") - testLine(t, "asset_holding_get ABC", v, "asset_holding_get unknown arg: ABC") + testProg(t, "asset_holding_get ABC 1", v, + 
expect{1, "asset_holding_get arg 1..."}) + testProg(t, "int 1; asset_holding_get ABC 1", v, + expect{2, "asset_holding_get arg 0..."}) + testProg(t, "int 1; int 1; asset_holding_get ABC 1", v, + expect{3, "asset_holding_get expects one argument"}) + testProg(t, "int 1; int 1; asset_holding_get ABC", v, + expect{3, "asset_holding_get unknown arg: ABC"}) + + testProg(t, "byte 0x1234; asset_params_get ABC 1", v, + expect{2, "asset_params_get arg 0 wanted type uint64..."}) + testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects one argument") testLine(t, "asset_params_get ABC", v, "asset_params_get unknown arg: ABC") } diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 7809dd4ce8..3c5bc3f4fa 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -1218,7 +1218,7 @@ func opDig(cx *evalContext) { depth := int(uint(cx.program[cx.pc+1])) idx := len(cx.stack) - 1 - depth // Need to check stack size explicitly here because checkArgs() doesn't understand dig - // so we can't expect out stack to be prechecked. + // so we can't expect our stack to be prechecked. 
if idx < 0 { cx.err = fmt.Errorf("dig %d with stack size = %d", depth, len(cx.stack)) return diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 1fd7e64950..922d01fbc9 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -3671,8 +3671,8 @@ func TestAllowedOpcodesV2(t *testing.T) { "app_global_put": "byte 0x41; dup; app_global_put", "app_local_del": "int 0; byte 0x41; app_local_del", "app_global_del": "byte 0x41; app_global_del", - "asset_holding_get": "asset_holding_get AssetBalance", - "asset_params_get": "asset_params_get AssetTotal", + "asset_holding_get": "int 1; int 1; asset_holding_get AssetBalance", + "asset_params_get": "int 1; asset_params_get AssetTotal", } excluded := map[string]bool{ diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index d0bf31f20d..ffe07d771f 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -111,7 +111,8 @@ var anyIntInt = StackTypes{StackAny, StackUint64, StackUint64} // // Any changes should be reflected in README_in.md which serves as the language spec. 
// -// WARNING: special case op assembly by argOps functions must do their own type stack maintenance via ops.tpop() ops.tpush()/ops.tpusha() +// Note: assembly can specialize an Any return type if known at +// assembly-time, with ops.tspecify() var OpSpecs = []OpSpec{ {0x00, "err", opErr, asmDefault, disDefault, nil, nil, 1, modeAny, opDefault}, {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(7)}, From 98f2843fa7a6b9140db41e9cbb4ebfb51a163ada Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 10 Mar 2021 20:33:05 -0500 Subject: [PATCH 036/215] clearer test, per code review --- data/transactions/logic/opcodes_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/data/transactions/logic/opcodes_test.go b/data/transactions/logic/opcodes_test.go index 7c61f90680..36dca364a1 100644 --- a/data/transactions/logic/opcodes_test.go +++ b/data/transactions/logic/opcodes_test.go @@ -117,8 +117,8 @@ func TestOpcodesByVersion(t *testing.T) { func TestOpcodesVersioningV2(t *testing.T) { t.Parallel() - require.Equal(t, 4, len(opsByOpcode)) - require.Equal(t, 4, len(OpsByName)) + require.Equal(t, LogicVersion+1, len(opsByOpcode)) + require.Equal(t, LogicVersion+1, len(OpsByName)) // ensure v0 has only v0 opcodes cntv0 := 0 From ad2a1601365217d6362210c0eea5b2eb79da4926 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 10 Mar 2021 20:36:06 -0500 Subject: [PATCH 037/215] Rewards unneeded for min balance calculation --- ledger/applications.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/ledger/applications.go b/ledger/applications.go index 9dee2b4e63..dae09990ff 100644 --- a/ledger/applications.go +++ b/ledger/applications.go @@ -76,8 +76,7 @@ func (al *logicLedger) Balance(addr basics.Address) (res basics.MicroAlgos, err } func (al *logicLedger) MinBalance(addr basics.Address, proto *config.ConsensusParams) (res basics.MicroAlgos, err error) { - // Fetch record with pending rewards 
applied - record, err := al.cow.Get(addr, true) + record, err := al.cow.Get(addr, false) // pending rewards unneeded if err != nil { return } From 65753ae18b38656153f49f3fa7591f93dceb8a8d Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 10 Mar 2021 22:52:02 -0500 Subject: [PATCH 038/215] update per reviewer comments. --- ledger/acctupdates.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index ba89f66c32..c8c4522e56 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -94,7 +94,7 @@ const initializeCachesRoundFlushInterval = 1000 const initializingAccountCachesMessageTimeout = 3 * time.Second // accountsUpdatePerRoundHighWatermark is the warning watermark for updating accounts data that takes -// longer then expected. We set it up here for one second per round, so that if we're bulk updating +// longer than expected. We set it up here for one second per round, so that if we're bulk updating // four rounds, we would allow up to 4 seconds. This becomes important when supporting balances recovery // where we end up batching up to 1000 rounds in a single update. const accountsUpdatePerRoundHighWatermark = 1 * time.Second @@ -993,8 +993,9 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, // flush to disk if any of the following applies: // 1. if we have loaded up more than initializeCachesRoundFlushInterval rounds since the last time we flushed the data to disk // 2. if we completed the loading and we loaded up more than 320 rounds. 
- if blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval || - (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound) { + flushIntervalExceed := blk.Round()-lastFlushedRound > initializeCachesRoundFlushInterval + loadCompleted := (lastestBlockRound == blk.Round() && lastBalancesRound+basics.Round(blk.ConsensusProtocol().MaxBalLookback) < lastestBlockRound) + if flushIntervalExceed || loadCompleted { // adjust the last flush time, so that we would not hold off the flushing due to "working too fast" au.lastFlushTime = time.Now().Add(-balancesFlushInterval) From c6023df8e6004bf1908bb488f9321499920af2ab Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 11 Mar 2021 10:31:37 -0500 Subject: [PATCH 039/215] Get opcodes from specs, not magic constants. --- data/transactions/logic/assembler.go | 255 +++++++++------------------ 1 file changed, 85 insertions(+), 170 deletions(-) diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 345121f834..4f150b890b 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -227,147 +227,6 @@ func (ops *OpStream) ByteLiteral(val []byte) { ops.Bytec(constIndex) } -// Arg writes opcodes for loading from Lsig.Args -func (ops *OpStream) Arg(val uint64) error { - switch val { - case 0: - ops.pending.WriteByte(0x2d) // arg_0 - case 1: - ops.pending.WriteByte(0x2e) // arg_1 - case 2: - ops.pending.WriteByte(0x2f) // arg_2 - case 3: - ops.pending.WriteByte(0x30) // arg_3 - default: - if val > 0xff { - return ops.error("cannot have more than 256 args") - } - ops.pending.WriteByte(0x2c) - ops.pending.WriteByte(uint8(val)) - } - return nil -} - -// Txn writes opcodes for loading a field from the current transaction -func (ops *OpStream) Txn(val uint64) { - if val >= uint64(len(TxnFieldNames)) { - ops.errorf("invalid txn field: %d", val) - } - ops.pending.WriteByte(0x31) - 
ops.pending.WriteByte(uint8(val)) - ops.returns(TxnFieldTypes[val]) -} - -// Txna writes opcodes for loading array field from the current transaction -func (ops *OpStream) Txna(fieldNum uint64, arrayFieldIdx uint64) { - if fieldNum >= uint64(len(TxnFieldNames)) { - ops.errorf("invalid txn field: %d", fieldNum) - return - } - if arrayFieldIdx > 255 { - ops.errorf("txna array index beyond 255: %d", arrayFieldIdx) - return - } - ops.pending.WriteByte(0x36) - ops.pending.WriteByte(uint8(fieldNum)) - ops.pending.WriteByte(uint8(arrayFieldIdx)) - ops.returns(TxnFieldTypes[fieldNum]) -} - -// Gtxn writes opcodes for loading a field from the current transaction -func (ops *OpStream) Gtxn(gid, val uint64) { - if val >= uint64(len(TxnFieldNames)) { - ops.errorf("invalid gtxn field: %d", val) - return - } - if gid > 255 { - ops.errorf("gtxn transaction index beyond 255: %d", gid) - return - } - ops.pending.WriteByte(0x33) - ops.pending.WriteByte(uint8(gid)) - ops.pending.WriteByte(uint8(val)) - ops.returns(TxnFieldTypes[val]) -} - -// Gtxna writes opcodes for loading an array field from the current transaction -func (ops *OpStream) Gtxna(gid, fieldNum uint64, arrayFieldIdx uint64) { - if fieldNum >= uint64(len(TxnFieldNames)) { - ops.errorf("invalid txn field: %d", fieldNum) - return - } - if gid > 255 { - ops.errorf("gtxna group index beyond 255: %d", gid) - return - } - if arrayFieldIdx > 255 { - ops.errorf("gtxna array index beyond 255: %d", arrayFieldIdx) - return - } - ops.pending.WriteByte(0x37) - ops.pending.WriteByte(uint8(gid)) - ops.pending.WriteByte(uint8(fieldNum)) - ops.pending.WriteByte(uint8(arrayFieldIdx)) - ops.returns(TxnFieldTypes[fieldNum]) -} - -// Gtxns writes opcodes for loading a field from the current transaction -func (ops *OpStream) Gtxns(fieldNum uint64) { - if fieldNum >= uint64(len(TxnFieldNames)) { - ops.errorf("invalid gtxns field: %d", fieldNum) - return - } - ops.pending.WriteByte(0x38) - ops.pending.WriteByte(uint8(fieldNum)) - 
ops.returns(TxnFieldTypes[fieldNum]) -} - -// Gtxnsa writes opcodes for loading an array field from the current transaction -func (ops *OpStream) Gtxnsa(fieldNum uint64, arrayFieldIdx uint64) { - if fieldNum >= uint64(len(TxnFieldNames)) { - ops.errorf("invalid gtxnsa field: %d", fieldNum) - return - } - if arrayFieldIdx > 255 { - ops.errorf("gtxnsa array index beyond 255: %d", arrayFieldIdx) - return - } - ops.pending.WriteByte(0x39) - ops.pending.WriteByte(uint8(fieldNum)) - ops.pending.WriteByte(uint8(arrayFieldIdx)) - ops.returns(TxnFieldTypes[fieldNum]) -} - -// Global writes opcodes for loading an evaluator-global field -func (ops *OpStream) Global(val GlobalField) { - ops.pending.WriteByte(0x32) - ops.pending.WriteByte(uint8(val)) - ops.trace("%s (%s)", GlobalFieldNames[val], GlobalFieldTypes[val].String()) - ops.returns(GlobalFieldTypes[val]) -} - -// AssetHolding writes opcodes for accessing data from AssetHolding -func (ops *OpStream) AssetHolding(val uint64) { - if val >= uint64(len(AssetHoldingFieldNames)) { - ops.errorf("invalid asset holding field: %d", val) - return - } - ops.pending.WriteByte(OpsByName[ops.Version]["asset_holding_get"].Opcode) - ops.pending.WriteByte(uint8(val)) - ops.returns(AssetHoldingFieldTypes[val], StackUint64) -} - -// AssetParams writes opcodes for accessing data from AssetParams -func (ops *OpStream) AssetParams(val uint64) { - if val >= uint64(len(AssetParamsFieldNames)) { - ops.errorf("invalid asset params field: %d", val) - return - } - ops.pending.WriteByte(OpsByName[ops.Version]["asset_params_get"].Opcode) - ops.pending.WriteByte(uint8(val)) - ops.returns(AssetParamsFieldTypes[val], StackUint64) -} - func assembleInt(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { return ops.error("int needs one argument") @@ -612,7 +471,7 @@ func assembleByte(ops *OpStream, spec *OpSpec, args []string) error { } func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error { - 
ops.pending.WriteByte(0x20) // intcblock + ops.pending.WriteByte(spec.Opcode) var scratch [binary.MaxVarintLen64]byte l := binary.PutUvarint(scratch[:], uint64(len(args))) ops.pending.Write(scratch[:l]) @@ -631,7 +490,7 @@ func assembleIntCBlock(ops *OpStream, spec *OpSpec, args []string) error { } func assembleByteCBlock(ops *OpStream, spec *OpSpec, args []string) error { - ops.pending.WriteByte(0x26) // bytecblock + ops.pending.WriteByte(spec.Opcode) bvals := make([][]byte, 0, len(args)) rest := args for len(rest) > 0 { @@ -682,8 +541,21 @@ func assembleArg(ops *OpStream, spec *OpSpec, args []string) error { if err != nil { return ops.error(err) } - ops.Arg(val) - return nil + altSpec := *spec + if val < 4 { + switch val { + case 0: + altSpec = OpsByName[ops.Version]["arg_0"] + case 1: + altSpec = OpsByName[ops.Version]["arg_1"] + case 2: + altSpec = OpsByName[ops.Version]["arg_2"] + case 3: + altSpec = OpsByName[ops.Version]["arg_3"] + } + args = []string{} + } + return asmDefault(ops, &altSpec, args) } func assembleBranch(ops *OpStream, spec *OpSpec, args []string) error { @@ -725,8 +597,9 @@ func assembleTxn(ops *OpStream, spec *OpSpec, args []string) error { if fs.version > ops.Version { return ops.errorf("field %s available in version %d. 
Missed #pragma version?", args[0], fs.version) } - val := fs.field - ops.Txn(uint64(val)) + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(fs.field)) + ops.returns(TxnFieldTypes[fs.field]) return nil } @@ -736,7 +609,8 @@ func assembleTxn2(ops *OpStream, spec *OpSpec, args []string) error { return assembleTxn(ops, spec, args) } if len(args) == 2 { - return assembleTxna(ops, spec, args) + txna := OpsByName[ops.Version]["txna"] + return assembleTxna(ops, &txna, args) } return ops.error("txn expects one or two arguments") } @@ -760,8 +634,14 @@ func assembleTxna(ops *OpStream, spec *OpSpec, args []string) error { if err != nil { return ops.error(err) } - fieldNum := fs.field - ops.Txna(uint64(fieldNum), uint64(arrayFieldIdx)) + if arrayFieldIdx > 255 { + return ops.errorf("txna array index beyond 255: %d", arrayFieldIdx) + } + + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(fs.field)) + ops.pending.WriteByte(uint8(arrayFieldIdx)) + ops.returns(TxnFieldTypes[fs.field]) return nil } @@ -769,10 +649,14 @@ func assembleGtxn(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 2 { return ops.error("gtxn expects two arguments") } - gtid, err := strconv.ParseUint(args[0], 0, 64) + slot, err := strconv.ParseUint(args[0], 0, 64) if err != nil { return ops.error(err) } + if slot > 255 { + return ops.errorf("gtxn transaction index beyond 255: %d", slot) + } + fs, ok := txnFieldSpecByName[args[1]] if !ok { return ops.errorf("gtxn unknown field: %v", args[1]) @@ -784,8 +668,11 @@ func assembleGtxn(ops *OpStream, spec *OpSpec, args []string) error { if fs.version > ops.Version { return ops.errorf("field %s available in version %d. 
Missed #pragma version?", args[1], fs.version) } - val := fs.field - ops.Gtxn(gtid, uint64(val)) + + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(slot)) + ops.pending.WriteByte(uint8(fs.field)) + ops.returns(TxnFieldTypes[fs.field]) return nil } @@ -794,7 +681,8 @@ func assembleGtxn2(ops *OpStream, spec *OpSpec, args []string) error { return assembleGtxn(ops, spec, args) } if len(args) == 3 { - return assembleGtxna(ops, spec, args) + gtxna := OpsByName[ops.Version]["gtxna"] + return assembleGtxna(ops, >xna, args) } return ops.error("gtxn expects two or three arguments") } @@ -803,10 +691,14 @@ func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 3 { return ops.error("gtxna expects three arguments") } - gtid, err := strconv.ParseUint(args[0], 0, 64) + slot, err := strconv.ParseUint(args[0], 0, 64) if err != nil { return ops.error(err) } + if slot > 255 { + return ops.errorf("gtxna group index beyond 255: %d", slot) + } + fs, ok := txnFieldSpecByName[args[1]] if !ok { return ops.errorf("gtxna unknown field: %v", args[1]) @@ -822,14 +714,22 @@ func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error { if err != nil { return ops.error(err) } - fieldNum := fs.field - ops.Gtxna(gtid, uint64(fieldNum), uint64(arrayFieldIdx)) + if arrayFieldIdx > 255 { + return ops.errorf("gtxna array index beyond 255: %d", arrayFieldIdx) + } + + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(slot)) + ops.pending.WriteByte(uint8(fs.field)) + ops.pending.WriteByte(uint8(arrayFieldIdx)) + ops.returns(TxnFieldTypes[fs.field]) return nil } func assembleGtxns(ops *OpStream, spec *OpSpec, args []string) error { if len(args) == 2 { - return assembleGtxnsa(ops, spec, args) + gtxnsa := OpsByName[ops.Version]["gtxnsa"] + return assembleGtxnsa(ops, >xnsa, args) } if len(args) != 1 { return ops.error("gtxns expects one or two immediate arguments") @@ -845,8 +745,10 @@ func assembleGtxns(ops *OpStream, spec *OpSpec, 
args []string) error { if fs.version > ops.Version { return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version) } - val := fs.field - ops.Gtxns(uint64(val)) + + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(fs.field)) + ops.returns(TxnFieldTypes[fs.field]) return nil } @@ -869,25 +771,34 @@ func assembleGtxnsa(ops *OpStream, spec *OpSpec, args []string) error { if err != nil { return ops.error(err) } - fieldNum := fs.field - ops.Gtxnsa(uint64(fieldNum), uint64(arrayFieldIdx)) + if arrayFieldIdx > 255 { + return ops.errorf("gtxnsa array index beyond 255: %d", arrayFieldIdx) + } + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(fs.field)) + ops.pending.WriteByte(uint8(arrayFieldIdx)) + ops.returns(TxnFieldTypes[fs.field]) return nil } func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != 1 { - ops.error("global expects one argument") - args = []string{GlobalFieldNames[0]} + return ops.error("global expects one argument") } fs, ok := globalFieldSpecByName[args[0]] if !ok { - ops.errorf("global unknown field: %v", args[0]) - fs, _ = globalFieldSpecByName[GlobalFieldNames[0]] + return ops.errorf("global unknown field: %v", args[0]) } if fs.version > ops.Version { + // no return here. we may as well continue to maintain typestack ops.errorf("global %s available in version %d. 
Missed #pragma version?", args[0], fs.version) } - ops.Global(fs.gfield) + + val := fs.gfield + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(val)) + ops.trace("%s (%s)", GlobalFieldNames[val], GlobalFieldTypes[val].String()) + ops.returns(GlobalFieldTypes[val]) return nil } @@ -899,7 +810,9 @@ func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error { if !ok { return ops.errorf("asset_holding_get unknown arg: %v", args[0]) } - ops.AssetHolding(val) + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(val)) + ops.returns(AssetHoldingFieldTypes[val], StackUint64) return nil } @@ -911,7 +824,9 @@ func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error { if !ok { return ops.errorf("asset_params_get unknown arg: %v", args[0]) } - ops.AssetParams(val) + ops.pending.WriteByte(spec.Opcode) + ops.pending.WriteByte(uint8(val)) + ops.returns(AssetParamsFieldTypes[val], StackUint64) return nil } @@ -920,7 +835,7 @@ type assembleFunc func(*OpStream, *OpSpec, []string) error // Basic assembly. Any extra bytes of opcode are encoded as byte immediates. func asmDefault(ops *OpStream, spec *OpSpec, args []string) error { if len(args) != spec.Details.Size-1 { - ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size) + ops.errorf("%s expects %d immediate arguments", spec.Name, spec.Details.Size-1) } ops.pending.WriteByte(spec.Opcode) for i := 0; i < spec.Details.Size-1; i++ { From cdf70e7f73a8f67baa4a31fa3f2767efeaf9e7d3 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 11 Mar 2021 11:59:25 -0500 Subject: [PATCH 040/215] updates per reviewer's request. 
--- ledger/acctupdates.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index c8c4522e56..1e7daef2ab 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -932,10 +932,16 @@ func (au *accountUpdates) initializeCaches(lastBalancesRound, lastestBlockRound, close(skipAccountCacheMessage) select { case <-writeAccountCacheMessageCompleted: - au.log.Infof("initializeCaches completed initializing account data caches") + if err == nil { + au.log.Infof("initializeCaches completed initializing account data caches") + } default: } }() + + // this goroutine logs a message once if the parent function have not completed in initializingAccountCachesMessageTimeout seconds. + // the message is important, since we're blocking on the ledger block database here, and we want to make sure that we log a message + // within the above timeout. go func() { select { case <-time.After(initializingAccountCachesMessageTimeout): From 604e52b8f16f6097d052ddc6e6ed4c680b4bb07e Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 11 Mar 2021 12:06:57 -0500 Subject: [PATCH 041/215] Present costs more succintly when they repeat. 
--- cmd/opdoc/opdoc.go | 12 ++++++--- data/transactions/logic/TEAL_opcodes.md | 9 +++---- data/transactions/logic/doc.go | 34 +++++++++++++++---------- data/transactions/logic/doc_test.go | 10 ++++---- 4 files changed, 37 insertions(+), 28 deletions(-) diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go index 3c1be04ce5..55d212ce38 100644 --- a/cmd/opdoc/opdoc.go +++ b/cmd/opdoc/opdoc.go @@ -119,7 +119,6 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) { if opextra != "" { ws = " " } - costs := logic.OpAllCosts(op.Name) fmt.Fprintf(out, "\n## %s%s\n\n- Opcode: 0x%02x%s%s\n", op.Name, immediateMarkdown(op), op.Opcode, ws, opextra) if op.Args == nil { fmt.Fprintf(out, "- Pops: _None_\n") @@ -148,13 +147,18 @@ func opToMarkdown(out io.Writer, op *logic.OpSpec) (err error) { } fmt.Fprintf(out, "- %s\n", logic.OpDoc(op.Name)) // if cost changed with versions print all of them + costs := logic.OpAllCosts(op.Name) if len(costs) > 1 { fmt.Fprintf(out, "- **Cost**:\n") - for v := 1; v < len(costs); v++ { - fmt.Fprintf(out, " - %d (LogicSigVersion = %d)\n", costs[v], v) + for _, cost := range costs { + if cost.From == cost.To { + fmt.Fprintf(out, " - %d (LogicSigVersion = %d)\n", cost.Cost, cost.To) + } else { + fmt.Fprintf(out, " - %d (%d <= LogicSigVersion <= %d)\n", cost.Cost, cost.From, cost.To) + } } } else { - cost := costs[0] + cost := costs[0].Cost if cost != 1 { fmt.Fprintf(out, "- **Cost**: %d\n", cost) } diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md index 7a3ab2e53c..fd92df4988 100644 --- a/data/transactions/logic/TEAL_opcodes.md +++ b/data/transactions/logic/TEAL_opcodes.md @@ -18,8 +18,7 @@ Ops have a 'cost' of 1 unless otherwise specified. - SHA256 hash of value X, yields [32]byte - **Cost**: - 7 (LogicSigVersion = 1) - - 35 (LogicSigVersion = 2) - - 35 (LogicSigVersion = 3) + - 35 (2 <= LogicSigVersion <= 3) ## keccak256 @@ -29,8 +28,7 @@ Ops have a 'cost' of 1 unless otherwise specified. 
- Keccak256 hash of value X, yields [32]byte - **Cost**: - 26 (LogicSigVersion = 1) - - 130 (LogicSigVersion = 2) - - 130 (LogicSigVersion = 3) + - 130 (2 <= LogicSigVersion <= 3) ## sha512_256 @@ -40,8 +38,7 @@ Ops have a 'cost' of 1 unless otherwise specified. - SHA512_256 hash of value X, yields [32]byte - **Cost**: - 9 (LogicSigVersion = 1) - - 45 (LogicSigVersion = 2) - - 45 (LogicSigVersion = 3) + - 45 (2 <= LogicSigVersion <= 3) ## ed25519verify diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 4da698f345..9a4941c576 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -226,21 +226,29 @@ var OpGroupList = []OpGroup{ {"State Access", []string{"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}}, } -// OpAllCosts returns an array of the relative cost score for an op by version. -// If all the costs are the same the array is single entry -// otherwise it has costs by op version -func OpAllCosts(opName string) []int { - cost := OpsByName[LogicVersion][opName].Details.Cost - costs := make([]int, LogicVersion+1) - isDifferent := false +type OpCost struct { + From int + To int + Cost int +} + +// OpAllCosts returns an array of the cost score for an op by version. +// Each entry indicates the cost over a range of versions, so if the +// cost has remained constant, there is only one result, otherwise +// each entry shows the cost for a consecutive range of versions, +// inclusive. 
+func OpAllCosts(opName string) []OpCost { + var costs []OpCost for v := 1; v <= LogicVersion; v++ { - costs[v] = OpsByName[v][opName].Details.Cost - if costs[v] > 0 && costs[v] != cost { - isDifferent = true + cost := OpsByName[v][opName].Details.Cost + if cost == 0 { + continue + } + if costs == nil || cost != costs[len(costs)-1].Cost { + costs = append(costs, OpCost{v, v, cost}) + } else { + costs[len(costs)-1].To = v } - } - if !isDifferent { - return []int{cost} } return costs diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go index 75568d2e08..1a9558255f 100644 --- a/data/transactions/logic/doc_test.go +++ b/data/transactions/logic/doc_test.go @@ -86,13 +86,13 @@ func TestOpDocExtra(t *testing.T) { func TestOpAllCosts(t *testing.T) { a := OpAllCosts("+") - require.Equal(t, 1, len(a)) - require.Equal(t, 1, a[0]) + require.Len(t, a, 1) + require.Equal(t, 1, a[0].Cost) a = OpAllCosts("sha256") - require.True(t, len(a) > 1) - for v := 1; v <= LogicVersion; v++ { - require.True(t, a[v] > 1) + require.Len(t, a, 2) + for _, cost := range a { + require.True(t, cost.Cost > 1) } } From e9de60e38977dd1f6d7d2c850336bdd2aff0c5bc Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Thu, 11 Mar 2021 13:43:22 -0500 Subject: [PATCH 042/215] allow struct field to be sync.Mutex if tagged `algofix:"allow sync.Mutex"` --- cmd/algofix/deadlock.go | 35 ++++++- cmd/algofix/deadlock_test.go | 176 +++++++++++++++++++++++++++++++++++ 2 files changed, 209 insertions(+), 2 deletions(-) create mode 100644 cmd/algofix/deadlock_test.go diff --git a/cmd/algofix/deadlock.go b/cmd/algofix/deadlock.go index aae724defd..4df1bdc301 100644 --- a/cmd/algofix/deadlock.go +++ b/cmd/algofix/deadlock.go @@ -18,6 +18,7 @@ package main import ( "go/ast" + "strings" ) func init() { @@ -37,7 +38,29 @@ func deadlock(f *ast.File) bool { } fixed := false + + var provisionalRewrites []*ast.SelectorExpr + walk(f, func(n interface{}) { + if f, ok := n.(*ast.Field); ok { + if f.Tag 
!= nil { + if strings.Contains(f.Tag.Value, `algofix:"allow sync.Mutex"`) { + exceptPos := f.Pos() + exceptEnd := f.End() + // cancel a provisional rewrite if it winds up being contained in a struct field decl with a tag to allow sync.Mutex + for i, e := range provisionalRewrites { + if e == nil { + continue + } + if exceptPos <= e.Pos() && e.End() <= exceptEnd { + provisionalRewrites[i] = nil + } + } + } + } + return + } + e, ok := n.(*ast.SelectorExpr) if !ok { return @@ -50,11 +73,19 @@ func deadlock(f *ast.File) bool { estr := pkg.Name + "." + e.Sel.Name if estr == "sync.Mutex" || estr == "sync.RWMutex" { - e.X = &ast.Ident{Name: "deadlock"} - fixed = true + provisionalRewrites = append(provisionalRewrites, e) } }) + // actually apply any provisional rewrites that weren't cancelled + for _, e := range provisionalRewrites { + if e == nil { + continue + } + e.X = &ast.Ident{Name: "deadlock"} + fixed = true + } + if fixed { addImport(f, "github.com/algorand/go-deadlock") } diff --git a/cmd/algofix/deadlock_test.go b/cmd/algofix/deadlock_test.go new file mode 100644 index 0000000000..f1f3aa1b04 --- /dev/null +++ b/cmd/algofix/deadlock_test.go @@ -0,0 +1,176 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package main + +import ( + "bytes" + "fmt" + "go/format" + "go/parser" + "go/token" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +const deadlockSimpleSrc = `package main + +import ( + "sync" +) + +func main() { + // lol wut? + var l sync.Mutex + var r sync.Mutex + var x sync.Mutex + + l.Lock() + defer l.Unlock() + r.Lock() + defer r.Unlock() + x.Lock() + defer x.Unlock() +} +` +const deadlockSimpleDest = `package main + +import ( + "github.com/algorand/go-deadlock" + "sync" +) + +func main() { + // lol wut? + var l deadlock.Mutex + var r deadlock.Mutex + var x deadlock.Mutex + + l.Lock() + defer l.Unlock() + r.Lock() + defer r.Unlock() + x.Lock() + defer x.Unlock() +} +` + +func tripleTickToBacktick(x string) string { + return strings.ReplaceAll(x, "'''", "`") +} + +const deadlockTestSrc = `package main + +import ( + "sync" +) + +type thing struct { + l sync.Mutex + r sync.Mutex '''algofix:"allow sync.Mutex"''' + x sync.Mutex +} + +func (t *thing) foo() { + t.l.Lock() + defer t.l.Unlock() + t.r.Lock() + defer t.r.Unlock() + t.x.Lock() + defer t.x.Unlock() +} + +func main() { + var t thing + t.foo() +} +` + +const deadlockTestFin = `package main + +import ( + "github.com/algorand/go-deadlock" + "sync" +) + +type thing struct { + l deadlock.Mutex + r sync.Mutex '''algofix:"allow sync.Mutex"''' + x deadlock.Mutex +} + +func (t *thing) foo() { + t.l.Lock() + defer t.l.Unlock() + t.r.Lock() + defer t.r.Unlock() + t.x.Lock() + defer t.x.Unlock() +} + +func main() { + var t thing + t.foo() +} +` + +func TestDeadlockRewrite(t *testing.T) { + t.Run("simple", func(t *testing.T) { testDeadlock(t, deadlockSimpleSrc, deadlockSimpleDest) }) + t.Run("onoff", func(t *testing.T) { testDeadlock(t, deadlockTestSrc, deadlockTestFin) }) +} + +func testGoFmt(fset *token.FileSet, node interface{}) (out string, err error) { + var buf bytes.Buffer + err = format.Node(&buf, fset, node) + if err == nil { + out = string(buf.Bytes()) + } + return +} + 
+func testDeadlock(t *testing.T, src, dest string) { + src = tripleTickToBacktick(src) + dest = tripleTickToBacktick(dest) + fset := token.NewFileSet() + filename := "testmain.go" + file, err := parser.ParseFile(fset, filename, src, parserMode) + require.NoError(t, err) + fixed := deadlock(file) + require.True(t, fixed) + src2, err := testGoFmt(fset, file) + require.NoError(t, err) + + // rinse, repeat? + newFile, err := parser.ParseFile(fset, filename, src2, parserMode) + require.NoError(t, err) + src3, err := testGoFmt(fset, newFile) + require.NoError(t, err) + + if string(src3) != dest { + fmt.Printf("===== %s orig =====\n", t.Name()) + fmt.Println(string(src)) + fmt.Printf("===== %s orig =====\n", t.Name()) + fmt.Printf("===== %s src2 =====\n", t.Name()) + fmt.Println(string(src2)) + fmt.Printf("===== %s src2 =====\n", t.Name()) + fmt.Printf("===== %s actual =====\n", t.Name()) + fmt.Println(string(src3)) + fmt.Printf("===== %s actual =====\n", t.Name()) + } + require.Equal(t, dest, string(src3)) +} From 3c3520625012d078684da7ecebce83cc25bdf12b Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 11 Mar 2021 14:35:24 -0500 Subject: [PATCH 043/215] Use map literals to simplify life --- cmd/opdoc/opdoc.go | 4 +- data/transactions/logic/doc.go | 539 +++++++++++++--------------- data/transactions/logic/doc_test.go | 17 +- data/transactions/logic/fields.go | 2 +- 4 files changed, 261 insertions(+), 301 deletions(-) diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go index 55d212ce38..657866b97a 100644 --- a/cmd/opdoc/opdoc.go +++ b/cmd/opdoc/opdoc.go @@ -50,7 +50,7 @@ func typeEnumTableMarkdown(out io.Writer) { fmt.Fprintf(out, "| Index | \"Type\" string | Description |\n") fmt.Fprintf(out, "| --- | --- | --- |\n") for i, name := range logic.TxnTypeNames { - fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescription(name)) + fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescriptions[name]) 
} out.Write([]byte("\n")) } @@ -69,7 +69,7 @@ func integerConstantsTableMarkdown(out io.Writer) { fmt.Fprintf(out, "| Value | Constant name | Description |\n") fmt.Fprintf(out, "| --- | --- | --- |\n") for i, name := range logic.TxnTypeNames { - fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescription(name)) + fmt.Fprintf(out, "| %d | %s | %s |\n", i, markdownTableEscape(name), logic.TypeNameDescriptions[name]) } out.Write([]byte("\n")) } diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 9a4941c576..907e6a293e 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -22,197 +22,195 @@ import ( "github.com/algorand/go-algorand/protocol" ) -type stringString struct { - a string - b string -} - -func stringStringListToMap(they []stringString) map[string]string { - out := make(map[string]string, len(they)) - for _, v := range they { - out[v.a] = v.b - } - return out -} - // short description of every op -var opDocList = []stringString{ - {"err", "Error. Panic immediately. This is primarily a fencepost against accidental zero bytes getting compiled into programs."}, - {"sha256", "SHA256 hash of value X, yields [32]byte"}, - {"keccak256", "Keccak256 hash of value X, yields [32]byte"}, - {"sha512_256", "SHA512_256 hash of value X, yields [32]byte"}, - {"ed25519verify", "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey => {0 or 1}"}, - {"+", "A plus B. Panic on overflow."}, - {"-", "A minus B. Panic if B > A."}, - {"/", "A divided by B. Panic if B == 0."}, - {"*", "A times B. 
Panic on overflow."}, - {"<", "A less than B => {0 or 1}"}, - {">", "A greater than B => {0 or 1}"}, - {"<=", "A less than or equal to B => {0 or 1}"}, - {">=", "A greater than or equal to B => {0 or 1}"}, - {"&&", "A is not zero and B is not zero => {0 or 1}"}, - {"||", "A is not zero or B is not zero => {0 or 1}"}, - {"==", "A is equal to B => {0 or 1}"}, - {"!=", "A is not equal to B => {0 or 1}"}, - {"!", "X == 0 yields 1; else 0"}, - {"len", "yields length of byte value X"}, - {"itob", "converts uint64 X to big endian bytes"}, - {"btoi", "converts bytes X as big endian to uint64"}, - {"%", "A modulo B. Panic if B == 0."}, - {"|", "A bitwise-or B"}, - {"&", "A bitwise-and B"}, - {"^", "A bitwise-xor B"}, - {"~", "bitwise invert value X"}, - {"mulw", "A times B out to 128-bit long result as low (top) and high uint64 values on the stack"}, - {"addw", "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack"}, - {"intcblock", "prepare block of uint64 constants for use by intc"}, - {"intc", "push Ith constant from intcblock to stack"}, - {"intc_0", "push constant 0 from intcblock to stack"}, - {"intc_1", "push constant 1 from intcblock to stack"}, - {"intc_2", "push constant 2 from intcblock to stack"}, - {"intc_3", "push constant 3 from intcblock to stack"}, - {"pushint", "push immediate UINT to the stack as an integer"}, - {"bytecblock", "prepare block of byte-array constants for use by bytec"}, - {"bytec", "push Ith constant from bytecblock to stack"}, - {"bytec_0", "push constant 0 from bytecblock to stack"}, - {"bytec_1", "push constant 1 from bytecblock to stack"}, - {"bytec_2", "push constant 2 from bytecblock to stack"}, - {"bytec_3", "push constant 3 from bytecblock to stack"}, - {"pushbytes", "push the following program bytes to the stack"}, - {"arg", "push Nth LogicSig argument to stack"}, - {"arg_0", "push LogicSig argument 0 to stack"}, - {"arg_1", "push LogicSig argument 1 to stack"}, - {"arg_2", "push LogicSig 
argument 2 to stack"}, - {"arg_3", "push LogicSig argument 3 to stack"}, - {"txn", "push field F of current transaction to stack"}, - {"gtxn", "push field F of the Tth transaction in the current group"}, - {"gtxns", "push field F of the Ath transaction in the current group"}, - {"txna", "push Ith value of the array field F of the current transaction"}, - {"gtxna", "push Ith value of the array field F from the Tth transaction in the current group"}, - {"gtxnsa", "push Ith value of the array field F from the Ath transaction in the current group"}, - {"global", "push value from globals to stack"}, - {"load", "copy a value from scratch space to the stack"}, - {"store", "pop a value from the stack and store to scratch space"}, - {"bnz", "branch to TARGET if value X is not zero"}, - {"bz", "branch to TARGET if value X is zero"}, - {"b", "branch unconditionally to TARGET"}, - {"return", "use last value on stack as success value; end"}, - {"pop", "discard value X from stack"}, - {"dup", "duplicate last value on stack"}, - {"dup2", "duplicate two last values on stack: A, B -> A, B, A, B"}, - {"dig", "push the Nth value from the top of the stack. dig 0 is equivalent to dup"}, - {"swap", "swaps two last values on stack: A, B -> B, A"}, - {"select", "selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A)"}, - {"concat", "pop two byte-arrays A and B and join them, push the result"}, - {"substring", "pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails"}, - {"substring3", "pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails"}, - {"getbit", "pop a target A (integer or byte-array), and index B. 
Push the Bth bit of A."}, - {"setbit", "pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result"}, - {"getbyte", "pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer"}, - {"setbyte", "pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result"}, - {"balance", "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted."}, - {"min_balance", "get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes."}, - {"app_opted_in", "check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1}"}, - {"app_local_get", "read from account specified by Txn.Accounts[A] from local state of the current application key B => value"}, - {"app_local_get_ex", "read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1]"}, - {"app_global_get", "read key A from global state of a current application => value"}, - {"app_global_get_ex", "read from application Txn.ForeignApps[A] global state key B => [*... 
stack*, value, 0 or 1]. A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app"}, - {"app_local_put", "write to account specified by Txn.Accounts[A] to local state of a current application key B with value C"}, - {"app_global_put", "write key A and value B to global state of the current application"}, - {"app_local_del", "delete from account specified by Txn.Accounts[A] local state key B of the current application"}, - {"app_global_del", "delete key A from a global state of the current application"}, - {"asset_holding_get", "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}"}, - {"asset_params_get", "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}"}, - {"assert", "immediately fail unless value X is a non-zero number"}, +var opDocByName = map[string]string{ + "err": "Error. Panic immediately. This is primarily a fencepost against accidental zero bytes getting compiled into programs.", + "sha256": "SHA256 hash of value X, yields [32]byte", + "keccak256": "Keccak256 hash of value X, yields [32]byte", + "sha512_256": "SHA512_256 hash of value X, yields [32]byte", + "ed25519verify": "for (data A, signature B, pubkey C) verify the signature of (\"ProgData\" || program_hash || data) against the pubkey => {0 or 1}", + "+": "A plus B. Panic on overflow.", + "-": "A minus B. Panic if B > A.", + "/": "A divided by B. Panic if B == 0.", + "*": "A times B. 
Panic on overflow.", + "<": "A less than B => {0 or 1}", + ">": "A greater than B => {0 or 1}", + "<=": "A less than or equal to B => {0 or 1}", + ">=": "A greater than or equal to B => {0 or 1}", + "&&": "A is not zero and B is not zero => {0 or 1}", + "||": "A is not zero or B is not zero => {0 or 1}", + "==": "A is equal to B => {0 or 1}", + "!=": "A is not equal to B => {0 or 1}", + "!": "X == 0 yields 1; else 0", + "len": "yields length of byte value X", + "itob": "converts uint64 X to big endian bytes", + "btoi": "converts bytes X as big endian to uint64", + "%": "A modulo B. Panic if B == 0.", + "|": "A bitwise-or B", + "&": "A bitwise-and B", + "^": "A bitwise-xor B", + "~": "bitwise invert value X", + "mulw": "A times B out to 128-bit long result as low (top) and high uint64 values on the stack", + "addw": "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack", + "intcblock": "prepare block of uint64 constants for use by intc", + "intc": "push Ith constant from intcblock to stack", + "intc_0": "push constant 0 from intcblock to stack", + "intc_1": "push constant 1 from intcblock to stack", + "intc_2": "push constant 2 from intcblock to stack", + "intc_3": "push constant 3 from intcblock to stack", + "pushint": "push immediate UINT to the stack as an integer", + "bytecblock": "prepare block of byte-array constants for use by bytec", + "bytec": "push Ith constant from bytecblock to stack", + "bytec_0": "push constant 0 from bytecblock to stack", + "bytec_1": "push constant 1 from bytecblock to stack", + "bytec_2": "push constant 2 from bytecblock to stack", + "bytec_3": "push constant 3 from bytecblock to stack", + "pushbytes": "push the following program bytes to the stack", + "arg": "push Nth LogicSig argument to stack", + "arg_0": "push LogicSig argument 0 to stack", + "arg_1": "push LogicSig argument 1 to stack", + "arg_2": "push LogicSig argument 2 to stack", + "arg_3": "push LogicSig argument 3 to stack", + "txn": 
"push field F of current transaction to stack", + "gtxn": "push field F of the Tth transaction in the current group", + "gtxns": "push field F of the Ath transaction in the current group", + "txna": "push Ith value of the array field F of the current transaction", + "gtxna": "push Ith value of the array field F from the Tth transaction in the current group", + "gtxnsa": "push Ith value of the array field F from the Ath transaction in the current group", + "global": "push value from globals to stack", + "load": "copy a value from scratch space to the stack", + "store": "pop a value from the stack and store to scratch space", + "bnz": "branch to TARGET if value X is not zero", + "bz": "branch to TARGET if value X is zero", + "b": "branch unconditionally to TARGET", + "return": "use last value on stack as success value; end", + "pop": "discard value X from stack", + "dup": "duplicate last value on stack", + "dup2": "duplicate two last values on stack: A, B -> A, B, A, B", + "dig": "push the Nth value from the top of the stack. dig 0 is equivalent to dup", + "swap": "swaps two last values on stack: A, B -> B, A", + "select": "selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A)", + "concat": "pop two byte-arrays A and B and join them, push the result", + "substring": "pop a byte-array A. For immediate values in 0..255 S and E: extract a range of bytes from A starting at S up to but not including E, push the substring result. If E < S, or either is larger than the array length, the program fails", + "substring3": "pop a byte-array A and two integers B and C. Extract a range of bytes from A starting at B up to but not including C, push the substring result. If C < B, or either is larger than the array length, the program fails", + "getbit": "pop a target A (integer or byte-array), and index B. Push the Bth bit of A.", + "setbit": "pop a target A, index B, and bit C. 
Set the Bth bit of A to C, and push the result", + "getbyte": "pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer", + "setbyte": "pop a byte-array A, integer B, and small integer C (between 0..255). Set the Bth byte of A to C, and push the result", + "balance": "get balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. The balance is observed after the effects of previous transactions in the group, and after the fee for the current transaction is deducted.", + "min_balance": "get minimum required balance for the requested account specified by Txn.Accounts[A] in microalgos. A is specified as an account index in the Accounts field of the ApplicationCall transaction, zero index means the sender. Required balance is affected by [ASA](https://developer.algorand.org/docs/features/asa/#assets-overview) and [App](https://developer.algorand.org/docs/features/asc1/stateful/#minimum-balance-requirement-for-a-smart-contract) usage. When creating or opting into an app, the minimum balance grows before the app code runs, therefore the increase is visible there. When deleting or closing out, the minimum balance decreases after the app executes.", + "app_opted_in": "check if account specified by Txn.Accounts[A] opted in for the application B => {0 or 1}", + "app_local_get": "read from account specified by Txn.Accounts[A] from local state of the current application key B => value", + "app_local_get_ex": "read from account specified by Txn.Accounts[A] from local state of the application B key C => [*... stack*, value, 0 or 1]", + "app_global_get": "read key A from global state of a current application => value", + "app_global_get_ex": "read from application Txn.ForeignApps[A] global state key B => [*... stack*, value, 0 or 1]. 
A is specified as an account index in the ForeignApps field of the ApplicationCall transaction, zero index means this app", + "app_local_put": "write to account specified by Txn.Accounts[A] to local state of a current application key B with value C", + "app_global_put": "write key A and value B to global state of the current application", + "app_local_del": "delete from account specified by Txn.Accounts[A] local state key B of the current application", + "app_global_del": "delete key A from a global state of the current application", + "asset_holding_get": "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}", + "asset_params_get": "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}", + "assert": "immediately fail unless value X is a non-zero number", } -var opDocByName map[string]string - // OpDoc returns a description of the op func OpDoc(opName string) string { - if opDocByName == nil { - opDocByName = stringStringListToMap(opDocList) - } return opDocByName[opName] } // notes on immediate bytes following the opcode -var opcodeImmediateNoteList = []stringString{ - {"intcblock", "{varuint length} [{varuint value}, ...]"}, - {"intc", "{uint8 int constant index}"}, - {"pushint", "{varuint int}"}, - {"bytecblock", "{varuint length} [({varuint value length} bytes), ...]"}, - {"bytec", "{uint8 byte constant index}"}, - {"pushbytes", "{varuint length} {bytes}"}, - {"arg", "{uint8 arg index N}"}, - {"txn", "{uint8 transaction field index}"}, - {"gtxn", "{uint8 transaction group index} {uint8 transaction field index}"}, - {"gtxns", "{uint8 transaction field index}"}, - {"txna", "{uint8 transaction field index} {uint8 transaction field array index}"}, - {"gtxna", "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}"}, - {"gtxnsa", "{uint8 transaction field index} {uint8 transaction field array index}"}, - {"global", "{uint8 global 
field index}"}, - {"bnz", "{0..0x7fff forward branch offset, big endian}"}, - {"bz", "{0..0x7fff forward branch offset, big endian}"}, - {"b", "{0..0x7fff forward branch offset, big endian}"}, - {"load", "{uint8 position in scratch space to load from}"}, - {"store", "{uint8 position in scratch space to store to}"}, - {"substring", "{uint8 start position} {uint8 end position}"}, - {"dig", "{uint8 depth}"}, - {"asset_holding_get", "{uint8 asset holding field index}"}, - {"asset_params_get", "{uint8 asset params field index}"}, +var opcodeImmediateNote = map[string]string{ + "intcblock": "{varuint length} [{varuint value}, ...]", + "intc": "{uint8 int constant index}", + "pushint": "{varuint int}", + "bytecblock": "{varuint length} [({varuint value length} bytes), ...]", + "bytec": "{uint8 byte constant index}", + "pushbytes": "{varuint length} {bytes}", + "arg": "{uint8 arg index N}", + "txn": "{uint8 transaction field index}", + "gtxn": "{uint8 transaction group index} {uint8 transaction field index}", + "gtxns": "{uint8 transaction field index}", + "txna": "{uint8 transaction field index} {uint8 transaction field array index}", + "gtxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}", + "gtxnsa": "{uint8 transaction field index} {uint8 transaction field array index}", + "global": "{uint8 global field index}", + "bnz": "{0..0x7fff forward branch offset, big endian}", + "bz": "{0..0x7fff forward branch offset, big endian}", + "b": "{0..0x7fff forward branch offset, big endian}", + "load": "{uint8 position in scratch space to load from}", + "store": "{uint8 position in scratch space to store to}", + "substring": "{uint8 start position} {uint8 end position}", + "dig": "{uint8 depth}", + "asset_holding_get": "{uint8 asset holding field index}", + "asset_params_get": "{uint8 asset params field index}", +} +var opcodeImmediateNotes = map[string]string{ + "intcblock": "{varuint length} [{varuint value}, ...]", + 
"intc": "{uint8 int constant index}", + "pushint": "{varuint int}", + "bytecblock": "{varuint length} [({varuint value length} bytes), ...]", + "bytec": "{uint8 byte constant index}", + "pushbytes": "{varuint length} {bytes}", + "arg": "{uint8 arg index N}", + "txn": "{uint8 transaction field index}", + "gtxn": "{uint8 transaction group index} {uint8 transaction field index}", + "gtxns": "{uint8 transaction field index}", + "txna": "{uint8 transaction field index} {uint8 transaction field array index}", + "gtxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}", + "gtxnsa": "{uint8 transaction field index} {uint8 transaction field array index}", + "global": "{uint8 global field index}", + "bnz": "{0..0x7fff forward branch offset, big endian}", + "bz": "{0..0x7fff forward branch offset, big endian}", + "b": "{0..0x7fff forward branch offset, big endian}", + "load": "{uint8 position in scratch space to load from}", + "store": "{uint8 position in scratch space to store to}", + "substring": "{uint8 start position} {uint8 end position}", + "dig": "{uint8 depth}", + "asset_holding_get": "{uint8 asset holding field index}", + "asset_params_get": "{uint8 asset params field index}", } -var opcodeImmediateNotes map[string]string // OpImmediateNote returns a short string about immediate data which follows the op byte func OpImmediateNote(opName string) string { - if opcodeImmediateNotes == nil { - opcodeImmediateNotes = stringStringListToMap(opcodeImmediateNoteList) - } return opcodeImmediateNotes[opName] } // further documentation on the function of the opcode -var opDocExtraList = []stringString{ - {"ed25519verify", "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack."}, - {"bnz", "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes 
which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)"}, - {"bz", "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`."}, - {"b", "See `bnz` for details on how branches work. `b` always jumps to the offset."}, - {"intcblock", "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script."}, - {"bytecblock", "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script."}, - {"*", "Overflow is an error condition which halts execution and fails the transaction. 
Full precision is available from `mulw`."}, - {"+", "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`."}, - {"txn", "FirstValidTime causes the program to fail. The field is reserved for future use."}, - {"gtxn", "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`."}, - {"gtxns", "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction."}, - {"btoi", "`btoi` panics if the input is longer than 8 bytes."}, - {"concat", "`concat` panics if the result would be greater than 4096 bytes."}, - {"getbit", "see explanation of bit ordering in setbit"}, - {"setbit", "bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins in the first bytes of a byte-string (as seen in getbyte and substring). Setting bits 0 through 11 to 1 in a 4 byte-array of 0s yields `byte 0xfff00000`"}, - {"app_opted_in", "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise."}, - {"app_local_get", "params: account index, state key. Return: value. The value is zero (of type uint64) if the key does not exist."}, - {"app_local_get_ex", "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist."}, - {"app_global_get_ex", "params: application index, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist."}, - {"app_global_get", "params: state key. Return: value. 
The value is zero (of type uint64) if the key does not exist."}, - {"app_local_put", "params: account index, state key, value."}, - {"app_local_del", "params: account index, state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)"}, - {"app_global_del", "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)"}, - {"asset_holding_get", "params: account index, asset id. Return: did_exist flag (1 if exist and 0 otherwise), value."}, - {"asset_params_get", "params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 otherwise), value."}, +var opDocExtras = map[string]string{ + "ed25519verify": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.", + "bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. 
This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)", + "bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.", + "b": "See `bnz` for details on how branches work. `b` always jumps to the offset.", + "intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.", + "bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.", + "*": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.", + "+": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.", + "txn": "FirstValidTime causes the program to fail. The field is reserved for future use.", + "gtxn": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.", + "gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. 
gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.", + "btoi": "`btoi` panics if the input is longer than 8 bytes.", + "concat": "`concat` panics if the result would be greater than 4096 bytes.", + "getbit": "see explanation of bit ordering in setbit", + "setbit": "bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins in the first bytes of a byte-string (as seen in getbyte and substring). Setting bits 0 through 11 to 1 in a 4 byte-array of 0s yields `byte 0xfff00000`", + "app_opted_in": "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise.", + "app_local_get": "params: account index, state key. Return: value. The value is zero (of type uint64) if the key does not exist.", + "app_local_get_ex": "params: account index, application id, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", + "app_global_get_ex": "params: application index, state key. Return: did_exist flag (top of the stack, 1 if exist and 0 otherwise), value. The value is zero (of type uint64) if the key does not exist.", + "app_global_get": "params: state key. Return: value. The value is zero (of type uint64) if the key does not exist.", + "app_local_put": "params: account index, state key, value.", + "app_local_del": "params: account index, state key.\n\nDeleting a key which is already absent has no effect on the application local state. (In particular, it does _not_ cause the program to fail.)", + "app_global_del": "params: state key.\n\nDeleting a key which is already absent has no effect on the application global state. (In particular, it does _not_ cause the program to fail.)", + "asset_holding_get": "params: account index, asset id. 
Return: did_exist flag (1 if exist and 0 otherwise), value.", + "asset_params_get": "params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 otherwise), value.", } -var opDocExtras map[string]string - // OpDocExtra returns extra documentation text about an op func OpDocExtra(opName string) string { - if opDocExtras == nil { - opDocExtras = stringStringListToMap(opDocExtraList) - } return opDocExtras[opName] } // OpGroup is a grouping of ops for documentation purposes. -// e.g. "Arithmetic", ["+", "-", ...] +// e.g. "Arithmetic", ["+": "-", ...] type OpGroup struct { GroupName string Ops []string @@ -254,30 +252,19 @@ func OpAllCosts(opName string) []OpCost { return costs } -// see assembler.go TxnTypeNames -// also used to parse symbolic constants for `int` -var typeEnumDescriptions = []stringString{ - {string(protocol.UnknownTx), "Unknown type. Invalid transaction"}, - {string(protocol.PaymentTx), "Payment"}, - {string(protocol.KeyRegistrationTx), "KeyRegistration"}, - {string(protocol.AssetConfigTx), "AssetConfig"}, - {string(protocol.AssetTransferTx), "AssetTransfer"}, - {string(protocol.AssetFreezeTx), "AssetFreeze"}, - {string(protocol.ApplicationCallTx), "ApplicationCall"}, +// TypeNameDescription contains extra description about a low level +// protocol transaction Type string, and provide a friendlier type +// constant name in assembler. +var TypeNameDescriptions = map[string]string{ + string(protocol.UnknownTx): "Unknown type. 
Invalid transaction", + string(protocol.PaymentTx): "Payment", + string(protocol.KeyRegistrationTx): "KeyRegistration", + string(protocol.AssetConfigTx): "AssetConfig", + string(protocol.AssetTransferTx): "AssetTransfer", + string(protocol.AssetFreezeTx): "AssetFreeze", + string(protocol.ApplicationCallTx): "ApplicationCall", } -// TypeNameDescription returns extra description about a low level protocol transaction Type string -func TypeNameDescription(typeName string) string { - for _, ted := range typeEnumDescriptions { - if typeName == ted.a { - return ted.b - } - } - return "invalid type name" -} - -// see assembler.go TxnTypeNames -// also used to parse symbolic constants for `int` var onCompletionDescriptions = map[OnCompletionConstType]string{ NoOp: "Only execute the `ApprovalProgram` associated with this application ID, with no additional effects.", OptIn: "Before executing the `ApprovalProgram`, allocate local state for this application into the sender's account data.", @@ -299,85 +286,79 @@ func OnCompletionDescription(value uint64) string { // OnCompletionPreamble describes what the OnCompletion constants represent. const OnCompletionPreamble = "An application transaction must indicate the action to be taken following the execution of its approvalProgram or clearStateProgram. The constants below describe the available actions." -var txnFieldDocList = []stringString{ - {"Sender", "32 byte address"}, - {"Fee", "micro-Algos"}, - {"FirstValid", "round number"}, - {"FirstValidTime", "Causes program to fail; reserved for future use"}, - {"LastValid", "round number"}, - {"Receiver", "32 byte address"}, - {"Amount", "micro-Algos"}, - {"CloseRemainderTo", "32 byte address"}, - {"VotePK", "32 byte address"}, - {"SelectionPK", "32 byte address"}, - //{"VoteFirst", ""}, - //{"VoteLast", ""}, - {"TypeEnum", "See table below"}, - {"XferAsset", "Asset ID"}, - {"AssetAmount", "value in Asset's units"}, - {"AssetSender", "32 byte address. 
Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset."}, - {"AssetReceiver", "32 byte address"}, - {"AssetCloseTo", "32 byte address"}, - {"GroupIndex", "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1"}, - {"TxID", "The computed ID for this transaction. 32 bytes."}, - {"ApplicationID", "ApplicationID from ApplicationCall transaction"}, - {"OnCompletion", "ApplicationCall transaction on completion action"}, - {"ApplicationArgs", "Arguments passed to the application in the ApplicationCall transaction"}, - {"NumAppArgs", "Number of ApplicationArgs"}, - {"Accounts", "Accounts listed in the ApplicationCall transaction"}, - {"NumAccounts", "Number of Accounts"}, - {"Assets", "Foreign Assets listed in the ApplicationCall transaction"}, - {"NumAssets", "Number of Assets"}, - {"Applications", "Foreign Apps listed in the ApplicationCall transaction"}, - {"NumApplications", "Number of Applications"}, - {"GlobalNumUint", "Number of global state integers in ApplicationCall"}, - {"GlobalNumByteSlice", "Number of global state byteslices in ApplicationCall"}, - {"LocalNumUint", "Number of local state integers in ApplicationCall"}, - {"LocalNumByteSlice", "Number of local state byteslices in ApplicationCall"}, - {"ApprovalProgram", "Approval program"}, - {"ClearStateProgram", "Clear state program"}, - {"RekeyTo", "32 byte Sender's new AuthAddr"}, - {"ConfigAsset", "Asset ID in asset config transaction"}, - {"ConfigAssetTotal", "Total number of units of this asset created"}, - {"ConfigAssetDecimals", "Number of digits to display after the decimal place when displaying the asset"}, - {"ConfigAssetDefaultFrozen", "Whether the asset's slots are frozen by default or not, 0 or 1"}, - {"ConfigAssetUnitName", "Unit name of the asset"}, - {"ConfigAssetName", "The asset name"}, - {"ConfigAssetURL", "URL"}, - {"ConfigAssetMetadataHash", "32 byte commitment to 
some unspecified asset metadata"}, - {"ConfigAssetManager", "32 byte address"}, - {"ConfigAssetReserve", "32 byte address"}, - {"ConfigAssetFreeze", "32 byte address"}, - {"ConfigAssetClawback", "32 byte address"}, - {"FreezeAsset", "Asset ID being frozen or un-frozen"}, - {"FreezeAssetAccount", "32 byte address of the account whose asset slot is being frozen or un-frozen"}, - {"FreezeAssetFrozen", "The new frozen value, 0 or 1"}, +var txnFieldDocs = map[string]string{ + "Sender": "32 byte address", + "Fee": "micro-Algos", + "FirstValid": "round number", + "FirstValidTime": "Causes program to fail; reserved for future use", + "LastValid": "round number", + "Receiver": "32 byte address", + "Amount": "micro-Algos", + "CloseRemainderTo": "32 byte address", + "VotePK": "32 byte address", + "SelectionPK": "32 byte address", + //"VoteFirst": "", + //"VoteLast": "", + "TypeEnum": "See table below", + "XferAsset": "Asset ID", + "AssetAmount": "value in Asset's units", + "AssetSender": "32 byte address. Causes clawback of all value of asset from AssetSender if Sender is the Clawback address of the asset.", + "AssetReceiver": "32 byte address", + "AssetCloseTo": "32 byte address", + "GroupIndex": "Position of this transaction within an atomic transaction group. A stand-alone transaction is implicitly element 0 in a group of 1", + "TxID": "The computed ID for this transaction. 
32 bytes.", + "ApplicationID": "ApplicationID from ApplicationCall transaction", + "OnCompletion": "ApplicationCall transaction on completion action", + "ApplicationArgs": "Arguments passed to the application in the ApplicationCall transaction", + "NumAppArgs": "Number of ApplicationArgs", + "Accounts": "Accounts listed in the ApplicationCall transaction", + "NumAccounts": "Number of Accounts", + "Assets": "Foreign Assets listed in the ApplicationCall transaction", + "NumAssets": "Number of Assets", + "Applications": "Foreign Apps listed in the ApplicationCall transaction", + "NumApplications": "Number of Applications", + "GlobalNumUint": "Number of global state integers in ApplicationCall", + "GlobalNumByteSlice": "Number of global state byteslices in ApplicationCall", + "LocalNumUint": "Number of local state integers in ApplicationCall", + "LocalNumByteSlice": "Number of local state byteslices in ApplicationCall", + "ApprovalProgram": "Approval program", + "ClearStateProgram": "Clear state program", + "RekeyTo": "32 byte Sender's new AuthAddr", + "ConfigAsset": "Asset ID in asset config transaction", + "ConfigAssetTotal": "Total number of units of this asset created", + "ConfigAssetDecimals": "Number of digits to display after the decimal place when displaying the asset", + "ConfigAssetDefaultFrozen": "Whether the asset's slots are frozen by default or not, 0 or 1", + "ConfigAssetUnitName": "Unit name of the asset", + "ConfigAssetName": "The asset name", + "ConfigAssetURL": "URL", + "ConfigAssetMetadataHash": "32 byte commitment to some unspecified asset metadata", + "ConfigAssetManager": "32 byte address", + "ConfigAssetReserve": "32 byte address", + "ConfigAssetFreeze": "32 byte address", + "ConfigAssetClawback": "32 byte address", + "FreezeAsset": "Asset ID being frozen or un-frozen", + "FreezeAssetAccount": "32 byte address of the account whose asset slot is being frozen or un-frozen", + "FreezeAssetFrozen": "The new frozen value, 0 or 1", } -// TxnFieldDocs 
are notes on fields available by `txn` and `gtxn` -var txnFieldDocs map[string]string - // TxnFieldDocs are notes on fields available by `txn` and `gtxn` with extra versioning info if any func TxnFieldDocs() map[string]string { return fieldsDocWithExtra(txnFieldDocs, txnFieldSpecByName) } -var globalFieldDocList = []stringString{ - {"MinTxnFee", "micro Algos"}, - {"MinBalance", "micro Algos"}, - {"MaxTxnLife", "rounds"}, - {"ZeroAddress", "32 byte address of all zero bytes"}, - {"GroupSize", "Number of transactions in this atomic transaction group. At least 1"}, - {"LogicSigVersion", "Maximum supported TEAL version"}, - {"Round", "Current round number"}, - {"LatestTimestamp", "Last confirmed block UNIX timestamp. Fails if negative"}, - {"CurrentApplicationID", "ID of current application executing. Fails if no such application is executing"}, - {"CreatorAddress", "Address of the creator of the current application. Fails if no such application is executing"}, +var globalFieldDocs = map[string]string{ + "MinTxnFee": "micro Algos", + "MinBalance": "micro Algos", + "MaxTxnLife": "rounds", + "ZeroAddress": "32 byte address of all zero bytes", + "GroupSize": "Number of transactions in this atomic transaction group. At least 1", + "LogicSigVersion": "Maximum supported TEAL version", + "Round": "Current round number", + "LatestTimestamp": "Last confirmed block UNIX timestamp. Fails if negative", + "CurrentApplicationID": "ID of current application executing. Fails if no such application is executing", + "CreatorAddress": "Address of the creator of the current application. 
Fails if no such application is executing", } -// globalFieldDocs are notes on fields available in `global` -var globalFieldDocs map[string]string - // GlobalFieldDocs are notes on fields available in `global` with extra versioning info if any func GlobalFieldDocs() map[string]string { return fieldsDocWithExtra(globalFieldDocs, globalFieldSpecByName) @@ -406,34 +387,22 @@ func fieldsDocWithExtra(source map[string]string, ex extractor) map[string]strin return result } -var assetHoldingFieldDocList = []stringString{ - {"AssetBalance", "Amount of the asset unit held by this account"}, - {"AssetFrozen", "Is the asset frozen or not"}, -} - -// AssetHoldingFieldDocs are notes on fields available in `asset_holding_get` -var AssetHoldingFieldDocs map[string]string - -var assetParamsFieldDocList = []stringString{ - {"AssetTotal", "Total number of units of this asset"}, - {"AssetDecimals", "See AssetParams.Decimals"}, - {"AssetDefaultFrozen", "Frozen by default or not"}, - {"AssetUnitName", "Asset unit name"}, - {"AssetName", "Asset name"}, - {"AssetURL", "URL with additional info about the asset"}, - {"AssetMetadataHash", "Arbitrary commitment"}, - {"AssetManager", "Manager commitment"}, - {"AssetReserve", "Reserve address"}, - {"AssetFreeze", "Freeze address"}, - {"AssetClawback", "Clawback address"}, +var AssetHoldingFieldDocs = map[string]string{ + "AssetBalance": "Amount of the asset unit held by this account", + "AssetFrozen": "Is the asset frozen or not", } // AssetParamsFieldDocs are notes on fields available in `asset_params_get` -var AssetParamsFieldDocs map[string]string - -func init() { - txnFieldDocs = stringStringListToMap(txnFieldDocList) - globalFieldDocs = stringStringListToMap(globalFieldDocList) - AssetHoldingFieldDocs = stringStringListToMap(assetHoldingFieldDocList) - AssetParamsFieldDocs = stringStringListToMap(assetParamsFieldDocList) +var AssetParamsFieldDocs = map[string]string{ + "AssetTotal": "Total number of units of this asset", + 
"AssetDecimals": "See AssetParams.Decimals", + "AssetDefaultFrozen": "Frozen by default or not", + "AssetUnitName": "Asset unit name", + "AssetName": "Asset name", + "AssetURL": "URL with additional info about the asset", + "AssetMetadataHash": "Arbitrary commitment", + "AssetManager": "Manager commitment", + "AssetReserve": "Reserve address", + "AssetFreeze": "Freeze address", + "AssetClawback": "Clawback address", } diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go index 1a9558255f..343dc69f4f 100644 --- a/data/transactions/logic/doc_test.go +++ b/data/transactions/logic/doc_test.go @@ -27,12 +27,12 @@ func TestOpDocs(t *testing.T) { for _, op := range OpSpecs { opsSeen[op.Name] = false } - for _, od := range opDocList { - _, exists := opsSeen[od.a] + for name, _ := range opDocByName { + _, exists := opsSeen[name] if !exists { - t.Errorf("error: doc for op %#v that does not exist in OpSpecs", od.a) + t.Errorf("error: doc for op %#v that does not exist in OpSpecs", name) } - opsSeen[od.a] = true + opsSeen[name] = true } for op, seen := range opsSeen { if !seen { @@ -96,15 +96,6 @@ func TestOpAllCosts(t *testing.T) { } } -func TestTypeNameDescription(t *testing.T) { - require.Equal(t, len(TxnTypeNames), len(typeEnumDescriptions)) - for i, a := range TxnTypeNames { - b := TypeNameDescription(a) - require.Equal(t, b, typeEnumDescriptions[i].b) - } - require.Equal(t, "invalid type name", TypeNameDescription("invalid type name")) -} - func TestOnCompletionDescription(t *testing.T) { desc := OnCompletionDescription(0) require.Equal(t, "Only execute the `ApprovalProgram` associated with this application ID, with no additional effects.", desc) diff --git a/data/transactions/logic/fields.go b/data/transactions/logic/fields.go index eb3b956f19..41aecb4b9e 100644 --- a/data/transactions/logic/fields.go +++ b/data/transactions/logic/fields.go @@ -518,7 +518,7 @@ func init() { txnTypeConstToUint64 = make(map[string]uint64, 
len(TxnTypeNames)) for tt, v := range txnTypeIndexes { - symbol := TypeNameDescription(tt) + symbol := TypeNameDescriptions[tt] txnTypeConstToUint64[symbol] = v } From 7776ece14f00be34516a2af8d78b5aa931577385 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Thu, 11 Mar 2021 16:02:59 -0500 Subject: [PATCH 044/215] Typo-ish cleanups --- data/transactions/logic/doc.go | 26 -------------------------- data/transactions/logic/opcodes.go | 4 ++-- 2 files changed, 2 insertions(+), 28 deletions(-) diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 907e6a293e..6e3c757fbb 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -118,32 +118,6 @@ func OpDoc(opName string) string { return opDocByName[opName] } -// notes on immediate bytes following the opcode -var opcodeImmediateNote = map[string]string{ - "intcblock": "{varuint length} [{varuint value}, ...]", - "intc": "{uint8 int constant index}", - "pushint": "{varuint int}", - "bytecblock": "{varuint length} [({varuint value length} bytes), ...]", - "bytec": "{uint8 byte constant index}", - "pushbytes": "{varuint length} {bytes}", - "arg": "{uint8 arg index N}", - "txn": "{uint8 transaction field index}", - "gtxn": "{uint8 transaction group index} {uint8 transaction field index}", - "gtxns": "{uint8 transaction field index}", - "txna": "{uint8 transaction field index} {uint8 transaction field array index}", - "gtxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}", - "gtxnsa": "{uint8 transaction field index} {uint8 transaction field array index}", - "global": "{uint8 global field index}", - "bnz": "{0..0x7fff forward branch offset, big endian}", - "bz": "{0..0x7fff forward branch offset, big endian}", - "b": "{0..0x7fff forward branch offset, big endian}", - "load": "{uint8 position in scratch space to load from}", - "store": "{uint8 position in scratch space to store to}", - "substring": "{uint8 start 
position} {uint8 end position}", - "dig": "{uint8 depth}", - "asset_holding_get": "{uint8 asset holding field index}", - "asset_params_get": "{uint8 asset params field index}", -} var opcodeImmediateNotes = map[string]string{ "intcblock": "{varuint length} [{varuint value}, ...]", "intc": "{uint8 int constant index}", diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index ffe07d771f..641dbd4aba 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -51,7 +51,7 @@ func costly(cost int) opDetails { func immediates(name string, rest ...string) opDetails { num := 1 + len(rest) - immediates := make([]immediate, num, num) + immediates := make([]immediate, num) immediates[0] = immediate{name, immByte} for i, n := range rest { immediates[i+1] = immediate{n, immByte} @@ -112,7 +112,7 @@ var anyIntInt = StackTypes{StackAny, StackUint64, StackUint64} // Any changes should be reflected in README_in.md which serves as the language spec. 
// // Note: assembly can specialize an Any return type if known at -// assembly-time, with ops.tspecify() +// assembly-time, with ops.returns() var OpSpecs = []OpSpec{ {0x00, "err", opErr, asmDefault, disDefault, nil, nil, 1, modeAny, opDefault}, {0x01, "sha256", opSHA256, asmDefault, disDefault, oneBytes, oneBytes, 1, modeAny, costly(7)}, From 8d7fce255316c41d4836f8e302c7c2211df08e0b Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 11 Mar 2021 21:06:17 -0500 Subject: [PATCH 045/215] Add DisableLocalhostConnectionRateLimit to config --- config/config.go | 7 ++- config/local_defaults.go | 3 +- installer/config.json.example | 3 +- network/requestTracker.go | 20 ++++++- network/requestTracker_test.go | 19 +++--- test/testdata/configs/config-v16.json | 85 +++++++++++++++++++++++++++ 6 files changed, 124 insertions(+), 13 deletions(-) create mode 100644 test/testdata/configs/config-v16.json diff --git a/config/config.go b/config/config.go index 4c49346813..92aa673deb 100644 --- a/config/config.go +++ b/config/config.go @@ -63,7 +63,7 @@ type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. This field tag must be updated any time we add a new version. 
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15"` + Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16"` // environmental (may be overridden) // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks @@ -372,6 +372,11 @@ type Local struct { // On networks that doesn't have archive servers, this becomes a no-op, as the catchup service would have no // archive server to pick from, and therefore automatically selects one of the relay nodes. EnableCatchupFromArchiveServers bool `version[15]:"false"` + + // DisableLocalhostConnectionRateLimit controls whether the incoming connection rate limit would apply for + // connections that are originating from the local machine. Setting this to "true", allow to create large + // local-machine networks that won't trip the incoming connection limit observed by relays. + DisableLocalhostConnectionRateLimit bool `version[16]:"true"` } // Filenames of config files within the configdir (e.g. 
~/.algorand) diff --git a/config/local_defaults.go b/config/local_defaults.go index c62985f308..4bf1b8f140 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -20,7 +20,7 @@ package config var defaultLocal = Local{ - Version: 15, + Version: 16, AccountsRebuildSynchronousMode: 1, AnnounceParticipationKey: true, Archival: false, @@ -41,6 +41,7 @@ var defaultLocal = Local{ DNSBootstrapID: ".algorand.network", DNSSecurityFlags: 1, DeadlockDetection: 0, + DisableLocalhostConnectionRateLimit: true, DisableOutgoingConnectionThrottling: false, EnableAgreementReporting: false, EnableAgreementTimeMetrics: false, diff --git a/installer/config.json.example b/installer/config.json.example index 0c216be644..39c96b11a6 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -1,5 +1,5 @@ { - "Version": 15, + "Version": 16, "AccountsRebuildSynchronousMode": 1, "AnnounceParticipationKey": true, "Archival": false, @@ -20,6 +20,7 @@ "DNSBootstrapID": ".algorand.network", "DNSSecurityFlags": 1, "DeadlockDetection": 0, + "DisableLocalhostConnectionRateLimit": true, "DisableOutgoingConnectionThrottling": false, "EnableAgreementReporting": false, "EnableAgreementTimeMetrics": false, diff --git a/network/requestTracker.go b/network/requestTracker.go index 2978f83bad..724dfd2a99 100644 --- a/network/requestTracker.go +++ b/network/requestTracker.go @@ -285,8 +285,11 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) { rt.hostRequests.pruneRequests(rateLimitingWindowStartTime) originConnections := rt.hostRequests.countOriginConnections(trackerRequest.remoteHost, rateLimitingWindowStartTime) + remoteHostIsNonLocal := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackerRequest.remoteHost)) + connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 + // check the number of connections - if originConnections > rt.config.ConnectionsRateLimitingCount && 
rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 { + if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && remoteHostIsNonLocal { rt.hostRequestsMu.Unlock() networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_tcp_rate_limit"}) rt.log.With("connection", "tcp").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate") @@ -448,7 +451,10 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http. delete(rt.httpConnections, localAddr) }() - if originConnections > rt.config.ConnectionsRateLimitingCount && rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 { + remoteHostIsNonLocal := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackedRequest.remoteHost)) + connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 + + if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && remoteHostIsNonLocal { networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_rate_limit"}) rt.log.With("connection", "http").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate") rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent, @@ -501,3 +507,13 @@ func (rt *RequestTracker) getForwardedConnectionAddress(header http.Header) (ip } return } + +// isLocalhost returns true if the given host is a localhost address. 
+func isLocalhost(host string) bool { + for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} { + if host == v { + return true + } + } + return false +} diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 0e3576d13b..e6d8c4ecd1 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -75,17 +75,20 @@ func TestRateLimiting(t *testing.T) { } log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) + testConfig := defaultConfig + // this test is conducted locally, so we want to treat all hosts the same for counting incoming requests. + testConfig.DisableLocalhostConnectionRateLimit = false wn := &WebsocketNetwork{ log: log, - config: defaultConfig, + config: testConfig, phonebook: MakePhonebook(1, 1), GenesisID: "go-test-network-genesis", NetworkID: config.Devtestnet, } // increase the IncomingConnectionsLimit/MaxConnectionsPerIP limits, since we don't want to test these. - wn.config.IncomingConnectionsLimit = int(defaultConfig.ConnectionsRateLimitingCount) * 5 - wn.config.MaxConnectionsPerIP += int(defaultConfig.ConnectionsRateLimitingCount) * 5 + wn.config.IncomingConnectionsLimit = int(testConfig.ConnectionsRateLimitingCount) * 5 + wn.config.MaxConnectionsPerIP += int(testConfig.ConnectionsRateLimitingCount) * 5 wn.setup() wn.eventualReadyDelay = time.Second @@ -99,10 +102,10 @@ func TestRateLimiting(t *testing.T) { addrA, postListen := netA.Address() require.Truef(t, postListen, "Listening network failed to start") - noAddressConfig := defaultConfig + noAddressConfig := testConfig noAddressConfig.NetAddress = "" - clientsCount := int(defaultConfig.ConnectionsRateLimitingCount + 5) + clientsCount := int(testConfig.ConnectionsRateLimitingCount + 5) networks := make([]*WebsocketNetwork, clientsCount) phonebooks := make([]Phonebook, clientsCount) @@ -121,7 +124,7 @@ func TestRateLimiting(t *testing.T) { }(networks[i], i) } - deadline := 
time.Now().Add(time.Duration(defaultConfig.ConnectionsRateLimitingWindowSeconds) * time.Second) + deadline := time.Now().Add(time.Duration(testConfig.ConnectionsRateLimitingWindowSeconds) * time.Second) for i := 0; i < clientsCount; i++ { networks[i].Start() @@ -151,13 +154,13 @@ func TestRateLimiting(t *testing.T) { // wait abit longer. } } - if connectedClients >= int(defaultConfig.ConnectionsRateLimitingCount) { + if connectedClients >= int(testConfig.ConnectionsRateLimitingCount) { timedOut = time.Now().After(deadline) break } } if !timedOut { // test to see that at least some of the clients have seen 429 - require.Equal(t, int(defaultConfig.ConnectionsRateLimitingCount), connectedClients) + require.Equal(t, int(testConfig.ConnectionsRateLimitingCount), connectedClients) } } diff --git a/test/testdata/configs/config-v16.json b/test/testdata/configs/config-v16.json new file mode 100644 index 0000000000..39c96b11a6 --- /dev/null +++ b/test/testdata/configs/config-v16.json @@ -0,0 +1,85 @@ +{ + "Version": 16, + "AccountsRebuildSynchronousMode": 1, + "AnnounceParticipationKey": true, + "Archival": false, + "BaseLoggerDebugLevel": 4, + "BroadcastConnectionsLimit": -1, + "CadaverSizeTarget": 1073741824, + "CatchpointFileHistoryLength": 365, + "CatchpointInterval": 10000, + "CatchpointTracking": 0, + "CatchupBlockDownloadRetryAttempts": 1000, + "CatchupFailurePeerRefreshRate": 10, + "CatchupGossipBlockFetchTimeoutSec": 4, + "CatchupHTTPBlockFetchTimeoutSec": 4, + "CatchupLedgerDownloadRetryAttempts": 50, + "CatchupParallelBlocks": 16, + "ConnectionsRateLimitingCount": 60, + "ConnectionsRateLimitingWindowSeconds": 1, + "DNSBootstrapID": ".algorand.network", + "DNSSecurityFlags": 1, + "DeadlockDetection": 0, + "DisableLocalhostConnectionRateLimit": true, + "DisableOutgoingConnectionThrottling": false, + "EnableAgreementReporting": false, + "EnableAgreementTimeMetrics": false, + "EnableAssembleStats": false, + "EnableBlockService": false, + 
"EnableCatchupFromArchiveServers": false, + "EnableDeveloperAPI": false, + "EnableGossipBlockService": true, + "EnableIncomingMessageFilter": false, + "EnableLedgerService": false, + "EnableMetricReporting": false, + "EnableOutgoingNetworkMessageFiltering": true, + "EnablePingHandler": true, + "EnableProcessBlockStats": false, + "EnableProfiler": false, + "EnableRequestLogger": false, + "EnableTopAccountsReporting": false, + "EndpointAddress": "127.0.0.1:0", + "FallbackDNSResolverAddress": "", + "ForceRelayMessages": false, + "GossipFanout": 4, + "IncomingConnectionsLimit": 10000, + "IncomingMessageFilterBucketCount": 5, + "IncomingMessageFilterBucketSize": 512, + "IsIndexerActive": false, + "LedgerSynchronousMode": 2, + "LogArchiveMaxAge": "", + "LogArchiveName": "node.archive.log", + "LogSizeLimit": 1073741824, + "MaxCatchpointDownloadDuration": 7200000000000, + "MaxConnectionsPerIP": 30, + "MinCatchpointFileDownloadBytesPerSecond": 20480, + "NetAddress": "", + "NetworkMessageTraceServer": "", + "NetworkProtocolVersion": "", + "NodeExporterListenAddress": ":9100", + "NodeExporterPath": "./node_exporter", + "OptimizeAccountsDatabaseOnStartup": false, + "OutgoingMessageFilterBucketCount": 3, + "OutgoingMessageFilterBucketSize": 128, + "PeerConnectionsUpdateInterval": 3600, + "PeerPingPeriodSeconds": 0, + "PriorityPeers": {}, + "PublicAddress": "", + "ReconnectTime": 60000000000, + "ReservedFDs": 256, + "RestReadTimeoutSeconds": 15, + "RestWriteTimeoutSeconds": 120, + "RunHosted": false, + "SuggestedFeeBlockHistory": 3, + "SuggestedFeeSlidingWindowSize": 50, + "TLSCertFile": "", + "TLSKeyFile": "", + "TelemetryToLog": true, + "TxPoolExponentialIncreaseFactor": 2, + "TxPoolSize": 15000, + "TxSyncIntervalSeconds": 60, + "TxSyncServeResponseSize": 1000000, + "TxSyncTimeoutSeconds": 30, + "UseXForwardedForAddressField": "", + "VerifiedTranscationsCacheSize": 30000 +} From 86118641106d59001d508bfcaffed51dabaed75b Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 11 
Mar 2021 21:36:02 -0500 Subject: [PATCH 046/215] add unit test --- network/requestTracker_test.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index e6d8c4ecd1..25fc78f414 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -164,3 +164,14 @@ func TestRateLimiting(t *testing.T) { require.Equal(t, int(testConfig.ConnectionsRateLimitingCount), connectedClients) } } + +func TestIsLocalHost(t *testing.T) { + require.True(t, isLocalhost("localhost")) + require.True(t, isLocalhost("127.0.0.1")) + require.True(t, isLocalhost("[::1]")) + require.True(t, isLocalhost("::1")) + require.False(t, isLocalhost("192.168.0.1")) + require.False(t, isLocalhost("")) + require.False(t, isLocalhost("0.0.0.0")) + require.False(t, isLocalhost("127.0.0.0")) +} From b902b89bd3e0216e0e17e9b7b590b3db5d2c7103 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 12 Mar 2021 10:25:40 -0500 Subject: [PATCH 047/215] Provide a fallback to use the default consensus parameters in case the agreement is running ahead of the ledger. 
--- agreement/demux.go | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/agreement/demux.go b/agreement/demux.go index 970a3a4c11..ebad85a9c2 100644 --- a/agreement/demux.go +++ b/agreement/demux.go @@ -238,12 +238,19 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat deadlineCh := s.Clock.TimeoutAt(deadline) var fastDeadlineCh <-chan time.Time - proto, err := d.ledger.ConsensusVersion(ParamsRound(currentRound)) - if err == nil && config.Consensus[proto].FastPartitionRecovery { - fastDeadlineCh = s.Clock.TimeoutAt(fastDeadline) + fastPartitionRecoveryEnabled := false + if proto, err := d.ledger.ConsensusVersion(ParamsRound(currentRound)); err != nil { + logging.Base().Warnf("demux: could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err) + // this might happen during catchup, since the Ledger.Wait fires as soon as a new block is recieved by the ledger, which could be + // far before it's being committed. In these cases, it should be safe to default to the current consensus version. On subsequent + // iterations, it will get "corrected" since the ledger would finish flushing the blocks to disk. + fastPartitionRecoveryEnabled = config.Consensus[protocol.ConsensusCurrentVersion].FastPartitionRecovery + } else { + fastPartitionRecoveryEnabled = config.Consensus[proto].FastPartitionRecovery } - if err != nil { - logging.Base().Errorf("could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err) + + if fastPartitionRecoveryEnabled { + fastDeadlineCh = s.Clock.TimeoutAt(fastDeadline) } d.UpdateEventsQueue(eventQueueDemux, 0) From 233275e256372840b6dbf88e9259e6c142c29f3a Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 12 Mar 2021 10:41:25 -0500 Subject: [PATCH 048/215] Improve ConsensusVersion error output to include a correct Committed round. 
--- data/ledger.go | 6 +++--- ledger/README.md | 3 ++- ledger/blockqueue.go | 4 ++-- ledger/ledger.go | 6 ++++-- ledger/ledger_test.go | 4 ++-- 5 files changed, 13 insertions(+), 10 deletions(-) diff --git a/data/ledger.go b/data/ledger.go index 9da66ac12c..02de26a2ea 100644 --- a/data/ledger.go +++ b/data/ledger.go @@ -322,7 +322,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er return blockhdr.UpgradeState.CurrentProtocol, nil } // try to see if we can figure out what the version would be. - latestRound := l.Latest() + latestCommittedRound, latestRound := l.LatestCommitted() // if the request round was for an older round, then just say the we don't know. if r < latestRound { return "", err @@ -344,7 +344,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er return latestBlockhdr.CurrentProtocol, nil } // otherwise, we can't really tell. - return "", ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestRound} + return "", ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestCommittedRound} } // in this case, we do have a protocol upgrade taking place. if r < latestBlockhdr.NextProtocolSwitchOn { @@ -356,7 +356,7 @@ func (l *Ledger) ConsensusVersion(r basics.Round) (protocol.ConsensusVersion, er if r == latestBlockhdr.NextProtocolSwitchOn && latestBlockhdr.Round >= latestBlockhdr.NextProtocolVoteBefore { return latestBlockhdr.NextProtocol, nil } - err = ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestRound} + err = ledgercore.ErrNoEntry{Round: r, Latest: latestRound, Committed: latestCommittedRound} } // otherwise, we can't really tell what the protocol version would be at round r. 
return "", err diff --git a/ledger/README.md b/ledger/README.md index 06d3010f34..ec4835bd54 100644 --- a/ledger/README.md +++ b/ledger/README.md @@ -26,7 +26,8 @@ The ledger exposes the following functions for managing the blocks: - `Latest()` returns the last block added to the ledger. -- `LatestCommitted()` returns the last block written to durable storage. +- `LatestCommitted()` returns the last block written to durable storage + as well as the latest block added to the ledger. - `Block(round)` returns the block for `round`, or `ErrNoEntry` if no such block has been added. Similarly, `BlockCert(round)` will return diff --git a/ledger/blockqueue.go b/ledger/blockqueue.go index e23c98e726..25df6043ac 100644 --- a/ledger/blockqueue.go +++ b/ledger/blockqueue.go @@ -173,10 +173,10 @@ func (bq *blockQueue) latest() basics.Round { return bq.lastCommitted + basics.Round(len(bq.q)) } -func (bq *blockQueue) latestCommitted() basics.Round { +func (bq *blockQueue) latestCommitted() (basics.Round, basics.Round) { bq.mu.Lock() defer bq.mu.Unlock() - return bq.lastCommitted + return bq.lastCommitted, bq.lastCommitted + basics.Round(len(bq.q)) } func (bq *blockQueue) putBlock(blk bookkeeping.Block, cert agreement.Certificate) error { diff --git a/ledger/ledger.go b/ledger/ledger.go index f264052ad4..19c477e61f 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -504,8 +504,10 @@ func (l *Ledger) Latest() basics.Round { // LatestCommitted returns the last block round number written to // persistent storage. This block, and all previous blocks, are -// guaranteed to be available after a crash. -func (l *Ledger) LatestCommitted() basics.Round { +// guaranteed to be available after a crash. 
In addition, it returns +// the latest block added to the ledger ( which will be flushed +// to persistent storage later on ) +func (l *Ledger) LatestCommitted() (basics.Round, basics.Round) { return l.blockQ.latestCommitted() } diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 1080d87b1f..54b2f0a729 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -1409,8 +1409,8 @@ func TestLedgerReload(t *testing.T) { require.NoError(t, err) // if we reloaded it before it got committed, we need to roll back the round counter. - if l.LatestCommitted() != blk.BlockHeader.Round { - blk.BlockHeader.Round = l.LatestCommitted() + if latestCommitted, _ := l.LatestCommitted(); latestCommitted != blk.BlockHeader.Round { + blk.BlockHeader.Round = latestCommitted } } if i%13 == 0 { From 1325a23d51ea65bf789d42c84e8b9a2a0e26b0ab Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 12 Mar 2021 11:40:42 -0500 Subject: [PATCH 049/215] update per reviewer's comments. --- network/requestTracker.go | 10 +++++----- network/requestTracker_test.go | 1 + 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/network/requestTracker.go b/network/requestTracker.go index 724dfd2a99..4d2959a414 100644 --- a/network/requestTracker.go +++ b/network/requestTracker.go @@ -285,11 +285,11 @@ func (rt *RequestTracker) Accept() (conn net.Conn, err error) { rt.hostRequests.pruneRequests(rateLimitingWindowStartTime) originConnections := rt.hostRequests.countOriginConnections(trackerRequest.remoteHost, rateLimitingWindowStartTime) - remoteHostIsNonLocal := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackerRequest.remoteHost)) + rateLimitedRemoteHost := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackerRequest.remoteHost)) connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 // check the number of connections - if originConnections > 
rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && remoteHostIsNonLocal { + if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && rateLimitedRemoteHost { rt.hostRequestsMu.Unlock() networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_tcp_rate_limit"}) rt.log.With("connection", "tcp").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate") @@ -451,10 +451,10 @@ func (rt *RequestTracker) ServeHTTP(response http.ResponseWriter, request *http. delete(rt.httpConnections, localAddr) }() - remoteHostIsNonLocal := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackedRequest.remoteHost)) + rateLimitedRemoteHost := (!rt.config.DisableLocalhostConnectionRateLimit) || (!isLocalhost(trackedRequest.remoteHost)) connectionLimitEnabled := rt.config.ConnectionsRateLimitingWindowSeconds > 0 && rt.config.ConnectionsRateLimitingCount > 0 - if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && remoteHostIsNonLocal { + if originConnections > rt.config.ConnectionsRateLimitingCount && connectionLimitEnabled && rateLimitedRemoteHost { networkConnectionsDroppedTotal.Inc(map[string]string{"reason": "incoming_connection_per_ip_rate_limit"}) rt.log.With("connection", "http").With("count", originConnections).Debugf("Rejected connection due to excessive connections attempt rate") rt.log.EventWithDetails(telemetryspec.Network, telemetryspec.ConnectPeerFailEvent, @@ -510,7 +510,7 @@ func (rt *RequestTracker) getForwardedConnectionAddress(header http.Header) (ip // isLocalhost returns true if the given host is a localhost address. 
func isLocalhost(host string) bool { - for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1"} { + for _, v := range []string{"localhost", "127.0.0.1", "[::1]", "::1", "[::]"} { if host == v { return true } diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 25fc78f414..254cd06b70 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -170,6 +170,7 @@ func TestIsLocalHost(t *testing.T) { require.True(t, isLocalhost("127.0.0.1")) require.True(t, isLocalhost("[::1]")) require.True(t, isLocalhost("::1")) + require.True(t, isLocalhost("[::]")) require.False(t, isLocalhost("192.168.0.1")) require.False(t, isLocalhost("")) require.False(t, isLocalhost("0.0.0.0")) From fff5673b45a15d8213304bb65aaed43a6c5c51ad Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 12 Mar 2021 13:42:01 -0500 Subject: [PATCH 050/215] update comments --- ledger/README.md | 2 +- ledger/ledger.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/ledger/README.md b/ledger/README.md index ec4835bd54..999950ee52 100644 --- a/ledger/README.md +++ b/ledger/README.md @@ -27,7 +27,7 @@ The ledger exposes the following functions for managing the blocks: - `Latest()` returns the last block added to the ledger. - `LatestCommitted()` returns the last block written to durable storage - as well as the latest block added to the ledger. + as well as the round of the latest block added to the ledger. - `Block(round)` returns the block for `round`, or `ErrNoEntry` if no such block has been added. Similarly, `BlockCert(round)` will return diff --git a/ledger/ledger.go b/ledger/ledger.go index 19c477e61f..9fc57cbf82 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -505,8 +505,8 @@ func (l *Ledger) Latest() basics.Round { // LatestCommitted returns the last block round number written to // persistent storage. This block, and all previous blocks, are // guaranteed to be available after a crash. 
In addition, it returns -// the latest block added to the ledger ( which will be flushed -// to persistent storage later on ) +// the latest block round number added to the ledger ( which will be +// flushed to persistent storage later on ) func (l *Ledger) LatestCommitted() (basics.Round, basics.Round) { return l.blockQ.latestCommitted() } From 485ff9edc5a714b90dcf28e45711394c3c43a7d6 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 12 Mar 2021 14:34:34 -0500 Subject: [PATCH 051/215] Add dummy change to trigger github. --- network/requestTracker_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 254cd06b70..067841abc8 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -76,7 +76,7 @@ func TestRateLimiting(t *testing.T) { log := logging.TestingLog(t) log.SetLevel(logging.Level(defaultConfig.BaseLoggerDebugLevel)) testConfig := defaultConfig - // this test is conducted locally, so we want to treat all hosts the same for counting incoming requests. + // This test is conducted locally, so we want to treat all hosts the same for counting incoming requests. 
testConfig.DisableLocalhostConnectionRateLimit = false wn := &WebsocketNetwork{ log: log, From dbedc9d784fb72da85ed37e356ef059f3bb6995b Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Fri, 12 Mar 2021 15:20:55 -0500 Subject: [PATCH 052/215] make sanity --- daemon/kmd/lib/kmdapi/requests.go | 2 +- data/transactions/logic/doc.go | 5 ++++- data/transactions/logic/doc_test.go | 2 +- test/e2e-go/features/catchup/basicCatchup_test.go | 4 ++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/daemon/kmd/lib/kmdapi/requests.go b/daemon/kmd/lib/kmdapi/requests.go index 0411dddbff..2f541deb4a 100644 --- a/daemon/kmd/lib/kmdapi/requests.go +++ b/daemon/kmd/lib/kmdapi/requests.go @@ -165,7 +165,7 @@ type APIV1POSTTransactionSignRequest struct { // Note: SDK and goal usually generate `SignedTxn` objects // in that case, the field `txn` / `Transaction` of the // generated `SignedTxn` object needs to be used - // + // // swagger:strfmt byte Transaction []byte `json:"transaction"` PublicKey crypto.PublicKey `json:"public_key"` diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 6e3c757fbb..2e8fa52f8f 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -198,6 +198,8 @@ var OpGroupList = []OpGroup{ {"State Access", []string{"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}}, } +// OpCost indicates the cost of an operation over the range of +// LogicVersions from From to To. type OpCost struct { From int To int @@ -226,7 +228,7 @@ func OpAllCosts(opName string) []OpCost { return costs } -// TypeNameDescription contains extra description about a low level +// TypeNameDescriptions contains extra description about a low level // protocol transaction Type string, and provide a friendlier type // constant name in assembler. 
var TypeNameDescriptions = map[string]string{ @@ -361,6 +363,7 @@ func fieldsDocWithExtra(source map[string]string, ex extractor) map[string]strin return result } +// AssetParamsFieldDocs are notes on fields available in `asset_holding_get` var AssetHoldingFieldDocs = map[string]string{ "AssetBalance": "Amount of the asset unit held by this account", "AssetFrozen": "Is the asset frozen or not", diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go index 343dc69f4f..adb136188c 100644 --- a/data/transactions/logic/doc_test.go +++ b/data/transactions/logic/doc_test.go @@ -27,7 +27,7 @@ func TestOpDocs(t *testing.T) { for _, op := range OpSpecs { opsSeen[op.Name] = false } - for name, _ := range opDocByName { + for name := range opDocByName { _, exists := opsSeen[name] if !exists { t.Errorf("error: doc for op %#v that does not exist in OpSpecs", name) diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index 7eb7a9b0c9..cfe350e298 100644 --- a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -83,7 +83,7 @@ func TestCatchupOverGossip(t *testing.T) { // ledger node upgraded version, fetcher node upgraded version // Run with the default values. Instead of "", pass the default value - // to exercise loading it from the config file. + // to exercise loading it from the config file. runCatchupOverGossip(t, supportedVersions[0], supportedVersions[0]) for i := 1; i < len(supportedVersions); i++ { runCatchupOverGossip(t, supportedVersions[i], "") @@ -111,7 +111,7 @@ func runCatchupOverGossip(t *testing.T, // distribution for catchup so this is fine. 
fixture.SetupNoStart(t, filepath.Join("nettemplates", "TwoNodes100Second.json")) - if ledgerNodeDowngradeTo != ""{ + if ledgerNodeDowngradeTo != "" { // Force the node to only support v1 dir, err := fixture.GetNodeDir("Node") a.NoError(err) From f6dd31b52827b64d663d732bca218c825c76c98f Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Sat, 13 Mar 2021 19:34:16 -0500 Subject: [PATCH 053/215] Check compile and dryrun porgrams against consensus limits --- cmd/goal/clerk.go | 17 +++++++++++++++++ data/transactions/logic/assembler.go | 5 +++++ 2 files changed, 22 insertions(+) diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index 31a0ce8e89..0768232cf7 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -913,6 +913,17 @@ func assembleFile(fname string) (program []byte) { ops.ReportProblems(fname) reportErrorf("%s: %s", fname, err) } + _, params := getProto(protoVersion) + if ops.HasStatefulOps { + if len(ops.Program) > params.MaxAppProgramLen { + reportErrorf("%s: app program size too large: %d > %d", fname, len(ops.Program), params.MaxAppProgramLen) + } + } else { + if uint64(len(ops.Program)) > params.LogicSigMaxSize { + reportErrorf("%s: logsig program size too large: %d > %d", fname, len(ops.Program), params.LogicSigMaxSize) + } + } + return ops.Program } @@ -1056,11 +1067,17 @@ var dryrunCmd = &cobra.Command{ if txn.Lsig.Blank() { continue } + if uint64(txn.Lsig.Len()) > params.LogicSigMaxSize { + reportErrorf("program size too large: %d > %d", len(txn.Lsig.Logic), params.LogicSigMaxSize) + } ep := logic.EvalParams{Txn: &txn, Proto: ¶ms, GroupIndex: i, TxnGroup: txgroup} cost, err := logic.Check(txn.Lsig.Logic, ep) if err != nil { reportErrorf("program failed Check: %s", err) } + if uint64(cost) > params.LogicSigMaxCost { + reportErrorf("program cost too large: %d > %d", cost, params.LogicSigMaxCost) + } sb := strings.Builder{} ep = logic.EvalParams{ Txn: &txn, diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 
99e2d28e3e..ec573c1a16 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -80,6 +80,8 @@ type OpStream struct { // map opcode offsets to source line OffsetToLine map[int]int + + HasStatefulOps bool } // GetVersion returns the LogicSigVersion we're building to @@ -1133,6 +1135,9 @@ func (ops *OpStream) assemble(fin io.Reader) error { if ok { ops.trace("%3d: %s\t", ops.sourceLine, opstring) ops.RecordSourceLine() + if spec.Modes == runModeApplication { + ops.HasStatefulOps = true + } spec.asm(ops, &spec, fields[1:]) ops.trace("\n") continue From e49ac42eb06023eeab6107539459e1e23056d97e Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sun, 14 Mar 2021 15:56:23 -0400 Subject: [PATCH 054/215] Fix random e2e test failuire --- test/e2e-go/features/transactions/accountv2_test.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go index 93fe6caf5c..53cfd19a46 100644 --- a/test/e2e-go/features/transactions/accountv2_test.go +++ b/test/e2e-go/features/transactions/accountv2_test.go @@ -162,9 +162,11 @@ int 1 a.NoError(err) round, err = client.CurrentRound() a.NoError(err) - _, err = client.BroadcastTransaction(signedTxn) + txid, err := client.BroadcastTransaction(signedTxn) a.NoError(err) - client.WaitForRound(round + 2) + // ensure transaction is accepted into a block within 5 rounds. 
+ confirmed := fixture.WaitForAllTxnsToConfirm(round+5, map[string]string{txid: signedTxn.Txn.Sender.String()}) + a.True(confirmed) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) @@ -207,7 +209,7 @@ int 1 a.NoError(err) round, err = client.CurrentRound() a.NoError(err) - _, err = client.BroadcastTransaction(signedTxn) + txid, err = client.BroadcastTransaction(signedTxn) a.NoError(err) _, err = client.WaitForRound(round + 3) a.NoError(err) @@ -215,6 +217,9 @@ int 1 resp, err := client.GetPendingTransactions(2) a.NoError(err) a.Equal(uint64(0), resp.TotalTxns) + txinfo, err := client.TransactionInformation(signedTxn.Txn.Sender.String(), txid) + a.NoError(err) + a.True(txinfo.ConfirmedRound != 0) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) @@ -257,7 +262,7 @@ int 1 a.Equal(uint64(1), value.Uint) // 2 global state update in total, 1 local state updates - checkEvalDelta(t, &client, round, round+5, 2, 1) + checkEvalDelta(t, &client, round+2, round+5, 2, 1) a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) From 612223ccee0583859e0affa9e8d02d7fc309cd20 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Mon, 15 Mar 2021 13:10:11 -0400 Subject: [PATCH 055/215] GPATH-less running of expect tests --- test/e2e-go/cli/goal/expect/README.md | 2 +- test/framework/fixtures/expectFixture.go | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/test/e2e-go/cli/goal/expect/README.md b/test/e2e-go/cli/goal/expect/README.md index 3053632c28..ea3dd026dd 100644 --- a/test/e2e-go/cli/goal/expect/README.md +++ b/test/e2e-go/cli/goal/expect/README.md @@ -56,7 +56,7 @@ There are three (optional) environment variables that can be used to control the set the filter to be `export TESTFILTER=[b,c]ar`. - Defaults to all tests (`.*`). 
-NOTE: the file name shoud have the suffix: "Test.exp" +NOTE: the file name should have the suffix: "Test.exp" To run the Goal Expect test, run the following command from the top level go-algorand directory: diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go index 4150944093..57687f46d3 100644 --- a/test/framework/fixtures/expectFixture.go +++ b/test/framework/fixtures/expectFixture.go @@ -24,6 +24,7 @@ import ( "path" "path/filepath" "regexp" + "runtime" "strings" "testing" @@ -55,7 +56,9 @@ func (ef *ExpectFixture) initialize(t *testing.T) (err error) { } ef.testDataDir = os.Getenv("TESTDATADIR") if ef.testDataDir == "" { - ef.testDataDir = os.ExpandEnv("${GOPATH}/src/github.com/algorand/go-algorand/test/testdata") + // Default to test/testdata in the source tree being tested + _, path, _, _ := runtime.Caller(0) + ef.testDataDir = filepath.Join(filepath.Dir(path), "../../testdata") } ef.testFilter = os.Getenv("TESTFILTER") From 3e3c2fc91828294e4c88ed58d4b8ebb7cf7d6907 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Mon, 15 Mar 2021 20:43:42 -0400 Subject: [PATCH 056/215] Move messages into messages.go, add expect tests --- cmd/goal/clerk.go | 4 +- cmd/goal/messages.go | 3 + .../cli/goal/expect/tealConsensusTest.exp | 106 ++++++++++++++++++ 3 files changed, 111 insertions(+), 2 deletions(-) create mode 100755 test/e2e-go/cli/goal/expect/tealConsensusTest.exp diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index 0768232cf7..228318fbae 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -916,11 +916,11 @@ func assembleFile(fname string) (program []byte) { _, params := getProto(protoVersion) if ops.HasStatefulOps { if len(ops.Program) > params.MaxAppProgramLen { - reportErrorf("%s: app program size too large: %d > %d", fname, len(ops.Program), params.MaxAppProgramLen) + reportErrorf(tealAppSize, fname, len(ops.Program), params.MaxAppProgramLen) } } else { if uint64(len(ops.Program)) > params.LogicSigMaxSize 
{ - reportErrorf("%s: logsig program size too large: %d > %d", fname, len(ops.Program), params.LogicSigMaxSize) + reportErrorf(tealLogicSigSize, fname, len(ops.Program), params.LogicSigMaxSize) } } diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go index e503f3aa2b..9ba686219f 100644 --- a/cmd/goal/messages.go +++ b/cmd/goal/messages.go @@ -156,6 +156,9 @@ const ( tealsignTooManyArg = "--set-lsig-arg-idx too large, maximum of %d arguments" tealsignInfoWroteSig = "Wrote signature for %s to LSig.Args[%d]" + tealLogicSigSize = "%s: logicsig program size too large: %d > %d" + tealAppSize = "%s: app program size too large: %d > %d" + // Wallet infoRecoveryPrompt = "Please type your recovery mnemonic below, and hit return when you are done: " infoChoosePasswordPrompt = "Please choose a password for wallet '%s': " diff --git a/test/e2e-go/cli/goal/expect/tealConsensusTest.exp b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp new file mode 100755 index 0000000000..1f26e78654 --- /dev/null +++ b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp @@ -0,0 +1,106 @@ +#!/usr/bin/expect -f +set err 0 +log_user 1 + +# put a TEAL program into f, with at least size and cost. 
+proc teal {f v size cost {prefix ""}} { + set CHAN [open $f w] + puts $CHAN "#pragma version $v\n" + puts $CHAN $prefix + for {set i 5} {$i < $size} {incr i 2} { + puts $CHAN "int 1\npop\n" + } + + if {$cost > [expr $size * 2]} { + puts $CHAN "byte 0x1234\n" + for {set i [expr $size * 2]} {$i < $cost} {incr i 130} { + puts $CHAN "keccak256\n" + } + puts $CHAN "pop\n" + } + puts $CHAN "int 1\n" + close $CHAN +} + +if { [catch { + source goalExpectCommon.exp + set TEST_ALGO_DIR [lindex $argv 0] + set TEST_DATA_DIR [lindex $argv 1] + + puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" + puts "TEST_DATA_DIR: $TEST_DATA_DIR" + + set TIME_STAMP [clock seconds] + + set TEST_ROOT_DIR $TEST_ALGO_DIR/root + set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ + set NETWORK_NAME test_net_expect_$TIME_STAMP + set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50Each.json" + + # Create network + ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + + # Start network + ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR + + + # Test various program length limits during compile + + teal "$TEST_ROOT_DIR/small-sig.teal" 2 100 1 + spawn goal clerk compile "$TEST_ROOT_DIR/small-sig.teal" + expect { + -re {[A-Z2-9]{58}} { set SMALL_SIG $expect_out(0,string) } + "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } + } + + teal "$TEST_ROOT_DIR/big-sig.teal" 2 1001 1 + spawn goal clerk compile "$TEST_ROOT_DIR/big-sig.teal" + expect { + -re {[A-Z2-9]{58}} { ::AlgorandGoal::Abort "hash" } + -re {.*logicsig program size too large} { puts "bigsigcheck: pass" } + "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } + } + + teal "$TEST_ROOT_DIR/barely-fits-app.teal" 2 1001 1 "int 0\nbalance\npop\n" + spawn goal clerk compile "$TEST_ROOT_DIR/barely-fits-app.teal" + expect { + -re {[A-Z2-9]{58}} { puts "hash $expect_out(0,string)" } + "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } + } + + teal "$TEST_ROOT_DIR/big-app.teal" 2 1025 1 "int 
0\nbalance\npop\n" + spawn goal clerk compile "$TEST_ROOT_DIR/big-app.teal" + expect { + -re {[A-Z2-9]{58}} { ::AlgorandGoal::Abort "hash" } + -re {.*app program size too large} { puts "bigappcheck: pass" } + "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } + } + + # Test cost limits during dryrun + exec goal clerk send -F "$TEST_ROOT_DIR/small-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/small-sig.tx + spawn goal clerk dryrun -t $TEST_ROOT_DIR/small-sig.tx + expect { + " - pass -" { puts "smallsig dryrun pass" } + "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) } + "too large" { ::AlgorandGoal::Abort $expect_out(buffer) } + } + + teal "$TEST_ROOT_DIR/slow-sig.teal" 2 1 20001 + exec goal clerk compile "$TEST_ROOT_DIR/slow-sig.teal" + exec goal clerk send -F "$TEST_ROOT_DIR/slow-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/slow-sig.tx + spawn goal clerk dryrun -t $TEST_ROOT_DIR/slow-sig.tx + expect { + "program cost too large" {puts "slowsig pass"} + "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } + } + + # Shutdown the network + ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR + + puts "TEAL Consensus Test Successful" + + exit 0 + +} EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in tealConsensusTest: $EXCEPTION" +} From 708f5daf549e209ebf6337f9199ef2e8b24b2370 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Mon, 15 Mar 2021 20:47:41 -0400 Subject: [PATCH 057/215] doc typo --- data/transactions/logic/doc.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 2e8fa52f8f..22dcdb66b9 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -363,7 +363,7 @@ func fieldsDocWithExtra(source map[string]string, ex extractor) map[string]strin return result } -// AssetParamsFieldDocs are notes 
on fields available in `asset_holding_get` +// AssetHoldingFieldDocs are notes on fields available in `asset_holding_get` var AssetHoldingFieldDocs = map[string]string{ "AssetBalance": "Amount of the asset unit held by this account", "AssetFrozen": "Is the asset frozen or not", From adb8861e2cfc3f03f4678fbfd1e2fed49b905750 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Tue, 16 Mar 2021 09:45:25 -0400 Subject: [PATCH 058/215] enum types for gtxns(a) op --- cmd/opdoc/opdoc.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go index 657866b97a..ef624a0956 100644 --- a/cmd/opdoc/opdoc.go +++ b/cmd/opdoc/opdoc.go @@ -225,13 +225,13 @@ type LanguageSpec struct { } func argEnum(name string) []string { - if name == "txn" || name == "gtxn" { + if name == "txn" || name == "gtxn" || name == "gtxns" { return logic.TxnFieldNames } if name == "global" { return logic.GlobalFieldNames } - if name == "txna" || name == "gtxna" { + if name == "txna" || name == "gtxna" || name == "gtxnsa" { return logic.TxnaFieldNames } if name == "asset_holding_get" { @@ -266,13 +266,13 @@ func typeString(types []logic.StackType) string { } func argEnumTypes(name string) string { - if name == "txn" || name == "gtxn" { + if name == "txn" || name == "gtxn" || name == "gtxns" { return typeString(logic.TxnFieldTypes) } if name == "global" { return typeString(logic.GlobalFieldTypes) } - if name == "txna" || name == "gtxna" { + if name == "txna" || name == "gtxna" || name == "gtxnsa" { return typeString(logic.TxnaFieldTypes) } if name == "asset_holding_get" { From 5e6768fdb80803fd1ec8c4aefb616750a8e95251 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Tue, 16 Mar 2021 12:08:36 -0400 Subject: [PATCH 059/215] Optimize bloom filter memory allocations. 
--- util/bloom/bloom.go | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index 21ec4454f6..2a94835676 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -18,9 +18,11 @@ const maxHashes = uint32(32) // Filter represents the state of the Bloom filter type Filter struct { - numHashes uint32 - data []byte - prefix [4]byte + numHashes uint32 + data []byte + prefix [4]byte + hashStagingBuffer []uint32 + preimageStagingBuffer []byte } // New creates a new Bloom filter @@ -55,18 +57,24 @@ func Optimal(numElements int, falsePositiveRate float64) (sizeBits int, numHashe // Set marks x as present in the filter func (f *Filter) Set(x []byte) { - withPrefix := append(f.prefix[:], x...) - hs := hash(withPrefix, f.numHashes) + withPrefix := f.preimageStagingBuffer + withPrefix = append(withPrefix, f.prefix[:]...) + withPrefix = append(withPrefix, x...) + hs := f.hash(withPrefix, f.numHashes) n := uint32(len(f.data) * 8) for _, h := range hs { f.set(h % n) } + f.preimageStagingBuffer = withPrefix[:0] } // Test checks whether x is present in the filter func (f *Filter) Test(x []byte) bool { - withPrefix := append(f.prefix[:], x...) - hs := hash(withPrefix, f.numHashes) + withPrefix := f.preimageStagingBuffer + withPrefix = append(withPrefix, f.prefix[:]...) + withPrefix = append(withPrefix, x...) + hs := f.hash(withPrefix, f.numHashes) + f.preimageStagingBuffer = withPrefix[:0] n := uint32(len(f.data) * 8) for _, h := range hs { if !f.test(h % n) { @@ -140,8 +148,14 @@ func UnmarshalJSON(data []byte) (*Filter, error) { // Previously, we used the hashing method described in this paper: // http://www.eecs.harvard.edu/~michaelm/postscripts/rsa2008.pdf // but this gave us bad false positive rates for small bloom filters. 
-func hash(x []byte, nhash uint32) []uint32 { - res := make([]uint32, nhash+3) +func (f *Filter) hash(x []byte, nhash uint32) []uint32 { + res := f.hashStagingBuffer + if cap(res) < int(nhash+3) { + f.hashStagingBuffer = make([]uint32, nhash+3) + res = f.hashStagingBuffer + } else { + res = res[:nhash+3] + } for i := uint32(0); i < (nhash+3)/4; i++ { h1, h2 := siphash.Hash128(uint64(i), 666666, x) From 8c3e0ef86ef3446d85dd45943a7f0fec051eb8c1 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Tue, 16 Mar 2021 12:28:47 -0400 Subject: [PATCH 060/215] Add unit test to confirm. --- util/bloom/bloom_test.go | 62 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 62 insertions(+) diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go index f755fb62af..04c3e982aa 100644 --- a/util/bloom/bloom_test.go +++ b/util/bloom/bloom_test.go @@ -14,6 +14,8 @@ import ( "testing" "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/crypto" ) func TestBitset(t *testing.T) { @@ -247,3 +249,63 @@ func TestBinaryMarshalLength(t *testing.T) { } } } + +func TestBloomFilterMemoryConsumption(t *testing.T) { + t.Run("Set", func(t *testing.T) { + result := testing.Benchmark(func(b *testing.B) { + // start this test with 10K iterations. + if b.N < 10000 { + b.N = 10000 + } + sizeBits, numHashes := Optimal(b.N, 0.01) + prefix := uint32(0) + bf := New(sizeBits, numHashes, prefix) + + dataset := make([][]byte, b.N) + for n := 0; n < b.N; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + dataset[n] = hash[:] + } + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + bf.Set(dataset[n]) + } + }) + + // make sure the memory allocated is less than 1 byte / iteration. + require.LessOrEqual(t, uint64(result.MemBytes), uint64(result.N)) + }) + t.Run("Test", func(t *testing.T) { + result := testing.Benchmark(func(b *testing.B) { + // start this test with 10K iterations. 
+ if b.N < 10000 { + b.N = 10000 + } + sizeBits, numHashes := Optimal(b.N, 0.01) + prefix := uint32(0) + bf := New(sizeBits, numHashes, prefix) + + dataset := make([][]byte, b.N) + for n := 0; n < b.N; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + dataset[n] = hash[:] + } + + // set half of them. + for n := 0; n < b.N/2; n++ { + bf.Set(dataset[n]) + } + + b.ReportAllocs() + b.ResetTimer() + for n := 0; n < b.N; n++ { + bf.Test(dataset[n]) + } + }) + + // make sure the memory allocated is less than 1 byte / iteration. + require.LessOrEqual(t, uint64(result.MemBytes), uint64(result.N)) + }) +} From e14fa2320becf88d8a849c2eb53b96c9c9d2ffd1 Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Tue, 16 Mar 2021 12:57:54 -0400 Subject: [PATCH 061/215] Update tealdbg frontend for TEAL 3 --- cmd/tealdbg/cdtState.go | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/cmd/tealdbg/cdtState.go b/cmd/tealdbg/cdtState.go index 0462ed119a..8919029a43 100644 --- a/cmd/tealdbg/cdtState.go +++ b/cmd/tealdbg/cdtState.go @@ -362,7 +362,9 @@ func prepareTxn(txn *transactions.Transaction, groupIndex int) []fieldDesc { for field, name := range logic.TxnFieldNames { if field == int(logic.FirstValidTime) || field == int(logic.Accounts) || - field == int(logic.ApplicationArgs) { + field == int(logic.ApplicationArgs) || + field == int(logic.Assets) || + field == int(logic.Applications) { continue } var value string @@ -716,7 +718,7 @@ func makeTxnImpl(txn *transactions.Transaction, groupIndex int, preview bool) (d desc = append(desc, makePrimitive(field)) } - for _, fieldIdx := range []logic.TxnField{logic.ApplicationArgs, logic.Accounts} { + for _, fieldIdx := range []logic.TxnField{logic.ApplicationArgs, logic.Accounts, logic.Assets, logic.Applications} { fieldID := encodeTxnArrayField(groupIndex, int(fieldIdx)) var length int switch logic.TxnField(fieldIdx) { @@ -724,6 +726,10 @@ func makeTxnImpl(txn 
*transactions.Transaction, groupIndex int, preview bool) (d length = len(txn.Accounts) + 1 case logic.ApplicationArgs: length = len(txn.ApplicationArgs) + case logic.Assets: + length = len(txn.ForeignAssets) + case logic.Applications: + length = len(txn.ForeignApps) + 1 } field := makeArray(logic.TxnFieldNames[fieldIdx], length, fieldID) if preview { @@ -768,6 +774,10 @@ func makeTxnArrayField(s *cdtState, groupIndex int, fieldIdx int) (desc []cdt.Ru length = len(txn.Accounts) + 1 case logic.ApplicationArgs: length = len(txn.ApplicationArgs) + case logic.Assets: + length = len(txn.ForeignAssets) + case logic.Applications: + length = len(txn.ForeignApps) + 1 } elems := txnFieldToArrayFieldDesc(&txn, groupIndex, logic.TxnField(fieldIdx), length) From 0981f0fae5744dea4050e2771477e2104560119d Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Tue, 16 Mar 2021 13:13:55 -0400 Subject: [PATCH 062/215] Add newline to pushbytes disassembly --- data/transactions/logic/assembler.go | 2 +- data/transactions/logic/assembler_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 99e2d28e3e..9d3a78bbef 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -1642,7 +1642,7 @@ func disPushBytes(dis *disassembleState, spec *OpSpec) { return } bytes := dis.program[pos:end] - _, dis.err = fmt.Fprintf(dis.out, "%s 0x%s", spec.Name, hex.EncodeToString(bytes)) + _, dis.err = fmt.Fprintf(dis.out, "%s 0x%s\n", spec.Name, hex.EncodeToString(bytes)) dis.nextpc = int(end) } func checkPushBytes(cx *evalContext) int { diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 762335193a..9865db6424 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -1174,6 +1174,24 @@ func TestDisassembleGtxna(t *testing.T) { require.Equal(t, 
gtxnaSample, disassembled) } +func TestDisassemblePushConst(t *testing.T) { + t.Parallel() + // check pushint and pushbytes are properly disassembled + intSample := fmt.Sprintf("// version %d\npushint 1\n", AssemblerMaxVersion) + ops, err := AssembleStringWithVersion(intSample, AssemblerMaxVersion) + require.NoError(t, err) + disassembled, err := Disassemble(ops.Program) + require.NoError(t, err) + require.Equal(t, intSample, disassembled) + + bytesSample := fmt.Sprintf("// version %d\npushbytes 0x01\n", AssemblerMaxVersion) + ops, err = AssembleStringWithVersion(bytesSample, AssemblerMaxVersion) + require.NoError(t, err) + disassembled, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Equal(t, bytesSample, disassembled) +} + func TestDisassembleLastLabel(t *testing.T) { t.Parallel() From 5015c44c2deb18be1beac4f76496eb355ed0f348 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Tue, 16 Mar 2021 13:26:58 -0400 Subject: [PATCH 063/215] improve unit test performance --- util/bloom/bloom_test.go | 57 +++++++++++++++++++++------------------- 1 file changed, 30 insertions(+), 27 deletions(-) diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go index 04c3e982aa..8d6e862e85 100644 --- a/util/bloom/bloom_test.go +++ b/util/bloom/bloom_test.go @@ -252,25 +252,27 @@ func TestBinaryMarshalLength(t *testing.T) { func TestBloomFilterMemoryConsumption(t *testing.T) { t.Run("Set", func(t *testing.T) { + N := 1000000 + sizeBits, numHashes := Optimal(N, 0.01) + prefix := uint32(0) + bf := New(sizeBits, numHashes, prefix) + + dataset := make([][]byte, N) + for n := 0; n < N; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + dataset[n] = hash[:] + } + result := testing.Benchmark(func(b *testing.B) { // start this test with 10K iterations. 
if b.N < 10000 { b.N = 10000 } - sizeBits, numHashes := Optimal(b.N, 0.01) - prefix := uint32(0) - bf := New(sizeBits, numHashes, prefix) - - dataset := make([][]byte, b.N) - for n := 0; n < b.N; n++ { - hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) - dataset[n] = hash[:] - } b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { - bf.Set(dataset[n]) + bf.Set(dataset[n%N]) } }) @@ -278,30 +280,31 @@ func TestBloomFilterMemoryConsumption(t *testing.T) { require.LessOrEqual(t, uint64(result.MemBytes), uint64(result.N)) }) t.Run("Test", func(t *testing.T) { + N := 1000000 + sizeBits, numHashes := Optimal(N, 0.01) + prefix := uint32(0) + bf := New(sizeBits, numHashes, prefix) + + dataset := make([][]byte, N) + for n := 0; n < N; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + dataset[n] = hash[:] + } + + // set half of them. + for n := 0; n < N/2; n++ { + bf.Set(dataset[n]) + } result := testing.Benchmark(func(b *testing.B) { // start this test with 10K iterations. - if b.N < 10000 { - b.N = 10000 - } - sizeBits, numHashes := Optimal(b.N, 0.01) - prefix := uint32(0) - bf := New(sizeBits, numHashes, prefix) - - dataset := make([][]byte, b.N) - for n := 0; n < b.N; n++ { - hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) - dataset[n] = hash[:] - } - - // set half of them. 
- for n := 0; n < b.N/2; n++ { - bf.Set(dataset[n]) + if b.N < 1000000 { + b.N = 1000000 } b.ReportAllocs() b.ResetTimer() for n := 0; n < b.N; n++ { - bf.Test(dataset[n]) + bf.Test(dataset[n%N]) } }) From 4e0d19258845ddbfe0c446fe9abe5b9bf4721b31 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy Date: Tue, 16 Mar 2021 16:37:10 -0400 Subject: [PATCH 064/215] tealdbg: listen on specified address --- cmd/tealdbg/main.go | 6 +- cmd/tealdbg/server.go | 4 +- cmd/tealdbg/server_test.go | 2 +- .../e2e-go/cli/tealdbg/expect/tealdbgTest.exp | 84 ++++++------------- 4 files changed, 31 insertions(+), 65 deletions(-) diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go index 96763d3ab1..60a090cccc 100644 --- a/cmd/tealdbg/main.go +++ b/cmd/tealdbg/main.go @@ -135,6 +135,7 @@ var roundNumber uint64 var timestamp int64 var runMode runModeValue = runModeValue{makeCobraStringValue("auto", []string{"signature", "application"})} var port int +var iface string var noFirstRun bool var noBrowserCheck bool var noSourceMap bool @@ -146,6 +147,7 @@ var listenForDrReq bool func init() { rootCmd.PersistentFlags().VarP(&frontend, "frontend", "f", "Frontend to use: "+frontend.AllowedString()) rootCmd.PersistentFlags().IntVar(&port, "remote-debugging-port", 9392, "Port to listen on") + rootCmd.PersistentFlags().StringVar(&iface, "listen", "127.0.0.1", "Network interface to listen on") rootCmd.PersistentFlags().BoolVar(&noFirstRun, "no-first-run", false, "") rootCmd.PersistentFlags().MarkHidden("no-first-run") rootCmd.PersistentFlags().BoolVar(&noBrowserCheck, "no-default-browser-check", false, "") @@ -172,7 +174,7 @@ func init() { } func debugRemote() { - ds := makeDebugServer(port, &frontend, nil) + ds := makeDebugServer(iface, port, &frontend, nil) err := ds.startRemote() if err != nil { log.Fatalln(err.Error()) @@ -277,7 +279,7 @@ func debugLocal(args []string) { ListenForDrReq: listenForDrReq, } - ds := makeDebugServer(port, &frontend, &dp) + ds := makeDebugServer(iface, port, &frontend, 
&dp) err = ds.startDebug() if err != nil { diff --git a/cmd/tealdbg/server.go b/cmd/tealdbg/server.go index c47208a056..f207285948 100644 --- a/cmd/tealdbg/server.go +++ b/cmd/tealdbg/server.go @@ -90,11 +90,11 @@ type FrontendFactory interface { Make(router *mux.Router, appAddress string) (da DebugAdapter) } -func makeDebugServer(port int, ff FrontendFactory, dp *DebugParams) DebugServer { +func makeDebugServer(iface string, port int, ff FrontendFactory, dp *DebugParams) DebugServer { debugger := MakeDebugger() router := mux.NewRouter() - appAddress := fmt.Sprintf("127.0.0.1:%d", port) + appAddress := fmt.Sprintf("%s:%d", iface, port) da := ff.Make(router, appAddress) debugger.AddAdapter(da) diff --git a/cmd/tealdbg/server_test.go b/cmd/tealdbg/server_test.go index 99ff96af72..aa138b30b3 100644 --- a/cmd/tealdbg/server_test.go +++ b/cmd/tealdbg/server_test.go @@ -111,7 +111,7 @@ func serverTestImpl(t *testing.T, run func(t *testing.T, ds *DebugServer) bool, var ds DebugServer for attempt < 5 && !started { port = rand.Intn(maxPortNum-minPortNum) + minPortNum - ds = makeDebugServer(port, &mockFactory{}, dp) + ds = makeDebugServer("127.0.0.1", port, &mockFactory{}, dp) started = run(t, &ds) attempt++ } diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp index 7be2f90668..49367fec93 100644 --- a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp +++ b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp @@ -2,29 +2,14 @@ set err 0 log_user 1 -if { [catch { - - set TEST_ALGO_DIR [lindex $argv 0] - set timeout 30 - - set TEST_DIR $TEST_ALGO_DIR - exec mkdir -p $TEST_DIR - - set TEAL_PROG_FILE "$TEST_DIR/trivial.teal" - - # this is ConsensusV25 - set PROTOCOL_VERSION_2 "https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466" - - # this is ConsensusV26 - set PROTOCOL_VERSION_3 "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff" - - # run the test 
using version 2: - - exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE +# workaround for scoping issue in TestTealdbg and setting URL inside expect_background's re scope +set URL "" +proc TestTealdbg { TEAL_PROG_FILE PROTOCOL_VERSION ARGS } { + variable URL set URL "" set PASSED 0 - spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_2 + spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION {*}[lrange $ARGS 0 end] expect_background { timeout { puts "tealdbg debug timed out"; exit 1 } -re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); } @@ -63,53 +48,32 @@ if { [catch { puts "Shutting down tealdbg" close -i $tealdbg_spawn_id - exec rm $TEAL_PROG_FILE +} - # run the test using version 3: +if { [catch { - exec printf "#pragma version 3\nint 1\ndup\n+\n" > $TEAL_PROG_FILE + set TEST_ALGO_DIR [lindex $argv 0] + set timeout 30 - set URL "" - set PASSED 0 - spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_3 - expect_background { - timeout { puts "tealdbg debug timed out"; exit 1 } - -re {CDT debugger listening on: (ws://[.a-z0-9:/]+)} { set URL $expect_out(1,string); } - eof { - catch wait result - if { [lindex $result 3] != 0 } { - puts "returned error code is [lindex $result 3]" - exit 1 - } - } - } - set tealdbg_spawn_id $spawn_id + set TEST_DIR $TEST_ALGO_DIR + exec mkdir -p $TEST_DIR - # wait until URL is set or timeout - set it 0 - while { $it < 10 && $URL == "" } { - set it [expr {$it + 1}] - sleep 1 - } - if { $URL == "" } { - puts "ERROR: URL is not set after timeout" - exit 1 - } + set TEAL_PROG_FILE "$TEST_DIR/trivial.teal" + # this is ConsensusV25 + set PROTOCOL_VERSION_2 "https://github.com/algorandfoundation/specs/tree/bea19289bf41217d2c0af30522fa222ef1366466" - spawn cdtmock $URL - expect { - timeout { puts "cdt-mock debug timed out"; exit 1 } - -re {Debugger.paused} { set PASSED 1; } - eof { catch wait result; if { [lindex $result 3] == 0 } { puts "Expected non-zero exit code"; 
exit [lindex $result 3] } } - } + # this is ConsensusV26 + set PROTOCOL_VERSION_3 "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff" - if { $PASSED == 0 } { - puts "ERROR: have not found 'Debugger.paused' in cdtmock output" - exit 1 - } + # run the test using version 2: + exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE + TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_2 "" - puts "Shutting down tealdbg" - close -i $tealdbg_spawn_id + # run the test using version 3: + exec printf "#pragma version 3\nint 1\ndup\n+\n" > $TEAL_PROG_FILE + TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_3 "--remote-debugging-port 9392 --listen 127.0.0.1" + + exec rm $TEAL_PROG_FILE } EXCEPTION ] } { puts "ERROR in teadbgTest: $EXCEPTION" From ae5abd19c961c2e56f84bba05b6b01c386e96e8d Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 17 Mar 2021 10:57:13 -0400 Subject: [PATCH 065/215] update per review --- util/bloom/bloom.go | 43 ++++++++++++++++++++-------------------- util/bloom/bloom_test.go | 38 +++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+), 21 deletions(-) diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index 2a94835676..bc498082d8 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -29,10 +29,14 @@ type Filter struct { func New(sizeBits int, numHashes uint32, prefix uint32) *Filter { m := (sizeBits + 7) / 8 filter := Filter{ - numHashes: numHashes, - data: make([]byte, m), + numHashes: numHashes, + data: make([]byte, m), + preimageStagingBuffer: make([]byte, 0, 4+32), + hashStagingBuffer: make([]uint32, numHashes+3), } binary.BigEndian.PutUint32(filter.prefix[:], prefix) + copy(filter.preimageStagingBuffer, filter.prefix[:]) + filter.preimageStagingBuffer = filter.preimageStagingBuffer[:len(filter.prefix)] return &filter } @@ -55,26 +59,29 @@ func Optimal(numElements int, falsePositiveRate float64) (sizeBits int, numHashe return int(math.Ceil(m)), numHashes } +// makePreimage 
creates the preimage we use for a byte-array before hashing it. +func (f *Filter) makePreimage(x []byte) (preimage []byte) { + preimage = f.preimageStagingBuffer + preimage = append(preimage, x...) + return +} + // Set marks x as present in the filter func (f *Filter) Set(x []byte) { - withPrefix := f.preimageStagingBuffer - withPrefix = append(withPrefix, f.prefix[:]...) - withPrefix = append(withPrefix, x...) - hs := f.hash(withPrefix, f.numHashes) + withPrefix := f.makePreimage(x) + hs := f.hash(withPrefix) + f.preimageStagingBuffer = withPrefix[:len(f.prefix)] n := uint32(len(f.data) * 8) for _, h := range hs { f.set(h % n) } - f.preimageStagingBuffer = withPrefix[:0] } // Test checks whether x is present in the filter func (f *Filter) Test(x []byte) bool { - withPrefix := f.preimageStagingBuffer - withPrefix = append(withPrefix, f.prefix[:]...) - withPrefix = append(withPrefix, x...) - hs := f.hash(withPrefix, f.numHashes) - f.preimageStagingBuffer = withPrefix[:0] + withPrefix := f.makePreimage(x) + hs := f.hash(withPrefix) + f.preimageStagingBuffer = withPrefix[:len(f.prefix)] n := uint32(len(f.data) * 8) for _, h := range hs { if !f.test(h % n) { @@ -148,16 +155,10 @@ func UnmarshalJSON(data []byte) (*Filter, error) { // Previously, we used the hashing method described in this paper: // http://www.eecs.harvard.edu/~michaelm/postscripts/rsa2008.pdf // but this gave us bad false positive rates for small bloom filters. 
-func (f *Filter) hash(x []byte, nhash uint32) []uint32 { +func (f *Filter) hash(x []byte) []uint32 { res := f.hashStagingBuffer - if cap(res) < int(nhash+3) { - f.hashStagingBuffer = make([]uint32, nhash+3) - res = f.hashStagingBuffer - } else { - res = res[:nhash+3] - } - for i := uint32(0); i < (nhash+3)/4; i++ { + for i := uint32(0); i < (f.numHashes+3)/4; i++ { h1, h2 := siphash.Hash128(uint64(i), 666666, x) res[i*4] = uint32(h1) @@ -166,7 +167,7 @@ func (f *Filter) hash(x []byte, nhash uint32) []uint32 { res[i*4+3] = uint32(h2 >> 32) } - return res[:nhash] + return res[:f.numHashes] } func (f *Filter) test(bit uint32) bool { diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go index 8d6e862e85..dd59a8b9e3 100644 --- a/util/bloom/bloom_test.go +++ b/util/bloom/bloom_test.go @@ -312,3 +312,41 @@ func TestBloomFilterMemoryConsumption(t *testing.T) { require.LessOrEqual(t, uint64(result.MemBytes), uint64(result.N)) }) } + +func BenchmarkBloomFilterSet(b *testing.B) { + bfElements := 1000000 + sizeBits, numHashes := Optimal(bfElements, 0.01) + prefix := uint32(0) + bf := New(sizeBits, numHashes, prefix) + dataset := make([][]byte, bfElements) + for n := 0; n < bfElements; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + dataset[n] = hash[:] + } + + b.ResetTimer() + for x := 0; x < b.N; x++ { + bf.Set(dataset[x%bfElements]) + } +} + +func BenchmarkBloomFilterTest(b *testing.B) { + bfElements := 1000000 + sizeBits, numHashes := Optimal(bfElements, 0.01) + prefix := uint32(0) + bf := New(sizeBits, numHashes, prefix) + dataset := make([][]byte, bfElements) + for n := 0; n < bfElements; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + dataset[n] = hash[:] + } + // set half of them. 
+ for n := 0; n < bfElements/2; n++ { + bf.Set(dataset[n]) + } + + b.ResetTimer() + for x := 0; x < b.N; x++ { + bf.Test(dataset[x%bfElements]) + } +} From 731f711d3d5e8e938a7f3e495be91bb7758b9c18 Mon Sep 17 00:00:00 2001 From: Shiqi Zheng Date: Wed, 17 Mar 2021 09:56:06 -0400 Subject: [PATCH 066/215] skip printing the logs when program is in terminated state. --- test/scripts/e2e_client_runner.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py index 7b1d2d0693..7dd3cf1d4a 100755 --- a/test/scripts/e2e_client_runner.py +++ b/test/scripts/e2e_client_runner.py @@ -113,11 +113,22 @@ def _script_thread_inner(runset, scriptname): runset.running(scriptname, p) timeout = read_script_for_timeout(scriptname) try: + # Popen return code: https://docs.python.org/3/library/subprocess.html#subprocess.Popen.returncode + # 0 means success. A negative value -N indicates that the child was terminated by signal N (POSIX only). 
retcode = p.wait(timeout) except subprocess.TimeoutExpired as te: sys.stderr.write('{}\n'.format(te)) retcode = -1 dt = time.time() - start + + if retcode != 0: + logger.debug("script file: ", scriptname, "retcode: ", retcode) + + if runset.terminated: + logger.info('Program terminated before %s finishes.', scriptname) + runset.done(scriptname, False, dt) + return + if retcode != 0: with runset.lock: logger.error('%s failed in %f seconds', scriptname, dt) From ec9a1860fdb68b63d4cb9ddb8076537fa33fdfc4 Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Wed, 17 Mar 2021 14:18:41 -0400 Subject: [PATCH 067/215] remove unneccessary lines --- test/scripts/e2e_client_runner.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py index 7dd3cf1d4a..39f4885b8f 100755 --- a/test/scripts/e2e_client_runner.py +++ b/test/scripts/e2e_client_runner.py @@ -113,16 +113,12 @@ def _script_thread_inner(runset, scriptname): runset.running(scriptname, p) timeout = read_script_for_timeout(scriptname) try: - # Popen return code: https://docs.python.org/3/library/subprocess.html#subprocess.Popen.returncode - # 0 means success. A negative value -N indicates that the child was terminated by signal N (POSIX only). 
retcode = p.wait(timeout) except subprocess.TimeoutExpired as te: sys.stderr.write('{}\n'.format(te)) retcode = -1 dt = time.time() - start - if retcode != 0: - logger.debug("script file: ", scriptname, "retcode: ", retcode) if runset.terminated: logger.info('Program terminated before %s finishes.', scriptname) From 06aa0ba42b21100bef2338cc7e7469017eb3ac30 Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Wed, 17 Mar 2021 15:51:22 -0400 Subject: [PATCH 068/215] Fixes for pending txn endpoint --- daemon/algod/api/server/v2/handlers.go | 14 ++++---- .../algod/api/server/v2/test/handlers_test.go | 34 ++++++++++++++++--- daemon/algod/api/server/v2/test/helpers.go | 3 +- 3 files changed, 38 insertions(+), 13 deletions(-) diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index aaed6a9307..2509761a19 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -572,7 +572,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format return badRequest(ctx, err, errFailedParsingFormatOption, v2.Log) } - txns, err := v2.Node.GetPendingTxnsFromPool() + txnPool, err := v2.Node.GetPendingTxnsFromPool() if err != nil { return internalError(ctx, err, errFailedLookingUpTransactionPool, v2.Log) } @@ -584,10 +584,10 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format } // Convert transactions to msgp / json strings - txnArray := make([]transactions.SignedTxn, 0) - for _, txn := range txns { + topTxns := make([]transactions.SignedTxn, 0) + for _, txn := range txnPool { // break out if we've reached the max number of transactions - if max != nil && uint64(len(txnArray)) >= *max { + if max != nil && *max != 0 && uint64(len(topTxns)) >= *max { break } @@ -596,7 +596,7 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format continue } - txnArray = append(txnArray, txn) + topTxns = append(topTxns, txn) } // Encoding wasn't 
working well without embedding "real" objects. @@ -604,8 +604,8 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format TopTransactions []transactions.SignedTxn `json:"top-transactions"` TotalTransactions uint64 `json:"total-transactions"` }{ - TopTransactions: txnArray, - TotalTransactions: uint64(len(txnArray)), + TopTransactions: topTxns, + TotalTransactions: uint64(len(txnPool)), } data, err := encode(handle, response) diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go index dcfd4e9a6f..da98d6549a 100644 --- a/daemon/algod/api/server/v2/test/handlers_test.go +++ b/daemon/algod/api/server/v2/test/handlers_test.go @@ -189,21 +189,45 @@ func TestPendingTransactionInformation(t *testing.T) { pendingTransactionInformationTest(t, 0, "bad format", 400) } -func getPendingTransactionsTest(t *testing.T, format string, expectedCode int) { +func getPendingTransactionsTest(t *testing.T, format string, max uint64, expectedCode int) { handler, c, rec, _, _, releasefunc := setupTestForMethodGet(t) defer releasefunc() - params := generatedV2.GetPendingTransactionsParams{Format: &format} + params := generatedV2.GetPendingTransactionsParams{Format: &format, Max: &max} err := handler.GetPendingTransactions(c, params) require.NoError(t, err) require.Equal(t, expectedCode, rec.Code) + if format == "json" && rec.Code == 200 { + var response generatedV2.PendingTransactionsResponse + + data := rec.Body.Bytes() + err = protocol.DecodeJSON(data, &response) + require.NoError(t, err, string(data)) + + if max == 0 || max >= uint64(len(txnPoolGolden)) { + // all pending txns should be returned + require.Equal(t, uint64(len(response.TopTransactions)), uint64(len(txnPoolGolden))) + } else { + // only max txns should be returned + require.Equal(t, uint64(len(response.TopTransactions)), max) + } + + require.Equal(t, response.TotalTransactions, uint64(len(txnPoolGolden))) + require.GreaterOrEqual(t, 
response.TotalTransactions, uint64(len(response.TopTransactions))) + } } func TestPendingTransactions(t *testing.T) { t.Parallel() - getPendingTransactionsTest(t, "json", 200) - getPendingTransactionsTest(t, "msgpack", 200) - getPendingTransactionsTest(t, "bad format", 400) + getPendingTransactionsTest(t, "json", 0, 200) + getPendingTransactionsTest(t, "json", 1, 200) + getPendingTransactionsTest(t, "json", 2, 200) + getPendingTransactionsTest(t, "json", 3, 200) + getPendingTransactionsTest(t, "msgpack", 0, 200) + getPendingTransactionsTest(t, "msgpack", 1, 200) + getPendingTransactionsTest(t, "msgpack", 2, 200) + getPendingTransactionsTest(t, "msgpack", 3, 200) + getPendingTransactionsTest(t, "bad format", 0, 400) } func pendingTransactionsByAddressTest(t *testing.T, rootkeyToUse int, format string, expectedCode int) { diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index 398788e4ed..206921f2c8 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -74,6 +74,7 @@ var poolAddrResponseGolden = generatedV2.AccountResponse{ AppsTotalSchema: &appsTotalSchema, CreatedApps: &appCreatedApps, } +var txnPoolGolden = make([]transactions.SignedTxn, 2) // ordinarily mockNode would live in `components/mocks` // but doing this would create an import cycle, as mockNode needs @@ -120,7 +121,7 @@ func (m mockNode) GetPendingTransaction(txID transactions.Txid) (res node.TxnWit } func (m mockNode) GetPendingTxnsFromPool() ([]transactions.SignedTxn, error) { - return nil, m.err + return txnPoolGolden, m.err } func (m mockNode) SuggestedFee() basics.MicroAlgos { From a8b19acb1dec2bfcfa2bb439918c2a31445a98c7 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Wed, 17 Mar 2021 17:34:49 -0400 Subject: [PATCH 069/215] nil check fix expand unit test --- util/metrics/tagcounter.go | 7 ++++++- util/metrics/tagcounter_test.go | 13 +++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) 
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go index 5d08480394..3b97454f3b 100644 --- a/util/metrics/tagcounter.go +++ b/util/metrics/tagcounter.go @@ -93,6 +93,11 @@ func (tc *TagCounter) Add(tag string, val uint64) { // WriteMetric is part of the Metric interface func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) { + tagptr := tc.tagptr.Load() + if tagptr == nil { + // no values, nothing to say. + return + } // TODO: what to do with "parentLabels"? obsolete part of interface? buf.WriteString("# ") buf.WriteString(tc.Name) @@ -100,7 +105,7 @@ func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) { buf.WriteString(tc.Description) buf.WriteString("\n") isTemplate := strings.Contains(tc.Name, "{TAG}") - tags := tc.tagptr.Load().(map[string]*uint64) + tags := tagptr.(map[string]*uint64) for tag, tagcount := range tags { if tagcount == nil { continue diff --git a/util/metrics/tagcounter_test.go b/util/metrics/tagcounter_test.go index 21215eb3d7..52d0cb349c 100644 --- a/util/metrics/tagcounter_test.go +++ b/util/metrics/tagcounter_test.go @@ -18,8 +18,11 @@ package metrics import ( "fmt" + "strings" "sync" "testing" + + "github.com/stretchr/testify/require" ) func TestTagCounter(t *testing.T) { @@ -34,6 +37,16 @@ func TestTagCounter(t *testing.T) { } tc := NewTagCounter("tc", "wat") + + // check that empty TagCounter cleanly returns no results + var sb strings.Builder + tc.WriteMetric(&sb, "") + require.Equal(t, "", sb.String()) + + result := make(map[string]string) + tc.AddMetric(result) + require.Equal(t, 0, len(result)) + var wg sync.WaitGroup wg.Add(len(tags)) From 344c6914dd05ffa1c2ac88a8b6285857a7c061b7 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Wed, 17 Mar 2021 15:03:05 -0700 Subject: [PATCH 070/215] fix --- cmd/algoh/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go index 0902496a4d..45cb0c9867 100644 --- a/cmd/algoh/main.go +++ 
b/cmd/algoh/main.go @@ -145,6 +145,7 @@ func main() { } err = cmd.Wait() if err != nil { + captureErrorLogs(algohConfig, errorOutput, output, absolutePath, true) reportErrorf("error waiting for algod: %v", err) } close(done) From 0affc40b7a930bbdc55b78df3c57e39289f7db49 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 17 Mar 2021 21:59:46 -0400 Subject: [PATCH 071/215] bugfix --- util/bloom/bloom.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index bc498082d8..92a044c01e 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -131,6 +131,10 @@ func UnmarshalBinary(data []byte) (*Filter, error) { } copy(f.prefix[:], data[4:8]) f.data = data[8:] + f.preimageStagingBuffer = make([]byte, 0, 4+32) + f.hashStagingBuffer = make([]uint32, f.numHashes+3) + copy(f.preimageStagingBuffer, f.prefix[:]) + f.preimageStagingBuffer = f.preimageStagingBuffer[:len(f.prefix)] return f, nil } From aa648f4295ac268a72e154961abc280639318838 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 23 Feb 2021 00:42:31 -0500 Subject: [PATCH 072/215] Initial draft of universal fetcher impl. 
--- catchup/fetcher.go | 8 +-- catchup/universalFetcher.go | 80 +++++++++++++++++++++ catchup/universalFetcher_test.go | 116 +++++++++++++++++++++++++++++++ 3 files changed, 200 insertions(+), 4 deletions(-) create mode 100644 catchup/universalFetcher.go create mode 100644 catchup/universalFetcher_test.go diff --git a/catchup/fetcher.go b/catchup/fetcher.go index c1f2be6049..04645b92a6 100644 --- a/catchup/fetcher.go +++ b/catchup/fetcher.go @@ -86,12 +86,12 @@ func MakeNetworkFetcherFactory(net network.GossipNode, peerLimit int, cfg *confi return factory } -// BuildFetcherClients returns a set of clients we can fetch blocks from -func (factory NetworkFetcherFactory) BuildFetcherClients() []FetcherClient { +// buildFetcherClients returns a set of clients we can fetch blocks from +func (factory NetworkFetcherFactory) buildFetcherClients() []FetcherClient { peers := factory.net.GetPeers(network.PeersPhonebookRelays) factory.log.Debugf("%d outgoing peers", len(peers)) if len(peers) == 0 { - factory.log.Warn("no outgoing peers for BuildFetcherClients") + factory.log.Warn("no outgoing peers for buildFetcherClients") return nil } out := make([]FetcherClient, 0, len(peers)) @@ -109,7 +109,7 @@ func (factory NetworkFetcherFactory) New() Fetcher { return &NetworkFetcher{ roundUpperBound: make(map[FetcherClient]basics.Round), activeFetches: make(map[FetcherClient]int), - peers: factory.BuildFetcherClients(), + peers: factory.buildFetcherClients(), log: logging.Base(), } } diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go new file mode 100644 index 0000000000..7b104b880d --- /dev/null +++ b/catchup/universalFetcher.go @@ -0,0 +1,80 @@ +// Copyright (C) 2019-2021 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package catchup + +import ( + "context" + "time" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network" +) + +type UniversalFetcher struct { + config config.Local + net network.GossipNode + log logging.Logger +} + +func MakeUniversalFetcher(config config.Local, net network.GossipNode, log logging.Logger) UniversalFetcher { + return UniversalFetcher{ + config: config, + net: net, + log: log} +} + +func (uf *UniversalFetcher) FetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, + cert *agreement.Certificate, downloadDuration time.Duration, err error) { + + httpPeer, validHttpPeer := peer.(network.HTTPPeer) + if validHttpPeer { + fetcher := makeHTTPFetcher(uf.log, httpPeer, uf.net, &uf.config) + blk, cert, downloadDuration, err = uf.fetchBlockHttp(fetcher, ctx, round) + + } else { + fetcher := MakeWsFetcher(uf.log, []network.Peer{peer}, &uf.config) + blk, cert, downloadDuration, err = uf.fetchBlockWs(fetcher, ctx, round) + } + return blk, cert, downloadDuration, err +} + +func (uf *UniversalFetcher) 
fetchBlockWs(wsf Fetcher, ctx context.Context, round basics.Round) (*bookkeeping.Block, + *agreement.Certificate, time.Duration, error) { + blockDownloadStartTime := time.Now() + blk, cert, client, err := wsf.FetchBlock(ctx, round) + if err != nil { + return nil, nil, time.Duration(0), err + } + client.Close() + downloadDuration := time.Now().Sub(blockDownloadStartTime) + return blk, cert, downloadDuration, nil +} + +func (uf *UniversalFetcher) fetchBlockHttp(hf *HTTPFetcher, ctx context.Context, round basics.Round) (blk *bookkeeping.Block, + cert *agreement.Certificate, dur time.Duration, err error) { + blockDownloadStartTime := time.Now() + blk, cert, err = hf.FetchBlock(ctx, round) + downloadDuration := time.Now().Sub(blockDownloadStartTime) + if err != nil { + return nil, nil, time.Duration(0), err + } + return blk, cert, downloadDuration, err +} diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go new file mode 100644 index 0000000000..2773dfab1f --- /dev/null +++ b/catchup/universalFetcher_test.go @@ -0,0 +1,116 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package catchup + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/agreement" + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/rpcs" +) + +// TestUGetBlockWs tests the universal fetcher ws peer case +func TestUGetBlockWs(t *testing.T) { + + cfg := config.GetDefaultLocal() + cfg.EnableCatchupFromArchiveServers = true + + ledger, next, b, err := buildTestLedger(t) + if err != nil { + t.Fatal(err) + return + } + + version := "2.1" + blockServiceConfig := config.GetDefaultLocal() + blockServiceConfig.EnableBlockService = true + + net := &httpTestPeerSource{} + + up := makeTestUnicastPeer(net, version, t) + ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") + ls.Start() + + fetcher := MakeUniversalFetcher(cfg, net, logging.TestingLog(t)) + + var block *bookkeeping.Block + var cert *agreement.Certificate + var duration time.Duration + + block, cert, _, err = fetcher.FetchBlock(context.Background(), next, up) + + require.NoError(t, err) + require.Equal(t, &b, block) + + block, cert, duration, err = fetcher.FetchBlock(context.Background(), next+1, up) + + require.Error(t, errNoBlockForRound, err) + require.Nil(t, block) + require.Nil(t, cert) + require.Equal(t, int64(duration), int64(0)) +} + +// TestUGetBlockHttp tests the universal fetcher http peer case +func TestUGetBlockHttp(t *testing.T) { + + cfg := config.GetDefaultLocal() + cfg.EnableCatchupFromArchiveServers = true + + ledger, next, b, err := buildTestLedger(t) + if err != nil { + t.Fatal(err) + return + } + + blockServiceConfig := config.GetDefaultLocal() + blockServiceConfig.EnableBlockService = true + + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") + + nodeA := basicRPCNode{} + 
nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + + net.addPeer(rootURL) + fetcher := MakeUniversalFetcher(cfg, net, logging.TestingLog(t)) + + var block *bookkeeping.Block + var cert *agreement.Certificate + var duration time.Duration + block, cert, duration, err = fetcher.FetchBlock(context.Background(), next, net.GetPeers()[0]) + + require.NoError(t, err) + require.Equal(t, &b, block) + require.Greater(t, int64(duration), int64(0)) + + block, cert, duration, err = fetcher.FetchBlock(context.Background(), next+1, net.GetPeers()[0]) + + require.Error(t, errNoBlockForRound, err) + require.Nil(t, block) + require.Nil(t, cert) + require.Equal(t, int64(duration), int64(0)) +} From feca7eca56d5fa815d1c517ba9ad59a93fa2f180 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 23 Feb 2021 00:56:48 -0500 Subject: [PATCH 073/215] fixing lint --- catchup/universalFetcher.go | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 7b104b880d..703e143ee2 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -28,12 +28,14 @@ import ( "github.com/algorand/go-algorand/network" ) +// UniversalFetcher fetches blocks either from an http peer or ws peer. type UniversalFetcher struct { config config.Local net network.GossipNode log logging.Logger } +// MakeUniversalFetcher returns a fetcher for http and ws peers. func MakeUniversalFetcher(config config.Local, net network.GossipNode, log logging.Logger) UniversalFetcher { return UniversalFetcher{ config: config, @@ -41,22 +43,23 @@ func MakeUniversalFetcher(config config.Local, net network.GossipNode, log loggi log: log} } +// FetchBlock returns a block from the peer. The peer can be either an http or ws peer. 
func (uf *UniversalFetcher) FetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, err error) { - httpPeer, validHttpPeer := peer.(network.HTTPPeer) - if validHttpPeer { + httpPeer, validHTTPPeer := peer.(network.HTTPPeer) + if validHTTPPeer { fetcher := makeHTTPFetcher(uf.log, httpPeer, uf.net, &uf.config) - blk, cert, downloadDuration, err = uf.fetchBlockHttp(fetcher, ctx, round) + blk, cert, downloadDuration, err = uf.fetchBlockHTTP(ctx, fetcher, round) } else { fetcher := MakeWsFetcher(uf.log, []network.Peer{peer}, &uf.config) - blk, cert, downloadDuration, err = uf.fetchBlockWs(fetcher, ctx, round) + blk, cert, downloadDuration, err = uf.fetchBlockWs(ctx, fetcher, round) } return blk, cert, downloadDuration, err } -func (uf *UniversalFetcher) fetchBlockWs(wsf Fetcher, ctx context.Context, round basics.Round) (*bookkeeping.Block, +func (uf *UniversalFetcher) fetchBlockWs(ctx context.Context, wsf Fetcher, round basics.Round) (*bookkeeping.Block, *agreement.Certificate, time.Duration, error) { blockDownloadStartTime := time.Now() blk, cert, client, err := wsf.FetchBlock(ctx, round) @@ -68,7 +71,7 @@ func (uf *UniversalFetcher) fetchBlockWs(wsf Fetcher, ctx context.Context, round return blk, cert, downloadDuration, nil } -func (uf *UniversalFetcher) fetchBlockHttp(hf *HTTPFetcher, ctx context.Context, round basics.Round) (blk *bookkeeping.Block, +func (uf *UniversalFetcher) fetchBlockHTTP(ctx context.Context, hf *HTTPFetcher, round basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, dur time.Duration, err error) { blockDownloadStartTime := time.Now() blk, cert, err = hf.FetchBlock(ctx, round) From 3ebfe9b346784de93e56b97bd2066583569242b2 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 23 Feb 2021 16:50:50 -0500 Subject: [PATCH 074/215] simplifying the structures used by the fetcher --- catchup/universalFetcher.go | 41 
+++++++++++++++----------------- catchup/universalFetcher_test.go | 19 +++++++++++++-- 2 files changed, 36 insertions(+), 24 deletions(-) diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 703e143ee2..580aeb95a4 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -18,6 +18,7 @@ package catchup import ( "context" + "fmt" "time" "github.com/algorand/go-algorand/agreement" @@ -47,37 +48,33 @@ func MakeUniversalFetcher(config config.Local, net network.GossipNode, log loggi func (uf *UniversalFetcher) FetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, err error) { + var fetcherClient FetcherClient httpPeer, validHTTPPeer := peer.(network.HTTPPeer) if validHTTPPeer { - fetcher := makeHTTPFetcher(uf.log, httpPeer, uf.net, &uf.config) - blk, cert, downloadDuration, err = uf.fetchBlockHTTP(ctx, fetcher, round) - + fetcherClient = &HTTPFetcher{ + peer: httpPeer, + rootURL: httpPeer.GetAddress(), + net: uf.net, + client: httpPeer.GetHTTPClient(), + log: uf.log, + config: &uf.config} + } else if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { + fetcherClient = &wsFetcherClient{ + target: wsPeer, + pendingCtxs: make(map[context.Context]context.CancelFunc), + config: &uf.config, + } } else { - fetcher := MakeWsFetcher(uf.log, []network.Peer{peer}, &uf.config) - blk, cert, downloadDuration, err = uf.fetchBlockWs(ctx, fetcher, round) + return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") } - return blk, cert, downloadDuration, err -} -func (uf *UniversalFetcher) fetchBlockWs(ctx context.Context, wsf Fetcher, round basics.Round) (*bookkeeping.Block, - *agreement.Certificate, time.Duration, error) { - blockDownloadStartTime := time.Now() - blk, cert, client, err := wsf.FetchBlock(ctx, round) + fetchedBuf, err := fetcherClient.GetBlockBytes(ctx, 
round) if err != nil { return nil, nil, time.Duration(0), err } - client.Close() - downloadDuration := time.Now().Sub(blockDownloadStartTime) - return blk, cert, downloadDuration, nil -} - -func (uf *UniversalFetcher) fetchBlockHTTP(ctx context.Context, hf *HTTPFetcher, round basics.Round) (blk *bookkeeping.Block, - cert *agreement.Certificate, dur time.Duration, err error) { - blockDownloadStartTime := time.Now() - blk, cert, err = hf.FetchBlock(ctx, round) - downloadDuration := time.Now().Sub(blockDownloadStartTime) + block, cert, err := processBlockBytes(fetchedBuf, round, fetcherClient.Address()) if err != nil { return nil, nil, time.Duration(0), err } - return blk, cert, downloadDuration, err + return block, cert, downloadDuration, err } diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 2773dfab1f..a7d274f474 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -62,10 +62,12 @@ func TestUGetBlockWs(t *testing.T) { require.NoError(t, err) require.Equal(t, &b, block) + require.GreaterOrEqual(t, int64(duration), int64(0)) block, cert, duration, err = fetcher.FetchBlock(context.Background(), next+1, up) - require.Error(t, errNoBlockForRound, err) + require.Error(t, err) + require.Contains(t, err.Error(), "requested block is not available") require.Nil(t, block) require.Nil(t, cert) require.Equal(t, int64(duration), int64(0)) @@ -105,11 +107,24 @@ func TestUGetBlockHttp(t *testing.T) { require.NoError(t, err) require.Equal(t, &b, block) - require.Greater(t, int64(duration), int64(0)) + require.GreaterOrEqual(t, int64(duration), int64(0)) block, cert, duration, err = fetcher.FetchBlock(context.Background(), next+1, net.GetPeers()[0]) require.Error(t, errNoBlockForRound, err) + require.Contains(t, err.Error(), "No block available for given round") + require.Nil(t, block) + require.Nil(t, cert) + require.Equal(t, int64(duration), int64(0)) +} + +// TestUGetBlockUnsupported tests the handling of an 
unsupported peer +func TestUGetBlockUnsupported(t *testing.T) { + fetcher := UniversalFetcher{} + peer := "" + block, cert, duration, err := fetcher.FetchBlock(context.Background(), 1, peer) + require.Error(t, err) + require.Contains(t, err.Error(), "FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") require.Nil(t, block) require.Nil(t, cert) require.Equal(t, int64(duration), int64(0)) From ec08e2ff65f4425ed244a55957d86dc4347789ab Mon Sep 17 00:00:00 2001 From: algonautshant Date: Sat, 27 Feb 2021 01:08:26 -0500 Subject: [PATCH 075/215] Use the universal fetcher in fetcher service and fix the tests --- catchup/catchpointService.go | 11 ++-- catchup/pref_test.go | 3 +- catchup/service.go | 64 +++++++++++------------ catchup/service_test.go | 89 ++++++++++++++------------------ catchup/universalFetcher.go | 38 ++++++++++++-- catchup/universalFetcher_test.go | 17 +++--- 6 files changed, 122 insertions(+), 100 deletions(-) diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index efe5777681..ef95fe93fd 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -86,6 +86,8 @@ type CatchpointCatchupService struct { abortCtxFunc context.CancelFunc // blocksDownloadPeerSelector is the peer selector used for downloading blocks. 
blocksDownloadPeerSelector *peerSelector + // blockFetcherFactory gives a block fetcher + blockFetcherFactory blockFetcherFactory } // MakeResumedCatchpointCatchupService creates a catchpoint catchup service for a node that is already in catchpoint catchup mode @@ -107,7 +109,8 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, }), - } + blockFetcherFactory: makeUniversalBlockFetcherFactory(log, net, cfg)} + service.lastBlockHeader, err = l.BlockHdr(l.Latest()) if err != nil { return nil, err @@ -144,6 +147,7 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, }), + blockFetcherFactory: makeUniversalBlockFetcherFactory(log, net, cfg), } service.lastBlockHeader, err = l.BlockHdr(l.Latest()) if err != nil { @@ -595,9 +599,9 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui } return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock: recurring non-HTTP peer was provided by the peer selector")) } - fetcher := makeHTTPFetcher(cs.log, httpPeer, cs.net, &cs.config) + fetcher := cs.blockFetcherFactory.newBlockFetcher() blockDownloadStartTime := time.Now() - blk, _, err = fetcher.FetchBlock(cs.ctx, round) + blk, _, _, err = fetcher.fetchBlock(cs.ctx, round, httpPeer) if err != nil { if cs.ctx.Err() != nil { return nil, time.Duration(0), peer, true, cs.stopOrAbort() @@ -611,7 +615,6 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock failed after multiple blocks download attempts")) } // success - fetcher.Close() 
downloadDuration = time.Now().Sub(blockDownloadStartTime) return blk, downloadDuration, peer, false, nil } diff --git a/catchup/pref_test.go b/catchup/pref_test.go index 89ce251188..b960f7c063 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -57,8 +57,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { // Make Service syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil) - syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true}) - + syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true}} b.StartTimer() syncer.sync() b.StopTimer() diff --git a/catchup/service.go b/catchup/service.go index dd83f05b2e..0e187b310c 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -63,7 +63,6 @@ type Service struct { syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops cfg config.Local ledger Ledger - fetcherFactory FetcherFactory ctx context.Context cancel func() done chan struct{} @@ -86,7 +85,10 @@ type Service struct { lastSupportedRound basics.Round unmatchedPendingCertificates <-chan PendingUnmatchedCertificate - latestRoundFetcherFactory FetcherFactory + // blocksDownloadPeerSelector is the peer selector used for downloading blocks. + blocksDownloadPeerSelector *peerSelector + // blockFetcherFactory is gives a block fetcher + blockFetcherFactory blockFetcherFactory } // A BlockAuthenticator authenticates blocks given a certificate. 
@@ -106,15 +108,20 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode s = &Service{} s.cfg = config - s.fetcherFactory = MakeNetworkFetcherFactory(net, catchupPeersForSync, &config) s.ledger = ledger s.net = net s.auth = auth s.unmatchedPendingCertificates = unmatchedPendingCertificates - s.latestRoundFetcherFactory = MakeNetworkFetcherFactory(net, blockQueryPeerLimit, &config) s.log = log.With("Context", "sync") s.parallelBlocks = config.CatchupParallelBlocks s.deadlineTimeout = agreement.DeadlineTimeout() + s.blocksDownloadPeerSelector = makePeerSelector( + net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) + s.blockFetcherFactory = makeUniversalBlockFetcherFactory(s.log, s.net, s.cfg) return s } @@ -155,7 +162,7 @@ func (s *Service) SynchronizingTime() time.Duration { } // function scope to make a bunch of defer statements better -func (s *Service) innerFetch(fetcher Fetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) { +func (s *Service) innerFetch(fetcher blockFetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { ctx, cf := context.WithCancel(s.ctx) defer cf() stopWaitingForLedgerRound := make(chan struct{}) @@ -167,15 +174,17 @@ func (s *Service) innerFetch(fetcher Fetcher, r basics.Round) (blk *bookkeeping. cf() } }() - return fetcher.FetchBlock(ctx, r) + peer, err := s.blocksDownloadPeerSelector.GetNextPeer() + // xxx check err + return fetcher.fetchBlock(ctx, r, peer) } // fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary. 
// Returns false if we couldn't fetch or write (i.e., if we failed even after a given number of retries or if we were told to abort.) -func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool { +func (s *Service) fetchAndWrite(fetcher blockFetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool { i := 0 hasLookback := false - for !fetcher.OutOfPeers(r) { + for true { // !fetcher.OutOfPeers(r) { i++ select { case <-s.ctx.Done(): @@ -192,7 +201,7 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple // Try to fetch, timing out after retryInterval - block, cert, client, err := s.innerFetch(fetcher, r) + block, cert, _, err := s.innerFetch(fetcher, r) if err != nil { s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) @@ -223,12 +232,10 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple // Check if this mismatch is due to an unsupported protocol version if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok { s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol) - client.Close() return false } s.log.Warnf("fetchAndWrite(%v): block contents do not match header (attempt %d)", r, i) - client.Close() continue // retry the fetch } @@ -249,7 +256,6 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple err = s.auth.Authenticate(block, cert) if err != nil { s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err) - client.Close() continue // retry the fetch } @@ -304,7 +310,7 @@ func (s *Service) fetchAndWrite(fetcher Fetcher, r basics.Round, prevFetchComple type task func() basics.Round -func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) 
func() basics.Round { +func (s *Service) pipelineCallback(fetcher blockFetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round { return func() basics.Round { fetchResult := s.fetchAndWrite(fetcher, r, prevFetchCompleteChan, lookbackChan) @@ -322,13 +328,7 @@ func (s *Service) pipelineCallback(fetcher Fetcher, r basics.Round, thisFetchCom // TODO the following code does not handle the following case: seedLookback upgrades during fetch func (s *Service) pipelinedFetch(seedLookback uint64) { - fetcher := s.fetcherFactory.NewOverGossip() - defer fetcher.Close() - - // make sure that we have at least one peer - if fetcher.NumPeers() == 0 { - return - } + fetcher := s.blockFetcherFactory.newBlockFetcher() parallelRequests := s.parallelBlocks if parallelRequests < seedLookback { @@ -557,20 +557,19 @@ func (s *Service) syncCert(cert *PendingUnmatchedCertificate) { // TODO this doesn't actually use the digest from cert! func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) { blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest - fetcher := s.latestRoundFetcherFactory.NewOverGossip() - defer func() { - fetcher.Close() - }() + fetcher := s.blockFetcherFactory.newBlockFetcher() for s.ledger.LastRound() < cert.Round { - if fetcher.OutOfPeers(cert.Round) { - fetcher.Close() - // refresh peers and try again - logging.Base().Warn("fetchRound found no outgoing peers") - s.net.RequestConnectOutgoing(true, s.ctx.Done()) - fetcher = s.latestRoundFetcherFactory.NewOverGossip() - } + // xxx this needs to change + /* if fetcher.OutOfPeers(cert.Round) { + fetcher.Close() + // refresh peers and try again + logging.Base().Warn("fetchRound found no outgoing peers") + s.net.RequestConnectOutgoing(true, s.ctx.Done()) + fetcher := MakeUniversalFetcher(s.log, s.net, s.cfg) + // fetcher = 
s.latestRoundFetcherFactory.NewOverGossip() + }*/ // Ask the fetcher to get the block somehow - block, fetchedCert, rpcc, err := s.innerFetch(fetcher, cert.Round) + block, fetchedCert, _, err := s.innerFetch(fetcher, cert.Round) if err != nil { select { @@ -582,7 +581,6 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy logging.Base().Warnf("fetchRound could not acquire block, fetcher errored out: %v", err) continue } - rpcc.Close() if block.Hash() == blockHash && block.ContentsMatchHeader() { s.ledger.EnsureBlock(block, cert) diff --git a/catchup/service_test.go b/catchup/service_test.go index 4178012764..6a65ab5b6a 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -35,6 +35,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/committee" "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" ) @@ -47,29 +48,6 @@ type MockedFetcherFactory struct { mu deadlock.Mutex } -// a lock just to sync swapping internal fetchers -func makeMockFactory(fetcher *MockedFetcher) *MockedFetcherFactory { - var factory MockedFetcherFactory - factory.fetcher = fetcher - return &factory -} - -func (factory *MockedFetcherFactory) New() Fetcher { - factory.mu.Lock() - defer factory.mu.Unlock() - return factory.fetcher -} - -func (factory *MockedFetcherFactory) NewOverGossip() Fetcher { - return factory.New() -} - -func (factory *MockedFetcherFactory) changeFetcher(fetcher *MockedFetcher) { - factory.mu.Lock() - defer factory.mu.Unlock() - factory.fetcher = fetcher -} - type MockClient struct { once sync.Once closed bool @@ -88,7 +66,24 @@ func (c *MockClient) GetBlockBytes(ctx context.Context, r basics.Round) (data [] return nil, nil } -// Mocked Fetcher +type mockBlockFetcherFactory struct { + mf *MockedFetcher + mu deadlock.Mutex +} + +func (mbff *mockBlockFetcherFactory) newBlockFetcher() 
blockFetcher { + mbff.mu.Lock() + defer mbff.mu.Unlock() + return mbff.mf +} + +func (mbff *mockBlockFetcherFactory) changeFetcher(fetcher *MockedFetcher) { + mbff.mu.Lock() + defer mbff.mu.Unlock() + mbff.mf = fetcher +} + +// Mocked Fetcher will mock UniversalFetcher type MockedFetcher struct { ledger Ledger timeout bool @@ -99,7 +94,10 @@ type MockedFetcher struct { mu deadlock.Mutex } -func (m *MockedFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error) { +func (m *MockedFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (*bookkeeping.Block, *agreement.Certificate, time.Duration, error) { + if m.OutOfPeers(round) { + return nil, nil, time.Duration(0), nil + } if m.timeout { time.Sleep(time.Duration(config.GetDefaultLocal().CatchupHTTPBlockFetchTimeoutSec)*time.Second + time.Second) } @@ -111,14 +109,14 @@ func (m *MockedFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bo } block, err := m.ledger.Block(round) if round > m.ledger.LastRound() { - return nil, nil, nil, errors.New("no block") + return nil, nil, time.Duration(0), errors.New("no block") } else if err != nil { panic(err) } var cert agreement.Certificate cert.Proposal.BlockDigest = block.Digest() - return &block, &cert, &m.client, nil + return &block, &cert, time.Duration(0), nil } func (m *MockedFetcher) NumPeers() int { @@ -178,7 +176,8 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { // Make Service syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) - syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}) + syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + // syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: 
make(map[basics.Round]int)}) syncer.testStart() syncer.sync() @@ -198,8 +197,7 @@ func TestPeriodicSync(t *testing.T) { s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, auth, nil) s.deadlineTimeout = 2 * time.Second - factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} - s.fetcherFactory = &factory + s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} require.True(t, initialLocalRound < remote.LastRound()) s.Start() @@ -207,7 +205,7 @@ func TestPeriodicSync(t *testing.T) { time.Sleep(s.deadlineTimeout*2 - 200*time.Millisecond) require.Equal(t, initialLocalRound, local.LastRound()) auth.alter(-1, false) - s.fetcherFactory.(*MockedFetcherFactory).changeFetcher(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}) + s.blockFetcherFactory.(*mockBlockFetcherFactory).changeFetcher(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}) time.Sleep(2 * time.Second) // Asserts that the last block is the one we expect @@ -231,11 +229,9 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { // Make Service s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) - factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} - s.fetcherFactory = &factory + s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} // Get last round - require.False(t, factory.fetcher.client.closed) // Start the service ( dummy ) s.testStart() @@ -245,12 +241,10 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { // Asserts that the last block is the one we expect require.Equal(t, lastRoundAtStart+basics.Round(numBlocks), local.LastRound()) - require.False(t, 
factory.fetcher.client.closed) // Get the same block we wrote - block, _, client, err := factory.New().FetchBlock(context.Background(), lastRoundAtStart+1) + block, _, _, err := s.blockFetcherFactory.newBlockFetcher().fetchBlock(context.Background(), lastRoundAtStart+1, nil) require.NoError(t, err) - require.False(t, client.(*MockClient).closed) //Check we wrote the correct block localBlock, err := local.Block(lastRoundAtStart + 1) @@ -272,8 +266,7 @@ func TestAbruptWrites(t *testing.T) { // Make Service s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil) - factory := MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} - s.fetcherFactory = &factory + s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} var wg sync.WaitGroup wg.Add(1) @@ -309,7 +302,8 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { // Make Service syncer := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil) - syncer.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + fetcher := syncer.blockFetcherFactory.newBlockFetcher() // Start the service ( dummy ) syncer.testStart() @@ -322,9 +316,8 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { for i := basics.Round(1); i <= numberOfBlocks; i++ { // Get the same block we wrote - blk, _, client, err2 := syncer.fetcherFactory.New().FetchBlock(context.Background(), i) + blk, _, _, err2 := fetcher.fetchBlock(context.Background(), i, nil) require.NoError(t, err2) - require.False(t, client.(*MockClient).closed) // Check we wrote the correct block 
localBlock, err := local.Block(i) @@ -336,19 +329,19 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { func TestServiceFetchBlocksMalformed(t *testing.T) { // Make Ledger - remote, local := testingenv(t, 10) + _, local := testingenv(t, 10) lastRoundAtStart := local.LastRound() // Make Service s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) - s.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} // Start the service ( dummy ) s.testStart() s.sync() require.Equal(t, lastRoundAtStart, local.LastRound()) - require.True(t, s.fetcherFactory.(*MockedFetcherFactory).fetcher.client.closed) + // maybe check all peers/clients are closed here? + //require.True(t, s.fetcherFactory.(*MockedFetcherFactory).fetcher.client.closed) } func TestOnSwitchToUnSupportedProtocol(t *testing.T) { @@ -462,8 +455,7 @@ func helperTestOnSwitchToUnSupportedProtocol( // Make Service s := MakeService(logging.Base(), config, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil) s.deadlineTimeout = 2 * time.Second - - s.fetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} s.Start() defer s.Stop() @@ -648,7 +640,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { // Make Service s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) - s.latestRoundFetcherFactory = &MockedFetcherFactory{fetcher: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: 
make(map[basics.Round]int)}} s.testStart() for roundNumber := 2; roundNumber < 10; roundNumber += 3 { pc := &PendingUnmatchedCertificate{ @@ -660,6 +652,5 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { block, _ := remote.Block(basics.Round(roundNumber)) pc.Cert.Proposal.BlockDigest = block.Digest() s.syncCert(pc) - require.True(t, s.latestRoundFetcherFactory.(*MockedFetcherFactory).fetcher.client.closed) } } diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 580aeb95a4..1ff8485130 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -30,22 +30,52 @@ import ( ) // UniversalFetcher fetches blocks either from an http peer or ws peer. -type UniversalFetcher struct { +type universalFetcher struct { config config.Local net network.GossipNode log logging.Logger } +// Fetcher queries the current block of the network, and fetches agreed-upon blocks +type blockFetcher interface { + // fetchBlock fetches a block for a given round. + fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, + cert *agreement.Certificate, downloadDuration time.Duration, err error) +} + +// FetcherFactory creates fetchers +type blockFetcherFactory interface { + // Create a new fetcher + newBlockFetcher() blockFetcher +} + +type universalBlockFetcherFactory struct { + log logging.Logger + net network.GossipNode + config config.Local +} + +func makeUniversalBlockFetcherFactory(log logging.Logger, net network.GossipNode, config config.Local) blockFetcherFactory { + return &universalBlockFetcherFactory{ + log: log, + net: net, + config: config} +} + +func (uff *universalBlockFetcherFactory) newBlockFetcher() blockFetcher { + return makeUniversalBlockFetcher(uff.log, uff.net, uff.config) +} + // MakeUniversalFetcher returns a fetcher for http and ws peers. 
-func MakeUniversalFetcher(config config.Local, net network.GossipNode, log logging.Logger) UniversalFetcher { - return UniversalFetcher{ +func makeUniversalBlockFetcher(log logging.Logger, net network.GossipNode, config config.Local) blockFetcher { + return &universalFetcher{ config: config, net: net, log: log} } // FetchBlock returns a block from the peer. The peer can be either an http or ws peer. -func (uf *UniversalFetcher) FetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, +func (uf *universalFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, err error) { var fetcherClient FetcherClient diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index a7d274f474..2148679a7f 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -52,19 +52,20 @@ func TestUGetBlockWs(t *testing.T) { ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") ls.Start() - fetcher := MakeUniversalFetcher(cfg, net, logging.TestingLog(t)) + bff := makeUniversalBlockFetcherFactory(logging.TestingLog(t), net, cfg) + fetcher := bff.newBlockFetcher() var block *bookkeeping.Block var cert *agreement.Certificate var duration time.Duration - block, cert, _, err = fetcher.FetchBlock(context.Background(), next, up) + block, cert, _, err = fetcher.fetchBlock(context.Background(), next, up) require.NoError(t, err) require.Equal(t, &b, block) require.GreaterOrEqual(t, int64(duration), int64(0)) - block, cert, duration, err = fetcher.FetchBlock(context.Background(), next+1, up) + block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, up) require.Error(t, err) require.Contains(t, err.Error(), "requested block is not available") @@ -98,18 +99,18 @@ func TestUGetBlockHttp(t *testing.T) { rootURL := nodeA.rootURL() net.addPeer(rootURL) - fetcher 
:= MakeUniversalFetcher(cfg, net, logging.TestingLog(t)) + fetcher := makeUniversalBlockFetcherFactory(logging.TestingLog(t), net, cfg).newBlockFetcher() var block *bookkeeping.Block var cert *agreement.Certificate var duration time.Duration - block, cert, duration, err = fetcher.FetchBlock(context.Background(), next, net.GetPeers()[0]) + block, cert, duration, err = fetcher.fetchBlock(context.Background(), next, net.GetPeers()[0]) require.NoError(t, err) require.Equal(t, &b, block) require.GreaterOrEqual(t, int64(duration), int64(0)) - block, cert, duration, err = fetcher.FetchBlock(context.Background(), next+1, net.GetPeers()[0]) + block, cert, duration, err = fetcher.fetchBlock(context.Background(), next+1, net.GetPeers()[0]) require.Error(t, errNoBlockForRound, err) require.Contains(t, err.Error(), "No block available for given round") @@ -120,9 +121,9 @@ func TestUGetBlockHttp(t *testing.T) { // TestUGetBlockUnsupported tests the handling of an unsupported peer func TestUGetBlockUnsupported(t *testing.T) { - fetcher := UniversalFetcher{} + fetcher := universalFetcher{} peer := "" - block, cert, duration, err := fetcher.FetchBlock(context.Background(), 1, peer) + block, cert, duration, err := fetcher.fetchBlock(context.Background(), 1, peer) require.Error(t, err) require.Contains(t, err.Error(), "FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") require.Nil(t, block) From 61f6204578b9cedb1f498dd170e0df402b4553a1 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Sat, 27 Feb 2021 20:08:07 -0500 Subject: [PATCH 076/215] more cleanup --- catchup/fetcher.go | 35 ------------------- catchup/fetcher_test.go | 56 ------------------------------- catchup/universalFetcher.go | 22 ++++++++++++ catchup/wsFetcher.go | 67 ------------------------------------- rpcs/txSyncer_test.go | 4 --- 5 files changed, 22 insertions(+), 162 deletions(-) diff --git a/catchup/fetcher.go b/catchup/fetcher.go index 04645b92a6..8374ecb8ce 100644 --- a/catchup/fetcher.go +++ 
b/catchup/fetcher.go @@ -30,8 +30,6 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" - "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/rpcs" ) // Fetcher queries the current block of the network, and fetches agreed-upon blocks @@ -114,20 +112,6 @@ func (factory NetworkFetcherFactory) New() Fetcher { } } -// NewOverGossip returns a fetcher using the given message tag. -// If there are gossip peers, then it returns a fetcher over gossip -// Otherwise, it returns an HTTP fetcher -func (factory NetworkFetcherFactory) NewOverGossip() Fetcher { - gossipPeers := factory.net.GetPeers(network.PeersConnectedIn) - factory.log.Debugf("%d gossip peers", len(gossipPeers)) - if len(gossipPeers) == 0 { - factory.log.Info("no gossip peers for NewOverGossip") - return factory.New() - } - f := MakeWsFetcher(factory.log, gossipPeers, factory.cfg) - return &ComposedFetcher{fetchers: []Fetcher{factory.New(), f}} -} - // NetworkFetcher fetches data from remote RPC clients type NetworkFetcher struct { roundUpperBound map[FetcherClient]basics.Round @@ -289,22 +273,3 @@ func (cf *ComposedFetcher) Close() { /* Utils */ -func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { - var decodedEntry rpcs.EncodedBlockCert - err = protocol.Decode(fetchedBuf, &decodedEntry) - if err != nil { - err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err) - return - } - - if decodedEntry.Block.Round() != r { - err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong block from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Block.Round()) - return - } - - if decodedEntry.Certificate.Round != r { - err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong cert from peer %v: wanted %v, got %v", r, debugStr, r, 
decodedEntry.Certificate.Round) - return - } - return &decodedEntry.Block, &decodedEntry.Certificate, nil -} diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index fc56eab522..defa711d80 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -848,59 +848,3 @@ func makeTestUnicastPeer(gn network.GossipNode, version string, t *testing.T) ne return &wsp } -// A quick GetBlock over websockets test hitting a mocked websocket server (no actual connection) -func TestGetBlockWS(t *testing.T) { - // test the WS fetcher: - // 1. fetcher sends UniEnsBlockReqTag to http peer - // 2. peer send message to gossip node - // 3. gossip node send message to ledger service - // 4. ledger service responds with UniCatchupResTag sending it back to the http peer - // 5. the http peer send it to the network - // 6. the network send it back to the fetcher - - // start server - ledger, next, b, err := buildTestLedger(t) - if err != nil { - t.Fatal(err) - return - } - - cfg := config.GetDefaultLocal() - - versions := []string{"2.1"} - for _, version := range versions { // range network.SupportedProtocolVersions { - - net := &httpTestPeerSource{} - blockServiceConfig := config.GetDefaultLocal() - blockServiceConfig.CatchupParallelBlocks = 5 - blockServiceConfig.EnableBlockService = true - ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") - - ls.Start() - - up := makeTestUnicastPeer(net, version, t) - net.peers = append(net.peers, up) - - _, ok := net.GetPeers(network.PeersConnectedIn)[0].(network.UnicastPeer) - require.True(t, ok) - factory := MakeNetworkFetcherFactory(net, numberOfPeers, &cfg) - factory.log = logging.TestingLog(t) - fetcher := factory.NewOverGossip() - // we have one peer, the Ws block server - require.Equal(t, fetcher.NumPeers(), 1) - - var block *bookkeeping.Block - var cert *agreement.Certificate - var client FetcherClient - - block, cert, client, err = fetcher.FetchBlock(context.Background(), next) - 
require.NotNil(t, client) - require.NoError(t, err) - require.Equal(t, &b, block) - if err == nil { - require.NotEqual(t, nil, block) - require.NotEqual(t, nil, cert) - } - fetcher.Close() - } -} diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 1ff8485130..a5f3441ea3 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -27,6 +27,8 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/rpcs" ) // UniversalFetcher fetches blocks either from an http peer or ws peer. @@ -108,3 +110,23 @@ func (uf *universalFetcher) fetchBlock(ctx context.Context, round basics.Round, } return block, cert, downloadDuration, err } + +func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { + var decodedEntry rpcs.EncodedBlockCert + err = protocol.Decode(fetchedBuf, &decodedEntry) + if err != nil { + err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err) + return + } + + if decodedEntry.Block.Round() != r { + err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong block from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Block.Round()) + return + } + + if decodedEntry.Certificate.Round != r { + err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong cert from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Certificate.Round) + return + } + return &decodedEntry.Block, &decodedEntry.Certificate, nil +} diff --git a/catchup/wsFetcher.go b/catchup/wsFetcher.go index 29443b53c0..9140ea8778 100644 --- a/catchup/wsFetcher.go +++ b/catchup/wsFetcher.go @@ -24,80 +24,13 @@ import ( "github.com/algorand/go-deadlock" - "github.com/algorand/go-algorand/agreement" 
"github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/rpcs" ) -// Buffer messages from the network to have fewer drops. -const numBufferedInternalMsg = 1 - -// WsFetcher implements Fetcher, getting the block over -// a custom websockets interface (bidirectional). Internally it keeps track -// of multiple peers and handles dropping them appropriately using a NetworkFetcher. -type WsFetcher struct { - f *NetworkFetcher - clients map[network.Peer]*wsFetcherClient - config *config.Local - - // metadata - log logging.Logger - mu deadlock.RWMutex -} - -// MakeWsFetcher creates a fetcher that fetches over the gossip network. -// It instantiates a NetworkFetcher under the hood, -// and demuxes messages appropriately to the corresponding fetcher clients. 
-func MakeWsFetcher(log logging.Logger, peers []network.Peer, cfg *config.Local) Fetcher { - f := &WsFetcher{ - log: log, - config: cfg, - } - f.clients = make(map[network.Peer]*wsFetcherClient) - p := make([]FetcherClient, len(peers)) - for i, peer := range peers { - fc := &wsFetcherClient{ - target: peer.(network.UnicastPeer), - pendingCtxs: make(map[context.Context]context.CancelFunc), - config: cfg, - } - p[i] = fc - f.clients[peer] = fc - } - f.f = &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: p, - log: f.log, - } - return f -} - -// FetchBlock implements Fetcher interface -func (wsf *WsFetcher) FetchBlock(ctx context.Context, r basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error) { - return wsf.f.FetchBlock(ctx, r) -} - -// OutOfPeers implements Fetcher interface -func (wsf *WsFetcher) OutOfPeers(round basics.Round) bool { - return wsf.f.OutOfPeers(round) -} - -// NumPeers implements Fetcher interface -func (wsf *WsFetcher) NumPeers() int { - return wsf.f.NumPeers() -} - -// Close calls a delegate close fn passed in by the parent of this fetcher -func (wsf *WsFetcher) Close() { - wsf.f.Close() -} - // a stub fetcherClient to satisfy the NetworkFetcher interface type wsFetcherClient struct { target network.UnicastPeer // the peer where we're going to send the request. 
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go index de4bc02936..69b517806f 100644 --- a/rpcs/txSyncer_test.go +++ b/rpcs/txSyncer_test.go @@ -31,7 +31,6 @@ import ( "github.com/algorand/go-algorand/components/mocks" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" - "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/logging" @@ -130,9 +129,6 @@ func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txg } return client.client.txgroups, nil } -func (client *mockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { - return nil, nil -} // network.HTTPPeer interface func (client *mockRPCClient) GetAddress() string { From 05328b30d892f690cc3ec803039af6a707ba6c65 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Sun, 28 Feb 2021 14:39:55 -0500 Subject: [PATCH 077/215] Eliminate fetcher.go --- catchup/fetcher.go | 254 ----------------------- catchup/fetcher_test.go | 431 ---------------------------------------- 2 files changed, 685 deletions(-) diff --git a/catchup/fetcher.go b/catchup/fetcher.go index 8374ecb8ce..b2f626a39d 100644 --- a/catchup/fetcher.go +++ b/catchup/fetcher.go @@ -16,260 +16,6 @@ package catchup -import ( - "context" - "errors" - "fmt" - "math/rand" - - "github.com/algorand/go-deadlock" - - "github.com/algorand/go-algorand/agreement" - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/logging" - "github.com/algorand/go-algorand/network" -) - -// Fetcher queries the current block of the network, and fetches agreed-upon blocks -type Fetcher interface { - // FetchBlock fetches a block for a given round. 
- FetchBlock(ctx context.Context, r basics.Round) (*bookkeeping.Block, *agreement.Certificate, FetcherClient, error) - - // Whether the fetcher has anyone available to ask for the block associated with round - OutOfPeers(round basics.Round) bool - - // NumPeers return the number of peers that this fetcher has available - NumPeers() int - - // Close cleans up this fetcher - Close() -} - -// FetcherFactory creates fetchers -type FetcherFactory interface { - // Create a new fetcher - New() Fetcher - // Create a new fetcher that also fetches from backup peers over gossip network utilising given message tag - NewOverGossip() Fetcher -} - -// NetworkFetcherFactory creates network fetchers -type NetworkFetcherFactory struct { - net network.GossipNode - peerLimit int - cfg *config.Local - - log logging.Logger -} - -func (factory NetworkFetcherFactory) makeHTTPFetcherFromPeer(log logging.Logger, peer network.Peer) FetcherClient { - hp, ok := peer.(network.HTTPPeer) - if ok { - return MakeHTTPFetcher(log, hp, factory.net, factory.cfg) - } - log.Errorf("%T %#v is not HTTPPeer", peer, peer) - return nil -} - -// MakeNetworkFetcherFactory returns a network fetcher factory, that associates fetchers with no more than peerLimit peers from the aggregator. 
-// WSClientSource can be nil, if no network exists to create clients from (defaults to http clients) -func MakeNetworkFetcherFactory(net network.GossipNode, peerLimit int, cfg *config.Local) NetworkFetcherFactory { - var factory NetworkFetcherFactory - factory.net = net - factory.peerLimit = peerLimit - factory.log = logging.Base() - factory.cfg = cfg - return factory -} - -// buildFetcherClients returns a set of clients we can fetch blocks from -func (factory NetworkFetcherFactory) buildFetcherClients() []FetcherClient { - peers := factory.net.GetPeers(network.PeersPhonebookRelays) - factory.log.Debugf("%d outgoing peers", len(peers)) - if len(peers) == 0 { - factory.log.Warn("no outgoing peers for buildFetcherClients") - return nil - } - out := make([]FetcherClient, 0, len(peers)) - for _, peer := range peers { - fetcher := factory.makeHTTPFetcherFromPeer(factory.log, peer) - if fetcher != nil { - out = append(out, fetcher) - } - } - return out -} - -// New returns a new fetcher -func (factory NetworkFetcherFactory) New() Fetcher { - return &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: factory.buildFetcherClients(), - log: logging.Base(), - } -} - -// NetworkFetcher fetches data from remote RPC clients -type NetworkFetcher struct { - roundUpperBound map[FetcherClient]basics.Round - activeFetches map[FetcherClient]int - peers []FetcherClient - mu deadlock.RWMutex - log logging.Logger -} - -func (networkFetcher *NetworkFetcher) availablePeers(round basics.Round) []FetcherClient { - // filter clients who don't claim to have the round we want, and - // return clients that have the fewest active fetches right now. 
- minActiveFetches := -1 - for client, activeFetches := range networkFetcher.activeFetches { - roundUpperBound, exists := networkFetcher.roundUpperBound[client] - if exists && round >= roundUpperBound { - continue - } - - if minActiveFetches == -1 { - minActiveFetches = activeFetches - } - if activeFetches < minActiveFetches { - minActiveFetches = activeFetches - } - } - - pool := make([]FetcherClient, 0) - for _, client := range networkFetcher.peers { - activeFetches, exists := networkFetcher.activeFetches[client] - if exists && activeFetches > minActiveFetches && minActiveFetches != -1 { - continue - } - if roundUpperBound, exists := networkFetcher.roundUpperBound[client]; !exists || round < roundUpperBound { - // client doesn't have this block - pool = append(pool, client) - } - } - - return pool -} - -func (networkFetcher *NetworkFetcher) selectClient(r basics.Round) (FetcherClient, error) { - networkFetcher.mu.Lock() - defer networkFetcher.mu.Unlock() - - availableClients := networkFetcher.availablePeers(r) - if len(availableClients) == 0 { - return nil, errors.New("no peers to ask") - } - - // select one of the peers at random - i := rand.Uint64() % uint64(len(availableClients)) - client := availableClients[i] - networkFetcher.activeFetches[client] = networkFetcher.activeFetches[client] + 1 - return client, nil -} - -func (networkFetcher *NetworkFetcher) releaseClient(client FetcherClient) { - networkFetcher.mu.Lock() - defer networkFetcher.mu.Unlock() - networkFetcher.activeFetches[client] = networkFetcher.activeFetches[client] - 1 -} - -func (networkFetcher *NetworkFetcher) markPeerLastRound(client FetcherClient, round basics.Round) { - networkFetcher.mu.Lock() - defer networkFetcher.mu.Unlock() - - currentLastRound, hasBound := networkFetcher.roundUpperBound[client] - if !hasBound || currentLastRound > round { - networkFetcher.roundUpperBound[client] = round - } -} - -// FetchBlock returns a block for round r -func (networkFetcher *NetworkFetcher) 
FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) { - client, err := networkFetcher.selectClient(r) - if err != nil { - return - } - defer networkFetcher.releaseClient(client) - networkFetcher.log.Infof("networkFetcher.FetchBlock: asking client %v for block %v", client.Address(), r) - - fetchedBuf, err := client.GetBlockBytes(ctx, r) - if err != nil { - networkFetcher.markPeerLastRound(client, r) - err = fmt.Errorf("Peer %v: %v", client.Address(), err) - return - } - block, cert, err := processBlockBytes(fetchedBuf, r, client.Address()) - if err != nil { - networkFetcher.markPeerLastRound(client, r) - return - } - return block, cert, client, nil -} - -// NumPeers return the number of peers that this fetcher has available -func (networkFetcher *NetworkFetcher) NumPeers() int { - networkFetcher.mu.RLock() - defer networkFetcher.mu.RUnlock() - - return len(networkFetcher.peers) -} - -// OutOfPeers returns whether there are any peers that may have the block of a particular round -func (networkFetcher *NetworkFetcher) OutOfPeers(round basics.Round) bool { - networkFetcher.mu.RLock() - defer networkFetcher.mu.RUnlock() - - return len(networkFetcher.availablePeers(round)) == 0 -} - -// Close implements Fetcher. Nothing to clean up here. 
-func (networkFetcher *NetworkFetcher) Close() {} - -// ComposedFetcher wraps multiple fetchers in some priority order -type ComposedFetcher struct { - fetchers []Fetcher // ordered by priority -} - -// NumPeers implements Fetcher.NumPeers -func (cf *ComposedFetcher) NumPeers() int { - g := 0 - for _, f := range cf.fetchers { - g += f.NumPeers() - } - return g -} - -// OutOfPeers implements Fetcher.OutOfPeers -func (cf *ComposedFetcher) OutOfPeers(round basics.Round) bool { - for _, f := range cf.fetchers { - if !f.OutOfPeers(round) { - return false - } - } - return true -} - -// FetchBlock implements Fetcher.FetchBlock -func (cf *ComposedFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, rpcc FetcherClient, err error) { - for _, f := range cf.fetchers { - if f.OutOfPeers(r) { - continue - } - return f.FetchBlock(ctx, r) - } - err = errors.New("no peers in any fetchers") - return -} - -// Close implements Fetcher.Close -func (cf *ComposedFetcher) Close() { - for _, f := range cf.fetchers { - f.Close() - } -} /* Utils */ diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index defa711d80..8f644e0ef0 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -116,78 +116,6 @@ func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool return &mockClientAggregator{peers: clients} } -func getAllClientsSelectedForRound(t *testing.T, fetcher *NetworkFetcher, round basics.Round) map[FetcherClient]basics.Round { - selected := make(map[FetcherClient]basics.Round, 0) - for i := 0; i < 1000; i++ { - c, err := fetcher.selectClient(round) - if err != nil { - return selected - } - selected[c.(FetcherClient)] = fetcher.roundUpperBound[c] - } - return selected -} - -func TestSelectValidRemote(t *testing.T) { - network := makeMockClientAggregator(t, false, false) - cfg := config.GetDefaultLocal() - factory := MakeNetworkFetcherFactory(network, numberOfPeers, &cfg) - factory.log = 
logging.TestingLog(t) - fetcher := factory.New() - require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).peers)) - - var oldClient FetcherClient - var newClient FetcherClient - i := 0 - for _, client := range fetcher.(*NetworkFetcher).peers { - if i == 0 { - oldClient = client - r := basics.Round(2) - fetcher.(*NetworkFetcher).roundUpperBound[client] = r - } else if i == 1 { - newClient = client - r := basics.Round(4) - fetcher.(*NetworkFetcher).roundUpperBound[client] = r - } else if i > 2 { - r := basics.Round(3) - fetcher.(*NetworkFetcher).roundUpperBound[client] = r - } // skip i == 2 - i++ - } - - require.Equal(t, numberOfPeers, len(fetcher.(*NetworkFetcher).availablePeers(1))) - selected := getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 1) - require.Equal(t, numberOfPeers, len(selected)) - _, hasOld := selected[oldClient] - require.True(t, hasOld) - - _, hasNew := selected[newClient] - require.True(t, hasNew) - - require.Equal(t, numberOfPeers-1, len(fetcher.(*NetworkFetcher).availablePeers(2))) - selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 2) - require.Equal(t, numberOfPeers-1, len(selected)) - _, hasOld = selected[oldClient] - require.False(t, hasOld) - _, hasNew = selected[newClient] - require.True(t, hasNew) - - require.Equal(t, 2, len(fetcher.(*NetworkFetcher).availablePeers(3))) - selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 3) - require.Equal(t, 2, len(selected)) - _, hasOld = selected[oldClient] - require.False(t, hasOld) - _, hasNew = selected[newClient] - require.True(t, hasNew) - - require.Equal(t, 1, len(fetcher.(*NetworkFetcher).availablePeers(4))) - selected = getAllClientsSelectedForRound(t, fetcher.(*NetworkFetcher), 4) - require.Equal(t, 1, len(selected)) - _, hasOld = selected[oldClient] - require.False(t, hasOld) - _, hasNew = selected[newClient] - require.False(t, hasNew) -} type dummyFetcher struct { failWithNil bool @@ -242,181 +170,6 @@ func (df *dummyFetcher) 
Close() error { return nil } -func makeDummyFetchers(failWithNil bool, failWithError bool, timeout time.Duration) []FetcherClient { - out := make([]FetcherClient, numberOfPeers) - for i := range out { - out[i] = &dummyFetcher{failWithNil, failWithError, timeout} - } - return out -} - -func TestFetchBlock(t *testing.T) { - fetcher := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(false, false, 100*time.Millisecond), - log: logging.TestingLog(t), - } - - var err error - var block *bookkeeping.Block - var cert *agreement.Certificate - var client FetcherClient - - fetched := false - for i := 0; i < numberOfPeers; i++ { - start := time.Now() - block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.NoError(t, err) - require.NotNil(t, client) - end := time.Now() - require.True(t, end.Sub(start) > 100*time.Millisecond) - require.True(t, end.Sub(start) < 100*time.Millisecond+5*time.Second) // we want to have a higher margin here, as the machine we're running on might be slow. 
- if err == nil { - require.NotEqual(t, nil, block) - require.NotEqual(t, nil, cert) - _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.NotNil(t, client) - require.NoError(t, err) - fetched = true - } - } - require.True(t, fetched) -} - -func TestFetchBlockFail(t *testing.T) { - fetcher := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(true, false, 100*time.Millisecond), - log: logging.TestingLog(t), - } - - for i := 0; i < numberOfPeers; i++ { - require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers))) - _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.Error(t, err) - } - require.True(t, fetcher.OutOfPeers(basics.Round(numberOfPeers))) -} - -func TestFetchBlockAborted(t *testing.T) { - fetcher := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(false, false, 2*time.Second), - log: logging.TestingLog(t), - } - - ctx, cf := context.WithCancel(context.Background()) - defer cf() - go func() { - cf() - }() - start := time.Now() - _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1)) - end := time.Now() - require.True(t, strings.Contains(err.Error(), context.Canceled.Error())) - require.Nil(t, client) - require.True(t, end.Sub(start) < 10*time.Second) -} - -func TestFetchBlockTimeout(t *testing.T) { - fetcher := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(false, false, 10*time.Second), - log: logging.TestingLog(t), - } - start := time.Now() - ctx, cf := context.WithTimeout(context.Background(), 500*time.Millisecond) - defer cf() - _, _, client, err := fetcher.FetchBlock(ctx, basics.Round(1)) - end := time.Now() - require.True(t, strings.Contains(err.Error(), 
context.DeadlineExceeded.Error())) - require.Nil(t, client) - require.True(t, end.Sub(start) >= 500*time.Millisecond) - require.True(t, end.Sub(start) < 10*time.Second) -} - -func TestFetchBlockErrorCall(t *testing.T) { - fetcher := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(false, true, 10*time.Millisecond), - log: logging.TestingLog(t), - } - - require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers))) - _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.Error(t, err) - require.Nil(t, client) -} - -func TestFetchBlockComposedNoOp(t *testing.T) { - f := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(false, false, 1*time.Millisecond), - log: logging.TestingLog(t), - } - fetcher := &ComposedFetcher{fetchers: []Fetcher{f, nil}} - - var err error - var block *bookkeeping.Block - var cert *agreement.Certificate - var client FetcherClient - - fetched := false - for i := 0; i < numberOfPeers; i++ { - start := time.Now() - block, cert, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.NoError(t, err) - require.NotNil(t, client) - end := time.Now() - require.True(t, end.Sub(start) >= 1*time.Millisecond) - require.True(t, end.Sub(start) < 1*time.Millisecond+10*time.Second) // we take a very high margin here for the fetcher to complete. 
- if err == nil { - require.NotEqual(t, nil, block) - require.NotEqual(t, nil, cert) - _, _, client, err = fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.NotNil(t, client) - require.NoError(t, err) - fetched = true - } - } - require.True(t, fetched) -} - -// Make sure composed fetchers are hit in priority order -func TestFetchBlockComposedFail(t *testing.T) { - f := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(true, false, 1*time.Millisecond), - log: logging.TestingLog(t), - } - f2 := &NetworkFetcher{ - roundUpperBound: make(map[FetcherClient]basics.Round), - activeFetches: make(map[FetcherClient]int), - peers: makeDummyFetchers(false, false, 1*time.Millisecond), - log: logging.TestingLog(t), - } - fetcher := &ComposedFetcher{fetchers: []Fetcher{f, f2}} - - for i := 0; i < numberOfPeers; i++ { - require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers))) - _, _, _, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.Error(t, err) - } - require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers))) - for i := 0; i < numberOfPeers; i++ { - require.False(t, fetcher.OutOfPeers(basics.Round(numberOfPeers))) - _, _, client, err := fetcher.FetchBlock(context.Background(), basics.Round(numberOfPeers)) - require.NotNil(t, client) - require.NoError(t, err) - } -} func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) { var user basics.Address @@ -572,54 +325,6 @@ func (s *httpTestPeerSource) addPeer(rootURL string) { s.peers = append(s.peers, &peer) } -// Build a ledger with genesis and one block, start an HTTPServer around it, use NetworkFetcher to fetch the block. 
-// For smaller test, see blockService_test.go TestGetBlockHTTP -// todo - fix this one -func TestGetBlockHTTP(t *testing.T) { - // start server - ledger, next, b, err := buildTestLedger(t) - if err != nil { - t.Fatal(err) - return - } - net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(config.GetDefaultLocal(), ledger, net, "test genesisID") - - nodeA := basicRPCNode{} - nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) - nodeA.start() - defer nodeA.stop() - rootURL := nodeA.rootURL() - - // run fetcher - net.addPeer(rootURL) - _, ok := net.GetPeers(network.PeersConnectedOut)[0].(network.HTTPPeer) - require.True(t, ok) - cfg := config.GetDefaultLocal() - factory := MakeNetworkFetcherFactory(net, numberOfPeers, &cfg) - factory.log = logging.TestingLog(t) - fetcher := factory.New() - // we have one peer, the HTTP block server - require.Equal(t, len(fetcher.(*NetworkFetcher).peers), 1) - - var block *bookkeeping.Block - var cert *agreement.Certificate - var client FetcherClient - - start := time.Now() - block, cert, client, err = fetcher.FetchBlock(context.Background(), next) - end := time.Now() - require.NotNil(t, client) - require.NoError(t, err) - - require.True(t, end.Sub(start) < 10*time.Second) - require.Equal(t, &b, block) - if err == nil { - require.NotEqual(t, nil, block) - require.NotEqual(t, nil, cert) - } -} - func nodePair() (*basicRPCNode, *basicRPCNode) { nodeA := &basicRPCNode{} nodeA.start() @@ -632,142 +337,6 @@ func nodePair() (*basicRPCNode, *basicRPCNode) { return nodeA, nodeB } -func TestGetBlockMocked(t *testing.T) { - var user basics.Address - user[0] = 123 - - proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesis := make(map[basics.Address]basics.AccountData) - genesis[user] = basics.AccountData{ - Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, - } - genesis[sinkAddr] = basics.AccountData{ - Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, - 
} - genesis[poolAddr] = basics.AccountData{ - Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, - } - - log := logging.TestingLog(t) - // A network with two nodes, A and B - nodeA, nodeB := nodePair() - defer nodeA.stop() - defer nodeB.stop() - - // A is running the ledger service and will respond to fetch requests - genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr) - const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = true - ledgerA, err := data.LoadLedger( - log.With("name", "A"), t.Name(), inMem, - protocol.ConsensusCurrentVersion, genBal, "", crypto.Digest{}, - nil, cfg, - ) - if err != nil { - t.Errorf("Couldn't make ledger: %v", err) - } - blockServiceConfig := config.GetDefaultLocal() - blockServiceConfig.EnableBlockService = true - rpcs.MakeBlockService(blockServiceConfig, ledgerA, nodeA, "test genesisID") - - next := ledgerA.NextRound() - genHash := crypto.Digest{0x42} - tx := transactions.Transaction{ - Type: protocol.PaymentTx, - Header: transactions.Header{ - Sender: user, - Fee: basics.MicroAlgos{Raw: proto.MinTxnFee}, - FirstValid: next, - LastValid: next, - GenesisHash: genHash, - }, - PaymentTxnFields: transactions.PaymentTxnFields{ - Receiver: user, - Amount: basics.MicroAlgos{Raw: 2}, - }, - } - signedtx := transactions.SignedTxn{ - Txn: tx, - } - - var b bookkeeping.Block - prev, err := ledgerA.Block(ledgerA.LastRound()) - require.NoError(t, err) - b.RewardsLevel = prev.RewardsLevel - b.BlockHeader.Round = next - b.BlockHeader.GenesisHash = genHash - b.CurrentProtocol = protocol.ConsensusCurrentVersion - txib, err := b.EncodeSignedTxn(signedtx, transactions.ApplyData{}) - require.NoError(t, err) - b.Payset = []transactions.SignedTxnInBlock{ - txib, - } - require.NoError(t, ledgerA.AddBlock(b, agreement.Certificate{Round: next})) - - // B tries to fetch block - factory := MakeNetworkFetcherFactory(nodeB, 10, &cfg) - factory.log = logging.TestingLog(t) - nodeBRPC := factory.New() - 
ctx, cf := context.WithTimeout(context.Background(), time.Second) - defer cf() - eblock, _, _, err := nodeBRPC.FetchBlock(ctx, next) - if err != nil { - require.Failf(t, "Error fetching block", "%v", err) - } - block, err := ledgerA.Block(next) - require.NoError(t, err) - if eblock.Hash() != block.Hash() { - t.Errorf("FetchBlock returned wrong block: expected %v; got %v", block.Hash(), eblock) - } -} - -func TestGetFutureBlock(t *testing.T) { - log := logging.TestingLog(t) - // A network with two nodes, A and B - nodeA, nodeB := nodePair() - defer nodeA.stop() - defer nodeB.stop() - - proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesis := make(map[basics.Address]basics.AccountData) - genesis[sinkAddr] = basics.AccountData{ - Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, - } - genesis[poolAddr] = basics.AccountData{ - Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, - } - - gen := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr) - // A is running the ledger service and will respond to fetch requests - const inMem = true - cfg := config.GetDefaultLocal() - cfg.Archival = true - ledgerA, err := data.LoadLedger( - log.With("name", "A"), t.Name(), inMem, - protocol.ConsensusCurrentVersion, gen, "", crypto.Digest{}, - nil, cfg, - ) - if err != nil { - t.Errorf("Couldn't make ledger: %v", err) - } - rpcs.MakeBlockService(config.GetDefaultLocal(), ledgerA, nodeA, "test genesisID") - - // B tries to fetch block 4 - factory := MakeNetworkFetcherFactory(nodeB, 10, &cfg) - factory.log = logging.TestingLog(t) - nodeBRPC := factory.New() - ctx, cf := context.WithTimeout(context.Background(), time.Second) - defer cf() - _, _, client, err := nodeBRPC.FetchBlock(ctx, ledgerA.NextRound()) - require.Error(t, err) - require.Nil(t, client) -} // implement network.UnicastPeer type testUnicastPeer struct { From 9a68579a5b8d0248e8017eac3470464ee4f38b78 Mon Sep 17 00:00:00 2001 From: algonautshant 
Date: Wed, 3 Mar 2021 02:34:57 -0500 Subject: [PATCH 078/215] Removed the fetcher factories. Updated some of the catchup service tests. --- catchup/catchpointService.go | 8 +- catchup/fetcher_test.go | 31 +++++-- catchup/httpFetcher.go | 10 --- catchup/pref_test.go | 4 +- catchup/service.go | 31 +++---- catchup/service_test.go | 144 +++++++++++++++++-------------- catchup/universalFetcher.go | 44 ++-------- catchup/universalFetcher_test.go | 7 +- catchup/wsFetcher.go | 15 ---- 9 files changed, 129 insertions(+), 165 deletions(-) diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index ef95fe93fd..8cc06ffaab 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -86,8 +86,6 @@ type CatchpointCatchupService struct { abortCtxFunc context.CancelFunc // blocksDownloadPeerSelector is the peer selector used for downloading blocks. blocksDownloadPeerSelector *peerSelector - // blockFetcherFactory gives a block fetcher - blockFetcherFactory blockFetcherFactory } // MakeResumedCatchpointCatchupService creates a catchpoint catchup service for a node that is already in catchpoint catchup mode @@ -109,8 +107,7 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, }), - blockFetcherFactory: makeUniversalBlockFetcherFactory(log, net, cfg)} - + } service.lastBlockHeader, err = l.BlockHdr(l.Latest()) if err != nil { return nil, err @@ -147,7 +144,6 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, }), - blockFetcherFactory: makeUniversalBlockFetcherFactory(log, net, cfg), } service.lastBlockHeader, err = l.BlockHdr(l.Latest()) 
if err != nil { @@ -599,7 +595,7 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui } return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock: recurring non-HTTP peer was provided by the peer selector")) } - fetcher := cs.blockFetcherFactory.newBlockFetcher() + fetcher := makeUniversalBlockFetcher(cs.log, cs.net, cs.config) blockDownloadStartTime := time.Now() blk, _, _, err = fetcher.fetchBlock(cs.ctx, round, httpPeer) if err != nil { diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index 8f644e0ef0..a12badb138 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -116,7 +116,6 @@ func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool return &mockClientAggregator{peers: clients} } - type dummyFetcher struct { failWithNil bool failWithError bool @@ -170,7 +169,6 @@ func (df *dummyFetcher) Close() error { return nil } - func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) { var user basics.Address user[0] = 123 @@ -179,15 +177,15 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo genesis := make(map[basics.Address]basics.AccountData) genesis[user] = basics.AccountData{ Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, + MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000}, } genesis[sinkAddr] = basics.AccountData{ Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, + MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000}, } genesis[poolAddr] = basics.AccountData{ Status: basics.Online, - MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2}, + MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000}, } log := logging.TestingLog(t) @@ -225,6 +223,7 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo prev, err := ledger.Block(ledger.LastRound()) 
require.NoError(t, err) + b.BlockHeader.RewardsState.RewardsPool = poolAddr b.RewardsLevel = prev.RewardsLevel b.BlockHeader.Round = next b.BlockHeader.GenesisHash = genHash @@ -234,11 +233,29 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo b.Payset = []transactions.SignedTxnInBlock{ txib, } - + b.TxnRoot, err = b.PaysetCommit() + require.NoError(t, err) require.NoError(t, ledger.AddBlock(b, agreement.Certificate{Round: next})) return } +func addBlocks(t *testing.T, ledger *data.Ledger, blk bookkeeping.Block, numBlocks int) { + var err error + for i := 0; i < numBlocks; i++ { + blk.BlockHeader.Round++ + blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000) + blk.TxnRoot, err = blk.PaysetCommit() + require.NoError(t, err) + + err := ledger.AddBlock(blk, agreement.Certificate{Round: blk.BlockHeader.Round}) + require.NoError(t, err) + + hdr, err := ledger.BlockHdr(blk.BlockHeader.Round) + require.NoError(t, err) + require.Equal(t, blk.BlockHeader, hdr) + } +} + type basicRPCNode struct { listener net.Listener server http.Server @@ -337,7 +354,6 @@ func nodePair() (*basicRPCNode, *basicRPCNode) { return nodeA, nodeB } - // implement network.UnicastPeer type testUnicastPeer struct { gn network.GossipNode @@ -416,4 +432,3 @@ func makeTestUnicastPeer(gn network.GossipNode, version string, t *testing.T) ne wsp.responseChannels = make(map[uint64]chan *network.Response) return &wsp } - diff --git a/catchup/httpFetcher.go b/catchup/httpFetcher.go index 8d06b2225d..87c9e4ba4b 100644 --- a/catchup/httpFetcher.go +++ b/catchup/httpFetcher.go @@ -43,7 +43,6 @@ var errNoBlockForRound = errors.New("No block available for given round") type FetcherClient interface { GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) Address() string - Close() error } // HTTPFetcher implements FetcherClient doing an HTTP GET of the block @@ -144,15 +143,6 @@ func (hf *HTTPFetcher) Address() string { return hf.rootURL } -// 
Close is part of FetcherClient interface -// -// Does nothing, leaves underlying client open because other HTTP -// requests from other interfaces could be open on it. Somewhere a -// Peer owns that connection and will close as needed. -func (hf *HTTPFetcher) Close() error { - return nil -} - // FetchBlock is a copy of the functionality in NetworkFetcher.FetchBlock, designed to complete // the HTTPFetcher functionality as a standalone fetcher func (hf *HTTPFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { diff --git a/catchup/pref_test.go b/catchup/pref_test.go index b960f7c063..aa5cae5792 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -20,7 +20,7 @@ import ( "math/rand" "strconv" "testing" - "time" + // "time" "github.com/stretchr/testify/require" @@ -57,7 +57,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { // Make Service syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil) - syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true}} + // syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true}} b.StartTimer() syncer.sync() b.StopTimer() diff --git a/catchup/service.go b/catchup/service.go index 0e187b310c..5d881948d6 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -87,8 +87,6 @@ type Service struct { // blocksDownloadPeerSelector is the peer selector used for downloading blocks. blocksDownloadPeerSelector *peerSelector - // blockFetcherFactory is gives a block fetcher - blockFetcherFactory blockFetcherFactory } // A BlockAuthenticator authenticates blocks given a certificate. 
@@ -121,7 +119,6 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, }) - s.blockFetcherFactory = makeUniversalBlockFetcherFactory(s.log, s.net, s.cfg) return s } @@ -162,7 +159,7 @@ func (s *Service) SynchronizingTime() time.Duration { } // function scope to make a bunch of defer statements better -func (s *Service) innerFetch(fetcher blockFetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { +func (s *Service) innerFetch(fetcher *universalBlockFetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { ctx, cf := context.WithCancel(s.ctx) defer cf() stopWaitingForLedgerRound := make(chan struct{}) @@ -175,16 +172,18 @@ func (s *Service) innerFetch(fetcher blockFetcher, r basics.Round) (blk *bookkee } }() peer, err := s.blocksDownloadPeerSelector.GetNextPeer() - // xxx check err + if err != nil { + return nil, nil, time.Duration(0), err + } return fetcher.fetchBlock(ctx, r, peer) } // fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary. // Returns false if we couldn't fetch or write (i.e., if we failed even after a given number of retries or if we were told to abort.) 
-func (s *Service) fetchAndWrite(fetcher blockFetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool { +func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool { i := 0 hasLookback := false - for true { // !fetcher.OutOfPeers(r) { + for true { i++ select { case <-s.ctx.Done(): @@ -204,6 +203,9 @@ func (s *Service) fetchAndWrite(fetcher blockFetcher, r basics.Round, prevFetchC block, cert, _, err := s.innerFetch(fetcher, r) if err != nil { + if err == errPeerSelectorNoPeerPoolsAvailable { + break + } s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) // we've just failed to retrieve a block; wait until the previous block is fetched before trying again // to avoid the usecase where the first block doesn't exists and we're making many requests down the chain @@ -310,7 +312,7 @@ func (s *Service) fetchAndWrite(fetcher blockFetcher, r basics.Round, prevFetchC type task func() basics.Round -func (s *Service) pipelineCallback(fetcher blockFetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round { +func (s *Service) pipelineCallback(fetcher *universalBlockFetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round { return func() basics.Round { fetchResult := s.fetchAndWrite(fetcher, r, prevFetchCompleteChan, lookbackChan) @@ -328,7 +330,7 @@ func (s *Service) pipelineCallback(fetcher blockFetcher, r basics.Round, thisFet // TODO the following code does not handle the following case: seedLookback upgrades during fetch func (s *Service) pipelinedFetch(seedLookback uint64) { - fetcher := s.blockFetcherFactory.newBlockFetcher() + fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) parallelRequests := s.parallelBlocks if parallelRequests < seedLookback { @@ -557,17 +559,8 @@ 
func (s *Service) syncCert(cert *PendingUnmatchedCertificate) { // TODO this doesn't actually use the digest from cert! func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) { blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest - fetcher := s.blockFetcherFactory.newBlockFetcher() + fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) for s.ledger.LastRound() < cert.Round { - // xxx this needs to change - /* if fetcher.OutOfPeers(cert.Round) { - fetcher.Close() - // refresh peers and try again - logging.Base().Warn("fetchRound found no outgoing peers") - s.net.RequestConnectOutgoing(true, s.ctx.Done()) - fetcher := MakeUniversalFetcher(s.log, s.net, s.cfg) - // fetcher = s.latestRoundFetcherFactory.NewOverGossip() - }*/ // Ask the fetcher to get the block somehow block, fetchedCert, _, err := s.innerFetch(fetcher, cert.Round) diff --git a/catchup/service_test.go b/catchup/service_test.go index 6a65ab5b6a..ae4b906ddf 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -20,7 +20,7 @@ import ( "context" "errors" "math/rand" - "sync" + // "sync" "testing" "time" @@ -28,7 +28,7 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/agreement" - "github.com/algorand/go-algorand/components/mocks" + // "github.com/algorand/go-algorand/components/mocks" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" @@ -37,58 +37,21 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/rpcs" ) var defaultConfig = config.GetDefaultLocal() var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} -type MockedFetcherFactory struct { - fetcher *MockedFetcher - mu deadlock.Mutex -} - -type MockClient struct { - once sync.Once - closed bool -} - -func (*MockClient) Address() string { - return "mock.address." -} -func (c *MockClient) Close() error { - c.once.Do(func() { - c.closed = true - }) - return nil -} -func (c *MockClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { - return nil, nil -} - -type mockBlockFetcherFactory struct { - mf *MockedFetcher - mu deadlock.Mutex -} -func (mbff *mockBlockFetcherFactory) newBlockFetcher() blockFetcher { - mbff.mu.Lock() - defer mbff.mu.Unlock() - return mbff.mf -} -func (mbff *mockBlockFetcherFactory) changeFetcher(fetcher *MockedFetcher) { - mbff.mu.Lock() - defer mbff.mu.Unlock() - mbff.mf = fetcher -} // Mocked Fetcher will mock UniversalFetcher type MockedFetcher struct { ledger Ledger timeout bool tries map[basics.Round]int - client MockClient latency time.Duration predictable bool mu deadlock.Mutex @@ -166,51 +129,82 @@ func (auth *mockedAuthenticator) alter(errorRound int, fail bool) { } func TestServiceFetchBlocksSameRange(t *testing.T) { - // Make Ledger - remote, local := testingenv(t, 10) + // Make Ledgers + _, local := testingenv(t, 10) + + remote, _, blk, err := buildTestLedger(t) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, 10) + + // Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") - require.NotNil(t, remote) - require.NotNil(t, local) + nodeA := basicRPCNode{} + 
nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) - net := &mocks.MockNetwork{} // Make Service syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) - syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} - // syncer.fetcherFactory = makeMockFactory(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}) syncer.testStart() syncer.sync() - - require.Equal(t, remote.LastRound(), local.LastRound()) + rr, lr := remote.LastRound(), local.LastRound() + require.Equal(t, rr, lr) } + func TestPeriodicSync(t *testing.T) { // Make Ledger - remote, local := testingenv(t, 10) + _, local := testingenv(t, 10) + + remote, _, blk, err := buildTestLedger(t) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, 10) + + // Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + auth := &mockedAuthenticator{fail: true} initialLocalRound := local.LastRound() require.True(t, 0 == initialLocalRound) - // Make Service - s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, auth, nil) + // Make Service + s := MakeService(logging.Base(), defaultConfig, net, local, auth, nil) s.deadlineTimeout = 2 * time.Second - s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} - require.True(t, initialLocalRound < remote.LastRound()) - s.Start() defer s.Stop() time.Sleep(s.deadlineTimeout*2 - 
200*time.Millisecond) require.Equal(t, initialLocalRound, local.LastRound()) auth.alter(-1, false) - s.blockFetcherFactory.(*mockBlockFetcherFactory).changeFetcher(&MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}) time.Sleep(2 * time.Second) // Asserts that the last block is the one we expect - require.Equal(t, remote.LastRound(), local.LastRound()) - for r := basics.Round(0); r < remote.LastRound(); r++ { + rr, lr := remote.LastRound(), local.LastRound() + require.Equal(t, rr, lr) + for r := basics.Round(1); r < remote.LastRound(); r++ { localBlock, err := local.Block(r) require.NoError(t, err) remoteBlock, err := remote.Block(r) @@ -222,14 +216,30 @@ func TestPeriodicSync(t *testing.T) { func TestServiceFetchBlocksOneBlock(t *testing.T) { // Make Ledger numBlocks := 10 - remote, local := testingenv(t, numBlocks) + _, local := testingenv(t, numBlocks) lastRoundAtStart := local.LastRound() - net := &mocks.MockNetwork{} + remote, _, blk, err := buildTestLedger(t) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, numBlocks-1) + + // Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) // Make Service s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) - s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} // Get last round @@ -243,7 +253,10 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { require.Equal(t, lastRoundAtStart+basics.Round(numBlocks), local.LastRound()) // Get the same block we wrote - block, _, _, err := 
s.blockFetcherFactory.newBlockFetcher().fetchBlock(context.Background(), lastRoundAtStart+1, nil) + block, _, _, err := makeUniversalBlockFetcher(logging.Base(), + net, + defaultConfig).fetchBlock(context.Background(), lastRoundAtStart+1, net.peers[0]) + require.NoError(t, err) //Check we wrote the correct block @@ -251,7 +264,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { require.NoError(t, err) require.Equal(t, *block, localBlock) } - +/* func TestAbruptWrites(t *testing.T) { numberOfBlocks := 100 @@ -462,7 +475,7 @@ func helperTestOnSwitchToUnSupportedProtocol( <-s.done return local, remote } - +*/ const defaultRewardUnit = 1e6 type mockedLedger struct { @@ -631,7 +644,7 @@ func (s *Service) testStart() { s.ctx, s.cancel = context.WithCancel(context.Background()) s.InitialSyncDone = make(chan struct{}) } - +/* func TestCatchupUnmatchedCertificate(t *testing.T) { // Make Ledger remote, local := testingenv(t, 10) @@ -654,3 +667,4 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { s.syncCert(pc) } } +*/ diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index a5f3441ea3..29a33070ff 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -32,52 +32,22 @@ import ( ) // UniversalFetcher fetches blocks either from an http peer or ws peer. -type universalFetcher struct { +type universalBlockFetcher struct { config config.Local net network.GossipNode log logging.Logger } -// Fetcher queries the current block of the network, and fetches agreed-upon blocks -type blockFetcher interface { - // fetchBlock fetches a block for a given round. 
- fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, - cert *agreement.Certificate, downloadDuration time.Duration, err error) -} - -// FetcherFactory creates fetchers -type blockFetcherFactory interface { - // Create a new fetcher - newBlockFetcher() blockFetcher -} - -type universalBlockFetcherFactory struct { - log logging.Logger - net network.GossipNode - config config.Local -} - -func makeUniversalBlockFetcherFactory(log logging.Logger, net network.GossipNode, config config.Local) blockFetcherFactory { - return &universalBlockFetcherFactory{ - log: log, - net: net, - config: config} -} - -func (uff *universalBlockFetcherFactory) newBlockFetcher() blockFetcher { - return makeUniversalBlockFetcher(uff.log, uff.net, uff.config) -} - -// MakeUniversalFetcher returns a fetcher for http and ws peers. -func makeUniversalBlockFetcher(log logging.Logger, net network.GossipNode, config config.Local) blockFetcher { - return &universalFetcher{ +// makeUniversalFetcher returns a fetcher for http and ws peers. +func makeUniversalBlockFetcher(log logging.Logger, net network.GossipNode, config config.Local) *universalBlockFetcher { + return &universalBlockFetcher{ config: config, net: net, log: log} } // FetchBlock returns a block from the peer. The peer can be either an http or ws peer. 
-func (uf *universalFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, +func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, err error) { var fetcherClient FetcherClient @@ -93,7 +63,6 @@ func (uf *universalFetcher) fetchBlock(ctx context.Context, round basics.Round, } else if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { fetcherClient = &wsFetcherClient{ target: wsPeer, - pendingCtxs: make(map[context.Context]context.CancelFunc), config: &uf.config, } } else { @@ -113,6 +82,9 @@ func (uf *universalFetcher) fetchBlock(ctx context.Context, round basics.Round, func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { var decodedEntry rpcs.EncodedBlockCert + if uint64(r) == 0 { + r = 0 + } err = protocol.Decode(fetchedBuf, &decodedEntry) if err != nil { err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err) diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 2148679a7f..3d8ada27a7 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -52,8 +52,7 @@ func TestUGetBlockWs(t *testing.T) { ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") ls.Start() - bff := makeUniversalBlockFetcherFactory(logging.TestingLog(t), net, cfg) - fetcher := bff.newBlockFetcher() + fetcher := makeUniversalBlockFetcher(logging.TestingLog(t), net, cfg) var block *bookkeeping.Block var cert *agreement.Certificate @@ -99,7 +98,7 @@ func TestUGetBlockHttp(t *testing.T) { rootURL := nodeA.rootURL() net.addPeer(rootURL) - fetcher := makeUniversalBlockFetcherFactory(logging.TestingLog(t), net, cfg).newBlockFetcher() + fetcher := 
makeUniversalBlockFetcher(logging.TestingLog(t), net, cfg) var block *bookkeeping.Block var cert *agreement.Certificate @@ -121,7 +120,7 @@ func TestUGetBlockHttp(t *testing.T) { // TestUGetBlockUnsupported tests the handling of an unsupported peer func TestUGetBlockUnsupported(t *testing.T) { - fetcher := universalFetcher{} + fetcher := universalBlockFetcher{} peer := "" block, cert, duration, err := fetcher.fetchBlock(context.Background(), 1, peer) require.Error(t, err) diff --git a/catchup/wsFetcher.go b/catchup/wsFetcher.go index 9140ea8778..e42cfd3675 100644 --- a/catchup/wsFetcher.go +++ b/catchup/wsFetcher.go @@ -34,7 +34,6 @@ import ( // a stub fetcherClient to satisfy the NetworkFetcher interface type wsFetcherClient struct { target network.UnicastPeer // the peer where we're going to send the request. - pendingCtxs map[context.Context]context.CancelFunc // a map of all the current pending contexts. config *config.Local closed bool // a flag indicating that the fetcher will not perform additional block retrivals. 
@@ -51,7 +50,6 @@ func (w *wsFetcherClient) GetBlockBytes(ctx context.Context, r basics.Round) ([] } childCtx, cancelFunc := context.WithTimeout(ctx, time.Duration(w.config.CatchupGossipBlockFetchTimeoutSec)*time.Second) - w.pendingCtxs[childCtx] = cancelFunc w.mu.Unlock() defer func() { @@ -59,7 +57,6 @@ func (w *wsFetcherClient) GetBlockBytes(ctx context.Context, r basics.Round) ([] // note that we don't need to have additional Unlock here since // we already have a defered Unlock above ( which executes in reversed order ) w.mu.Lock() - delete(w.pendingCtxs, childCtx) }() blockBytes, err := w.requestBlock(childCtx, r) @@ -77,18 +74,6 @@ func (w *wsFetcherClient) Address() string { return fmt.Sprintf("[ws] (%v)", w.target.GetAddress()) } -// Close is part of FetcherClient interface -func (w *wsFetcherClient) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - w.closed = true - for _, cancelFunc := range w.pendingCtxs { - cancelFunc() - } - w.pendingCtxs = make(map[context.Context]context.CancelFunc) - return nil -} - // requestBlock send a request for block and wait until it receives a response or a context expires. 
func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) { roundBin := make([]byte, binary.MaxVarintLen64) From 7ba545f09296dbf68f59ad1c10354e98b4e43a99 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Thu, 4 Mar 2021 02:12:06 -0500 Subject: [PATCH 079/215] fix test failure --- catchup/service.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/catchup/service.go b/catchup/service.go index 5d881948d6..23f591b3d5 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -118,6 +118,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode []peerClass{ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRank1LowBlockTime, peerClass: network.PeersConnectedIn}, }) return s } @@ -204,6 +205,7 @@ func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, if err != nil { if err == errPeerSelectorNoPeerPoolsAvailable { + s.log.Infof("fetchAndWrite: was unable to obtain a peer to retrieve the block from") break } s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) From 39aa8ffd39afc74b44f3ccd9d64215c4db32d4f1 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Thu, 4 Mar 2021 18:54:06 -0500 Subject: [PATCH 080/215] test failure fix --- catchup/service.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index 23f591b3d5..0a83f17d3d 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -116,9 +116,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode s.blocksDownloadPeerSelector = makePeerSelector( net, []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: 
peerRank1LowBlockTime, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, }) return s } @@ -208,7 +206,7 @@ func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, s.log.Infof("fetchAndWrite: was unable to obtain a peer to retrieve the block from") break } - s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) + s.log.Infof("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) // we've just failed to retrieve a block; wait until the previous block is fetched before trying again // to avoid the usecase where the first block doesn't exists and we're making many requests down the chain // for no reason. @@ -302,7 +300,7 @@ func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, return false } - s.log.Debugf("fetchAndWrite(%v): Wrote block to ledger", r) + s.log.Infof("fetchAndWrite(%v): Wrote block to ledger", r) return true } s.log.Warnf("fetchAndWrite(%v): previous block doesn't exist (perhaps fetching block %v failed)", r, r-1) From 24da14bd457dd09dabf3be6361579258586fe3b3 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Fri, 5 Mar 2021 10:40:25 -0500 Subject: [PATCH 081/215] listExpiredParticipationKeyTest fail configuration --- catchup/service.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/catchup/service.go b/catchup/service.go index 0a83f17d3d..406c25b762 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -117,7 +117,8 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode net, []peerClass{ {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, - }) + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) return s } From ed5cfa2760d5829634194f5a6404ff49ac15547d Mon Sep 17 00:00:00 2001 From: algonautshant Date: Fri, 5 Mar 2021 15:57:48 -0500 Subject: [PATCH 082/215] Refactor peer 
selector and fetcher creation. Based on review comments, pushing down these for more fine tuned configuration and to eliminate the reuse of the fetcher. --- catchup/peerSelector.go | 16 ++++++++- catchup/service.go | 65 +++++++++++++++++++++++++------------ catchup/universalFetcher.go | 4 +-- 3 files changed, 61 insertions(+), 24 deletions(-) diff --git a/catchup/peerSelector.go b/catchup/peerSelector.go index 03b1278189..ef5004f637 100644 --- a/catchup/peerSelector.go +++ b/catchup/peerSelector.go @@ -38,6 +38,14 @@ const ( peerRank1LowBlockTime = 201 peerRank1HighBlockTime = 399 + peerRankInitialThirdPriority = 400 + peerRank2LowBlockTime = 401 + peerRank2HighBlockTime = 599 + + peerRankInitialFourthPriority = 600 + peerRank3LowBlockTime = 601 + peerRank3HighBlockTime = 799 + // peerRankDownloadFailed is used for responses which could be temporary, such as missing files, or such that we don't // have clear resolution peerRankDownloadFailed = 900 @@ -170,8 +178,14 @@ func (ps *peerSelector) PeerDownloadDurationToRank(peer network.Peer, blockDownl switch ps.pools[poolIdx].peers[peerIdx].class.initialRank { case peerRankInitialFirstPriority: return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank0LowBlockTime, peerRank0HighBlockTime) - default: // i.e. peerRankInitialSecondPriority + case peerRankInitialSecondPriority: return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank1LowBlockTime, peerRank1HighBlockTime) + case peerRankInitialThirdPriority: + return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime) + default: // i.e. 
peerRankInitialFourthPriority + return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime) + + } } diff --git a/catchup/service.go b/catchup/service.go index 406c25b762..62fc031c95 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -84,9 +84,6 @@ type Service struct { protocolErrorLogged bool lastSupportedRound basics.Round unmatchedPendingCertificates <-chan PendingUnmatchedCertificate - - // blocksDownloadPeerSelector is the peer selector used for downloading blocks. - blocksDownloadPeerSelector *peerSelector } // A BlockAuthenticator authenticates blocks given a certificate. @@ -113,12 +110,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode s.log = log.With("Context", "sync") s.parallelBlocks = config.CatchupParallelBlocks s.deadlineTimeout = agreement.DeadlineTimeout() - s.blocksDownloadPeerSelector = makePeerSelector( - net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, - }) + return s } @@ -159,8 +151,9 @@ func (s *Service) SynchronizingTime() time.Duration { } // function scope to make a bunch of defer statements better -func (s *Service) innerFetch(fetcher *universalBlockFetcher, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { +func (s *Service) innerFetch(r basics.Round, peerSelector *peerSelector) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { ctx, cf := context.WithCancel(s.ctx) + fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) defer cf() stopWaitingForLedgerRound := make(chan struct{}) defer close(stopWaitingForLedgerRound) @@ -171,7 +164,7 @@ func (s *Service) innerFetch(fetcher *universalBlockFetcher, r basics.Round) (bl cf() } }() - peer, err := 
s.blocksDownloadPeerSelector.GetNextPeer() + peer, err := peerSelector.GetNextPeer() if err != nil { return nil, nil, time.Duration(0), err } @@ -180,7 +173,7 @@ func (s *Service) innerFetch(fetcher *universalBlockFetcher, r basics.Round) (bl // fetchAndWrite fetches a block, checks the cert, and writes it to the ledger. Cert checking and ledger writing both wait for the ledger to advance if necessary. // Returns false if we couldn't fetch or write (i.e., if we failed even after a given number of retries or if we were told to abort.) -func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool) bool { +func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, lookbackComplete chan bool, peerSelector *peerSelector) bool { i := 0 hasLookback := false for true { @@ -200,7 +193,7 @@ func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, // Try to fetch, timing out after retryInterval - block, cert, _, err := s.innerFetch(fetcher, r) + block, cert, _, err := s.innerFetch(r, peerSelector) if err != nil { if err == errPeerSelectorNoPeerPoolsAvailable { @@ -313,9 +306,9 @@ func (s *Service) fetchAndWrite(fetcher *universalBlockFetcher, r basics.Round, type task func() basics.Round -func (s *Service) pipelineCallback(fetcher *universalBlockFetcher, r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool) func() basics.Round { +func (s *Service) pipelineCallback(r basics.Round, thisFetchComplete chan bool, prevFetchCompleteChan chan bool, lookbackChan chan bool, peerSelector *peerSelector) func() basics.Round { return func() basics.Round { - fetchResult := s.fetchAndWrite(fetcher, r, prevFetchCompleteChan, lookbackChan) + fetchResult := s.fetchAndWrite(r, prevFetchCompleteChan, lookbackChan, peerSelector) // the fetch result will be read at most twice (once as the lookback block and once as the prev 
block, so we write the result twice) thisFetchComplete <- fetchResult @@ -331,8 +324,6 @@ func (s *Service) pipelineCallback(fetcher *universalBlockFetcher, r basics.Roun // TODO the following code does not handle the following case: seedLookback upgrades during fetch func (s *Service) pipelinedFetch(seedLookback uint64) { - fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) - parallelRequests := s.parallelBlocks if parallelRequests < seedLookback { parallelRequests = seedLookback @@ -348,6 +339,23 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { close(completed) }() + var peerSelector *peerSelector + if s.cfg.NetAddress != "" { + peerSelector = makePeerSelector( + s.net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) + } else { + peerSelector = makePeerSelector( + s.net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) + } + // Invariant: len(taskCh) + (# pending writes to completed) <= N wg.Add(int(parallelRequests)) for i := uint64(0); i < parallelRequests; i++ { @@ -394,7 +402,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { currentRoundComplete := make(chan bool, 2) // len(taskCh) + (# pending writes to completed) increases by 1 - taskCh <- s.pipelineCallback(fetcher, nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[len(recentReqs)-int(seedLookback)]) + taskCh <- s.pipelineCallback(nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[len(recentReqs)-int(seedLookback)], peerSelector) recentReqs = append(recentReqs[1:], currentRoundComplete) } @@ -426,7 +434,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { currentRoundComplete := make(chan bool, 2) // len(taskCh) + (# pending writes to 
completed) increases by 1 - taskCh <- s.pipelineCallback(fetcher, nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[0]) + taskCh <- s.pipelineCallback(nextRound, currentRoundComplete, recentReqs[len(recentReqs)-1], recentReqs[0], peerSelector) recentReqs = append(recentReqs[1:], currentRoundComplete) nextRound++ } @@ -560,10 +568,25 @@ func (s *Service) syncCert(cert *PendingUnmatchedCertificate) { // TODO this doesn't actually use the digest from cert! func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) { blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest - fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) + var peerSelector *peerSelector + if s.cfg.NetAddress != "" { + peerSelector = makePeerSelector( + s.net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) + } else { + peerSelector = makePeerSelector( + s.net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) + } for s.ledger.LastRound() < cert.Round { // Ask the fetcher to get the block somehow - block, fetchedCert, _, err := s.innerFetch(fetcher, cert.Round) + block, fetchedCert, _, err := s.innerFetch(cert.Round, peerSelector) if err != nil { select { diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 29a33070ff..b175ee7d1f 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -62,8 +62,8 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro config: &uf.config} } else if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { fetcherClient = &wsFetcherClient{ - target: wsPeer, 
- config: &uf.config, + target: wsPeer, + config: &uf.config, } } else { return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") From b47f303a585c17ef99fb3a2507de76c29e6098d3 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 00:58:36 -0500 Subject: [PATCH 083/215] minor changes --- catchup/fetcher_test.go | 4 ++-- catchup/httpFetcher.go | 32 ++++++++------------------------ catchup/universalFetcher.go | 2 +- catchup/wsFetcher.go | 4 ++-- 4 files changed, 13 insertions(+), 29 deletions(-) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index a12badb138..cb5d92ca20 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -83,7 +83,7 @@ func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txg } return client.client.txgroups, nil } -func (client *mockRPCClient) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { +func (client *mockRPCClient) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { return nil, nil } @@ -123,7 +123,7 @@ type dummyFetcher struct { } // FetcherClient interface -func (df *dummyFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { +func (df *dummyFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { if df.failWithNil { return nil, nil } diff --git a/catchup/httpFetcher.go b/catchup/httpFetcher.go index 87c9e4ba4b..f41c3b1695 100644 --- a/catchup/httpFetcher.go +++ b/catchup/httpFetcher.go @@ -39,9 +39,9 @@ const fetcherMaxBlockBytes = 5 << 20 var errNoBlockForRound = errors.New("No block available for given round") -// FetcherClient abstracts how to GetBlockBytes from a node on the net. +// FetcherClient abstracts how to getBlockBytes from a node on the net. 
type FetcherClient interface { - GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) + getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) Address() string } @@ -57,25 +57,9 @@ type HTTPFetcher struct { config *config.Local } -// MakeHTTPFetcher wraps an HTTPPeer so that we can get blocks from it, and return the FetcherClient interface -func MakeHTTPFetcher(log logging.Logger, peer network.HTTPPeer, net network.GossipNode, cfg *config.Local) (fc FetcherClient) { - return makeHTTPFetcher(log, peer, net, cfg) -} - -// makeHTTPFetcher wraps an HTTPPeer so that we can get blocks from it, and returns a HTTPFetcher object. -func makeHTTPFetcher(log logging.Logger, peer network.HTTPPeer, net network.GossipNode, cfg *config.Local) *HTTPFetcher { - return &HTTPFetcher{ - peer: peer, - rootURL: peer.GetAddress(), - net: net, - client: peer.GetHTTPClient(), - log: log, - config: cfg} -} - -// GetBlockBytes gets a block. +// getBlockBytes gets a block. // Core piece of FetcherClient interface -func (hf *HTTPFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { +func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { parsedURL, err := network.ParseHostOrURL(hf.rootURL) if err != nil { return nil, err @@ -106,11 +90,11 @@ func (hf *HTTPFetcher) GetBlockBytes(ctx context.Context, r basics.Round) (data return nil, errNoBlockForRound default: bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) - hf.log.Warn("HTTPFetcher.GetBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes)) + hf.log.Warnf("HTTPFetcher.getBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes)) if err == nil { - err = fmt.Errorf("GetBlockBytes error response status code %d when requesting '%s'. 
Response body '%s'", response.StatusCode, blockURL, string(bodyBytes)) + err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. Response body '%s'", response.StatusCode, blockURL, string(bodyBytes)) } else { - err = fmt.Errorf("GetBlockBytes error response status code %d when requesting '%s'. %w", response.StatusCode, blockURL, err) + err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. %w", response.StatusCode, blockURL, err) } return nil, err } @@ -146,7 +130,7 @@ func (hf *HTTPFetcher) Address() string { // FetchBlock is a copy of the functionality in NetworkFetcher.FetchBlock, designed to complete // the HTTPFetcher functionality as a standalone fetcher func (hf *HTTPFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { - fetchedBuf, err := hf.GetBlockBytes(ctx, r) + fetchedBuf, err := hf.getBlockBytes(ctx, r) if err != nil { err = fmt.Errorf("Peer %v: %v", hf.Address(), err) return diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index b175ee7d1f..7ac8977003 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -69,7 +69,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") } - fetchedBuf, err := fetcherClient.GetBlockBytes(ctx, round) + fetchedBuf, err := fetcherClient.getBlockBytes(ctx, round) if err != nil { return nil, nil, time.Duration(0), err } diff --git a/catchup/wsFetcher.go b/catchup/wsFetcher.go index e42cfd3675..758381e251 100644 --- a/catchup/wsFetcher.go +++ b/catchup/wsFetcher.go @@ -41,8 +41,8 @@ type wsFetcherClient struct { mu deadlock.Mutex } -// GetBlockBytes implements FetcherClient -func (w *wsFetcherClient) GetBlockBytes(ctx context.Context, r basics.Round) ([]byte, error) { +// getBlockBytes implements FetcherClient 
+func (w *wsFetcherClient) getBlockBytes(ctx context.Context, r basics.Round) ([]byte, error) { w.mu.Lock() defer w.mu.Unlock() if w.closed { From 8161731b463df3a5d882aed83b65fd6df4b78987 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 01:21:13 -0500 Subject: [PATCH 084/215] Configuring the peer selections --- catchup/service.go | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index 62fc031c95..a2d62d5c3d 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -340,19 +340,22 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { }() var peerSelector *peerSelector - if s.cfg.NetAddress != "" { + if s.cfg.NetAddress != "" { // Relay node peerSelector = makePeerSelector( s.net, []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}, }) } else { peerSelector = makePeerSelector( s.net, []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, }) } @@ -569,19 +572,22 @@ func (s *Service) syncCert(cert *PendingUnmatchedCertificate) { func (s *Service) fetchRound(cert agreement.Certificate, verifier 
*agreement.AsyncVoteVerifier) { blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest var peerSelector *peerSelector - if s.cfg.NetAddress != "" { + if s.cfg.NetAddress != "" { // Relay node peerSelector = makePeerSelector( s.net, []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers}, }) } else { peerSelector = makePeerSelector( s.net, []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}, }) } for s.ledger.LastRound() < cert.Round { From d1c76fc22c8c54e81384837efe825bb4ffddd327 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 13:28:42 -0500 Subject: [PATCH 085/215] remove unneeded code --- catchup/universalFetcher.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 7ac8977003..e480fb3b1d 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -82,9 +82,6 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { var decodedEntry 
rpcs.EncodedBlockCert - if uint64(r) == 0 { - r = 0 - } err = protocol.Decode(fetchedBuf, &decodedEntry) if err != nil { err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err) From 31300e6bd574eb7a8e2ce2c876228759c53b0eeb Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 19:34:22 -0500 Subject: [PATCH 086/215] updating error text --- catchup/universalFetcher.go | 2 +- catchup/universalFetcher_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index e480fb3b1d..106371ba81 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -66,7 +66,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro config: &uf.config, } } else { - return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") + return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } fetchedBuf, err := fetcherClient.getBlockBytes(ctx, round) diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 3d8ada27a7..756c927b1a 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -124,7 +124,7 @@ func TestUGetBlockUnsupported(t *testing.T) { peer := "" block, cert, duration, err := fetcher.fetchBlock(context.Background(), 1, peer) require.Error(t, err) - require.Contains(t, err.Error(), "FetchBlock: UniversalFetcher only supports HTTPPeer or UnicastPeer") + require.Contains(t, err.Error(), "FetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") require.Nil(t, block) require.Nil(t, cert) require.Equal(t, int64(duration), int64(0)) From 6f8ce6e8e9409fc16c207b9a1efac49187bd2bdc Mon Sep 17 00:00:00 2001 From: algonautshant Date: Tue, 9 Mar 2021 20:02:27 -0500 Subject: [PATCH 087/215] remving the hard-coded version from the 
test --- catchup/fetcher_test.go | 4 ++-- catchup/universalFetcher_test.go | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index cb5d92ca20..45178f787c 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -424,11 +424,11 @@ func (p *testUnicastPeer) Unicast(ctx context.Context, msg []byte, tag protocol. return nil } -func makeTestUnicastPeer(gn network.GossipNode, version string, t *testing.T) network.UnicastPeer { +func makeTestUnicastPeer(gn network.GossipNode, t *testing.T) network.UnicastPeer { wsp := testUnicastPeer{} wsp.gn = gn wsp.t = t - wsp.version = version + wsp.version = network.ProtocolVersion wsp.responseChannels = make(map[uint64]chan *network.Response) return &wsp } diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 756c927b1a..dd9a820200 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -42,13 +42,12 @@ func TestUGetBlockWs(t *testing.T) { return } - version := "2.1" blockServiceConfig := config.GetDefaultLocal() blockServiceConfig.EnableBlockService = true net := &httpTestPeerSource{} - up := makeTestUnicastPeer(net, version, t) + up := makeTestUnicastPeer(net, t) ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") ls.Start() From 1e35f6eba975fe81fc6b5e443461684e802f57be Mon Sep 17 00:00:00 2001 From: algonautshant Date: Wed, 10 Mar 2021 01:48:39 -0500 Subject: [PATCH 088/215] Reverse the check for UnicastPeer HTTPPeer, remove network Ready() --- catchup/service.go | 7 ------- catchup/universalFetcher.go | 13 ++++++------- 2 files changed, 6 insertions(+), 14 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index a2d62d5c3d..f2620ff840 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -450,13 +450,6 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { // periodicSync periodically asks the network for its latest round and syncs 
if we've fallen behind (also if our ledger stops advancing) func (s *Service) periodicSync() { defer close(s.done) - // wait until network is ready, or until we're told to quit - select { - case <-s.net.Ready(): - s.log.Info("network ready") - case <-s.ctx.Done(): - return - } // if the catchup is disabled in the config file, just skip it. if s.parallelBlocks != 0 { s.sync() diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 106371ba81..bfe0fa7b24 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -51,8 +51,12 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro cert *agreement.Certificate, downloadDuration time.Duration, err error) { var fetcherClient FetcherClient - httpPeer, validHTTPPeer := peer.(network.HTTPPeer) - if validHTTPPeer { + if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { + fetcherClient = &wsFetcherClient{ + target: wsPeer, + config: &uf.config, + } + } else if httpPeer, validHTTPPeer := peer.(network.HTTPPeer); validHTTPPeer { fetcherClient = &HTTPFetcher{ peer: httpPeer, rootURL: httpPeer.GetAddress(), @@ -60,11 +64,6 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro client: httpPeer.GetHTTPClient(), log: uf.log, config: &uf.config} - } else if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { - fetcherClient = &wsFetcherClient{ - target: wsPeer, - config: &uf.config, - } } else { return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } From 855725a67f40c6ed59298a3d371e6c98bec96f5f Mon Sep 17 00:00:00 2001 From: algonautshant Date: Wed, 10 Mar 2021 14:09:55 -0500 Subject: [PATCH 089/215] Revert Infof to Debugf --- catchup/service.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index f2620ff840..a77c31e071 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ 
-197,10 +197,10 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, if err != nil { if err == errPeerSelectorNoPeerPoolsAvailable { - s.log.Infof("fetchAndWrite: was unable to obtain a peer to retrieve the block from") + s.log.Debugf("fetchAndWrite: was unable to obtain a peer to retrieve the block from") break } - s.log.Infof("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) + s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) // we've just failed to retrieve a block; wait until the previous block is fetched before trying again // to avoid the usecase where the first block doesn't exists and we're making many requests down the chain // for no reason. @@ -294,7 +294,7 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, return false } - s.log.Infof("fetchAndWrite(%v): Wrote block to ledger", r) + s.log.Debugf("fetchAndWrite(%v): Wrote block to ledger", r) return true } s.log.Warnf("fetchAndWrite(%v): previous block doesn't exist (perhaps fetching block %v failed)", r, r-1) From fcbd432fd8980f95bf5b60644b2e33ee5106dfe6 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Wed, 10 Mar 2021 21:30:25 -0500 Subject: [PATCH 090/215] Cleanup WebsocketNetwork Ready and eventualReady. The functions are removed, and all supporting fields. Tests that relied on them are updated to use alternative means for wating whenever necessary. 
--- network/ping_test.go | 12 +-- network/requestLogger_test.go | 5 -- network/requestTracker_test.go | 23 +++--- network/wsNetwork.go | 35 -------- network/wsNetwork_test.go | 145 ++++++++++----------------------- 5 files changed, 60 insertions(+), 160 deletions(-) diff --git a/network/ping_test.go b/network/ping_test.go index 7cb17c4792..8c2e64e936 100644 --- a/network/ping_test.go +++ b/network/ping_test.go @@ -41,11 +41,13 @@ func TestPing(t *testing.T) { netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") + // wait for the peer to connect + for t := 0; t < 200; t++ { + if len(netB.peers) > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } bpeers := netB.GetPeers(PeersConnectedOut) require.Equal(t, 1, len(bpeers)) diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go index 00196a5a9e..167198cf47 100644 --- a/network/requestLogger_test.go +++ b/network/requestLogger_test.go @@ -54,7 +54,6 @@ func TestRequestLogger(t *testing.T) { } netA.config.EnableRequestLogger = true netA.setup() - netA.eventualReadyDelay = time.Second netA.config.GossipFanout = 1 netA.Start() @@ -69,10 +68,6 @@ func TestRequestLogger(t *testing.T) { netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) - select { case <-time.After(10 * time.Second): // we failed to get the event within the time limits. 
diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index 067841abc8..fcd839002f 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -91,7 +91,6 @@ func TestRateLimiting(t *testing.T) { wn.config.MaxConnectionsPerIP += int(testConfig.ConnectionsRateLimitingCount) * 5 wn.setup() - wn.eventualReadyDelay = time.Second netA := wn netA.config.GossipFanout = 1 @@ -140,19 +139,17 @@ func TestRateLimiting(t *testing.T) { connectedClients = 0 time.Sleep(100 * time.Millisecond) for i := 0; i < clientsCount; i++ { - // check if the channel is ready. - readyCh := networks[i].Ready() - select { - case <-readyCh: - // it's closed, so this client got connected. - connectedClients++ - phonebookLen := len(phonebooks[i].GetAddresses(1, PhoneBookEntryRelayRole)) - // if this channel is ready, than we should have an address, since it didn't get blocked. - require.Equal(t, 1, phonebookLen) - default: - // not ready yet. - // wait abit longer. + // Wait for peers to connect + for t := 0; t < 200; t++ { + if networks[i].NumPeers() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + connectedClients++ + phonebookLen := len(phonebooks[i].GetAddresses(1, PhoneBookEntryRelayRole)) + // if this channel is ready, then we should have an address, since it didn't get blocked.
+ require.Equal(t, 1, phonebookLen) } if connectedClients >= int(testConfig.ConnectionsRateLimitingCount) { timedOut = time.Now().After(deadline) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 3b3df0ce8f..aa619d52a5 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -154,7 +154,6 @@ type GossipNode interface { Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error Disconnect(badnode Peer) DisconnectPeers() - Ready() chan struct{} // RegisterHTTPHandler path accepts gorilla/mux path annotations RegisterHTTPHandler(path string, handler http.Handler) @@ -321,7 +320,6 @@ type WebsocketNetwork struct { RandomID string ready int32 - readyChan chan struct{} meshUpdateRequests chan meshRequest @@ -334,8 +332,6 @@ type WebsocketNetwork struct { incomingMsgFilter *messageFilter // message filter to remove duplicate incoming messages from different peers - eventualReadyDelay time.Duration - relayMessages bool // True if we should relay messages from other nodes (nominally true for relays, false otherwise) prioScheme NetPrioScheme @@ -518,11 +514,6 @@ func (wn *WebsocketNetwork) DisconnectPeers() { closeGroup.Wait() } -// Ready returns a chan that will be closed when we have a minimum number of peer connections active -func (wn *WebsocketNetwork) Ready() chan struct{} { - return wn.readyChan -} - // RegisterHTTPHandler path accepts gorilla/mux path annotations func (wn *WebsocketNetwork) RegisterHTTPHandler(path string, handler http.Handler) { wn.router.Handle(path, handler) @@ -631,9 +622,7 @@ func (wn *WebsocketNetwork) setup() { wn.broadcastQueueHighPrio = make(chan broadcastRequest, wn.outgoingMessagesBufferSize) wn.broadcastQueueBulk = make(chan broadcastRequest, 100) wn.meshUpdateRequests = make(chan meshRequest, 5) - wn.readyChan = make(chan struct{}) wn.tryConnectAddrs = make(map[string]int64) - wn.eventualReadyDelay = time.Minute wn.prioTracker = newPrioTracker(wn) if wn.slowWritingPeerMonitorInterval == 0 
{ wn.slowWritingPeerMonitorInterval = slowWritingPeerMonitorInterval @@ -2144,30 +2133,6 @@ func (wn *WebsocketNetwork) addPeer(peer *wsPeer) { wn.prioTracker.setPriority(peer, peer.prioAddress, peer.prioWeight) atomic.AddInt32(&wn.peersChangeCounter, 1) wn.countPeersSetGauges() - if len(wn.peers) >= wn.config.GossipFanout { - // we have a quorum of connected peers, if we weren't ready before, we are now - if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) { - wn.log.Debug("ready") - close(wn.readyChan) - } - } else if atomic.LoadInt32(&wn.ready) == 0 { - // but if we're not ready in a minute, call whatever peers we've got as good enough - wn.wg.Add(1) - go wn.eventualReady() - } -} - -func (wn *WebsocketNetwork) eventualReady() { - defer wn.wg.Done() - minute := time.NewTimer(wn.eventualReadyDelay) - select { - case <-wn.ctx.Done(): - case <-minute.C: - if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) { - wn.log.Debug("ready") - close(wn.readyChan) - } - } } // should be run from inside a context holding wn.peersLock diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 3464ee750c..ff810d1431 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -25,7 +25,6 @@ import ( "net" "net/http" "os" - "runtime" "sort" "strings" "sync" @@ -122,7 +121,6 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local) *Websocket NetworkID: config.Devtestnet, } wn.setup() - wn.eventualReadyDelay = time.Second return wn } @@ -205,17 +203,6 @@ func TestWebsocketNetworkStartStop(t *testing.T) { netA.Stop() } -func waitReady(t testing.TB, wn *WebsocketNetwork, timeout <-chan time.Time) bool { - select { - case <-wn.Ready(): - return true - case <-timeout: - _, file, line, _ := runtime.Caller(1) - t.Fatalf("%s:%d timeout waiting for ready", file, line) - return false - } -} - // Set up two nodes, test that a.Broadcast is received by B func TestWebsocketNetworkBasic(t *testing.T) { netA := makeTestWebsocketNode(t) @@ -234,12 +221,6 @@ func 
TestWebsocketNetworkBasic(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") - netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil) netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil) @@ -268,11 +249,13 @@ func TestWebsocketNetworkUnicast(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") + // wait for peers to connect (2 seconds max) + for t := 0; t < 200; t++ { + if len(netA.peers) > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } require.Equal(t, 1, len(netA.peers)) require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn))) @@ -310,11 +293,13 @@ func TestWebsocketNetworkNoAddress(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") + // Wait for peers to connect + for t := 0; t < 200; t++ { + if netA.NumPeers() > 0 && netB.NumPeers() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil) netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil) @@ -363,17 +348,6 @@ func closeNodes(nodes []*WebsocketNetwork) { wg.Wait() } -func waitNodesReady(t *testing.T, nodes []*WebsocketNetwork, timeout time.Duration) { - tc := time.After(timeout) - for i, node := range nodes { 
- select { - case <-node.Ready(): - case <-tc: - t.Fatalf("node[%d] not ready at timeout", i) - } - } -} - const lineNetworkLength = 20 const lineNetworkNumMessages = 5 @@ -383,7 +357,6 @@ const lineNetworkNumMessages = 5 func TestLineNetwork(t *testing.T) { nodes, counters := lineNetwork(t, lineNetworkLength) t.Logf("line network length: %d", lineNetworkLength) - waitNodesReady(t, nodes, 2*time.Second) t.Log("ready") defer closeNodes(nodes) counter := &counters[len(counters)-1] @@ -692,7 +665,6 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor } require.True(t, wn.config.EnableIncomingMessageFilter) wn.setup() - wn.eventualReadyDelay = time.Second require.True(t, wn.config.EnableIncomingMessageFilter) return wn } @@ -727,14 +699,6 @@ func TestDupFilter(t *testing.T) { msg := make([]byte, messageFilterSize+1) rand.Read(msg) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") - waitReady(t, netC, readyTimeout.C) - t.Log("c ready") - // TODO: this test has two halves that exercise inbound de-dup and outbound non-send due to recieved hash. But it doesn't properly _test_ them as it doesn't measure _why_ it receives each message exactly once. The second half below could actualy be because of the same inbound de-dup as this first half. You can see the actions of either in metrics. 
// algod_network_duplicate_message_received_total{} 2 // algod_outgoing_network_message_filtered_out_total{} 2 @@ -792,16 +756,15 @@ func TestGetPeers(t *testing.T) { netB.Start() defer netB.Stop() - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") - + // Wait for peers to connect + for t := 0; t < 200; t++ { + if netA.NumPeers() > 0 && netB.NumPeers() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } phbMulti.ReplacePeerList([]string{"a", "b", "c"}, "ph", PhoneBookEntryRelayRole) - //addrB, _ := netB.Address() - // A has only an inbound connection from B aPeers := netA.GetPeers(PeersConnectedOut) assert.Equal(t, 0, len(aPeers)) @@ -858,11 +821,6 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) { bhandler := benchmarkHandler{returns} netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: &bhandler}}) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") var ireturned uint64 t.StartTimer() @@ -941,7 +899,6 @@ func TestWebsocketNetworkPrio(t *testing.T) { case <-time.After(time.Second): t.Errorf("timeout on netA.prioResponseChan") } - waitReady(t, netA, time.After(time.Second)) // Peek at A's peers netA.peersLock.RLock() @@ -1004,7 +961,6 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) { case <-time.After(time.Second): t.Errorf("timeout on netA.prioResponseChan 2") } - waitReady(t, netA, time.After(time.Second)) netA.Broadcast(context.Background(), protocol.TxnTag, nil, true, nil) @@ -1063,13 +1019,6 @@ func TestWebsocketNetworkManyIdle(t *testing.T) { clients = append(clients, client) } - readyTimeout := time.NewTimer(30 * time.Second) - waitReady(t, relay, readyTimeout.C) - - for i := 0; i < numClients; i++ { - waitReady(t, clients[i], readyTimeout.C) - } - var r0utime, r1utime int64 var r0stime, r1stime 
int64 @@ -1178,10 +1127,6 @@ func TestDelayedMessageDrop(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) - currentTime := time.Now() for i := 0; i < 10; i++ { err := netA.broadcastWithTimestamp(protocol.TxnTag, []byte("foo"), currentTime.Add(time.Hour*time.Duration(i-5))) @@ -1207,7 +1152,6 @@ func TestSlowPeerDisconnection(t *testing.T) { slowWritingPeerMonitorInterval: time.Millisecond * 50, } wn.setup() - wn.eventualReadyDelay = time.Second wn.messagesOfInterest = nil // clear this before starting the network so that we won't be sending a MOI upon connection. netA := wn @@ -1226,9 +1170,13 @@ func TestSlowPeerDisconnection(t *testing.T) { netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) + // Wait for peers to connect + for t := 0; t < 200; t++ { + if netA.NumPeers() > 0 && netB.NumPeers() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } var peers []*wsPeer peers, _ = netA.peerSnapshot(peers) @@ -1266,7 +1214,6 @@ func TestForceMessageRelaying(t *testing.T) { NetworkID: config.Devtestnet, } wn.setup() - wn.eventualReadyDelay = time.Second netA := wn netA.config.GossipFanout = 1 @@ -1295,11 +1242,6 @@ func TestForceMessageRelaying(t *testing.T) { netC.Start() defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }() - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) - waitReady(t, netC, readyTimeout.C) - // send 5 messages from both netB and netC to netA for i := 0; i < 5; i++ { err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil) @@ -1446,11 +1388,13 @@ func 
TestWebsocketNetworkTopicRoundtrip(t *testing.T) { }, }) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - t.Log("a ready") - waitReady(t, netB, readyTimeout.C) - t.Log("b ready") + // Wait for peers to connect + for t := 0; t < 200; t++ { + if netA.NumPeers() > 0 && netB.NumPeers() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } peerA := netA.peers[0] @@ -1527,10 +1471,6 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) { MessageHandler: HandlerFunc(waitMessageArriveHandler), }}) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) - // have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag netB.Broadcast(context.Background(), protocol.MsgOfInterestTag, MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag}), true, nil) // send another message which we can track, so that we'll know that the first message was delivered. @@ -1620,9 +1560,14 @@ func TestWebsocketDisconnection(t *testing.T) { netB.ClearHandlers() netB.RegisterHandlers(taggedHandlersB) - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) + // Wait for peers to connect + for t := 0; t < 200; t++ { + if netA.NumPeers() > 0 && netB.NumPeers() > 0 { + break + } + time.Sleep(10 * time.Millisecond) + } + netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0}, true, nil) // wait until the peers disconnect. 
for { @@ -1778,10 +1723,6 @@ func BenchmarkVariableTransactionMessageBlockSizes(t *testing.B) { netB.ClearHandlers() - readyTimeout := time.NewTimer(2 * time.Second) - waitReady(t, netA, readyTimeout.C) - waitReady(t, netB, readyTimeout.C) - highestRate := float64(1) sinceHighestRate := 0 rate := float64(0) From 3e3a622b5e4c061b2076769dfc4c5432d9cf747b Mon Sep 17 00:00:00 2001 From: algonautshant Date: Thu, 11 Mar 2021 00:31:00 -0500 Subject: [PATCH 091/215] Combine wsFetcher and httpFetcher with universalFetcher --- catchup/fetcher.go | 21 ---- catchup/httpFetcher.go | 143 -------------------------- catchup/universalFetcher.go | 198 ++++++++++++++++++++++++++++++++++-- catchup/wsFetcher.go | 111 -------------------- 4 files changed, 192 insertions(+), 281 deletions(-) delete mode 100644 catchup/fetcher.go delete mode 100644 catchup/httpFetcher.go delete mode 100644 catchup/wsFetcher.go diff --git a/catchup/fetcher.go b/catchup/fetcher.go deleted file mode 100644 index b2f626a39d..0000000000 --- a/catchup/fetcher.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright (C) 2019-2021 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . 
- -package catchup - - -/* Utils */ - diff --git a/catchup/httpFetcher.go b/catchup/httpFetcher.go deleted file mode 100644 index f41c3b1695..0000000000 --- a/catchup/httpFetcher.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright (C) 2019-2021 Algorand, Inc. -// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . - -package catchup - -import ( - "context" - "errors" - "fmt" - "net/http" - "path" - "strconv" - "time" - - "github.com/algorand/go-algorand/agreement" - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/data/bookkeeping" - "github.com/algorand/go-algorand/logging" - "github.com/algorand/go-algorand/network" - "github.com/algorand/go-algorand/rpcs" -) - -// set max fetcher size to 5MB, this is enough to fit the block and certificate -const fetcherMaxBlockBytes = 5 << 20 - -var errNoBlockForRound = errors.New("No block available for given round") - -// FetcherClient abstracts how to getBlockBytes from a node on the net. 
-type FetcherClient interface { - getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) - Address() string -} - -// HTTPFetcher implements FetcherClient doing an HTTP GET of the block -type HTTPFetcher struct { - peer network.HTTPPeer - rootURL string - net network.GossipNode - - client *http.Client - - log logging.Logger - config *config.Local -} - -// getBlockBytes gets a block. -// Core piece of FetcherClient interface -func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { - parsedURL, err := network.ParseHostOrURL(hf.rootURL) - if err != nil { - return nil, err - } - - parsedURL.Path = hf.net.SubstituteGenesisID(path.Join(parsedURL.Path, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(r), 36))) - blockURL := parsedURL.String() - hf.log.Debugf("block GET %#v peer %#v %T", blockURL, hf.peer, hf.peer) - request, err := http.NewRequest("GET", blockURL, nil) - if err != nil { - return nil, err - } - requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(hf.config.CatchupHTTPBlockFetchTimeoutSec)*time.Second) - defer requestCancel() - request = request.WithContext(requestCtx) - network.SetUserAgentHeader(request.Header) - response, err := hf.client.Do(request) - if err != nil { - hf.log.Debugf("GET %#v : %s", blockURL, err) - return nil, err - } - - // check to see that we had no errors. - switch response.StatusCode { - case http.StatusOK: - case http.StatusNotFound: // server could not find a block with that round numbers. - response.Body.Close() - return nil, errNoBlockForRound - default: - bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) - hf.log.Warnf("HTTPFetcher.getBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes)) - if err == nil { - err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. 
Response body '%s'", response.StatusCode, blockURL, string(bodyBytes)) - } else { - err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. %w", response.StatusCode, blockURL, err) - } - return nil, err - } - - // at this point, we've already receieved the response headers. ensure that the - // response content type is what we'd like it to be. - contentTypes := response.Header["Content-Type"] - if len(contentTypes) != 1 { - err = fmt.Errorf("http block fetcher invalid content type count %d", len(contentTypes)) - hf.log.Warn(err) - response.Body.Close() - return nil, err - } - - // TODO: Temporarily allow old and new content types so we have time for lazy upgrades - // Remove this 'old' string after next release. - const blockResponseContentTypeOld = "application/algorand-block-v1" - if contentTypes[0] != rpcs.BlockResponseContentType && contentTypes[0] != blockResponseContentTypeOld { - hf.log.Warnf("http block fetcher response has an invalid content type : %s", contentTypes[0]) - response.Body.Close() - return nil, fmt.Errorf("http block fetcher invalid content type '%s'", contentTypes[0]) - } - - return rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) -} - -// Address is part of FetcherClient interface. -// Returns the root URL of the connected peer. 
-func (hf *HTTPFetcher) Address() string { - return hf.rootURL -} - -// FetchBlock is a copy of the functionality in NetworkFetcher.FetchBlock, designed to complete -// the HTTPFetcher functionality as a standalone fetcher -func (hf *HTTPFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { - fetchedBuf, err := hf.getBlockBytes(ctx, r) - if err != nil { - err = fmt.Errorf("Peer %v: %v", hf.Address(), err) - return - } - block, cert, err := processBlockBytes(fetchedBuf, r, hf.Address()) - if err != nil { - return - } - return block, cert, nil -} diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index bfe0fa7b24..3c2093272e 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -18,9 +18,16 @@ package catchup import ( "context" + "encoding/binary" + "errors" "fmt" + "net/http" + "path" + "strconv" "time" + "github.com/algorand/go-deadlock" + "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/basics" @@ -50,29 +57,32 @@ func makeUniversalBlockFetcher(log logging.Logger, net network.GossipNode, confi func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, err error) { - var fetcherClient FetcherClient + var fetchedBuf []byte + var address string if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { - fetcherClient = &wsFetcherClient{ + fetcherClient := &wsFetcherClient{ target: wsPeer, config: &uf.config, } + fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round) + address = fetcherClient.address() } else if httpPeer, validHTTPPeer := peer.(network.HTTPPeer); validHTTPPeer { - fetcherClient = &HTTPFetcher{ + fetcherClient := &HTTPFetcher{ peer: httpPeer, rootURL: httpPeer.GetAddress(), net: uf.net, client: 
httpPeer.GetHTTPClient(), log: uf.log, config: &uf.config} + fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round) + address = fetcherClient.address() } else { return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } - - fetchedBuf, err := fetcherClient.getBlockBytes(ctx, round) if err != nil { return nil, nil, time.Duration(0), err } - block, cert, err := processBlockBytes(fetchedBuf, round, fetcherClient.Address()) + block, cert, err := processBlockBytes(fetchedBuf, round, address) if err != nil { return nil, nil, time.Duration(0), err } @@ -98,3 +108,179 @@ func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk } return &decodedEntry.Block, &decodedEntry.Certificate, nil } + +// a stub fetcherClient to satisfy the NetworkFetcher interface +type wsFetcherClient struct { + target network.UnicastPeer // the peer where we're going to send the request. + config *config.Local + + mu deadlock.Mutex +} + +// getBlockBytes implements FetcherClient +func (w *wsFetcherClient) getBlockBytes(ctx context.Context, r basics.Round) ([]byte, error) { + w.mu.Lock() + defer w.mu.Unlock() + + childCtx, cancelFunc := context.WithTimeout(ctx, time.Duration(w.config.CatchupGossipBlockFetchTimeoutSec)*time.Second) + w.mu.Unlock() + + defer func() { + cancelFunc() + // note that we don't need to have additional Unlock here since + // we already have a defered Unlock above ( which executes in reversed order ) + w.mu.Lock() + }() + + blockBytes, err := w.requestBlock(childCtx, r) + if err != nil { + return nil, err + } + if len(blockBytes) == 0 { + return nil, fmt.Errorf("wsFetcherClient(%d): empty response", r) + } + return blockBytes, nil +} + +// Address implements FetcherClient +func (w *wsFetcherClient) address() string { + return fmt.Sprintf("[ws] (%v)", w.target.GetAddress()) +} + +// requestBlock send a request for block and wait until it receives a response or a context expires. 
+func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) { + roundBin := make([]byte, binary.MaxVarintLen64) + binary.PutUvarint(roundBin, uint64(round)) + topics := network.Topics{ + network.MakeTopic(rpcs.RequestDataTypeKey, + []byte(rpcs.BlockAndCertValue)), + network.MakeTopic( + rpcs.RoundKey, + roundBin), + } + resp, err := w.target.Request(ctx, protocol.UniEnsBlockReqTag, topics) + if err != nil { + return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %v", w.target.GetAddress(), round, err) + } + + if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found { + return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %s", w.target.GetAddress(), round, string(errMsg)) + } + + blk, found := resp.Topics.GetValue(rpcs.BlockDataKey) + if !found { + return nil, fmt.Errorf("wsFetcherClient(%s): request failed: block data not found", w.target.GetAddress()) + } + cert, found := resp.Topics.GetValue(rpcs.CertDataKey) + if !found { + return nil, fmt.Errorf("wsFetcherClient(%s): request failed: cert data not found", w.target.GetAddress()) + } + + blockCertBytes := protocol.EncodeReflect(rpcs.PreEncodedBlockCert{ + Block: blk, + Certificate: cert}) + + return blockCertBytes, nil +} + +// set max fetcher size to 5MB, this is enough to fit the block and certificate +const fetcherMaxBlockBytes = 5 << 20 + +var errNoBlockForRound = errors.New("No block available for given round") + +// HTTPFetcher implements FetcherClient doing an HTTP GET of the block +type HTTPFetcher struct { + peer network.HTTPPeer + rootURL string + net network.GossipNode + + client *http.Client + + log logging.Logger + config *config.Local +} + +// getBlockBytes gets a block. 
+// Core piece of FetcherClient interface +func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { + parsedURL, err := network.ParseHostOrURL(hf.rootURL) + if err != nil { + return nil, err + } + + parsedURL.Path = hf.net.SubstituteGenesisID(path.Join(parsedURL.Path, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(r), 36))) + blockURL := parsedURL.String() + hf.log.Debugf("block GET %#v peer %#v %T", blockURL, hf.peer, hf.peer) + request, err := http.NewRequest("GET", blockURL, nil) + if err != nil { + return nil, err + } + requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(hf.config.CatchupHTTPBlockFetchTimeoutSec)*time.Second) + defer requestCancel() + request = request.WithContext(requestCtx) + network.SetUserAgentHeader(request.Header) + response, err := hf.client.Do(request) + if err != nil { + hf.log.Debugf("GET %#v : %s", blockURL, err) + return nil, err + } + + // check to see that we had no errors. + switch response.StatusCode { + case http.StatusOK: + case http.StatusNotFound: // server could not find a block with that round numbers. + response.Body.Close() + return nil, errNoBlockForRound + default: + bodyBytes, err := rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) + hf.log.Warnf("HTTPFetcher.getBlockBytes: response status code %d from '%s'. Response body '%s' ", response.StatusCode, blockURL, string(bodyBytes)) + if err == nil { + err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. Response body '%s'", response.StatusCode, blockURL, string(bodyBytes)) + } else { + err = fmt.Errorf("getBlockBytes error response status code %d when requesting '%s'. %w", response.StatusCode, blockURL, err) + } + return nil, err + } + + // at this point, we've already receieved the response headers. ensure that the + // response content type is what we'd like it to be. 
+ contentTypes := response.Header["Content-Type"] + if len(contentTypes) != 1 { + err = fmt.Errorf("http block fetcher invalid content type count %d", len(contentTypes)) + hf.log.Warn(err) + response.Body.Close() + return nil, err + } + + // TODO: Temporarily allow old and new content types so we have time for lazy upgrades + // Remove this 'old' string after next release. + const blockResponseContentTypeOld = "application/algorand-block-v1" + if contentTypes[0] != rpcs.BlockResponseContentType && contentTypes[0] != blockResponseContentTypeOld { + hf.log.Warnf("http block fetcher response has an invalid content type : %s", contentTypes[0]) + response.Body.Close() + return nil, fmt.Errorf("http block fetcher invalid content type '%s'", contentTypes[0]) + } + + return rpcs.ResponseBytes(response, hf.log, fetcherMaxBlockBytes) +} + +// Address is part of FetcherClient interface. +// Returns the root URL of the connected peer. +func (hf *HTTPFetcher) address() string { + return hf.rootURL +} + +// FetchBlock is a copy of the functionality in NetworkFetcher.FetchBlock, designed to complete +// the HTTPFetcher functionality as a standalone fetcher +func (hf *HTTPFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { + fetchedBuf, err := hf.getBlockBytes(ctx, r) + if err != nil { + err = fmt.Errorf("Peer %v: %v", hf.address(), err) + return + } + block, cert, err := processBlockBytes(fetchedBuf, r, hf.address()) + if err != nil { + return + } + return block, cert, nil +} diff --git a/catchup/wsFetcher.go b/catchup/wsFetcher.go deleted file mode 100644 index 758381e251..0000000000 --- a/catchup/wsFetcher.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (C) 2019-2021 Algorand, Inc. 
-// This file is part of go-algorand -// -// go-algorand is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as -// published by the Free Software Foundation, either version 3 of the -// License, or (at your option) any later version. -// -// go-algorand is distributed in the hope that it will be useful, -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with go-algorand. If not, see . - -package catchup - -import ( - "context" - "encoding/binary" - "fmt" - "time" - - "github.com/algorand/go-deadlock" - - "github.com/algorand/go-algorand/config" - "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/network" - "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/rpcs" -) - -// a stub fetcherClient to satisfy the NetworkFetcher interface -type wsFetcherClient struct { - target network.UnicastPeer // the peer where we're going to send the request. - config *config.Local - - closed bool // a flag indicating that the fetcher will not perform additional block retrivals. 
- - mu deadlock.Mutex -} - -// getBlockBytes implements FetcherClient -func (w *wsFetcherClient) getBlockBytes(ctx context.Context, r basics.Round) ([]byte, error) { - w.mu.Lock() - defer w.mu.Unlock() - if w.closed { - return nil, fmt.Errorf("wsFetcherClient(%d): shutdown", r) - } - - childCtx, cancelFunc := context.WithTimeout(ctx, time.Duration(w.config.CatchupGossipBlockFetchTimeoutSec)*time.Second) - w.mu.Unlock() - - defer func() { - cancelFunc() - // note that we don't need to have additional Unlock here since - // we already have a defered Unlock above ( which executes in reversed order ) - w.mu.Lock() - }() - - blockBytes, err := w.requestBlock(childCtx, r) - if err != nil { - return nil, err - } - if len(blockBytes) == 0 { - return nil, fmt.Errorf("wsFetcherClient(%d): empty response", r) - } - return blockBytes, nil -} - -// Address implements FetcherClient -func (w *wsFetcherClient) Address() string { - return fmt.Sprintf("[ws] (%v)", w.target.GetAddress()) -} - -// requestBlock send a request for block and wait until it receives a response or a context expires. 
-func (w *wsFetcherClient) requestBlock(ctx context.Context, round basics.Round) ([]byte, error) { - roundBin := make([]byte, binary.MaxVarintLen64) - binary.PutUvarint(roundBin, uint64(round)) - topics := network.Topics{ - network.MakeTopic(rpcs.RequestDataTypeKey, - []byte(rpcs.BlockAndCertValue)), - network.MakeTopic( - rpcs.RoundKey, - roundBin), - } - resp, err := w.target.Request(ctx, protocol.UniEnsBlockReqTag, topics) - if err != nil { - return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %v", w.target.GetAddress(), round, err) - } - - if errMsg, found := resp.Topics.GetValue(network.ErrorKey); found { - return nil, fmt.Errorf("wsFetcherClient(%s).requestBlock(%d): Request failed, %s", w.target.GetAddress(), round, string(errMsg)) - } - - blk, found := resp.Topics.GetValue(rpcs.BlockDataKey) - if !found { - return nil, fmt.Errorf("wsFetcherClient(%s): request failed: block data not found", w.target.GetAddress()) - } - cert, found := resp.Topics.GetValue(rpcs.CertDataKey) - if !found { - return nil, fmt.Errorf("wsFetcherClient(%s): request failed: cert data not found", w.target.GetAddress()) - } - - blockCertBytes := protocol.EncodeReflect(rpcs.PreEncodedBlockCert{ - Block: blk, - Certificate: cert}) - - return blockCertBytes, nil -} From 47d8383f53e059ee87b68fe525cda6190eddd90e Mon Sep 17 00:00:00 2001 From: algonautshant Date: Thu, 11 Mar 2021 01:04:38 -0500 Subject: [PATCH 092/215] fix test race issue --- network/ping_test.go | 2 +- network/wsNetwork_test.go | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/network/ping_test.go b/network/ping_test.go index 8c2e64e936..4b4e8d3ab7 100644 --- a/network/ping_test.go +++ b/network/ping_test.go @@ -43,7 +43,7 @@ func TestPing(t *testing.T) { // wait for the peer to connect for t := 0; t < 200; t++ { - if len(netB.peers) > 0 { + if netB.NumPeers() > 0 { break } time.Sleep(10 * time.Millisecond) diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go 
index ff810d1431..b143e99568 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -251,13 +251,13 @@ func TestWebsocketNetworkUnicast(t *testing.T) { // wait for peers to connect (2 seconds max) for t := 0; t < 200; t++ { - if len(netA.peers) > 0 { + if netA.NumPeers() > 0 { break } time.Sleep(10 * time.Millisecond) } - require.Equal(t, 1, len(netA.peers)) + require.Equal(t, 1, netA.NumPeers()) require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn))) peerB := netA.peers[0] err := peerB.Unicast(context.Background(), []byte("foo"), protocol.TxnTag) @@ -580,7 +580,7 @@ func avgSendBufferHighPrioLength(wn *WebsocketNetwork) float64 { for _, peer := range wn.peers { sum += len(peer.sendBufferHighPrio) } - return float64(sum) / float64(len(wn.peers)) + return float64(sum) / float64(wn.NumPeers()) } // TestSlowOutboundPeer tests what happens when one outbound peer is slow and the rest are fine. Current logic is to disconnect the one slow peer when its outbound channel is full. 
@@ -903,7 +903,7 @@ func TestWebsocketNetworkPrio(t *testing.T) { // Peek at A's peers netA.peersLock.RLock() defer netA.peersLock.RUnlock() - require.Equal(t, len(netA.peers), 1) + require.Equal(t, netA.NumPeers(), 1) require.Equal(t, netA.peers[0].prioAddress, prioB.addr) require.Equal(t, netA.peers[0].prioWeight, prioB.prio) From d76d27d69b4b7538279ed74ffac0c6da5fa75537 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Thu, 11 Mar 2021 14:13:20 -0500 Subject: [PATCH 093/215] restore Ready and eventualReady in newtwork package --- network/ping_test.go | 12 ++- network/requestLogger_test.go | 5 ++ network/requestTracker_test.go | 23 ++--- network/wsNetwork.go | 35 ++++++++ network/wsNetwork_test.go | 151 +++++++++++++++++++++++---------- 5 files changed, 163 insertions(+), 63 deletions(-) diff --git a/network/ping_test.go b/network/ping_test.go index 4b4e8d3ab7..7cb17c4792 100644 --- a/network/ping_test.go +++ b/network/ping_test.go @@ -41,13 +41,11 @@ func TestPing(t *testing.T) { netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() - // wait for the peer to connect - for t := 0; t < 200; t++ { - if netB.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") bpeers := netB.GetPeers(PeersConnectedOut) require.Equal(t, 1, len(bpeers)) diff --git a/network/requestLogger_test.go b/network/requestLogger_test.go index 167198cf47..00196a5a9e 100644 --- a/network/requestLogger_test.go +++ b/network/requestLogger_test.go @@ -54,6 +54,7 @@ func TestRequestLogger(t *testing.T) { } netA.config.EnableRequestLogger = true netA.setup() + netA.eventualReadyDelay = time.Second netA.config.GossipFanout = 1 netA.Start() @@ -68,6 +69,10 @@ func TestRequestLogger(t *testing.T) { netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() + readyTimeout := 
time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + select { case <-time.After(10 * time.Second): // we failed to get the event within the time limits. diff --git a/network/requestTracker_test.go b/network/requestTracker_test.go index fcd839002f..067841abc8 100644 --- a/network/requestTracker_test.go +++ b/network/requestTracker_test.go @@ -91,6 +91,7 @@ func TestRateLimiting(t *testing.T) { wn.config.MaxConnectionsPerIP += int(testConfig.ConnectionsRateLimitingCount) * 5 wn.setup() + wn.eventualReadyDelay = time.Second netA := wn netA.config.GossipFanout = 1 @@ -139,17 +140,19 @@ func TestRateLimiting(t *testing.T) { connectedClients = 0 time.Sleep(100 * time.Millisecond) for i := 0; i < clientsCount; i++ { - // Wait for peers to connect - for t := 0; t < 200; t++ { - if networks[i].NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) + // check if the channel is ready. + readyCh := networks[i].Ready() + select { + case <-readyCh: + // it's closed, so this client got connected. + connectedClients++ + phonebookLen := len(phonebooks[i].GetAddresses(1, PhoneBookEntryRelayRole)) + // if this channel is ready, than we should have an address, since it didn't get blocked. + require.Equal(t, 1, phonebookLen) + default: + // not ready yet. + // wait abit longer. } - connectedClients++ - phonebookLen := len(phonebooks[i].GetAddresses(1, PhoneBookEntryRelayRole)) - // if this channel is ready, than we should have an address, since it didn't get blocked. 
- require.Equal(t, 1, phonebookLen) } if connectedClients >= int(testConfig.ConnectionsRateLimitingCount) { timedOut = time.Now().After(deadline) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index aa619d52a5..3b3df0ce8f 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -154,6 +154,7 @@ type GossipNode interface { Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error Disconnect(badnode Peer) DisconnectPeers() + Ready() chan struct{} // RegisterHTTPHandler path accepts gorilla/mux path annotations RegisterHTTPHandler(path string, handler http.Handler) @@ -320,6 +321,7 @@ type WebsocketNetwork struct { RandomID string ready int32 + readyChan chan struct{} meshUpdateRequests chan meshRequest @@ -332,6 +334,8 @@ type WebsocketNetwork struct { incomingMsgFilter *messageFilter // message filter to remove duplicate incoming messages from different peers + eventualReadyDelay time.Duration + relayMessages bool // True if we should relay messages from other nodes (nominally true for relays, false otherwise) prioScheme NetPrioScheme @@ -514,6 +518,11 @@ func (wn *WebsocketNetwork) DisconnectPeers() { closeGroup.Wait() } +// Ready returns a chan that will be closed when we have a minimum number of peer connections active +func (wn *WebsocketNetwork) Ready() chan struct{} { + return wn.readyChan +} + // RegisterHTTPHandler path accepts gorilla/mux path annotations func (wn *WebsocketNetwork) RegisterHTTPHandler(path string, handler http.Handler) { wn.router.Handle(path, handler) @@ -622,7 +631,9 @@ func (wn *WebsocketNetwork) setup() { wn.broadcastQueueHighPrio = make(chan broadcastRequest, wn.outgoingMessagesBufferSize) wn.broadcastQueueBulk = make(chan broadcastRequest, 100) wn.meshUpdateRequests = make(chan meshRequest, 5) + wn.readyChan = make(chan struct{}) wn.tryConnectAddrs = make(map[string]int64) + wn.eventualReadyDelay = time.Minute wn.prioTracker = newPrioTracker(wn) if wn.slowWritingPeerMonitorInterval == 0 
{ wn.slowWritingPeerMonitorInterval = slowWritingPeerMonitorInterval @@ -2133,6 +2144,30 @@ func (wn *WebsocketNetwork) addPeer(peer *wsPeer) { wn.prioTracker.setPriority(peer, peer.prioAddress, peer.prioWeight) atomic.AddInt32(&wn.peersChangeCounter, 1) wn.countPeersSetGauges() + if len(wn.peers) >= wn.config.GossipFanout { + // we have a quorum of connected peers, if we weren't ready before, we are now + if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) { + wn.log.Debug("ready") + close(wn.readyChan) + } + } else if atomic.LoadInt32(&wn.ready) == 0 { + // but if we're not ready in a minute, call whatever peers we've got as good enough + wn.wg.Add(1) + go wn.eventualReady() + } +} + +func (wn *WebsocketNetwork) eventualReady() { + defer wn.wg.Done() + minute := time.NewTimer(wn.eventualReadyDelay) + select { + case <-wn.ctx.Done(): + case <-minute.C: + if atomic.CompareAndSwapInt32(&wn.ready, 0, 1) { + wn.log.Debug("ready") + close(wn.readyChan) + } + } } // should be run from inside a context holding wn.peersLock diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index b143e99568..3464ee750c 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -25,6 +25,7 @@ import ( "net" "net/http" "os" + "runtime" "sort" "strings" "sync" @@ -121,6 +122,7 @@ func makeTestWebsocketNodeWithConfig(t testing.TB, conf config.Local) *Websocket NetworkID: config.Devtestnet, } wn.setup() + wn.eventualReadyDelay = time.Second return wn } @@ -203,6 +205,17 @@ func TestWebsocketNetworkStartStop(t *testing.T) { netA.Stop() } +func waitReady(t testing.TB, wn *WebsocketNetwork, timeout <-chan time.Time) bool { + select { + case <-wn.Ready(): + return true + case <-timeout: + _, file, line, _ := runtime.Caller(1) + t.Fatalf("%s:%d timeout waiting for ready", file, line) + return false + } +} + // Set up two nodes, test that a.Broadcast is received by B func TestWebsocketNetworkBasic(t *testing.T) { netA := makeTestWebsocketNode(t) @@ -221,6 +234,12 @@ func 
TestWebsocketNetworkBasic(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") + netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil) netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil) @@ -249,15 +268,13 @@ func TestWebsocketNetworkUnicast(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) - // wait for peers to connect (2 seconds max) - for t := 0; t < 200; t++ { - if netA.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") - require.Equal(t, 1, netA.NumPeers()) + require.Equal(t, 1, len(netA.peers)) require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn))) peerB := netA.peers[0] err := peerB.Unicast(context.Background(), []byte("foo"), protocol.TxnTag) @@ -293,13 +310,11 @@ func TestWebsocketNetworkNoAddress(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) - // Wait for peers to connect - for t := 0; t < 200; t++ { - if netA.NumPeers() > 0 && netB.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") netA.Broadcast(context.Background(), protocol.TxnTag, []byte("foo"), false, nil) netA.Broadcast(context.Background(), protocol.TxnTag, []byte("bar"), false, nil) @@ -348,6 +363,17 @@ func closeNodes(nodes []*WebsocketNetwork) { wg.Wait() } +func 
waitNodesReady(t *testing.T, nodes []*WebsocketNetwork, timeout time.Duration) { + tc := time.After(timeout) + for i, node := range nodes { + select { + case <-node.Ready(): + case <-tc: + t.Fatalf("node[%d] not ready at timeout", i) + } + } +} + const lineNetworkLength = 20 const lineNetworkNumMessages = 5 @@ -357,6 +383,7 @@ const lineNetworkNumMessages = 5 func TestLineNetwork(t *testing.T) { nodes, counters := lineNetwork(t, lineNetworkLength) t.Logf("line network length: %d", lineNetworkLength) + waitNodesReady(t, nodes, 2*time.Second) t.Log("ready") defer closeNodes(nodes) counter := &counters[len(counters)-1] @@ -580,7 +607,7 @@ func avgSendBufferHighPrioLength(wn *WebsocketNetwork) float64 { for _, peer := range wn.peers { sum += len(peer.sendBufferHighPrio) } - return float64(sum) / float64(wn.NumPeers()) + return float64(sum) / float64(len(wn.peers)) } // TestSlowOutboundPeer tests what happens when one outbound peer is slow and the rest are fine. Current logic is to disconnect the one slow peer when its outbound channel is full. @@ -665,6 +692,7 @@ func makeTestFilterWebsocketNode(t *testing.T, nodename string) *WebsocketNetwor } require.True(t, wn.config.EnableIncomingMessageFilter) wn.setup() + wn.eventualReadyDelay = time.Second require.True(t, wn.config.EnableIncomingMessageFilter) return wn } @@ -699,6 +727,14 @@ func TestDupFilter(t *testing.T) { msg := make([]byte, messageFilterSize+1) rand.Read(msg) + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") + waitReady(t, netC, readyTimeout.C) + t.Log("c ready") + // TODO: this test has two halves that exercise inbound de-dup and outbound non-send due to recieved hash. But it doesn't properly _test_ them as it doesn't measure _why_ it receives each message exactly once. The second half below could actualy be because of the same inbound de-dup as this first half. 
You can see the actions of either in metrics. // algod_network_duplicate_message_received_total{} 2 // algod_outgoing_network_message_filtered_out_total{} 2 @@ -756,15 +792,16 @@ func TestGetPeers(t *testing.T) { netB.Start() defer netB.Stop() - // Wait for peers to connect - for t := 0; t < 200; t++ { - if netA.NumPeers() > 0 && netB.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") + phbMulti.ReplacePeerList([]string{"a", "b", "c"}, "ph", PhoneBookEntryRelayRole) + //addrB, _ := netB.Address() + // A has only an inbound connection from B aPeers := netA.GetPeers(PeersConnectedOut) assert.Equal(t, 0, len(aPeers)) @@ -821,6 +858,11 @@ func BenchmarkWebsocketNetworkBasic(t *testing.B) { bhandler := benchmarkHandler{returns} netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: &bhandler}}) + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") var ireturned uint64 t.StartTimer() @@ -899,11 +941,12 @@ func TestWebsocketNetworkPrio(t *testing.T) { case <-time.After(time.Second): t.Errorf("timeout on netA.prioResponseChan") } + waitReady(t, netA, time.After(time.Second)) // Peek at A's peers netA.peersLock.RLock() defer netA.peersLock.RUnlock() - require.Equal(t, netA.NumPeers(), 1) + require.Equal(t, len(netA.peers), 1) require.Equal(t, netA.peers[0].prioAddress, prioB.addr) require.Equal(t, netA.peers[0].prioWeight, prioB.prio) @@ -961,6 +1004,7 @@ func TestWebsocketNetworkPrioLimit(t *testing.T) { case <-time.After(time.Second): t.Errorf("timeout on netA.prioResponseChan 2") } + waitReady(t, netA, time.After(time.Second)) netA.Broadcast(context.Background(), protocol.TxnTag, nil, true, nil) @@ -1019,6 +1063,13 @@ func TestWebsocketNetworkManyIdle(t 
*testing.T) { clients = append(clients, client) } + readyTimeout := time.NewTimer(30 * time.Second) + waitReady(t, relay, readyTimeout.C) + + for i := 0; i < numClients; i++ { + waitReady(t, clients[i], readyTimeout.C) + } + var r0utime, r1utime int64 var r0stime, r1stime int64 @@ -1127,6 +1178,10 @@ func TestDelayedMessageDrop(t *testing.T) { counterDone := counter.done netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + currentTime := time.Now() for i := 0; i < 10; i++ { err := netA.broadcastWithTimestamp(protocol.TxnTag, []byte("foo"), currentTime.Add(time.Hour*time.Duration(i-5))) @@ -1152,6 +1207,7 @@ func TestSlowPeerDisconnection(t *testing.T) { slowWritingPeerMonitorInterval: time.Millisecond * 50, } wn.setup() + wn.eventualReadyDelay = time.Second wn.messagesOfInterest = nil // clear this before starting the network so that we won't be sending a MOI upon connection. 
netA := wn @@ -1170,13 +1226,9 @@ func TestSlowPeerDisconnection(t *testing.T) { netB.Start() defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() - // Wait for peers to connect - for t := 0; t < 200; t++ { - if netA.NumPeers() > 0 && netB.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) var peers []*wsPeer peers, _ = netA.peerSnapshot(peers) @@ -1214,6 +1266,7 @@ func TestForceMessageRelaying(t *testing.T) { NetworkID: config.Devtestnet, } wn.setup() + wn.eventualReadyDelay = time.Second netA := wn netA.config.GossipFanout = 1 @@ -1242,6 +1295,11 @@ func TestForceMessageRelaying(t *testing.T) { netC.Start() defer func() { t.Log("stopping C"); netC.Stop(); t.Log("C done") }() + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + waitReady(t, netC, readyTimeout.C) + // send 5 messages from both netB and netC to netA for i := 0; i < 5; i++ { err := netB.Relay(context.Background(), protocol.TxnTag, []byte{1, 2, 3}, true, nil) @@ -1388,13 +1446,11 @@ func TestWebsocketNetworkTopicRoundtrip(t *testing.T) { }, }) - // Wait for peers to connect - for t := 0; t < 200; t++ { - if netA.NumPeers() > 0 && netB.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") peerA := netA.peers[0] @@ -1471,6 +1527,10 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) { MessageHandler: HandlerFunc(waitMessageArriveHandler), }}) + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + // have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag netB.Broadcast(context.Background(), 
protocol.MsgOfInterestTag, MarshallMessageOfInterest([]protocol.Tag{protocol.AgreementVoteTag, protocol.ProposalPayloadTag}), true, nil) // send another message which we can track, so that we'll know that the first message was delivered. @@ -1560,14 +1620,9 @@ func TestWebsocketDisconnection(t *testing.T) { netB.ClearHandlers() netB.RegisterHandlers(taggedHandlersB) - // Wait for peers to connect - for t := 0; t < 200; t++ { - if netA.NumPeers() > 0 && netB.NumPeers() > 0 { - break - } - time.Sleep(10 * time.Millisecond) - } - + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) netA.Broadcast(context.Background(), protocol.ProposalPayloadTag, []byte{0}, true, nil) // wait until the peers disconnect. for { @@ -1723,6 +1778,10 @@ func BenchmarkVariableTransactionMessageBlockSizes(t *testing.B) { netB.ClearHandlers() + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + waitReady(t, netB, readyTimeout.C) + highestRate := float64(1) sinceHighestRate := 0 rate := float64(0) From 38e56211dc02876722a2e08b481195e0b7964b5d Mon Sep 17 00:00:00 2001 From: algonautshant Date: Thu, 11 Mar 2021 21:18:37 -0500 Subject: [PATCH 094/215] stop pipelinedFetch if there are no peers. 
Message fixes --- catchup/service.go | 5 ++++- catchup/universalFetcher.go | 24 +++++------------------- 2 files changed, 9 insertions(+), 20 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index a77c31e071..bfe8cd6163 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -358,7 +358,10 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, }) } - + if _, err := peerSelector.GetNextPeer(); err == errPeerSelectorNoPeerPoolsAvailable { + return + } + // Invariant: len(taskCh) + (# pending writes to completed) <= N wg.Add(int(parallelRequests)) for i := uint64(0); i < parallelRequests; i++ { diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 3c2093272e..8f97fe1446 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -53,7 +53,7 @@ func makeUniversalBlockFetcher(log logging.Logger, net network.GossipNode, confi log: log} } -// FetchBlock returns a block from the peer. The peer can be either an http or ws peer. +// fetchBlock returns a block from the peer. The peer can be either an http or ws peer. 
func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, downloadDuration time.Duration, err error) { @@ -77,7 +77,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro fetchedBuf, err = fetcherClient.getBlockBytes(ctx, round) address = fetcherClient.address() } else { - return nil, nil, time.Duration(0), fmt.Errorf("FetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") + return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } if err != nil { return nil, nil, time.Duration(0), err @@ -93,17 +93,17 @@ func processBlockBytes(fetchedBuf []byte, r basics.Round, debugStr string) (blk var decodedEntry rpcs.EncodedBlockCert err = protocol.Decode(fetchedBuf, &decodedEntry) if err != nil { - err = fmt.Errorf("networkFetcher.FetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err) + err = fmt.Errorf("fetchBlock(%d): cannot decode block from peer %v: %v", r, debugStr, err) return } if decodedEntry.Block.Round() != r { - err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong block from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Block.Round()) + err = fmt.Errorf("fetchBlock(%d): got wrong block from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Block.Round()) return } if decodedEntry.Certificate.Round != r { - err = fmt.Errorf("networkFetcher.FetchBlock(%d): got wrong cert from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Certificate.Round) + err = fmt.Errorf("fetchBlock(%d): got wrong cert from peer %v: wanted %v, got %v", r, debugStr, r, decodedEntry.Certificate.Round) return } return &decodedEntry.Block, &decodedEntry.Certificate, nil @@ -270,17 +270,3 @@ func (hf *HTTPFetcher) address() string { return hf.rootURL } -// FetchBlock is a copy of the functionality in NetworkFetcher.FetchBlock, designed to 
complete -// the HTTPFetcher functionality as a standalone fetcher -func (hf *HTTPFetcher) FetchBlock(ctx context.Context, r basics.Round) (blk *bookkeeping.Block, cert *agreement.Certificate, err error) { - fetchedBuf, err := hf.getBlockBytes(ctx, r) - if err != nil { - err = fmt.Errorf("Peer %v: %v", hf.address(), err) - return - } - block, cert, err := processBlockBytes(fetchedBuf, r, hf.address()) - if err != nil { - return - } - return block, cert, nil -} From d896faee981133826432e9714887fa1dc3bc1628 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Fri, 12 Mar 2021 00:03:08 -0500 Subject: [PATCH 095/215] Fix test failure. debug message if no peers found. --- catchup/service.go | 3 ++- catchup/universalFetcher_test.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index bfe8cd6163..5aff54a328 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -359,9 +359,10 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { }) } if _, err := peerSelector.GetNextPeer(); err == errPeerSelectorNoPeerPoolsAvailable { + s.log.Debugf("pipelinedFetch: was unable to obtain a peer to retrieve the block from") return } - + // Invariant: len(taskCh) + (# pending writes to completed) <= N wg.Add(int(parallelRequests)) for i := uint64(0); i < parallelRequests; i++ { diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index dd9a820200..5c28681a70 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -123,7 +123,7 @@ func TestUGetBlockUnsupported(t *testing.T) { peer := "" block, cert, duration, err := fetcher.fetchBlock(context.Background(), 1, peer) require.Error(t, err) - require.Contains(t, err.Error(), "FetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") + require.Contains(t, err.Error(), "fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") require.Nil(t, block) require.Nil(t, cert) require.Equal(t, 
int64(duration), int64(0)) From 166214b5000ee21fa975d540232eaa24cf1370fb Mon Sep 17 00:00:00 2001 From: algonautshant Date: Fri, 12 Mar 2021 01:04:43 -0500 Subject: [PATCH 096/215] Add feedback to peerselector. Update Catchpoint service fetcher time duration computation. --- catchup/catchpointService.go | 4 +--- catchup/service.go | 35 +++++++++++++++++++++++------------ catchup/universalFetcher.go | 2 ++ 3 files changed, 26 insertions(+), 15 deletions(-) diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index 8cc06ffaab..043471a9a5 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -596,8 +596,7 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock: recurring non-HTTP peer was provided by the peer selector")) } fetcher := makeUniversalBlockFetcher(cs.log, cs.net, cs.config) - blockDownloadStartTime := time.Now() - blk, _, _, err = fetcher.fetchBlock(cs.ctx, round, httpPeer) + blk, _, downloadDuration, err = fetcher.fetchBlock(cs.ctx, round, httpPeer) if err != nil { if cs.ctx.Err() != nil { return nil, time.Duration(0), peer, true, cs.stopOrAbort() @@ -611,7 +610,6 @@ func (cs *CatchpointCatchupService) fetchBlock(round basics.Round, retryCount ui return nil, time.Duration(0), peer, true, cs.abort(fmt.Errorf("fetchBlock failed after multiple blocks download attempts")) } // success - downloadDuration = time.Now().Sub(blockDownloadStartTime) return blk, downloadDuration, peer, false, nil } diff --git a/catchup/service.go b/catchup/service.go index 5aff54a328..e98ec1d849 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -151,7 +151,7 @@ func (s *Service) SynchronizingTime() time.Duration { } // function scope to make a bunch of defer statements better -func (s *Service) innerFetch(r basics.Round, peerSelector *peerSelector) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err 
error) { +func (s *Service) innerFetch(r basics.Round, peerSelector *peerSelector, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { ctx, cf := context.WithCancel(s.ctx) fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) defer cf() @@ -164,10 +164,6 @@ func (s *Service) innerFetch(r basics.Round, peerSelector *peerSelector) (blk *b cf() } }() - peer, err := peerSelector.GetNextPeer() - if err != nil { - return nil, nil, time.Duration(0), err - } return fetcher.fetchBlock(ctx, r, peer) } @@ -191,16 +187,18 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, return false } - // Try to fetch, timing out after retryInterval + peer, getPeerErr := peerSelector.GetNextPeer() + if getPeerErr != nil { + s.log.Debugf("fetchAndWrite: was unable to obtain a peer to retrieve the block from") + break + } - block, cert, _, err := s.innerFetch(r, peerSelector) + // Try to fetch, timing out after retryInterval + block, cert, blockDownloadDuration, err := s.innerFetch(r, peerSelector, peer) if err != nil { - if err == errPeerSelectorNoPeerPoolsAvailable { - s.log.Debugf("fetchAndWrite: was unable to obtain a peer to retrieve the block from") - break - } s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) + peerSelector.RankPeer(peer, peerRankDownloadFailed) // we've just failed to retrieve a block; wait until the previous block is fetched before trying again // to avoid the usecase where the first block doesn't exists and we're making many requests down the chain // for no reason. 
@@ -225,6 +223,7 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, // Check that the block's contents match the block header (necessary with an untrusted block because b.Hash() only hashes the header) if !block.ContentsMatchHeader() { + peerSelector.RankPeer(peer, peerRankInvalidDownload) // Check if this mismatch is due to an unsupported protocol version if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok { s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol) @@ -252,9 +251,13 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, err = s.auth.Authenticate(block, cert) if err != nil { s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err) + peerSelector.RankPeer(peer, peerRankInvalidDownload) continue // retry the fetch } + peerRank := peerSelector.PeerDownloadDurationToRank(peer, blockDownloadDuration) + peerSelector.RankPeer(peer, peerRank) + // Write to ledger, noting that ledger writes must be in order select { case <-s.ctx.Done(): @@ -588,8 +591,14 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy }) } for s.ledger.LastRound() < cert.Round { + peer, getPeerErr := peerSelector.GetNextPeer() + if getPeerErr != nil { + s.log.Debugf("fetchRound: was unable to obtain a peer to retrieve the block from") + break + } + // Ask the fetcher to get the block somehow - block, fetchedCert, _, err := s.innerFetch(cert.Round, peerSelector) + block, fetchedCert, _, err := s.innerFetch(cert.Round, peerSelector, peer) if err != nil { select { @@ -599,6 +608,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy default: } logging.Base().Warnf("fetchRound could not acquire block, fetcher errored out: %v", err) + peerSelector.RankPeer(peer, peerRankDownloadFailed) continue } @@ -608,6 +618,7 @@ func (s *Service) fetchRound(cert 
agreement.Certificate, verifier *agreement.Asy } // Otherwise, fetcher gave us the wrong block logging.Base().Warnf("fetcher gave us bad/wrong block (for round %d): fetched hash %v; want hash %v", cert.Round, block.Hash(), blockHash) + peerSelector.RankPeer(peer, peerRankInvalidDownload) // As a failsafe, if the cert we fetched is valid but for the wrong block, panic as loudly as possible if cert.Round == fetchedCert.Round && diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 8f97fe1446..1b36f47c87 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -59,6 +59,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro var fetchedBuf []byte var address string + blockDownloadStartTime := time.Now() if wsPeer, validWSPeer := peer.(network.UnicastPeer); validWSPeer { fetcherClient := &wsFetcherClient{ target: wsPeer, @@ -79,6 +80,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro } else { return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } + downloadDuration = time.Now().Sub(blockDownloadStartTime) if err != nil { return nil, nil, time.Duration(0), err } From 53cede8fec4b90e1b41763168347e0c96f1195ab Mon Sep 17 00:00:00 2001 From: algonautshant Date: Fri, 12 Mar 2021 13:04:35 -0500 Subject: [PATCH 097/215] remove unused argument --- catchup/service.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index e98ec1d849..2450ed4e86 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -151,7 +151,7 @@ func (s *Service) SynchronizingTime() time.Duration { } // function scope to make a bunch of defer statements better -func (s *Service) innerFetch(r basics.Round, peerSelector *peerSelector, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { +func (s *Service) innerFetch(r 
basics.Round, peer network.Peer) (blk *bookkeeping.Block, cert *agreement.Certificate, ddur time.Duration, err error) { ctx, cf := context.WithCancel(s.ctx) fetcher := makeUniversalBlockFetcher(s.log, s.net, s.cfg) defer cf() @@ -194,7 +194,7 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, } // Try to fetch, timing out after retryInterval - block, cert, blockDownloadDuration, err := s.innerFetch(r, peerSelector, peer) + block, cert, blockDownloadDuration, err := s.innerFetch(r, peer) if err != nil { s.log.Debugf("fetchAndWrite(%v): Could not fetch: %v (attempt %d)", r, err, i) @@ -598,7 +598,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy } // Ask the fetcher to get the block somehow - block, fetchedCert, _, err := s.innerFetch(cert.Round, peerSelector, peer) + block, fetchedCert, _, err := s.innerFetch(cert.Round, peer) if err != nil { select { From 8c6f565c79f5a590ab5528988ddfd2016a40f73c Mon Sep 17 00:00:00 2001 From: algonautshant Date: Mon, 15 Mar 2021 00:04:41 -0400 Subject: [PATCH 098/215] Fix and resotre the catchup service tests --- catchup/fetcher_test.go | 3 +- catchup/pref_test.go | 7 +- catchup/service_test.go | 203 ++++++++++++++++++++++--------- catchup/universalFetcher_test.go | 4 +- 4 files changed, 151 insertions(+), 66 deletions(-) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index 45178f787c..7140ca0848 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -169,7 +169,7 @@ func (df *dummyFetcher) Close() error { return nil } -func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) { +func buildTestLedger(t *testing.T, blk bookkeeping.Block) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) { var user basics.Address user[0] = 123 @@ -223,6 +223,7 @@ func buildTestLedger(t *testing.T) (ledger *data.Ledger, next basics.Round, b bo prev, err := 
ledger.Block(ledger.LastRound()) require.NoError(t, err) + b = blk b.BlockHeader.RewardsState.RewardsPool = poolAddr b.RewardsLevel = prev.RewardsLevel b.BlockHeader.Round = next diff --git a/catchup/pref_test.go b/catchup/pref_test.go index aa5cae5792..332cee6c70 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -20,7 +20,6 @@ import ( "math/rand" "strconv" "testing" - // "time" "github.com/stretchr/testify/require" @@ -57,12 +56,12 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { // Make Service syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil) - // syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int), latency: 100 * time.Millisecond, predictable: true}} b.StartTimer() - syncer.sync() + syncer.Start() b.StopTimer() - local.Close() + syncer.Stop() require.Equal(b, remote.LastRound(), local.LastRound()) + local.Close() } } diff --git a/catchup/service_test.go b/catchup/service_test.go index ae4b906ddf..09afbcedf2 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -20,7 +20,7 @@ import ( "context" "errors" "math/rand" - // "sync" + "sync" "testing" "time" @@ -28,9 +28,9 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/agreement" - // "github.com/algorand/go-algorand/components/mocks" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/committee" @@ -44,9 +44,6 @@ var defaultConfig = config.GetDefaultLocal() var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} var 
sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} - - - // Mocked Fetcher will mock UniversalFetcher type MockedFetcher struct { ledger Ledger @@ -130,9 +127,10 @@ func (auth *mockedAuthenticator) alter(errorRound int, fail bool) { func TestServiceFetchBlocksSameRange(t *testing.T) { // Make Ledgers - _, local := testingenv(t, 10) - - remote, _, blk, err := buildTestLedger(t) + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) + + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { t.Fatal(err) return @@ -140,7 +138,7 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { addBlocks(t, remote, blk, 10) // Create a network and block service - blockServiceConfig := config.GetDefaultLocal() + blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") @@ -151,7 +149,6 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { rootURL := nodeA.rootURL() net.addPeer(rootURL) - // Make Service syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) @@ -161,12 +158,12 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { require.Equal(t, rr, lr) } - func TestPeriodicSync(t *testing.T) { // Make Ledger - _, local := testingenv(t, 10) + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) - remote, _, blk, err := buildTestLedger(t) + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { t.Fatal(err) return @@ -174,7 +171,7 @@ func TestPeriodicSync(t *testing.T) { addBlocks(t, remote, blk, 10) // Create a network and block service - blockServiceConfig := config.GetDefaultLocal() + blockServiceConfig := config.GetDefaultLocal() net := 
&httpTestPeerSource{} ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") @@ -185,12 +182,11 @@ func TestPeriodicSync(t *testing.T) { rootURL := nodeA.rootURL() net.addPeer(rootURL) - auth := &mockedAuthenticator{fail: true} initialLocalRound := local.LastRound() require.True(t, 0 == initialLocalRound) - // Make Service + // Make Service s := MakeService(logging.Base(), defaultConfig, net, local, auth, nil) s.deadlineTimeout = 2 * time.Second @@ -216,10 +212,11 @@ func TestPeriodicSync(t *testing.T) { func TestServiceFetchBlocksOneBlock(t *testing.T) { // Make Ledger numBlocks := 10 - _, local := testingenv(t, numBlocks) + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) lastRoundAtStart := local.LastRound() - remote, _, blk, err := buildTestLedger(t) + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { t.Fatal(err) return @@ -227,7 +224,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { addBlocks(t, remote, blk, numBlocks-1) // Create a network and block service - blockServiceConfig := config.GetDefaultLocal() + blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") @@ -264,7 +261,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { require.NoError(t, err) require.Equal(t, *block, localBlock) } -/* + func TestAbruptWrites(t *testing.T) { numberOfBlocks := 100 @@ -273,13 +270,32 @@ func TestAbruptWrites(t *testing.T) { } // Make Ledger - remote, local := testingenv(t, numberOfBlocks) + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) lastRound := local.LastRound() + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, numberOfBlocks-1) + + // Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := 
&httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + // Make Service - s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil) - s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) var wg sync.WaitGroup wg.Add(1) @@ -310,13 +326,33 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { if testing.Short() { numberOfBlocks = basics.Round(10) } - remote, local := testingenv(t, int(numberOfBlocks)) + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) + lastRoundAtStart := local.LastRound() + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, int(numberOfBlocks)-1) + + // Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + // Make Service - syncer := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil) - syncer.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} - fetcher := syncer.blockFetcherFactory.newBlockFetcher() + syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: 
-1}, nil) + fetcher := makeUniversalBlockFetcher(logging.Base(), net, defaultConfig) // Start the service ( dummy ) syncer.testStart() @@ -329,7 +365,7 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { for i := basics.Round(1); i <= numberOfBlocks; i++ { // Get the same block we wrote - blk, _, _, err2 := fetcher.fetchBlock(context.Background(), i, nil) + blk, _, _, err2 := fetcher.fetchBlock(context.Background(), i, net.GetPeers()[0]) require.NoError(t, err2) // Check we wrote the correct block @@ -342,11 +378,33 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { func TestServiceFetchBlocksMalformed(t *testing.T) { // Make Ledger - _, local := testingenv(t, 10) + numBlocks := 10 + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) lastRoundAtStart := local.LastRound() + + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, numBlocks-1) + + // Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + // Make Service - s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) // Start the service ( dummy ) s.testStart() @@ -373,6 +431,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) { // i.e. rounds 1 and 2 may be simultaneously fetched. 
require.Less(t, int(local.LastRound()), 3) require.Equal(t, lastRoundRemote, int(remote.LastRound())) + remote.Ledger.Close() } // Test the interruption in "the rest" loop @@ -388,6 +447,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) { } require.Equal(t, lastRoundLocal, int(local.LastRound())) require.Equal(t, lastRoundRemote, int(remote.LastRound())) + remote.Ledger.Close() } // Test the interruption with short notice (less than @@ -411,6 +471,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) { // fetched. require.Less(t, int(local.LastRound()), lastRoundLocal+2) require.Equal(t, lastRoundRemote, int(remote.LastRound())) + remote.Ledger.Close() } // Test the interruption with short notice (less than @@ -442,6 +503,7 @@ func TestOnSwitchToUnSupportedProtocol(t *testing.T) { // ledger, round 8 will not be fetched. require.Equal(t, int(local.LastRound()), lastRoundLocal) require.Equal(t, lastRoundRemote, int(remote.LastRound())) + remote.Ledger.Close() } } @@ -450,7 +512,7 @@ func helperTestOnSwitchToUnSupportedProtocol( lastRoundRemote, lastRoundLocal, roundWithSwitchOn, - roundsToCopy int) (local, remote Ledger) { + roundsToCopy int) (Ledger, *data.Ledger) { // Make Ledger mRemote, mLocal := testingenvWithUpgrade(t, lastRoundRemote, roundWithSwitchOn, lastRoundLocal+1) @@ -460,22 +522,44 @@ func helperTestOnSwitchToUnSupportedProtocol( mLocal.blocks = append(mLocal.blocks, mRemote.blocks[r]) } - local = mLocal - remote = Ledger(mRemote) + local := mLocal + config := defaultConfig config.CatchupParallelBlocks = 2 + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) //mRemote.blocks[0]) + if err != nil { + t.Fatal(err) + return local, remote + } + for i := 1; i < lastRoundRemote; i++ { + blk.NextProtocolSwitchOn = mRemote.blocks[i].NextProtocolSwitchOn + blk.NextProtocol = mRemote.blocks[i].NextProtocol + addBlocks(t, remote, blk, 1) + blk.BlockHeader.Round++ + } + + // Create a network and block service + net := &httpTestPeerSource{} + 
ls := rpcs.MakeBlockService(config, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + // Make Service - s := MakeService(logging.Base(), config, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: -1}, nil) + s := MakeService(logging.Base(), config, net, local, &mockedAuthenticator{errorRound: -1}, nil) s.deadlineTimeout = 2 * time.Second - s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} s.Start() defer s.Stop() <-s.done return local, remote } -*/ + const defaultRewardUnit = 1e6 type mockedLedger struct { @@ -581,23 +665,6 @@ func (m *mockedLedger) IsWritingCatchpointFile() bool { return false } -func testingenv(t testing.TB, numBlocks int) (ledger, emptyLedger Ledger) { - mLedger := new(mockedLedger) - mEmptyLedger := new(mockedLedger) - - var blk bookkeeping.Block - blk.CurrentProtocol = protocol.ConsensusCurrentVersion - mLedger.blocks = append(mLedger.blocks, blk) - mEmptyLedger.blocks = append(mEmptyLedger.blocks, blk) - - for i := 1; i <= numBlocks; i++ { - blk = bookkeeping.MakeBlock(blk.BlockHeader) - mLedger.blocks = append(mLedger.blocks, blk) - } - - return mLedger, mEmptyLedger -} - func testingenvWithUpgrade( t testing.TB, numBlocks, @@ -644,16 +711,35 @@ func (s *Service) testStart() { s.ctx, s.cancel = context.WithCancel(context.Background()) s.InitialSyncDone = make(chan struct{}) } -/* + func TestCatchupUnmatchedCertificate(t *testing.T) { // Make Ledger - remote, local := testingenv(t, 10) - + numBlocks := 10 + local := new(mockedLedger) + local.blocks = append(local.blocks, bookkeeping.Block{}) lastRoundAtStart := local.LastRound() + remote, _, blk, err := buildTestLedger(t, bookkeeping.Block{}) + if err != nil { + t.Fatal(err) + return + } + addBlocks(t, remote, blk, numBlocks-1) + + // 
Create a network and block service + blockServiceConfig := config.GetDefaultLocal() + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + // Make Service - s := MakeService(logging.Base(), defaultConfig, &mocks.MockNetwork{}, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) - s.blockFetcherFactory = &mockBlockFetcherFactory{mf: &MockedFetcher{ledger: remote, timeout: false, tries: make(map[basics.Round]int)}} + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) s.testStart() for roundNumber := 2; roundNumber < 10; roundNumber += 3 { pc := &PendingUnmatchedCertificate{ @@ -667,4 +753,3 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { s.syncCert(pc) } } -*/ diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index 5c28681a70..f8ca52c792 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -36,7 +36,7 @@ func TestUGetBlockWs(t *testing.T) { cfg := config.GetDefaultLocal() cfg.EnableCatchupFromArchiveServers = true - ledger, next, b, err := buildTestLedger(t) + ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { t.Fatal(err) return @@ -78,7 +78,7 @@ func TestUGetBlockHttp(t *testing.T) { cfg := config.GetDefaultLocal() cfg.EnableCatchupFromArchiveServers = true - ledger, next, b, err := buildTestLedger(t) + ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { t.Fatal(err) return From 6be9849ce4e048c2cdfc6bfb3637adccd06102fb Mon Sep 17 00:00:00 2001 From: algonautshant Date: Mon, 15 Mar 2021 14:22:09 -0400 Subject: [PATCH 099/215] Refactor createPeerSelector --- catchup/service.go | 77 
+++++++++++++++++++++++----------------------- 1 file changed, 39 insertions(+), 38 deletions(-) diff --git a/catchup/service.go b/catchup/service.go index 2450ed4e86..b534723732 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -342,25 +342,8 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { close(completed) }() - var peerSelector *peerSelector - if s.cfg.NetAddress != "" { // Relay node - peerSelector = makePeerSelector( - s.net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}, - }) - } else { - peerSelector = makePeerSelector( - s.net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, - }) - } + peerSelector := s.createPeerSelector(true) + if _, err := peerSelector.GetNextPeer(); err == errPeerSelectorNoPeerPoolsAvailable { s.log.Debugf("pipelinedFetch: was unable to obtain a peer to retrieve the block from") return @@ -571,25 +554,7 @@ func (s *Service) syncCert(cert *PendingUnmatchedCertificate) { // TODO this doesn't actually use the digest from cert! 
func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.AsyncVoteVerifier) { blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest - var peerSelector *peerSelector - if s.cfg.NetAddress != "" { // Relay node - peerSelector = makePeerSelector( - s.net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers}, - }) - } else { - peerSelector = makePeerSelector( - s.net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}, - }) - } + peerSelector := s.createPeerSelector(false) for s.ledger.LastRound() < cert.Round { peer, getPeerErr := peerSelector.GetNextPeer() if getPeerErr != nil { @@ -688,3 +653,39 @@ func (s *Service) handleUnsupportedRound(nextUnsupportedRound basics.Round) { s.cancel() } } + +func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector { + var peerClasses []peerClass + if pipelineFetch { + if s.cfg.NetAddress != "" { // Relay node + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}, + } + } else { + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: 
network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + } + } + } else { + if s.cfg.NetAddress != "" { // Relay node + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers}, + } + } else { + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}, + } + } + } + return makePeerSelector(s.net, peerClasses) +} From 3294c79c22151a69884add3724363efd2128cd36 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Wed, 17 Mar 2021 02:46:04 -0400 Subject: [PATCH 100/215] fix the benchmark test --- catchup/pref_test.go | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/catchup/pref_test.go b/catchup/pref_test.go index 332cee6c70..7337bec783 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -20,10 +20,10 @@ import ( "math/rand" "strconv" "testing" + "time" "github.com/stretchr/testify/require" - "github.com/algorand/go-algorand/components/mocks" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data" @@ -33,6 +33,7 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" + "github.com/algorand/go-algorand/rpcs" ) func BenchmarkServiceFetchBlocks(b *testing.B) 
{ @@ -43,9 +44,17 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { require.NotNil(b, remote) require.NotNil(b, local) - - net := &mocks.MockNetwork{} - + + // Create a network and block service + net := &httpTestPeerSource{} + ls := rpcs.MakeBlockService(config.GetDefaultLocal(), remote, net, "test genesisID") + nodeA := basicRPCNode{} + nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) + nodeA.start() + defer nodeA.stop() + rootURL := nodeA.rootURL() + net.addPeer(rootURL) + cfg := config.GetDefaultLocal() cfg.Archival = true @@ -58,6 +67,12 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil) b.StartTimer() syncer.Start() + for w := 0; w < 1000; w++ { + if remote.LastRound() == local.LastRound() { + break + } + time.Sleep(10 * time.Millisecond) + } b.StopTimer() syncer.Stop() require.Equal(b, remote.LastRound(), local.LastRound()) From fec95aa6a3c47f1573a03f3b407dac68743bf703 Mon Sep 17 00:00:00 2001 From: algonautshant Date: Wed, 17 Mar 2021 01:52:56 -0400 Subject: [PATCH 101/215] sync-cert RequestConnectOutgoing instead of quitting --- catchup/service.go | 3 ++- catchup/universalFetcher.go | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/catchup/service.go b/catchup/service.go index b534723732..2b3a222e78 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -559,7 +559,8 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy peer, getPeerErr := peerSelector.GetNextPeer() if getPeerErr != nil { s.log.Debugf("fetchRound: was unable to obtain a peer to retrieve the block from") - break + s.net.RequestConnectOutgoing(true, s.ctx.Done()) + continue } // Ask the fetcher to get the block somehow diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 1b36f47c87..844b3a2d00 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -88,6 +88,7 @@ func (uf 
*universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro if err != nil { return nil, nil, time.Duration(0), err } + uf.log.Debugf("fetchBlock: downloaded block %d from %s", uint64(round), address) return block, cert, downloadDuration, err } From a89d0a3d784a9edffbb98318350177b97936757c Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Thu, 18 Mar 2021 09:49:36 -0400 Subject: [PATCH 102/215] Simplify max check in loop --- daemon/algod/api/server/v2/handlers.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 2509761a19..e3b3edba49 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "io" + "math" "net/http" "time" @@ -583,11 +584,16 @@ func (v2 *Handlers) getPendingTransactions(ctx echo.Context, max *uint64, format RewardsPool: basics.Address{}, } + txnLimit := uint64(math.MaxUint64) + if max != nil && *max != 0 { + txnLimit = *max + } + // Convert transactions to msgp / json strings topTxns := make([]transactions.SignedTxn, 0) for _, txn := range txnPool { // break out if we've reached the max number of transactions - if max != nil && *max != 0 && uint64(len(topTxns)) >= *max { + if uint64(len(topTxns)) >= txnLimit { break } From f3c77f7522aff16dd43a28b15d584c474dfef019 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 18 Mar 2021 10:46:22 -0400 Subject: [PATCH 103/215] optimize allocation. 
--- util/bloom/bloom.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index 92a044c01e..637825d95b 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -29,14 +29,13 @@ type Filter struct { func New(sizeBits int, numHashes uint32, prefix uint32) *Filter { m := (sizeBits + 7) / 8 filter := Filter{ - numHashes: numHashes, - data: make([]byte, m), - preimageStagingBuffer: make([]byte, 0, 4+32), - hashStagingBuffer: make([]uint32, numHashes+3), + numHashes: numHashes, + data: make([]byte, m), + hashStagingBuffer: make([]uint32, numHashes+3), } binary.BigEndian.PutUint32(filter.prefix[:], prefix) + filter.preimageStagingBuffer = make([]byte, 0, len(filter.prefix)+32) copy(filter.preimageStagingBuffer, filter.prefix[:]) - filter.preimageStagingBuffer = filter.preimageStagingBuffer[:len(filter.prefix)] return &filter } @@ -131,10 +130,9 @@ func UnmarshalBinary(data []byte) (*Filter, error) { } copy(f.prefix[:], data[4:8]) f.data = data[8:] - f.preimageStagingBuffer = make([]byte, 0, 4+32) + f.preimageStagingBuffer = make([]byte, len(f.prefix), len(f.prefix)+32) f.hashStagingBuffer = make([]uint32, f.numHashes+3) copy(f.preimageStagingBuffer, f.prefix[:]) - f.preimageStagingBuffer = f.preimageStagingBuffer[:len(f.prefix)] return f, nil } From 5430a44e2b085237eed5d24392dada65261ebee5 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 18 Mar 2021 11:26:23 -0400 Subject: [PATCH 104/215] correct a bug and add a unit test. 
--- util/bloom/bloom.go | 2 +- util/bloom/bloom_test.go | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) diff --git a/util/bloom/bloom.go b/util/bloom/bloom.go index 637825d95b..eeffc88eff 100644 --- a/util/bloom/bloom.go +++ b/util/bloom/bloom.go @@ -34,7 +34,7 @@ func New(sizeBits int, numHashes uint32, prefix uint32) *Filter { hashStagingBuffer: make([]uint32, numHashes+3), } binary.BigEndian.PutUint32(filter.prefix[:], prefix) - filter.preimageStagingBuffer = make([]byte, 0, len(filter.prefix)+32) + filter.preimageStagingBuffer = make([]byte, len(filter.prefix), len(filter.prefix)+32) copy(filter.preimageStagingBuffer, filter.prefix[:]) return &filter } diff --git a/util/bloom/bloom_test.go b/util/bloom/bloom_test.go index dd59a8b9e3..abf4ff5c4f 100644 --- a/util/bloom/bloom_test.go +++ b/util/bloom/bloom_test.go @@ -350,3 +350,20 @@ func BenchmarkBloomFilterTest(b *testing.B) { bf.Test(dataset[x%bfElements]) } } + +// TestBloomFilterReferenceHash ensure that we generate a bloom filter in a consistent way. This is important since we want to ensure that +// this code is backward compatible. 
+func TestBloomFilterReferenceHash(t *testing.T) { + N := 3 + sizeBits, numHashes := Optimal(N, 0.01) + prefix := uint32(0x11223344) + bf := New(sizeBits, numHashes, prefix) + + for n := 0; n < N; n++ { + hash := crypto.Hash([]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}) + bf.Set(hash[:]) + } + bytes, err := bf.MarshalBinary() + require.NoError(t, err) + require.Equal(t, []byte{0x0, 0x0, 0x0, 0x7, 0x11, 0x22, 0x33, 0x44, 0x62, 0xf0, 0xe, 0x2c, 0x8c}, bytes) +} From a13e06796771198de164e2677a997f00d9638d59 Mon Sep 17 00:00:00 2001 From: Nicholas Guo Date: Thu, 18 Mar 2021 09:46:27 -0700 Subject: [PATCH 105/215] change --- cmd/algoh/main.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go index 45cb0c9867..ef4afbb133 100644 --- a/cmd/algoh/main.go +++ b/cmd/algoh/main.go @@ -145,7 +145,6 @@ func main() { } err = cmd.Wait() if err != nil { - captureErrorLogs(algohConfig, errorOutput, output, absolutePath, true) reportErrorf("error waiting for algod: %v", err) } close(done) @@ -363,7 +362,7 @@ func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, ou func reportErrorf(format string, args ...interface{}) { fmt.Fprintf(os.Stderr, format, args...) - logging.Base().Fatalf(format, args...) + logging.Base().Warnf(format, args...) } func sendLogs() { From 11153e22cb35be6653016710277d66eb22ddff91 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Fri, 19 Mar 2021 13:54:02 -0400 Subject: [PATCH 106/215] copy GetPeerData()/SetPeerData() from txnsync branch --- network/wsNetwork.go | 24 ++++++++++++++++++++++++ network/wsPeer.go | 23 ++++++++++++++++++++++- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 3b3df0ce8f..e15bd8a8e3 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -201,6 +201,10 @@ type GossipNode interface { // SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. 
SubstituteGenesisID(rawURL string) string + + GetPeerData(peer Peer, key string) interface{} + + SetPeerData(peer Peer, key string, value interface{}) } // IncomingMessage represents a message arriving from some peer in our p2p network @@ -2039,6 +2043,26 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { } } +// GetPeerData returns the peer data associated with a particular key. +func (wn *WebsocketNetwork) GetPeerData(peer Peer, key string) interface{} { + switch p := peer.(type) { + case *wsPeer: + return p.getPeerData(key) + default: + return nil + } +} + +// SetPeerData sets the peer data associated with a particular key. +func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{}) { + switch p := peer.(type) { + case *wsPeer: + p.setPeerData(key, value) + default: + return + } +} + // NewWebsocketNetwork constructor for websockets based gossip network func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) { phonebook := MakePhonebook(config.ConnectionsRateLimitingCount, diff --git a/network/wsPeer.go b/network/wsPeer.go index ed51ec442c..5089be141e 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -209,6 +209,11 @@ type wsPeer struct { // throttledOutgoingConnection determines if this outgoing connection will be throttled bassed on it's // performance or not. Throttled connections are more likely to be short-lived connections. throttledOutgoingConnection bool + + // clientDataStore is a generic key/value store used to store client-side data entries associated with a particular peer. + clientDataStore map[string]interface{} + + clientDataStoreMu deadlock.Mutex } // HTTPPeer is what the opaque Peer might be. 
@@ -318,6 +323,7 @@ func (wp *wsPeer) init(config config.Local, sendBufferLength int) { atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano()) wp.responseChannels = make(map[uint64]chan *Response) wp.sendMessageTag = defaultSendMessageTags + wp.clientDataStore = make(map[string]interface{}) // processed is a channel that messageHandlerThread writes to // when it's done with one of our messages, so that we can queue @@ -789,6 +795,21 @@ func (wp *wsPeer) getAndRemoveResponseChannel(key uint64) (respChan chan *Respon defer wp.responseChannelsMutex.Unlock() respChan, found = wp.responseChannels[key] delete(wp.responseChannels, key) - return } + +func (wp *wsPeer) getPeerData(key string) interface{} { + wp.clientDataStoreMu.Lock() + defer wp.clientDataStoreMu.Unlock() + return wp.clientDataStore[key] +} + +func (wp *wsPeer) setPeerData(key string, value interface{}) { + wp.clientDataStoreMu.Lock() + defer wp.clientDataStoreMu.Unlock() + if value == nil { + delete(wp.clientDataStore, key) + } else { + wp.clientDataStore[key] = value + } +} From a88498024d91384c2cc2d5387f8fae1583b40c0c Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Fri, 19 Mar 2021 14:53:14 -0400 Subject: [PATCH 107/215] comments. basic unit test of SetPeerData()/GetPeerData() --- network/wsNetwork.go | 3 +++ network/wsNetwork_test.go | 36 ++++++++++++++++++++++++++++++++++++ network/wsPeer.go | 2 ++ 3 files changed, 41 insertions(+) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index e15bd8a8e3..ce07d24457 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -202,8 +202,11 @@ type GossipNode interface { // SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. SubstituteGenesisID(rawURL string) string + // GetPeerData returns a value stored by SetPeerData GetPeerData(peer Peer, key string) interface{} + // SetPeerData attaches a piece of data to a peer. 
+ // Other services inside go-algorand may attach data to a peer that gets garbage collected when the peer is closed. SetPeerData(peer Peer, key string, value interface{}) } diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 3464ee750c..78b2f1471c 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -289,6 +289,42 @@ func TestWebsocketNetworkUnicast(t *testing.T) { } } +// Like a basic test, but really we just want to have SetPeerData()/GetPeerData() +func TestWebsocketPeerData(t *testing.T) { + netA := makeTestWebsocketNode(t) + netA.config.GossipFanout = 1 + netA.Start() + defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }() + netB := makeTestWebsocketNode(t) + netB.config.GossipFanout = 1 + addrA, postListen := netA.Address() + require.True(t, postListen) + t.Log(addrA) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.Start() + defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() + counter := newMessageCounter(t, 2) + netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) + + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") + + require.Equal(t, 1, len(netA.peers)) + require.Equal(t, 1, len(netA.GetPeers(PeersConnectedIn))) + peerB := netA.peers[0] + + require.Equal(t, nil, netA.GetPeerData(peerB, "not there")) + netA.SetPeerData(peerB, "foo", "bar") + require.Equal(t, "bar", netA.GetPeerData(peerB, "foo")) + netA.SetPeerData(peerB, "foo", "qux") + require.Equal(t, "qux", netA.GetPeerData(peerB, "foo")) + netA.SetPeerData(peerB, "foo", nil) + require.Equal(t, nil, netA.GetPeerData(peerB, "foo")) +} + // Set up two nodes, test that a.Broadcast is received by B, when B has no address. 
func TestWebsocketNetworkNoAddress(t *testing.T) { netA := makeTestWebsocketNode(t) diff --git a/network/wsPeer.go b/network/wsPeer.go index 5089be141e..27e109e44e 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -211,8 +211,10 @@ type wsPeer struct { throttledOutgoingConnection bool // clientDataStore is a generic key/value store used to store client-side data entries associated with a particular peer. + // Locked by clientDataStoreMu. clientDataStore map[string]interface{} + // clientDataStoreMu synchronizes access to clientDataStore clientDataStoreMu deadlock.Mutex } From 658c250d17fe28e6a1c2d544a6ddad58748c804a Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Fri, 19 Mar 2021 15:16:47 -0400 Subject: [PATCH 108/215] add go-swagger response annotations --- daemon/kmd/lib/kmdapi/responses.go | 188 +++++++++++++++++++++-------- 1 file changed, 140 insertions(+), 48 deletions(-) diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go index 8dd5e6c29d..db86456109 100644 --- a/daemon/kmd/lib/kmdapi/responses.go +++ b/daemon/kmd/lib/kmdapi/responses.go @@ -49,161 +49,253 @@ func (r APIV1ResponseEnvelope) GetError() error { // VersionsResponse is the response to `GET /versions` // friendly:VersionsResponse +// swagger:response VersionsResponse type VersionsResponse struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - Versions []string `json:"versions"` + //in: body + Body struct{ + _struct struct{} `codec:",omitempty,omitemptyarray"` + Versions []string `json:"versions"` + } + } // APIV1GETWalletsResponse is the response to `GET /v1/wallets` // friendly:ListWalletsResponse +// swagger:response ListWalletsResponse type APIV1GETWalletsResponse struct { - APIV1ResponseEnvelope - Wallets []APIV1Wallet `json:"wallets"` + // in: body + Body struct { + APIV1ResponseEnvelope + Wallets []APIV1Wallet `json:"wallets"` + } } // APIV1POSTWalletResponse is the response to `POST /v1/wallet` // 
friendly:CreateWalletResponse +// swagger:response CreateWalletResponse type APIV1POSTWalletResponse struct { - APIV1ResponseEnvelope - Wallet APIV1Wallet `json:"wallet"` + // in: body + Body struct{ + APIV1ResponseEnvelope + Wallet APIV1Wallet `json:"wallet"` + } + } // APIV1POSTWalletInitResponse is the response to `POST /v1/wallet/init` // friendly:InitWalletHandleTokenResponse +// swagger:response InitWalletHandleTokenResponse type APIV1POSTWalletInitResponse struct { - APIV1ResponseEnvelope - WalletHandleToken string `json:"wallet_handle_token"` + //in: body + Body struct{ + APIV1ResponseEnvelope + WalletHandleToken string `json:"wallet_handle_token"` + } + } // APIV1POSTWalletReleaseResponse is the response to `POST /v1/wallet/release` // friendly:ReleaseWalletHandleTokenResponse +// swagger:response ReleaseWalletHandleTokenResponse type APIV1POSTWalletReleaseResponse struct { - APIV1ResponseEnvelope + //in: body + Body APIV1ResponseEnvelope } // APIV1POSTWalletRenewResponse is the response to `POST /v1/wallet/renew` // friendly:RenewWalletHandleTokenResponse +// swagger:response RenewWalletHandleTokenResponse type APIV1POSTWalletRenewResponse struct { - APIV1ResponseEnvelope - WalletHandle APIV1WalletHandle `json:"wallet_handle"` + //in:body + Body struct{ + APIV1ResponseEnvelope + WalletHandle APIV1WalletHandle `json:"wallet_handle"` + } + } // APIV1POSTWalletRenameResponse is the response to `POST /v1/wallet/rename` // friendly:RenameWalletResponse +// swagger:response RenameWalletResponse type APIV1POSTWalletRenameResponse struct { - APIV1ResponseEnvelope - Wallet APIV1Wallet `json:"wallet"` + //in:body + Body struct{ + APIV1ResponseEnvelope + Wallet APIV1Wallet `json:"wallet"` + } + } // APIV1POSTWalletInfoResponse is the response to `POST /v1/wallet/info` // friendly:WalletInfoResponse +// swagger:response WalletInfoResponse type APIV1POSTWalletInfoResponse struct { - APIV1ResponseEnvelope - WalletHandle APIV1WalletHandle `json:"wallet_handle"` + 
//in:body + Body struct{ + APIV1ResponseEnvelope + WalletHandle APIV1WalletHandle `json:"wallet_handle"` + } + } // APIV1POSTMasterKeyExportResponse is the reponse to `POST /v1/master-key/export` // friendly:ExportMasterKeyResponse +// swagger:response ExportMasterKeyResponse type APIV1POSTMasterKeyExportResponse struct { - APIV1ResponseEnvelope - MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` + //in: body + Body struct{ + APIV1ResponseEnvelope + MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` + } + } // APIV1POSTKeyImportResponse is the repsonse to `POST /v1/key/import` // friendly:ImportKeyResponse +// swagger:response ImportKeyResponse type APIV1POSTKeyImportResponse struct { - APIV1ResponseEnvelope - Address string `json:"address"` + //in: body + Body struct{ + APIV1ResponseEnvelope + Address string `json:"address"` + } + } // APIV1POSTKeyExportResponse is the reponse to `POST /v1/key/export` // friendly:ExportKeyResponse +// swagger:response ExportKeyResponse type APIV1POSTKeyExportResponse struct { - APIV1ResponseEnvelope - PrivateKey APIV1PrivateKey `json:"private_key"` + //in: body + Body struct{ + APIV1ResponseEnvelope + PrivateKey APIV1PrivateKey `json:"private_key"` + } } // APIV1POSTKeyResponse is the response to `POST /v1/key` // friendly:GenerateKeyResponse +// swagger:response GenerateKeyResponse type APIV1POSTKeyResponse struct { - APIV1ResponseEnvelope - Address string `json:"address"` + //in: body + Body struct{ + APIV1ResponseEnvelope + Address string `json:"address"` + } + } // APIV1DELETEKeyResponse is the response to `DELETE /v1/key` // friendly:DeleteKeyResponse +// swagger:response DeleteKeyResponse type APIV1DELETEKeyResponse struct { - APIV1ResponseEnvelope + //in: body + Body APIV1ResponseEnvelope } // APIV1POSTKeyListResponse is the response to `POST /v1/key/list` // friendly:ListKeysResponse +// swagger:response ListKeysResponse type APIV1POSTKeyListResponse struct { - 
APIV1ResponseEnvelope - Addresses []string `json:"addresses"` + //in: body + Body struct{ + APIV1ResponseEnvelope + Addresses []string `json:"addresses"` + } } // APIV1POSTTransactionSignResponse is the repsonse to `POST /v1/transaction/sign` // friendly:SignTransactionResponse +// swagger:response SignTransactionResponse type APIV1POSTTransactionSignResponse struct { - APIV1ResponseEnvelope + //in: body + Body struct{ + APIV1ResponseEnvelope + // swagger:strfmt byte + SignedTransaction []byte `json:"signed_transaction"` + } - // swagger:strfmt byte - SignedTransaction []byte `json:"signed_transaction"` } // APIV1POSTProgramSignResponse is the repsonse to `POST /v1/data/sign` // friendly:SignProgramResponse +// swagger:response SignProgramResponse type APIV1POSTProgramSignResponse struct { - APIV1ResponseEnvelope + //in: body + Body struct{ + APIV1ResponseEnvelope + // swagger:strfmt byte + Signature []byte `json:"sig"` + } - // swagger:strfmt byte - Signature []byte `json:"sig"` } // APIV1POSTMultisigListResponse is the response to `POST /v1/multisig/list` // friendly:ListMultisigResponse +// swagger:response ListMultisigResponse type APIV1POSTMultisigListResponse struct { - APIV1ResponseEnvelope - Addresses []string `json:"addresses"` + //in: body + Body struct{ + APIV1ResponseEnvelope + Addresses []string `json:"addresses"` + } } // APIV1POSTMultisigImportResponse is the response to `POST /v1/multisig/import` // friendly:ImportMultisigResponse +// swagger:response ImportMultisigResponse type APIV1POSTMultisigImportResponse struct { - APIV1ResponseEnvelope - Address string `json:"address"` + //in: body + Body struct{ + APIV1ResponseEnvelope + Address string `json:"address"` + } + } // APIV1POSTMultisigExportResponse is the response to `POST /v1/multisig/export` // friendly:ExportMultisigResponse +// swagger:response ExportMultisigResponse type APIV1POSTMultisigExportResponse struct { - APIV1ResponseEnvelope - Version uint8 `json:"multisig_version"` - Threshold 
uint8 `json:"threshold"` - PKs []APIV1PublicKey `json:"pks"` + //in: body + Body struct{ + APIV1ResponseEnvelope + Version uint8 `json:"multisig_version"` + Threshold uint8 `json:"threshold"` + PKs []APIV1PublicKey `json:"pks"` + } + } // APIV1DELETEMultisigResponse is the response to POST /v1/multisig/delete` // friendly:DeleteMultisigResponse +// swagger:response DeleteMultisigResponse type APIV1DELETEMultisigResponse struct { - APIV1ResponseEnvelope + //in: body + Body APIV1ResponseEnvelope } // APIV1POSTMultisigTransactionSignResponse is the response to `POST /v1/multisig/sign` // friendly:SignMultisigResponse +// swagger:response SignMultisigResponse type APIV1POSTMultisigTransactionSignResponse struct { - APIV1ResponseEnvelope + //in: body + Body struct{ + APIV1ResponseEnvelope + // swagger:strfmt byte + Multisig []byte `json:"multisig"` + } - // swagger:strfmt byte - Multisig []byte `json:"multisig"` } // APIV1POSTMultisigProgramSignResponse is the response to `POST /v1/multisig/signdata` // friendly:SignProgramMultisigResponse +// swagger:response SignProgramMultisigResponse type APIV1POSTMultisigProgramSignResponse struct { - APIV1ResponseEnvelope - - // swagger:strfmt byte - Multisig []byte `json:"multisig"` + //in: body + Body struct{ + APIV1ResponseEnvelope + // swagger:strfmt byte + Multisig []byte `json:"multisig"` + } } From 582e458048eeebe7acf94d0bbb42947cebc6f7db Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Fri, 19 Mar 2021 16:15:43 -0400 Subject: [PATCH 109/215] wrap api response in swagger response --- daemon/kmd/lib/kmdapi/responses.go | 296 ++++++++++++++++------------- 1 file changed, 168 insertions(+), 128 deletions(-) diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go index db86456109..6742777532 100644 --- a/daemon/kmd/lib/kmdapi/responses.go +++ b/daemon/kmd/lib/kmdapi/responses.go @@ -49,253 +49,293 @@ func (r APIV1ResponseEnvelope) GetError() error { // VersionsResponse is the response 
to `GET /versions` // friendly:VersionsResponse -// swagger:response VersionsResponse type VersionsResponse struct { - //in: body - Body struct{ - _struct struct{} `codec:",omitempty,omitemptyarray"` - Versions []string `json:"versions"` - } + _struct struct{} `codec:",omitempty,omitemptyarray"` + Versions []string `json:"versions"` +} +//swagger:response VersionsResponse +type _VersionsResponse struct { + //in:body + Body *VersionsResponse } // APIV1GETWalletsResponse is the response to `GET /v1/wallets` // friendly:ListWalletsResponse -// swagger:response ListWalletsResponse type APIV1GETWalletsResponse struct { - // in: body - Body struct { - APIV1ResponseEnvelope - Wallets []APIV1Wallet `json:"wallets"` - } + APIV1ResponseEnvelope + Wallets []APIV1Wallet `json:"wallets"` +} + +//swagger:response ListWalletsResponse +type ListWalletsResponse struct { + //in: body + Body *APIV1GETWalletsResponse } // APIV1POSTWalletResponse is the response to `POST /v1/wallet` // friendly:CreateWalletResponse -// swagger:response CreateWalletResponse type APIV1POSTWalletResponse struct { - // in: body - Body struct{ - APIV1ResponseEnvelope - Wallet APIV1Wallet `json:"wallet"` - } + APIV1ResponseEnvelope + Wallet APIV1Wallet `json:"wallet"` +} +//swagger:response CreateWalletResponse +type CreateWalletResponse struct { + // in:body + Body *APIV1POSTWalletResponse } // APIV1POSTWalletInitResponse is the response to `POST /v1/wallet/init` // friendly:InitWalletHandleTokenResponse -// swagger:response InitWalletHandleTokenResponse type APIV1POSTWalletInitResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - WalletHandleToken string `json:"wallet_handle_token"` - } + APIV1ResponseEnvelope + WalletHandleToken string `json:"wallet_handle_token"` +} +//swagger:response InitWalletHandleTokenResponse +type InitWalletHandleTokenResponse struct { + // in:body + Body *APIV1POSTWalletInitResponse } // APIV1POSTWalletReleaseResponse is the response to `POST /v1/wallet/release` 
// friendly:ReleaseWalletHandleTokenResponse -// swagger:response ReleaseWalletHandleTokenResponse type APIV1POSTWalletReleaseResponse struct { - //in: body - Body APIV1ResponseEnvelope + APIV1ResponseEnvelope +} + +//swagger:response ReleaseWalletHandleTokenResponse +type ReleaseWalletHandleTokenResponse struct { + // in:body + Body *APIV1POSTWalletReleaseResponse } // APIV1POSTWalletRenewResponse is the response to `POST /v1/wallet/renew` // friendly:RenewWalletHandleTokenResponse -// swagger:response RenewWalletHandleTokenResponse type APIV1POSTWalletRenewResponse struct { - //in:body - Body struct{ - APIV1ResponseEnvelope - WalletHandle APIV1WalletHandle `json:"wallet_handle"` - } + APIV1ResponseEnvelope + WalletHandle APIV1WalletHandle `json:"wallet_handle"` +} +//swagger:response RenewWalletHandleTokenResponse +type RenewWalletHandleTokenResponse struct { + // in:body + Body *APIV1POSTWalletRenewResponse } // APIV1POSTWalletRenameResponse is the response to `POST /v1/wallet/rename` // friendly:RenameWalletResponse -// swagger:response RenameWalletResponse type APIV1POSTWalletRenameResponse struct { - //in:body - Body struct{ - APIV1ResponseEnvelope - Wallet APIV1Wallet `json:"wallet"` - } + APIV1ResponseEnvelope + Wallet APIV1Wallet `json:"wallet"` +} +//swagger:response RenameWalletResponse +type RenameWalletResponse struct { + // in:body + Body *APIV1POSTWalletRenameResponse } // APIV1POSTWalletInfoResponse is the response to `POST /v1/wallet/info` // friendly:WalletInfoResponse -// swagger:response WalletInfoResponse type APIV1POSTWalletInfoResponse struct { - //in:body - Body struct{ - APIV1ResponseEnvelope - WalletHandle APIV1WalletHandle `json:"wallet_handle"` - } + APIV1ResponseEnvelope + WalletHandle APIV1WalletHandle `json:"wallet_handle"` +} +//swagger:response WalletInfoResponse +type WalletInfoResponse struct { + // in:body + Body *APIV1POSTWalletInfoResponse } // APIV1POSTMasterKeyExportResponse is the reponse to `POST /v1/master-key/export` // 
friendly:ExportMasterKeyResponse -// swagger:response ExportMasterKeyResponse type APIV1POSTMasterKeyExportResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` - } + APIV1ResponseEnvelope + MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` +} +//swagger:response ExportMasterKeyResponse +type ExportMasterKeyResponse struct { + // in:body + Body *APIV1POSTMasterKeyExportResponse } // APIV1POSTKeyImportResponse is the repsonse to `POST /v1/key/import` // friendly:ImportKeyResponse -// swagger:response ImportKeyResponse type APIV1POSTKeyImportResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - Address string `json:"address"` - } + APIV1ResponseEnvelope + Address string `json:"address"` +} +//swagger:response ImportKeyResponse +type ImportKeyResponse struct { + // in:body + Body *APIV1POSTKeyImportResponse } // APIV1POSTKeyExportResponse is the reponse to `POST /v1/key/export` // friendly:ExportKeyResponse -// swagger:response ExportKeyResponse type APIV1POSTKeyExportResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - PrivateKey APIV1PrivateKey `json:"private_key"` - } + APIV1ResponseEnvelope + PrivateKey APIV1PrivateKey `json:"private_key"` +} + +//swagger:response ExportKeyResponse +type ExportKeyResponse struct { + // in:body + Body *ExportKeyResponse } // APIV1POSTKeyResponse is the response to `POST /v1/key` // friendly:GenerateKeyResponse -// swagger:response GenerateKeyResponse type APIV1POSTKeyResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - Address string `json:"address"` - } + APIV1ResponseEnvelope + Address string `json:"address"` +} +//swagger:response GenerateKeyResponse +type GenerateKeyResponse struct { + // in:body + Body *APIV1POSTKeyResponse } // APIV1DELETEKeyResponse is the response to `DELETE /v1/key` // friendly:DeleteKeyResponse -// swagger:response DeleteKeyResponse 
type APIV1DELETEKeyResponse struct { - //in: body - Body APIV1ResponseEnvelope + APIV1ResponseEnvelope +} + +//swagger:response DeleteKeyResponse +type DeleteKeyResponse struct { + // in:body + Body *APIV1DELETEKeyResponse } // APIV1POSTKeyListResponse is the response to `POST /v1/key/list` // friendly:ListKeysResponse -// swagger:response ListKeysResponse type APIV1POSTKeyListResponse struct { + APIV1ResponseEnvelope + Addresses []string `json:"addresses"` +} + +//swagger:response ListKeysResponse +type ListKeysResponse struct { //in: body - Body struct{ - APIV1ResponseEnvelope - Addresses []string `json:"addresses"` - } + Body *APIV1POSTKeyListResponse } // APIV1POSTTransactionSignResponse is the repsonse to `POST /v1/transaction/sign` // friendly:SignTransactionResponse -// swagger:response SignTransactionResponse type APIV1POSTTransactionSignResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - // swagger:strfmt byte - SignedTransaction []byte `json:"signed_transaction"` - } + APIV1ResponseEnvelope + + // swagger:strfmt byte + SignedTransaction []byte `json:"signed_transaction"` +} +//swagger:response SignTransactionResponse +type SignTransactionResponse struct { + // in:body + Body *APIV1POSTTransactionSignResponse } // APIV1POSTProgramSignResponse is the repsonse to `POST /v1/data/sign` // friendly:SignProgramResponse -// swagger:response SignProgramResponse type APIV1POSTProgramSignResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - // swagger:strfmt byte - Signature []byte `json:"sig"` - } + APIV1ResponseEnvelope + // swagger:strfmt byte + Signature []byte `json:"sig"` +} + +//swagger:response SignProgramResponse +type SignProgramResponse struct { + // in:body + Body *SignProgramResponse } // APIV1POSTMultisigListResponse is the response to `POST /v1/multisig/list` // friendly:ListMultisigResponse -// swagger:response ListMultisigResponse type APIV1POSTMultisigListResponse struct { - //in: body - Body struct{ - 
APIV1ResponseEnvelope - Addresses []string `json:"addresses"` - } + APIV1ResponseEnvelope + Addresses []string `json:"addresses"` +} + +//swagger:response ListMultisigResponse +type ListMultisigResponse struct { + // in:body + Body *ListMultisigResponse } // APIV1POSTMultisigImportResponse is the response to `POST /v1/multisig/import` // friendly:ImportMultisigResponse -// swagger:response ImportMultisigResponse type APIV1POSTMultisigImportResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - Address string `json:"address"` - } + APIV1ResponseEnvelope + Address string `json:"address"` +} +//swagger:response ImportMultisigResponse +type ImportMultisigResponse struct { + // in:body + Body *ImportMultisigResponse } // APIV1POSTMultisigExportResponse is the response to `POST /v1/multisig/export` // friendly:ExportMultisigResponse -// swagger:response ExportMultisigResponse type APIV1POSTMultisigExportResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - Version uint8 `json:"multisig_version"` - Threshold uint8 `json:"threshold"` - PKs []APIV1PublicKey `json:"pks"` - } + APIV1ResponseEnvelope + Version uint8 `json:"multisig_version"` + Threshold uint8 `json:"threshold"` + PKs []APIV1PublicKey `json:"pks"` +} +//swagger:response ExportMultisigResponse +type ExportMultisigResponse struct { + // in:body + Body *APIV1POSTMultisigExportResponse } // APIV1DELETEMultisigResponse is the response to POST /v1/multisig/delete` // friendly:DeleteMultisigResponse -// swagger:response DeleteMultisigResponse type APIV1DELETEMultisigResponse struct { - //in: body - Body APIV1ResponseEnvelope + APIV1ResponseEnvelope +} + +//swagger:response DeleteMultisigResponse +type DeleteMultisigResponse struct { + //in:body + Body *APIV1DELETEMultisigResponse } // APIV1POSTMultisigTransactionSignResponse is the response to `POST /v1/multisig/sign` // friendly:SignMultisigResponse -// swagger:response SignMultisigResponse type 
APIV1POSTMultisigTransactionSignResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - // swagger:strfmt byte - Multisig []byte `json:"multisig"` - } + APIV1ResponseEnvelope + // swagger:strfmt byte + Multisig []byte `json:"multisig"` +} + +//swagger:response SignMultisigResponse +type SignMultisigResponse struct { + // in:body + Body *APIV1POSTMultisigTransactionSignResponse } // APIV1POSTMultisigProgramSignResponse is the response to `POST /v1/multisig/signdata` // friendly:SignProgramMultisigResponse -// swagger:response SignProgramMultisigResponse type APIV1POSTMultisigProgramSignResponse struct { - //in: body - Body struct{ - APIV1ResponseEnvelope - // swagger:strfmt byte - Multisig []byte `json:"multisig"` - } + APIV1ResponseEnvelope + + // swagger:strfmt byte + Multisig []byte `json:"multisig"` +} + +//swagger:response SignProgramMultisigResponse +type SignProgramMultisigResponse struct { + // in:body + Body *APIV1POSTMultisigProgramSignResponse } From 15ef866eeec7eef8d9c5d1ce8c9cb492a7bc78df Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 19 Mar 2021 16:26:29 -0400 Subject: [PATCH 110/215] Avoid compact cert validation on non-validate/non-generate path. 
--- ledger/eval.go | 33 +++++++++++++++++++++++---------- 1 file changed, 23 insertions(+), 10 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index 5f2e257137..2ea055e11a 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -304,7 +304,7 @@ func (cs *roundCowState) ConsensusParams() config.ConsensusParams { return cs.proto } -func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.CompactCertType, cert compactcert.Cert, atRound basics.Round) error { +func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.CompactCertType, cert compactcert.Cert, atRound basics.Round, validate bool) error { if certType != protocol.CompactCertBasic { return fmt.Errorf("compact cert type %d not supported", certType) } @@ -317,15 +317,18 @@ func (cs *roundCowState) compactCert(certRnd basics.Round, certType protocol.Com } proto := config.Consensus[certHdr.CurrentProtocol] - votersRnd := certRnd.SubSaturate(basics.Round(proto.CompactCertRounds)) - votersHdr, err := cs.blockHdr(votersRnd) - if err != nil { - return err - } - err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) - if err != nil { - return err + if validate { + votersRnd := certRnd.SubSaturate(basics.Round(proto.CompactCertRounds)) + votersHdr, err := cs.blockHdr(votersRnd) + if err != nil { + return err + } + + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + if err != nil { + return err + } } cs.setCompactCertNext(certRnd + basics.Round(proto.CompactCertRounds)) @@ -801,6 +804,14 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * } } + // in case of a CompactCertTx transaction, we want to validate it only in validate or generate mode. This will deviate the cow's CompactCertNext depending of + // whether we're in validate/generate mode or not, however - given that this variable in only being used in these modes, it would be safe. 
+ if (eval.validate || eval.generate) && txn.Txn.Type == protocol.CompactCertTx { + if err := cow.compactCert(txn.Txn.CertRound, txn.Txn.CertType, txn.Txn.Cert, txn.Txn.Header.FirstValid, eval.validate); err != nil { + return err + } + } + spec := transactions.SpecialAddresses{ FeeSink: eval.block.BlockHeader.FeeSink, RewardsPool: eval.block.BlockHeader.RewardsPool, @@ -928,7 +939,9 @@ func applyTransaction(tx transactions.Transaction, balances *roundCowState, eval err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, balances, &ad, evalParams, ctr) case protocol.CompactCertTx: - err = balances.compactCert(tx.CertRound, tx.CertType, tx.Cert, tx.Header.FirstValid) + // don't do anything in the case of compact certificate transaction. This transaction type is explicitly handled in transaction(), since + // we want to conduct the testing of it only in the case of a validation or generation. + // Note that this means that the cow's CompactCertNext field would not be updated unless we're in either generate or validate mode. default: err = fmt.Errorf("Unknown transaction type %v", tx.Type) From f0c93c1290aeeffc4e1bfa5b7b02e116d603075f Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 19 Mar 2021 16:36:32 -0400 Subject: [PATCH 111/215] Improve comments. --- ledger/eval.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/ledger/eval.go b/ledger/eval.go index 2ea055e11a..62dccce87e 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -804,8 +804,11 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * } } - // in case of a CompactCertTx transaction, we want to validate it only in validate or generate mode. This will deviate the cow's CompactCertNext depending of + // in case of a CompactCertTx transaction, we want to "apply" it only in validate or generate mode. 
This will deviate the cow's CompactCertNext depending of // whether we're in validate/generate mode or not, however - given that this variable in only being used in these modes, it would be safe. + // The reason for making this into an exception is that during initialization time, the accounts update is "converting" the recent 320 blocks into deltas to + // be stored in memory. These deltas don't care about the compact certificate, and so we can improve the node load time. Additionally, it save us from + // performing the validation during catchup, which is another performance boost. if (eval.validate || eval.generate) && txn.Txn.Type == protocol.CompactCertTx { if err := cow.compactCert(txn.Txn.CertRound, txn.Txn.CertType, txn.Txn.Cert, txn.Txn.Header.FirstValid, eval.validate); err != nil { return err From f90b9e2e98bc9cf9e5ca5b74fc97db67dce450e9 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Fri, 19 Mar 2021 17:00:33 -0400 Subject: [PATCH 112/215] copy byteslices that will be modified by opcode --- data/transactions/logic/eval.go | 9 +++++++-- data/transactions/logic/eval_test.go | 15 ++++++++++++++- 2 files changed, 21 insertions(+), 3 deletions(-) diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 7809dd4ce8..e3b24ec43c 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -1924,11 +1924,14 @@ func opSetBit(cx *evalContext) { // we're thinking of the bits in the byte itself as // being big endian. So this looks "reversed" mask := byte(0x80) >> bitIdx + // Copy to avoid modifying shared slice + scratch := append([]byte(nil), target.Bytes...) 
if bit == uint64(1) { - target.Bytes[byteIdx] |= mask + scratch[byteIdx] |= mask } else { - target.Bytes[byteIdx] &^= mask + scratch[byteIdx] &^= mask } + cx.stack[pprev].Bytes = scratch } cx.stack = cx.stack[:prev] } @@ -1961,6 +1964,8 @@ func opSetByte(cx *evalContext) { cx.err = errors.New("setbyte index > byte length") return } + // Copy to avoid modifying shared slice + cx.stack[pprev].Bytes = append([]byte(nil), cx.stack[pprev].Bytes...) cx.stack[pprev].Bytes[cx.stack[prev].Uint] = byte(cx.stack[last].Uint) cx.stack = cx.stack[:prev] } diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 1fd7e64950..2efe54a297 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -3900,6 +3900,14 @@ func TestBits(t *testing.T) { testAccepts(t, "byte 0x0000; int 15; int 1; setbit; byte 0x0001; ==", 3) testAccepts(t, "int 0x0000; int 3; int 1; setbit; int 0x0008; ==", 3) testAccepts(t, "int 0x0000; int 12; int 1; setbit; int 0x1000; ==", 3) + + // These test that setbyte is not modifying a shared value. + // Since neither bytec nor dup copies, the first test is + // insufficient, the setbit changes the original constant (if + // it fails to copy). + testAccepts(t, "byte 0xfffff0; dup; int 21; int 1; setbit; byte 0xfffff4; ==; pop; byte 0xfffff0; ==", 3) + testAccepts(t, "byte 0xffff; byte 0xf0; concat; dup; int 21; int 1; setbit; byte 0xfffff4; ==; pop; byte 0xfffff0; ==", 3) + } func TestBytes(t *testing.T) { @@ -3914,8 +3922,13 @@ func TestBytes(t *testing.T) { testPanics(t, `byte "john"; int 4; getbyte; int 1; ==`, 3) // past end testAccepts(t, `byte "john"; int 2; int 105; setbyte; byte "join"; ==`, 3) - // dup makes copies, modifying one does not change the other + + // These test that setbyte is not modifying a shared value. + // Since neither bytec nor dup copies, the first test is + // insufficient, the setbyte changes the original constant (if + // it fails to copy). 
testAccepts(t, `byte "john"; dup; int 2; int 105; setbyte; pop; byte "john"; ==`, 3) + testAccepts(t, `byte "jo"; byte "hn"; concat; dup; int 2; int 105; setbyte; pop; byte "john"; ==`, 3) } func TestSwap(t *testing.T) { From f071adfda6adec10c2eaf6292878912aab9f2ec3 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 19 Mar 2021 21:47:33 -0400 Subject: [PATCH 113/215] update test & fix bug. --- ledger/acctupdates_test.go | 14 ++++++------ ledger/appcow_test.go | 2 +- ledger/cow.go | 4 ++-- ledger/ledgercore/statedelta.go | 3 ++- ledger/ledgercore/statedelta_test.go | 2 +- .../features/compactcert/compactcert_test.go | 22 +++++++++++++++++++ 6 files changed, 35 insertions(+), 12 deletions(-) diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 4731cc6614..3e9fb05702 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -372,7 +372,7 @@ func TestAcctUpdates(t *testing.T) { blk.RewardsLevel = rewardLevel blk.CurrentProtocol = protocol.ConsensusCurrentVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) delta.Accts.MergeAccounts(updates) delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables) au.newBlock(blk, delta) @@ -455,7 +455,7 @@ func TestAcctUpdatesFastUpdates(t *testing.T) { blk.RewardsLevel = rewardLevel blk.CurrentProtocol = protocol.ConsensusCurrentVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) delta.Accts.MergeAccounts(updates) au.newBlock(blk, delta) accts = append(accts, totals) @@ -544,7 +544,7 @@ func BenchmarkBalancesChanges(b *testing.B) { blk.RewardsLevel = rewardLevel blk.CurrentProtocol = protocolVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) 
delta.Accts.MergeAccounts(updates) au.newBlock(blk, delta) accts = append(accts, totals) @@ -671,7 +671,7 @@ func TestLargeAccountCountCatchpointGeneration(t *testing.T) { blk.RewardsLevel = rewardLevel blk.CurrentProtocol = testProtocolVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) delta.Accts.MergeAccounts(updates) au.newBlock(blk, delta) accts = append(accts, totals) @@ -832,7 +832,7 @@ func TestAcctUpdatesUpdatesCorrectness(t *testing.T) { blk.RewardsLevel = rewardLevel blk.CurrentProtocol = testProtocolVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, len(updates)) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, len(updates), 0) for addr, ad := range updates { delta.Accts.Upsert(addr, ad) } @@ -1498,7 +1498,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) { } blk.RewardsLevel = rewardLevel blk.CurrentProtocol = testProtocolVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) delta.Accts.MergeAccounts(updates) delta.Creatables = creatablesFromUpdates(base, updates, knownCreatables) au.newBlock(blk, delta) @@ -1607,7 +1607,7 @@ func TestCachesInitialization(t *testing.T) { blk.RewardsLevel = rewardLevel blk.CurrentProtocol = protocolVersion - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len()) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) delta.Accts.MergeAccounts(updates) ml.addMockBlock(blockEntry{block: blk}, delta) au.newBlock(blk, delta) diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go index 28a0cd6879..fce2491516 100644 --- a/ledger/appcow_test.go +++ b/ledger/appcow_test.go @@ -97,7 +97,7 @@ type modsData struct { func getCow(creatables []modsData) *roundCowState { cs := &roundCowState{ - mods: ledgercore.MakeStateDelta(&bookkeeping.BlockHeader{}, 0, 2), 
+ mods: ledgercore.MakeStateDelta(&bookkeeping.BlockHeader{}, 0, 2, 0), proto: config.Consensus[protocol.ConsensusCurrentVersion], } for _, e := range creatables { diff --git a/ledger/cow.go b/ledger/cow.go index be95ad3f55..c351bb0edc 100644 --- a/ledger/cow.go +++ b/ledger/cow.go @@ -68,7 +68,7 @@ func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, prevTimest lookupParent: b, commitParent: nil, proto: config.Consensus[hdr.CurrentProtocol], - mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint), + mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint, 0), sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta), } } @@ -200,7 +200,7 @@ func (cb *roundCowState) child() *roundCowState { lookupParent: cb, commitParent: cb, proto: cb.proto, - mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, 1), + mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, 1, cb.mods.CompactCertNext), sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta), } } diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 650f3a547a..9bff85b7cb 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -91,7 +91,7 @@ type AccountDeltas struct { } // MakeStateDelta creates a new instance of StateDelta -func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int) StateDelta { +func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, compactCertNext basics.Round) StateDelta { return StateDelta{ Accts: AccountDeltas{ accts: make([]basics.BalanceRecord, 0, hint*2), @@ -103,6 +103,7 @@ func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int) Hdr: hdr, PrevTimestamp: prevTimestamp, initialTransactionsCount: hint, + CompactCertNext: compactCertNext, } } diff --git a/ledger/ledgercore/statedelta_test.go b/ledger/ledgercore/statedelta_test.go index 222c53feb0..8f3b81d91a 100644 --- 
a/ledger/ledgercore/statedelta_test.go +++ b/ledger/ledgercore/statedelta_test.go @@ -98,7 +98,7 @@ func BenchmarkMakeStateDelta(b *testing.B) { b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - MakeStateDelta(nil, 0, hint) + MakeStateDelta(nil, 0, hint, 0) } } diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go index 690e5d7100..b2a4d6afca 100644 --- a/test/e2e-go/features/compactcert/compactcert_test.go +++ b/test/e2e-go/features/compactcert/compactcert_test.go @@ -46,6 +46,7 @@ func TestCompactCerts(t *testing.T) { consensusParams.CompactCertVotersLookback = 2 consensusParams.CompactCertWeightThreshold = (1 << 32) * 30 / 100 consensusParams.CompactCertSecKQ = 128 + consensusParams.AgreementFilterTimeoutPeriod0 = 500 * time.Millisecond configurableConsensus[consensusVersion] = consensusParams var fixture fixtures.RestClientFixture @@ -56,9 +57,30 @@ func TestCompactCerts(t *testing.T) { restClient, err := fixture.NC.AlgodClient() r.NoError(err) + node0Client := fixture.GetLibGoalClientForNamedNode("Node0") + node0Wallet, err := node0Client.GetUnencryptedWalletHandle() + r.NoError(err) + node0AccountList, err := node0Client.ListAddresses(node0Wallet) + r.NoError(err) + node0Account := node0AccountList[0] + + node1Client := fixture.GetLibGoalClientForNamedNode("Node1") + node1Wallet, err := node1Client.GetUnencryptedWalletHandle() + r.NoError(err) + node1AccountList, err := node1Client.ListAddresses(node1Wallet) + r.NoError(err) + node1Account := node1AccountList[0] + var lastCertBlock v1.Block libgoal := fixture.LibGoalClient for rnd := uint64(1); rnd <= consensusParams.CompactCertRounds*4; rnd++ { + // send a dummy payment transaction. 
+ minTxnFee, _, err := fixture.CurrentMinFeeAndBalance() + r.NoError(err) + + _, err = node0Client.SendPaymentFromUnencryptedWallet(node0Account, node1Account, minTxnFee, rnd, nil) + r.NoError(err) + fixture.WaitForRound(rnd, 30*time.Second) blk, err := libgoal.Block(rnd) r.NoError(err) From 60c2b0e0b882a4804eb8a67be425bf0b358c061d Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 19 Mar 2021 22:00:44 -0400 Subject: [PATCH 114/215] fix missing unit test --- ledger/txtail_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ledger/txtail_test.go b/ledger/txtail_test.go index efef1ffd0a..c314f3856a 100644 --- a/ledger/txtail_test.go +++ b/ledger/txtail_test.go @@ -58,7 +58,7 @@ func TestTxTailCheckdup(t *testing.T) { txleases := make(map[ledgercore.Txlease]basics.Round, 1) txleases[ledgercore.Txlease{Sender: basics.Address(crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(2)})), Lease: crypto.Hash([]byte{byte(rnd % 256), byte(rnd / 256), byte(3)})}] = rnd + leasevalidity - delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 1) + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, 1, 0) delta.Txids = txids delta.Txleases = txleases tail.newBlock(blk, delta) From 115ecf3fdc56b4b11efe6b5438df7184cde14e26 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 11:21:28 -0400 Subject: [PATCH 115/215] add syncronization to dumpLogger --- daemon/algod/deadlockLogger.go | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/daemon/algod/deadlockLogger.go b/daemon/algod/deadlockLogger.go index b7fdc38906..ffd5a38cd4 100644 --- a/daemon/algod/deadlockLogger.go +++ b/daemon/algod/deadlockLogger.go @@ -30,13 +30,29 @@ import ( type dumpLogger struct { logging.Logger *bytes.Buffer + bufferSync chan struct{} } func (logger *dumpLogger) dump() { - logger.Error(logger.String()) + logger.bufferSync <- struct{}{} + outBuffer := logger.String() + <-logger.bufferSync + logger.Error(outBuffer) } -var 
logger = dumpLogger{Logger: logging.Base(), Buffer: bytes.NewBuffer(make([]byte, 0))} +// we need to implement the io.Writer interface so that we can syncronize access to underlying buffer/ +func (logger *dumpLogger) Write(p []byte) (n int, err error) { + logger.bufferSync <- struct{}{} + n, err = logger.Buffer.Write(p) + <-logger.bufferSync + return +} + +var logger = &dumpLogger{ + Logger: logging.Base(), + Buffer: bytes.NewBuffer(make([]byte, 0)), + bufferSync: make(chan struct{}, 1), +} var deadlockPanic func() From 54ccfee275d2424f37e5a422d384479dfb99294b Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 12:54:24 -0400 Subject: [PATCH 116/215] disable few network function inlining to better understand failuire point. --- network/wsNetwork.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 3b3df0ce8f..5eab35be10 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -491,6 +491,7 @@ func (wn *WebsocketNetwork) Disconnect(node Peer) { } // Disconnect from a peer, probably due to protocol errors. +//go:noinline func (wn *WebsocketNetwork) disconnect(badnode Peer, reason disconnectReason) { if badnode == nil { return @@ -1522,6 +1523,7 @@ func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool { // checkExistingConnectionsNeedDisconnecting check to see if existing connection need to be dropped due to // performance issues and/or network being stalled. +//go:noinline func (wn *WebsocketNetwork) checkExistingConnectionsNeedDisconnecting() bool { // we already connected ( or connecting.. ) to GossipFanout peers. // get the actual peers. @@ -1565,6 +1567,7 @@ func (wn *WebsocketNetwork) checkExistingConnectionsNeedDisconnecting() bool { // checkNetworkAdvanceDisconnect is using the lastNetworkAdvance indicator to see if the network is currently "stuck". // if it's seems to be "stuck", a randomally picked peer would be disconnected. 
+//go:noinline func (wn *WebsocketNetwork) checkNetworkAdvanceDisconnect() bool { lastNetworkAdvance := wn.getLastNetworkAdvance() if time.Now().UTC().Sub(lastNetworkAdvance) < cliqueResolveInterval { @@ -1589,6 +1592,7 @@ func (wn *WebsocketNetwork) checkNetworkAdvanceDisconnect() bool { return true } +//go:noinline func (wn *WebsocketNetwork) getLastNetworkAdvance() time.Time { wn.lastNetworkAdvanceMu.Lock() defer wn.lastNetworkAdvanceMu.Unlock() @@ -1599,6 +1603,7 @@ func (wn *WebsocketNetwork) getLastNetworkAdvance() time.Time { // this is the only indication that we have that we haven't formed a clique, where all incoming messages // arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar // characteristics as with a watchdog timer. +//go:noinline func (wn *WebsocketNetwork) OnNetworkAdvance() { wn.lastNetworkAdvanceMu.Lock() defer wn.lastNetworkAdvanceMu.Unlock() From 7766a861d6a1c72c960f390f9fd4d48f49ef8db7 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 15:21:33 -0400 Subject: [PATCH 117/215] update response handlers. 
--- daemon/algod/api/server/v1/handlers/errors.go | 1 + daemon/algod/api/server/v1/handlers/handlers.go | 7 ++++++- daemon/algod/api/server/v2/errors.go | 1 + daemon/algod/api/server/v2/handlers.go | 7 ++++++- test/e2e-go/features/compactcert/compactcert_test.go | 2 +- 5 files changed, 15 insertions(+), 3 deletions(-) diff --git a/daemon/algod/api/server/v1/handlers/errors.go b/daemon/algod/api/server/v1/handlers/errors.go index 5ae037969c..c51443054d 100644 --- a/daemon/algod/api/server/v1/handlers/errors.go +++ b/daemon/algod/api/server/v1/handlers/errors.go @@ -32,6 +32,7 @@ var ( errFailedToGetAssetCreator = "failed to retrieve asset creator from the ledger" errFailedToGetAppCreator = "failed to retrieve app creator from the ledger" errAppDoesNotExist = "application does not exist" + errRequestedBlockRoundIsNotAvailable = "requested block for round %d is not available" errFailedRetrievingApp = "failed to retrieve application information" errFailedToParseAddress = "failed to parse the address" errFailedToParseTransaction = "failed to parse transaction" diff --git a/daemon/algod/api/server/v1/handlers/handlers.go b/daemon/algod/api/server/v1/handlers/handlers.go index 1e3b0f634d..3de04eef9e 100644 --- a/daemon/algod/api/server/v1/handlers/handlers.go +++ b/daemon/algod/api/server/v1/handlers/handlers.go @@ -1568,7 +1568,12 @@ func GetBlock(ctx lib.ReqContext, context echo.Context) { ledger := ctx.Node.Ledger() b, c, err := ledger.BlockCert(basics.Round(queryRound)) if err != nil { - lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log) + switch errt := err.(type) { + case ledgercore.ErrNoEntry: + lib.ErrorResponse(w, http.StatusNotFound, errt, fmt.Sprintf(errRequestedBlockRoundIsNotAvailable, queryRound), ctx.Log) + default: + lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log) + } return } diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go index 
bb1f8bbc16..c50a86ae24 100644 --- a/daemon/algod/api/server/v2/errors.go +++ b/daemon/algod/api/server/v2/errors.go @@ -34,6 +34,7 @@ var ( errTransactionNotFound = "could not find the transaction in the transaction pool or in the last 1000 confirmed rounds" errServiceShuttingDown = "operation aborted as server is shutting down" errRequestedRoundInUnsupportedRound = "requested round would reach only after the protocol upgrade which isn't supported" + errRequestedBlockRoundIsNotAvailable = "requested block for round %d is not available" errFailedToParseCatchpoint = "failed to parse catchpoint" errFailedToAbortCatchup = "failed to abort catchup : %v" errFailedToStartCatchup = "failed to start catchup : %v" diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index e3b3edba49..3293d1e0a9 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -165,7 +165,12 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge ledger := v2.Node.Ledger() block, _, err := ledger.BlockCert(basics.Round(round)) if err != nil { - return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) + switch errt := err.(type) { + case ledgercore.ErrNoEntry: + return notFound(ctx, errt, fmt.Sprintf(errRequestedBlockRoundIsNotAvailable, round), v2.Log) + default: + return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) + } } // Encoding wasn't working well without embedding "real" objects. 
diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go index b2a4d6afca..57ac4f57bc 100644 --- a/test/e2e-go/features/compactcert/compactcert_test.go +++ b/test/e2e-go/features/compactcert/compactcert_test.go @@ -83,7 +83,7 @@ func TestCompactCerts(t *testing.T) { fixture.WaitForRound(rnd, 30*time.Second) blk, err := libgoal.Block(rnd) - r.NoError(err) + r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) t.Logf("Round %d, block %v\n", rnd, blk) From 2ac40a6f08700d862b310e409adbc82b4339724a Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 21:53:26 -0400 Subject: [PATCH 118/215] make deadlockLogger thread-safe. --- daemon/algod/deadlockLogger.go | 81 ++++++++++++++++++++++++---------- daemon/algod/deadlock_test.go | 37 +++++++++++++++- 2 files changed, 92 insertions(+), 26 deletions(-) diff --git a/daemon/algod/deadlockLogger.go b/daemon/algod/deadlockLogger.go index b7fdc38906..313aa6661c 100644 --- a/daemon/algod/deadlockLogger.go +++ b/daemon/algod/deadlockLogger.go @@ -21,48 +21,81 @@ import ( "fmt" "os" "runtime" + "sync" "github.com/algorand/go-deadlock" "github.com/algorand/go-algorand/logging" ) -type dumpLogger struct { +type deadlockLogger struct { logging.Logger *bytes.Buffer + bufferSync chan struct{} + panic func() + reportDeadlock sync.Once } -func (logger *dumpLogger) dump() { - logger.Error(logger.String()) +// Panic is defined here just so we can emulate the usage of the deadlockLogger +func (logger *deadlockLogger) Panic() { + logger.Logger.Panic("potential deadlock detected") } -var logger = dumpLogger{Logger: logging.Base(), Buffer: bytes.NewBuffer(make([]byte, 0))} - -var deadlockPanic func() +// Write implements the io.Writer interface, ensuring that the write is syncronized. 
+func (logger *deadlockLogger) Write(p []byte) (n int, err error) { + logger.bufferSync <- struct{}{} + n, err = logger.Buffer.Write(p) + <-logger.bufferSync + return +} -func setupDeadlockLogger() { - deadlockPanic = func() { - logger.Panic("potential deadlock detected") +// captureCallstack captures the callstack and return a byte array of the output. +func (logger *deadlockLogger) captureCallstack() []byte { + // Capture all goroutine stacks + var buf []byte + bufferSize := 256 * 1024 + for { + buf = make([]byte, bufferSize) + if runtime.Stack(buf, true) < bufferSize { + break + } + bufferSize *= 2 } + return buf +} - deadlock.Opts.LogBuf = logger - deadlock.Opts.OnPotentialDeadlock = func() { +// onPotentialDeadlock is the handler to be used by the deadlock library. +func (logger *deadlockLogger) onPotentialDeadlock() { + // The deadlock reporting is done only once; this would prevent recursive deadlock issues. + // in practive, once we report the deadlock, we panic and abort anyway, so it won't be an issue. + logger.reportDeadlock.Do(func() { // Capture all goroutine stacks - var buf []byte - bufferSize := 256 * 1024 - for { - buf = make([]byte, bufferSize) - if runtime.Stack(buf, true) < bufferSize { - break - } - bufferSize *= 2 - } + buf := logger.captureCallstack() - // Run this code in a separate goroutine because it might grab locks. + logger.bufferSync <- struct{}{} + loggedString := logger.String() + <-logger.bufferSync + + fmt.Fprintln(os.Stderr, string(buf)) + + // logging the logged string to the logger has to happen in a separate go-routine, since the + // logger itself ( for instance, the CyclicLogWriter ) is using a mutex of it's own. 
go func() { - logger.dump() - fmt.Fprintln(os.Stderr, string(buf)) - deadlockPanic() + logger.Error(loggedString) + logger.panic() }() + }) +} + +func setupDeadlockLogger() *deadlockLogger { + logger := &deadlockLogger{ + Logger: logging.Base(), + Buffer: bytes.NewBuffer(make([]byte, 0)), + bufferSync: make(chan struct{}, 1), } + + logger.panic = logger.Panic + deadlock.Opts.LogBuf = logger + deadlock.Opts.OnPotentialDeadlock = logger.onPotentialDeadlock + return logger } diff --git a/daemon/algod/deadlock_test.go b/daemon/algod/deadlock_test.go index b38a161381..8df0baec54 100644 --- a/daemon/algod/deadlock_test.go +++ b/daemon/algod/deadlock_test.go @@ -35,10 +35,10 @@ func TestDeadlockLogging(t *testing.T) { logWriter := logging.MakeCyclicFileWriter(logFn, archiveFn, 65536, time.Hour) l.SetOutput(logWriter) - setupDeadlockLogger() + logger := setupDeadlockLogger() deadlockCh := make(chan struct{}) - deadlockPanic = func() { + logger.panic = func() { close(deadlockCh) } @@ -55,3 +55,36 @@ func TestDeadlockLogging(t *testing.T) { _ = <-deadlockCh } + +func TestDeadlockOnPotentialDeadlock(t *testing.T) { + logFn := fmt.Sprintf("/tmp/test.%s.%d.log", t.Name(), crypto.RandUint64()) + archiveFn := fmt.Sprintf("%s.archive", logFn) + + l := logging.Base() + logWriter := logging.MakeCyclicFileWriter(logFn, archiveFn, 65536, time.Hour) + l.SetOutput(logWriter) + + logger := setupDeadlockLogger() + + deadlockCh := make(chan struct{}) + logger.panic = func() { + close(deadlockCh) + } + + defer func() { + r := recover() + if r != nil { + fmt.Printf("Recovered: %v\n", r) + } + }() + + for linenum := 0; linenum < 10; linenum++ { + fmt.Fprintf(logger, "line %d", linenum) + } + logger.onPotentialDeadlock() + for linenum := 10; linenum < 20; linenum++ { + fmt.Fprintf(logger, "line %d", linenum) + } + + _ = <-deadlockCh +} From 7b38d3f5256ddc8a5fc79483d7c6e83245793457 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 22:00:32 -0400 Subject: [PATCH 119/215] 
rollback deadlockLogger changes. Moved to it's own PR. --- daemon/algod/deadlockLogger.go | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/daemon/algod/deadlockLogger.go b/daemon/algod/deadlockLogger.go index ffd5a38cd4..b7fdc38906 100644 --- a/daemon/algod/deadlockLogger.go +++ b/daemon/algod/deadlockLogger.go @@ -30,29 +30,13 @@ import ( type dumpLogger struct { logging.Logger *bytes.Buffer - bufferSync chan struct{} } func (logger *dumpLogger) dump() { - logger.bufferSync <- struct{}{} - outBuffer := logger.String() - <-logger.bufferSync - logger.Error(outBuffer) + logger.Error(logger.String()) } -// we need to implement the io.Writer interface so that we can syncronize access to underlying buffer/ -func (logger *dumpLogger) Write(p []byte) (n int, err error) { - logger.bufferSync <- struct{}{} - n, err = logger.Buffer.Write(p) - <-logger.bufferSync - return -} - -var logger = &dumpLogger{ - Logger: logging.Base(), - Buffer: bytes.NewBuffer(make([]byte, 0)), - bufferSync: make(chan struct{}, 1), -} +var logger = dumpLogger{Logger: logging.Base(), Buffer: bytes.NewBuffer(make([]byte, 0))} var deadlockPanic func() From e93625d48961989916dc10a8b3b1327729559fc2 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 22:08:01 -0400 Subject: [PATCH 120/215] rollback error code changes --- daemon/algod/api/server/v1/handlers/errors.go | 1 - daemon/algod/api/server/v1/handlers/handlers.go | 7 +------ daemon/algod/api/server/v2/errors.go | 1 - daemon/algod/api/server/v2/handlers.go | 7 +------ 4 files changed, 2 insertions(+), 14 deletions(-) diff --git a/daemon/algod/api/server/v1/handlers/errors.go b/daemon/algod/api/server/v1/handlers/errors.go index c51443054d..5ae037969c 100644 --- a/daemon/algod/api/server/v1/handlers/errors.go +++ b/daemon/algod/api/server/v1/handlers/errors.go @@ -32,7 +32,6 @@ var ( errFailedToGetAssetCreator = "failed to retrieve asset creator from the ledger" errFailedToGetAppCreator = "failed 
to retrieve app creator from the ledger" errAppDoesNotExist = "application does not exist" - errRequestedBlockRoundIsNotAvailable = "requested block for round %d is not available" errFailedRetrievingApp = "failed to retrieve application information" errFailedToParseAddress = "failed to parse the address" errFailedToParseTransaction = "failed to parse transaction" diff --git a/daemon/algod/api/server/v1/handlers/handlers.go b/daemon/algod/api/server/v1/handlers/handlers.go index 3de04eef9e..1e3b0f634d 100644 --- a/daemon/algod/api/server/v1/handlers/handlers.go +++ b/daemon/algod/api/server/v1/handlers/handlers.go @@ -1568,12 +1568,7 @@ func GetBlock(ctx lib.ReqContext, context echo.Context) { ledger := ctx.Node.Ledger() b, c, err := ledger.BlockCert(basics.Round(queryRound)) if err != nil { - switch errt := err.(type) { - case ledgercore.ErrNoEntry: - lib.ErrorResponse(w, http.StatusNotFound, errt, fmt.Sprintf(errRequestedBlockRoundIsNotAvailable, queryRound), ctx.Log) - default: - lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log) - } + lib.ErrorResponse(w, http.StatusInternalServerError, err, errFailedLookingUpLedger, ctx.Log) return } diff --git a/daemon/algod/api/server/v2/errors.go b/daemon/algod/api/server/v2/errors.go index c50a86ae24..bb1f8bbc16 100644 --- a/daemon/algod/api/server/v2/errors.go +++ b/daemon/algod/api/server/v2/errors.go @@ -34,7 +34,6 @@ var ( errTransactionNotFound = "could not find the transaction in the transaction pool or in the last 1000 confirmed rounds" errServiceShuttingDown = "operation aborted as server is shutting down" errRequestedRoundInUnsupportedRound = "requested round would reach only after the protocol upgrade which isn't supported" - errRequestedBlockRoundIsNotAvailable = "requested block for round %d is not available" errFailedToParseCatchpoint = "failed to parse catchpoint" errFailedToAbortCatchup = "failed to abort catchup : %v" errFailedToStartCatchup = "failed to start 
catchup : %v" diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index 3293d1e0a9..e3b3edba49 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -165,12 +165,7 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge ledger := v2.Node.Ledger() block, _, err := ledger.BlockCert(basics.Round(round)) if err != nil { - switch errt := err.(type) { - case ledgercore.ErrNoEntry: - return notFound(ctx, errt, fmt.Sprintf(errRequestedBlockRoundIsNotAvailable, round), v2.Log) - default: - return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) - } + return internalError(ctx, err, errFailedLookingUpLedger, v2.Log) } // Encoding wasn't working well without embedding "real" objects. From dc3c75cfe1af1a1fa9ad7c9f9bc1663f5b5ced48 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 22:09:30 -0400 Subject: [PATCH 121/215] rollback unwanted change --- test/e2e-go/features/compactcert/compactcert_test.go | 1 - 1 file changed, 1 deletion(-) diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go index 57ac4f57bc..02c96cf56e 100644 --- a/test/e2e-go/features/compactcert/compactcert_test.go +++ b/test/e2e-go/features/compactcert/compactcert_test.go @@ -46,7 +46,6 @@ func TestCompactCerts(t *testing.T) { consensusParams.CompactCertVotersLookback = 2 consensusParams.CompactCertWeightThreshold = (1 << 32) * 30 / 100 consensusParams.CompactCertSecKQ = 128 - consensusParams.AgreementFilterTimeoutPeriod0 = 500 * time.Millisecond configurableConsensus[consensusVersion] = consensusParams var fixture fixtures.RestClientFixture From 81538381598ff5bdb1136897e1bb989c950c2e23 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 22:10:18 -0400 Subject: [PATCH 122/215] rollback network change. 
--- network/wsNetwork.go | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 5eab35be10..ce07d24457 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -201,6 +201,13 @@ type GossipNode interface { // SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID. SubstituteGenesisID(rawURL string) string + + // GetPeerData returns a value stored by SetPeerData + GetPeerData(peer Peer, key string) interface{} + + // SetPeerData attaches a piece of data to a peer. + // Other services inside go-algorand may attach data to a peer that gets garbage collected when the peer is closed. + SetPeerData(peer Peer, key string, value interface{}) } // IncomingMessage represents a message arriving from some peer in our p2p network @@ -491,7 +498,6 @@ func (wn *WebsocketNetwork) Disconnect(node Peer) { } // Disconnect from a peer, probably due to protocol errors. -//go:noinline func (wn *WebsocketNetwork) disconnect(badnode Peer, reason disconnectReason) { if badnode == nil { return @@ -1523,7 +1529,6 @@ func (wn *WebsocketNetwork) checkNewConnectionsNeeded() bool { // checkExistingConnectionsNeedDisconnecting check to see if existing connection need to be dropped due to // performance issues and/or network being stalled. -//go:noinline func (wn *WebsocketNetwork) checkExistingConnectionsNeedDisconnecting() bool { // we already connected ( or connecting.. ) to GossipFanout peers. // get the actual peers. @@ -1567,7 +1572,6 @@ func (wn *WebsocketNetwork) checkExistingConnectionsNeedDisconnecting() bool { // checkNetworkAdvanceDisconnect is using the lastNetworkAdvance indicator to see if the network is currently "stuck". // if it's seems to be "stuck", a randomally picked peer would be disconnected. 
-//go:noinline func (wn *WebsocketNetwork) checkNetworkAdvanceDisconnect() bool { lastNetworkAdvance := wn.getLastNetworkAdvance() if time.Now().UTC().Sub(lastNetworkAdvance) < cliqueResolveInterval { @@ -1592,7 +1596,6 @@ func (wn *WebsocketNetwork) checkNetworkAdvanceDisconnect() bool { return true } -//go:noinline func (wn *WebsocketNetwork) getLastNetworkAdvance() time.Time { wn.lastNetworkAdvanceMu.Lock() defer wn.lastNetworkAdvanceMu.Unlock() @@ -1603,7 +1606,6 @@ func (wn *WebsocketNetwork) getLastNetworkAdvance() time.Time { // this is the only indication that we have that we haven't formed a clique, where all incoming messages // arrive very quickly, but might be missing some votes. The usage of this call is expected to have similar // characteristics as with a watchdog timer. -//go:noinline func (wn *WebsocketNetwork) OnNetworkAdvance() { wn.lastNetworkAdvanceMu.Lock() defer wn.lastNetworkAdvanceMu.Unlock() @@ -2044,6 +2046,26 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { } } +// GetPeerData returns the peer data associated with a particular key. +func (wn *WebsocketNetwork) GetPeerData(peer Peer, key string) interface{} { + switch p := peer.(type) { + case *wsPeer: + return p.getPeerData(key) + default: + return nil + } +} + +// SetPeerData sets the peer data associated with a particular key. 
+func (wn *WebsocketNetwork) SetPeerData(peer Peer, key string, value interface{}) { + switch p := peer.(type) { + case *wsPeer: + p.setPeerData(key, value) + default: + return + } +} + // NewWebsocketNetwork constructor for websockets based gossip network func NewWebsocketNetwork(log logging.Logger, config config.Local, phonebookAddresses []string, genesisID string, networkID protocol.NetworkID) (wn *WebsocketNetwork, err error) { phonebook := MakePhonebook(config.ConnectionsRateLimitingCount, From ddc325b5baf511e77a8406b0fc1c8416f3336f5d Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 20 Mar 2021 23:30:00 -0400 Subject: [PATCH 123/215] better error comment --- test/e2e-go/features/compactcert/compactcert_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go index 02c96cf56e..e36fe36a56 100644 --- a/test/e2e-go/features/compactcert/compactcert_test.go +++ b/test/e2e-go/features/compactcert/compactcert_test.go @@ -148,5 +148,5 @@ func TestCompactCerts(t *testing.T) { } } - r.True(lastCertBlock.Round == consensusParams.CompactCertRounds*3) + r.Equalf(consensusParams.CompactCertRounds*3, lastCertBlock.Round, "the expected last certificate block wasn't the one that was observed") } From c225bdc7ca4d4414014691dd6e6441af14e0f915 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sun, 21 Mar 2021 11:28:03 -0400 Subject: [PATCH 124/215] update per reviewer comments --- daemon/algod/deadlockLogger.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/algod/deadlockLogger.go b/daemon/algod/deadlockLogger.go index 313aa6661c..ed51395920 100644 --- a/daemon/algod/deadlockLogger.go +++ b/daemon/algod/deadlockLogger.go @@ -50,7 +50,7 @@ func (logger *deadlockLogger) Write(p []byte) (n int, err error) { } // captureCallstack captures the callstack and return a byte array of the output. 
-func (logger *deadlockLogger) captureCallstack() []byte { +func captureCallstack() []byte { // Capture all goroutine stacks var buf []byte bufferSize := 256 * 1024 @@ -70,7 +70,7 @@ func (logger *deadlockLogger) onPotentialDeadlock() { // in practive, once we report the deadlock, we panic and abort anyway, so it won't be an issue. logger.reportDeadlock.Do(func() { // Capture all goroutine stacks - buf := logger.captureCallstack() + buf := captureCallstack() logger.bufferSync <- struct{}{} loggedString := logger.String() From 2f86cf3f1bfb3938d91e46468cb68a0ca0ed9f31 Mon Sep 17 00:00:00 2001 From: algoidan Date: Mon, 22 Mar 2021 10:55:17 +0200 Subject: [PATCH 125/215] e2e tests can now run kmd with allow_unsafe_scrypt = true which will speed up tests. this will be used on ARM machines. --- test/scripts/e2e.sh | 17 ++++++++++++-- test/scripts/e2e_client_runner.py | 37 +++++++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index 2ec92c2b8e..3a075b99aa 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -45,6 +45,19 @@ TEST_RUN_ID=$(${SCRIPT_PATH}/testrunid.py) export TEMPDIR=${SRCROOT}/tmp/out/e2e/${TEST_RUN_ID} echo "Test output can be found in ${TEMPDIR}" + +# some ARM64 testing machines have memory issues which cause some tests to fail . +# thus, on those platforms we launch kmd with unsafe_scrypt = true to speed up the tests. +RUN_KMD_IN_UNSAFE_SCRYPT="" +ARCHTYPE=$("${SRCROOT}/scripts/archtype.sh") + +echo "ARCHTYPE: ${ARCHTYPE}" +if [[ "${ARCHTYPE}" = arm* ]]; then + RUN_KMD_IN_UNSAFE_SCRYPT="--unsafe_scrypt" +fi + +echo "RUN_KMD_IN_UNSAFE_SCRYPT = ${RUN_KMD_IN_UNSAFE_SCRYPT}" + export BINDIR=${TEMPDIR}/bin export DATADIR=${TEMPDIR}/data @@ -85,9 +98,9 @@ python3 -m venv "${TEMPDIR}/ve" . 
"${TEMPDIR}/ve/bin/activate" "${TEMPDIR}/ve/bin/pip3" install --upgrade pip "${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography -"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py "$SRCROOT"/test/scripts/e2e_subs/*.sh +"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_IN_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do - "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py --version "$(basename "$vdir")" "$vdir"/*.sh + "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_IN_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh done deactivate diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py index 39f4885b8f..7a28cddd25 100755 --- a/test/scripts/e2e_client_runner.py +++ b/test/scripts/e2e_client_runner.py @@ -69,6 +69,33 @@ def read_script_for_timeout(fname): logger.debug('read timeout match err', exc_info=True) return 200 + +def create_kmd_config_with_unsafe_scrypt(working_dir): + + kmd_config_dir = os.path.join(working_dir,"kmd-v0.5") + with open(os.path.join(kmd_config_dir,"kmd_config.json.example")) as f: + kmd_conf_data = json.load(f) + if "drivers" not in kmd_conf_data: + raise Exception("kmd_conf example does not contian drivers attribute") + if "sqlite" not in kmd_conf_data["drivers"]: + raise Exception("kmd_conf example does not contian sqlite attribute") + if "allow_unsafe_scrypt" not in kmd_conf_data["drivers"]["sqlite"]: + raise Exception("kmd_conf example does not contian allow_unsafe_scrypt attribute") + if "scrypt" not in kmd_conf_data["drivers"]["sqlite"]: + raise Exception("kmd_conf example does not contian scrypt attribute") + if "scrypt_n" not in kmd_conf_data["drivers"]["sqlite"]["scrypt"]: + raise Exception("kmd_conf example does not contian scrypt_n attribute") + if "scrypt_r" not in kmd_conf_data["drivers"]["sqlite"]["scrypt"]: + raise Exception("kmd_conf example does not contian scrypt_r attribute") + + 
kmd_conf_data["drivers"]["sqlite"]["allow_unsafe_scrypt"] = True + kmd_conf_data["drivers"]["sqlite"]["scrypt"]["scrypt_n"] = 4096 + with open(os.path.join(kmd_config_dir,"kmd_config.json"),"w") as f: + json.dump(kmd_conf_data,f) + + + + def _script_thread_inner(runset, scriptname): start = time.time() algod, kmd = runset.connect() @@ -190,8 +217,11 @@ def connect(self): def _connect(self): if self.algod and self.kmd: return + + # should run from inside self.lock algodata = self.env['ALGORAND_DATA'] + xrun(['goal', 'kmd', 'start', '-t', '3600','-d', algodata], env=self.env, timeout=5) self.kmd = openkmd(algodata) self.algod = openalgod(algodata) @@ -374,6 +404,8 @@ def main(): ap.add_argument('--timeout', default=500, type=int, help='integer seconds to wait for the scripts to run') ap.add_argument('--verbose', default=False, action='store_true') ap.add_argument('--version', default="Future") + ap.add_argument('--unsafe_scrypt', default=False, action='store_true', help="allows kmd to run with unsafe scrypt attribute. 
This will speed up tests time") + args = ap.parse_args() if args.verbose: @@ -408,6 +440,11 @@ def main(): env['ALGORAND_DATA'] = os.path.join(netdir, 'Node') env['ALGORAND_DATA2'] = os.path.join(netdir, 'Primary') + if args.unsafe_scrypt: + create_kmd_config_with_unsafe_scrypt(env['ALGORAND_DATA']) + create_kmd_config_with_unsafe_scrypt(env['ALGORAND_DATA2']) + + xrun(['goal', '-v'], env=env, timeout=5) xrun(['goal', 'node', 'status'], env=env, timeout=5) From b8ef4a4fde4e539a2c00fc5df15b6c6ce6b24e7e Mon Sep 17 00:00:00 2001 From: algoidan Date: Mon, 22 Mar 2021 14:48:19 +0200 Subject: [PATCH 126/215] vars name change --- test/scripts/e2e.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index 3a075b99aa..a24229ab3b 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -48,15 +48,15 @@ echo "Test output can be found in ${TEMPDIR}" # some ARM64 testing machines have memory issues which cause some tests to fail . # thus, on those platforms we launch kmd with unsafe_scrypt = true to speed up the tests. 
-RUN_KMD_IN_UNSAFE_SCRYPT="" -ARCHTYPE=$("${SRCROOT}/scripts/archtype.sh") +RUN_KMD_WITH_UNSAFE_SCRYPT="" +PLATFORM_ARCHTYPE=$("${SRCROOT}/scripts/archtype.sh") -echo "ARCHTYPE: ${ARCHTYPE}" -if [[ "${ARCHTYPE}" = arm* ]]; then - RUN_KMD_IN_UNSAFE_SCRYPT="--unsafe_scrypt" +echo "ARCHTYPE: ${PLATFORM_ARCHTYPE}" +if [[ "${PLATFORM_ARCHTYPE}" = arm* ]]; then + RUN_KMD_WITH_UNSAFE_SCRYPT="--unsafe_scrypt" fi -echo "RUN_KMD_IN_UNSAFE_SCRYPT = ${RUN_KMD_IN_UNSAFE_SCRYPT}" +echo "RUN_KMD_WITH_UNSAFE_SCRYPT = ${RUN_KMD_IN_UNSAFE_SCRYPT}" export BINDIR=${TEMPDIR}/bin export DATADIR=${TEMPDIR}/data From 5bd2089a4c292238cb638c771f3a8f9487814ae3 Mon Sep 17 00:00:00 2001 From: algoidan Date: Mon, 22 Mar 2021 15:15:52 +0200 Subject: [PATCH 127/215] fixing typo --- test/scripts/e2e.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index a24229ab3b..1990439963 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -56,7 +56,7 @@ if [[ "${PLATFORM_ARCHTYPE}" = arm* ]]; then RUN_KMD_WITH_UNSAFE_SCRYPT="--unsafe_scrypt" fi -echo "RUN_KMD_WITH_UNSAFE_SCRYPT = ${RUN_KMD_IN_UNSAFE_SCRYPT}" +echo "RUN_KMD_WITH_UNSAFE_SCRYPT = ${RUN_KMD_WITH_UNSAFE_SCRYPT}" export BINDIR=${TEMPDIR}/bin export DATADIR=${TEMPDIR}/data @@ -98,9 +98,9 @@ python3 -m venv "${TEMPDIR}/ve" . 
"${TEMPDIR}/ve/bin/activate" "${TEMPDIR}/ve/bin/pip3" install --upgrade pip "${TEMPDIR}/ve/bin/pip3" install --upgrade py-algorand-sdk cryptography -"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_IN_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh +"${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} "$SRCROOT"/test/scripts/e2e_subs/*.sh for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do - "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_IN_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh + "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh done deactivate From 51e2061f50aa4de20dc2e913590b2578d0a80a22 Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Mon, 22 Mar 2021 11:54:25 -0400 Subject: [PATCH 128/215] fix linting errors --- daemon/kmd/lib/kmdapi/responses.go | 96 +++++++++++++++--------------- 1 file changed, 48 insertions(+), 48 deletions(-) diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go index 6742777532..93529df47f 100644 --- a/daemon/kmd/lib/kmdapi/responses.go +++ b/daemon/kmd/lib/kmdapi/responses.go @@ -54,8 +54,8 @@ type VersionsResponse struct { Versions []string `json:"versions"` } -//swagger:response VersionsResponse -type _VersionsResponse struct { +// swagger:response versionsResponse +type versionsResponse struct { //in:body Body *VersionsResponse } @@ -67,8 +67,8 @@ type APIV1GETWalletsResponse struct { Wallets []APIV1Wallet `json:"wallets"` } -//swagger:response ListWalletsResponse -type ListWalletsResponse struct { +// swagger:response listWalletsResponse +type listWalletsResponse struct { //in: body Body *APIV1GETWalletsResponse } @@ -80,8 +80,8 @@ type APIV1POSTWalletResponse struct { Wallet APIV1Wallet `json:"wallet"` } -//swagger:response CreateWalletResponse -type CreateWalletResponse struct { +// swagger:response createWalletResponse +type 
createWalletResponse struct { // in:body Body *APIV1POSTWalletResponse } @@ -93,8 +93,8 @@ type APIV1POSTWalletInitResponse struct { WalletHandleToken string `json:"wallet_handle_token"` } -//swagger:response InitWalletHandleTokenResponse -type InitWalletHandleTokenResponse struct { +// swagger:response initWalletHandleTokenResponse +type initWalletHandleTokenResponse struct { // in:body Body *APIV1POSTWalletInitResponse } @@ -105,8 +105,8 @@ type APIV1POSTWalletReleaseResponse struct { APIV1ResponseEnvelope } -//swagger:response ReleaseWalletHandleTokenResponse -type ReleaseWalletHandleTokenResponse struct { +// swagger:response releaseWalletHandleTokenResponse +type releaseWalletHandleTokenResponse struct { // in:body Body *APIV1POSTWalletReleaseResponse } @@ -118,8 +118,8 @@ type APIV1POSTWalletRenewResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } -//swagger:response RenewWalletHandleTokenResponse -type RenewWalletHandleTokenResponse struct { +// swagger:response renewWalletHandleTokenResponse +type renewWalletHandleTokenResponse struct { // in:body Body *APIV1POSTWalletRenewResponse } @@ -131,8 +131,8 @@ type APIV1POSTWalletRenameResponse struct { Wallet APIV1Wallet `json:"wallet"` } -//swagger:response RenameWalletResponse -type RenameWalletResponse struct { +// swagger:response renameWalletResponse +type renameWalletResponse struct { // in:body Body *APIV1POSTWalletRenameResponse } @@ -144,8 +144,8 @@ type APIV1POSTWalletInfoResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } -//swagger:response WalletInfoResponse -type WalletInfoResponse struct { +// swagger:response walletInfoResponse +type walletInfoResponse struct { // in:body Body *APIV1POSTWalletInfoResponse } @@ -157,8 +157,8 @@ type APIV1POSTMasterKeyExportResponse struct { MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` } -//swagger:response ExportMasterKeyResponse -type ExportMasterKeyResponse struct { +// swagger:response 
exportMasterKeyResponse +type exportMasterKeyResponse struct { // in:body Body *APIV1POSTMasterKeyExportResponse } @@ -170,8 +170,8 @@ type APIV1POSTKeyImportResponse struct { Address string `json:"address"` } -//swagger:response ImportKeyResponse -type ImportKeyResponse struct { +// swagger:response importKeyResponse +type importKeyResponse struct { // in:body Body *APIV1POSTKeyImportResponse } @@ -183,10 +183,10 @@ type APIV1POSTKeyExportResponse struct { PrivateKey APIV1PrivateKey `json:"private_key"` } -//swagger:response ExportKeyResponse -type ExportKeyResponse struct { +// swagger:response exportKeyResponse +type exportKeyResponse struct { // in:body - Body *ExportKeyResponse + Body *APIV1POSTKeyExportResponse } // APIV1POSTKeyResponse is the response to `POST /v1/key` @@ -196,8 +196,8 @@ type APIV1POSTKeyResponse struct { Address string `json:"address"` } -//swagger:response GenerateKeyResponse -type GenerateKeyResponse struct { +// swagger:response generateKeyResponse +type generateKeyResponse struct { // in:body Body *APIV1POSTKeyResponse } @@ -208,8 +208,8 @@ type APIV1DELETEKeyResponse struct { APIV1ResponseEnvelope } -//swagger:response DeleteKeyResponse -type DeleteKeyResponse struct { +// swagger:response deleteKeyResponse +type deleteKeyResponse struct { // in:body Body *APIV1DELETEKeyResponse } @@ -221,8 +221,8 @@ type APIV1POSTKeyListResponse struct { Addresses []string `json:"addresses"` } -//swagger:response ListKeysResponse -type ListKeysResponse struct { +// swagger:response listKeysResponse +type listKeysResponse struct { //in: body Body *APIV1POSTKeyListResponse } @@ -236,8 +236,8 @@ type APIV1POSTTransactionSignResponse struct { SignedTransaction []byte `json:"signed_transaction"` } -//swagger:response SignTransactionResponse -type SignTransactionResponse struct { +// swagger:response signTransactionResponse +type signTransactionResponse struct { // in:body Body *APIV1POSTTransactionSignResponse } @@ -251,10 +251,10 @@ type 
APIV1POSTProgramSignResponse struct { Signature []byte `json:"sig"` } -//swagger:response SignProgramResponse -type SignProgramResponse struct { +// swagger:response signProgramResponse +type signProgramResponse struct { // in:body - Body *SignProgramResponse + Body *APIV1POSTProgramSignResponse } // APIV1POSTMultisigListResponse is the response to `POST /v1/multisig/list` @@ -264,10 +264,10 @@ type APIV1POSTMultisigListResponse struct { Addresses []string `json:"addresses"` } -//swagger:response ListMultisigResponse -type ListMultisigResponse struct { +// swagger:response listMultisigResponse +type listMultisigResponse struct { // in:body - Body *ListMultisigResponse + Body *APIV1POSTMultisigListResponse } // APIV1POSTMultisigImportResponse is the response to `POST /v1/multisig/import` @@ -277,10 +277,10 @@ type APIV1POSTMultisigImportResponse struct { Address string `json:"address"` } -//swagger:response ImportMultisigResponse -type ImportMultisigResponse struct { +// swagger:response importMultisigResponse +type importMultisigResponse struct { // in:body - Body *ImportMultisigResponse + Body *APIV1POSTMultisigImportResponse } // APIV1POSTMultisigExportResponse is the response to `POST /v1/multisig/export` @@ -292,8 +292,8 @@ type APIV1POSTMultisigExportResponse struct { PKs []APIV1PublicKey `json:"pks"` } -//swagger:response ExportMultisigResponse -type ExportMultisigResponse struct { +// swagger:response exportMultisigResponse +type exportMultisigResponse struct { // in:body Body *APIV1POSTMultisigExportResponse } @@ -304,8 +304,8 @@ type APIV1DELETEMultisigResponse struct { APIV1ResponseEnvelope } -//swagger:response DeleteMultisigResponse -type DeleteMultisigResponse struct { +// swagger:response deleteMultisigResponse +type deleteMultisigResponse struct { //in:body Body *APIV1DELETEMultisigResponse } @@ -319,8 +319,8 @@ type APIV1POSTMultisigTransactionSignResponse struct { Multisig []byte `json:"multisig"` } -//swagger:response SignMultisigResponse -type 
SignMultisigResponse struct { +// swagger:response signMultisigResponse +type signMultisigResponse struct { // in:body Body *APIV1POSTMultisigTransactionSignResponse } @@ -334,8 +334,8 @@ type APIV1POSTMultisigProgramSignResponse struct { Multisig []byte `json:"multisig"` } -//swagger:response SignProgramMultisigResponse -type SignProgramMultisigResponse struct { +// swagger:response signProgramMultisigResponse +type signProgramMultisigResponse struct { // in:body Body *APIV1POSTMultisigProgramSignResponse } From f4d15877bc85c83a090a5705fba41c5d2728a8c4 Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Mon, 22 Mar 2021 12:15:05 -0400 Subject: [PATCH 129/215] fix key not found error in swagger validation --- daemon/kmd/lib/kmdapi/responses.go | 44 +++++++++++++++--------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go index 93529df47f..a08fb6f0da 100644 --- a/daemon/kmd/lib/kmdapi/responses.go +++ b/daemon/kmd/lib/kmdapi/responses.go @@ -54,7 +54,7 @@ type VersionsResponse struct { Versions []string `json:"versions"` } -// swagger:response versionsResponse +// swagger:response VersionsResponse type versionsResponse struct { //in:body Body *VersionsResponse @@ -67,7 +67,7 @@ type APIV1GETWalletsResponse struct { Wallets []APIV1Wallet `json:"wallets"` } -// swagger:response listWalletsResponse +// swagger:response ListWalletsResponse type listWalletsResponse struct { //in: body Body *APIV1GETWalletsResponse @@ -80,7 +80,7 @@ type APIV1POSTWalletResponse struct { Wallet APIV1Wallet `json:"wallet"` } -// swagger:response createWalletResponse +// swagger:response CreateWalletResponse type createWalletResponse struct { // in:body Body *APIV1POSTWalletResponse @@ -93,7 +93,7 @@ type APIV1POSTWalletInitResponse struct { WalletHandleToken string `json:"wallet_handle_token"` } -// swagger:response initWalletHandleTokenResponse +// swagger:response 
InitWalletHandleTokenResponse type initWalletHandleTokenResponse struct { // in:body Body *APIV1POSTWalletInitResponse @@ -105,7 +105,7 @@ type APIV1POSTWalletReleaseResponse struct { APIV1ResponseEnvelope } -// swagger:response releaseWalletHandleTokenResponse +// swagger:response ReleaseWalletHandleTokenResponse type releaseWalletHandleTokenResponse struct { // in:body Body *APIV1POSTWalletReleaseResponse @@ -118,7 +118,7 @@ type APIV1POSTWalletRenewResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } -// swagger:response renewWalletHandleTokenResponse +// swagger:response RenewWalletHandleTokenResponse type renewWalletHandleTokenResponse struct { // in:body Body *APIV1POSTWalletRenewResponse @@ -131,7 +131,7 @@ type APIV1POSTWalletRenameResponse struct { Wallet APIV1Wallet `json:"wallet"` } -// swagger:response renameWalletResponse +// swagger:response RenameWalletResponse type renameWalletResponse struct { // in:body Body *APIV1POSTWalletRenameResponse @@ -144,7 +144,7 @@ type APIV1POSTWalletInfoResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } -// swagger:response walletInfoResponse +// swagger:response WalletInfoResponse type walletInfoResponse struct { // in:body Body *APIV1POSTWalletInfoResponse @@ -157,7 +157,7 @@ type APIV1POSTMasterKeyExportResponse struct { MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` } -// swagger:response exportMasterKeyResponse +// swagger:response ExportMasterKeyResponse type exportMasterKeyResponse struct { // in:body Body *APIV1POSTMasterKeyExportResponse @@ -170,7 +170,7 @@ type APIV1POSTKeyImportResponse struct { Address string `json:"address"` } -// swagger:response importKeyResponse +// swagger:response ImportKeyResponse type importKeyResponse struct { // in:body Body *APIV1POSTKeyImportResponse @@ -183,7 +183,7 @@ type APIV1POSTKeyExportResponse struct { PrivateKey APIV1PrivateKey `json:"private_key"` } -// swagger:response exportKeyResponse +// 
swagger:response ExportKeyResponse type exportKeyResponse struct { // in:body Body *APIV1POSTKeyExportResponse @@ -196,7 +196,7 @@ type APIV1POSTKeyResponse struct { Address string `json:"address"` } -// swagger:response generateKeyResponse +// swagger:response GenerateKeyResponse type generateKeyResponse struct { // in:body Body *APIV1POSTKeyResponse @@ -208,7 +208,7 @@ type APIV1DELETEKeyResponse struct { APIV1ResponseEnvelope } -// swagger:response deleteKeyResponse +// swagger:response DeleteKeyResponse type deleteKeyResponse struct { // in:body Body *APIV1DELETEKeyResponse @@ -221,7 +221,7 @@ type APIV1POSTKeyListResponse struct { Addresses []string `json:"addresses"` } -// swagger:response listKeysResponse +// swagger:response ListKeysResponse type listKeysResponse struct { //in: body Body *APIV1POSTKeyListResponse @@ -236,7 +236,7 @@ type APIV1POSTTransactionSignResponse struct { SignedTransaction []byte `json:"signed_transaction"` } -// swagger:response signTransactionResponse +// swagger:response SignTransactionResponse type signTransactionResponse struct { // in:body Body *APIV1POSTTransactionSignResponse @@ -251,7 +251,7 @@ type APIV1POSTProgramSignResponse struct { Signature []byte `json:"sig"` } -// swagger:response signProgramResponse +// swagger:response SignProgramResponse type signProgramResponse struct { // in:body Body *APIV1POSTProgramSignResponse @@ -264,7 +264,7 @@ type APIV1POSTMultisigListResponse struct { Addresses []string `json:"addresses"` } -// swagger:response listMultisigResponse +// swagger:response ListMultisigResponse type listMultisigResponse struct { // in:body Body *APIV1POSTMultisigListResponse @@ -277,7 +277,7 @@ type APIV1POSTMultisigImportResponse struct { Address string `json:"address"` } -// swagger:response importMultisigResponse +// swagger:response ImportMultisigResponse type importMultisigResponse struct { // in:body Body *APIV1POSTMultisigImportResponse @@ -292,7 +292,7 @@ type APIV1POSTMultisigExportResponse struct { 
PKs []APIV1PublicKey `json:"pks"` } -// swagger:response exportMultisigResponse +// swagger:response ExportMultisigResponse type exportMultisigResponse struct { // in:body Body *APIV1POSTMultisigExportResponse @@ -304,7 +304,7 @@ type APIV1DELETEMultisigResponse struct { APIV1ResponseEnvelope } -// swagger:response deleteMultisigResponse +// swagger:response DeleteMultisigResponse type deleteMultisigResponse struct { //in:body Body *APIV1DELETEMultisigResponse @@ -319,7 +319,7 @@ type APIV1POSTMultisigTransactionSignResponse struct { Multisig []byte `json:"multisig"` } -// swagger:response signMultisigResponse +// swagger:response SignMultisigResponse type signMultisigResponse struct { // in:body Body *APIV1POSTMultisigTransactionSignResponse @@ -334,7 +334,7 @@ type APIV1POSTMultisigProgramSignResponse struct { Multisig []byte `json:"multisig"` } -// swagger:response signProgramMultisigResponse +// swagger:response SignProgramMultisigResponse type signProgramMultisigResponse struct { // in:body Body *APIV1POSTMultisigProgramSignResponse From f70cbcf5a6ba185525b080e46f825226b02d9569 Mon Sep 17 00:00:00 2001 From: algoidan <79864820+algoidan@users.noreply.github.com> Date: Mon, 22 Mar 2021 18:45:56 +0200 Subject: [PATCH 130/215] Update test/scripts/e2e.sh rewriting the comment Co-authored-by: Brian Olson --- test/scripts/e2e.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index 1990439963..e1b176b66f 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -46,7 +46,8 @@ export TEMPDIR=${SRCROOT}/tmp/out/e2e/${TEST_RUN_ID} echo "Test output can be found in ${TEMPDIR}" -# some ARM64 testing machines have memory issues which cause some tests to fail . +# ARM64 has an unoptimized scrypt() which can cause tests to timeout. +# Run kmd with scrypt() configured to run less secure and fast to go through the motions for test. 
# thus, on those platforms we launch kmd with unsafe_scrypt = true to speed up the tests. RUN_KMD_WITH_UNSAFE_SCRYPT="" PLATFORM_ARCHTYPE=$("${SRCROOT}/scripts/archtype.sh") From d8f861da22519512f57d410debe3c3196b447878 Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Mon, 22 Mar 2021 13:53:53 -0400 Subject: [PATCH 131/215] add descriptions to swagger response --- daemon/kmd/lib/kmdapi/responses.go | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go index a08fb6f0da..8d1052080a 100644 --- a/daemon/kmd/lib/kmdapi/responses.go +++ b/daemon/kmd/lib/kmdapi/responses.go @@ -54,8 +54,10 @@ type VersionsResponse struct { Versions []string `json:"versions"` } +// Response to `GET /versions` // swagger:response VersionsResponse type versionsResponse struct { + //Versions Response //in:body Body *VersionsResponse } @@ -67,6 +69,7 @@ type APIV1GETWalletsResponse struct { Wallets []APIV1Wallet `json:"wallets"` } +// Response to `GET /v1/wallets` // swagger:response ListWalletsResponse type listWalletsResponse struct { //in: body @@ -80,6 +83,7 @@ type APIV1POSTWalletResponse struct { Wallet APIV1Wallet `json:"wallet"` } +// Response to `POST /v1/wallet` // swagger:response CreateWalletResponse type createWalletResponse struct { // in:body @@ -93,6 +97,7 @@ type APIV1POSTWalletInitResponse struct { WalletHandleToken string `json:"wallet_handle_token"` } +// Response to `POST /v1/wallet/init` // swagger:response InitWalletHandleTokenResponse type initWalletHandleTokenResponse struct { // in:body @@ -105,6 +110,7 @@ type APIV1POSTWalletReleaseResponse struct { APIV1ResponseEnvelope } +// Response to `POST /v1/wallet/release` // swagger:response ReleaseWalletHandleTokenResponse type releaseWalletHandleTokenResponse struct { // in:body @@ -118,6 +124,7 @@ type APIV1POSTWalletRenewResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } +// 
Response `POST /v1/wallet/renew` // swagger:response RenewWalletHandleTokenResponse type renewWalletHandleTokenResponse struct { // in:body @@ -131,6 +138,7 @@ type APIV1POSTWalletRenameResponse struct { Wallet APIV1Wallet `json:"wallet"` } +// Response to `POST /v1/wallet/rename` // swagger:response RenameWalletResponse type renameWalletResponse struct { // in:body @@ -144,6 +152,7 @@ type APIV1POSTWalletInfoResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } +// Response to `POST /v1/wallet/rename` // swagger:response WalletInfoResponse type walletInfoResponse struct { // in:body @@ -157,6 +166,7 @@ type APIV1POSTMasterKeyExportResponse struct { MasterDerivationKey APIV1MasterDerivationKey `json:"master_derivation_key"` } +// Reponse to `POST /v1/master-key/export` // swagger:response ExportMasterKeyResponse type exportMasterKeyResponse struct { // in:body @@ -170,6 +180,7 @@ type APIV1POSTKeyImportResponse struct { Address string `json:"address"` } +// Repsonse to `POST /v1/key/import` // swagger:response ImportKeyResponse type importKeyResponse struct { // in:body @@ -183,6 +194,7 @@ type APIV1POSTKeyExportResponse struct { PrivateKey APIV1PrivateKey `json:"private_key"` } +// Reponse to `POST /v1/key/export` // swagger:response ExportKeyResponse type exportKeyResponse struct { // in:body @@ -196,6 +208,7 @@ type APIV1POSTKeyResponse struct { Address string `json:"address"` } +// Response to `POST /v1/key` // swagger:response GenerateKeyResponse type generateKeyResponse struct { // in:body @@ -208,6 +221,7 @@ type APIV1DELETEKeyResponse struct { APIV1ResponseEnvelope } +// Response to `DELETE /v1/key` // swagger:response DeleteKeyResponse type deleteKeyResponse struct { // in:body @@ -221,6 +235,7 @@ type APIV1POSTKeyListResponse struct { Addresses []string `json:"addresses"` } +// Response to `POST /v1/key/list` // swagger:response ListKeysResponse type listKeysResponse struct { //in: body @@ -236,6 +251,7 @@ type 
APIV1POSTTransactionSignResponse struct { SignedTransaction []byte `json:"signed_transaction"` } +// Response to `POST /v1/transaction/sign` // swagger:response SignTransactionResponse type signTransactionResponse struct { // in:body @@ -251,6 +267,7 @@ type APIV1POSTProgramSignResponse struct { Signature []byte `json:"sig"` } +// Response to `POST /v1/data/sign` // swagger:response SignProgramResponse type signProgramResponse struct { // in:body @@ -264,6 +281,7 @@ type APIV1POSTMultisigListResponse struct { Addresses []string `json:"addresses"` } +// Response to `POST /v1/multisig/list` // swagger:response ListMultisigResponse type listMultisigResponse struct { // in:body @@ -277,6 +295,7 @@ type APIV1POSTMultisigImportResponse struct { Address string `json:"address"` } +// Response to `POST /v1/multisig/import` // swagger:response ImportMultisigResponse type importMultisigResponse struct { // in:body @@ -292,6 +311,7 @@ type APIV1POSTMultisigExportResponse struct { PKs []APIV1PublicKey `json:"pks"` } +// Response to `POST /v1/multisig/export` // swagger:response ExportMultisigResponse type exportMultisigResponse struct { // in:body @@ -304,9 +324,10 @@ type APIV1DELETEMultisigResponse struct { APIV1ResponseEnvelope } +// Response to POST /v1/multisig/delete // swagger:response DeleteMultisigResponse type deleteMultisigResponse struct { - //in:body + // in:body Body *APIV1DELETEMultisigResponse } @@ -319,6 +340,7 @@ type APIV1POSTMultisigTransactionSignResponse struct { Multisig []byte `json:"multisig"` } +// Response to `POST /v1/multisig/sign` // swagger:response SignMultisigResponse type signMultisigResponse struct { // in:body @@ -334,6 +356,7 @@ type APIV1POSTMultisigProgramSignResponse struct { Multisig []byte `json:"multisig"` } +// Response to `POST /v1/multisig/signdata` // swagger:response SignProgramMultisigResponse type signProgramMultisigResponse struct { // in:body From 1b60bfbefbc108b4a480bcc0eff99f0aba9a52ce Mon Sep 17 00:00:00 2001 From: Tsachi 
Herman Date: Mon, 22 Mar 2021 18:21:17 -0400 Subject: [PATCH 132/215] update per peer review. --- ledger/eval.go | 26 ++++++++++---------------- 1 file changed, 10 insertions(+), 16 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index 62dccce87e..8432138e9d 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -804,24 +804,13 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * } } - // in case of a CompactCertTx transaction, we want to "apply" it only in validate or generate mode. This will deviate the cow's CompactCertNext depending of - // whether we're in validate/generate mode or not, however - given that this variable in only being used in these modes, it would be safe. - // The reason for making this into an exception is that during initialization time, the accounts update is "converting" the recent 320 blocks into deltas to - // be stored in memory. These deltas don't care about the compact certificate, and so we can improve the node load time. Additionally, it save us from - // performing the validation during catchup, which is another performance boost. - if (eval.validate || eval.generate) && txn.Txn.Type == protocol.CompactCertTx { - if err := cow.compactCert(txn.Txn.CertRound, txn.Txn.CertType, txn.Txn.Cert, txn.Txn.Header.FirstValid, eval.validate); err != nil { - return err - } - } - spec := transactions.SpecialAddresses{ FeeSink: eval.block.BlockHeader.FeeSink, RewardsPool: eval.block.BlockHeader.RewardsPool, } // Apply the transaction, updating the cow balances - applyData, err := applyTransaction(txn.Txn, cow, evalParams, spec, cow.txnCounter()) + applyData, err := eval.applyTransaction(txn.Txn, cow, evalParams, spec, cow.txnCounter()) if err != nil { return fmt.Errorf("transaction %v: %v", txid, err) } @@ -892,7 +881,7 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * } // applyTransaction changes the balances according to this transaction. 
-func applyTransaction(tx transactions.Transaction, balances *roundCowState, evalParams *logic.EvalParams, spec transactions.SpecialAddresses, ctr uint64) (ad transactions.ApplyData, err error) { +func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, balances *roundCowState, evalParams *logic.EvalParams, spec transactions.SpecialAddresses, ctr uint64) (ad transactions.ApplyData, err error) { params := balances.ConsensusParams() // move fee to pool @@ -942,9 +931,14 @@ func applyTransaction(tx transactions.Transaction, balances *roundCowState, eval err = apply.ApplicationCall(tx.ApplicationCallTxnFields, tx.Header, balances, &ad, evalParams, ctr) case protocol.CompactCertTx: - // don't do anything in the case of compact certificate transaction. This transaction type is explicitly handled in transaction(), since - // we want to conduct the testing of it only in the case of a validation or generation. - // Note that this means that the cow's CompactCertNext field would not be updated unless we're in either generate or validate mode. + // in case of a CompactCertTx transaction, we want to "apply" it only in validate or generate mode. This will deviate the cow's CompactCertNext depending of + // whether we're in validate/generate mode or not, however - given that this variable in only being used in these modes, it would be safe. + // The reason for making this into an exception is that during initialization time, the accounts update is "converting" the recent 320 blocks into deltas to + // be stored in memory. These deltas don't care about the compact certificate, and so we can improve the node load time. Additionally, it save us from + // performing the validation during catchup, which is another performance boost. 
+ if eval.validate || eval.generate { + err = balances.compactCert(tx.CertRound, tx.CertType, tx.Cert, tx.Header.FirstValid, eval.validate) + } default: err = fmt.Errorf("Unknown transaction type %v", tx.Type) From 3d4aadb5c999ce29df6729f4235db4e2e86ad996 Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Tue, 23 Mar 2021 13:27:43 -0400 Subject: [PATCH 133/215] typo fix --- daemon/kmd/lib/kmdapi/responses.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/kmd/lib/kmdapi/responses.go b/daemon/kmd/lib/kmdapi/responses.go index 8d1052080a..dee75b8a29 100644 --- a/daemon/kmd/lib/kmdapi/responses.go +++ b/daemon/kmd/lib/kmdapi/responses.go @@ -152,7 +152,7 @@ type APIV1POSTWalletInfoResponse struct { WalletHandle APIV1WalletHandle `json:"wallet_handle"` } -// Response to `POST /v1/wallet/rename` +// Response to `POST /v1/wallet/info` // swagger:response WalletInfoResponse type walletInfoResponse struct { // in:body From b388a79984d1db86821a45bef35db992d8e9a8ff Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Tue, 23 Mar 2021 13:54:35 -0400 Subject: [PATCH 134/215] swagger ignore RawBlockResponse --- daemon/algod/api/server/v1/handlers/responses.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/daemon/algod/api/server/v1/handlers/responses.go b/daemon/algod/api/server/v1/handlers/responses.go index 74f2e746ac..33a8b75f4f 100644 --- a/daemon/algod/api/server/v1/handlers/responses.go +++ b/daemon/algod/api/server/v1/handlers/responses.go @@ -177,7 +177,7 @@ func (r TransactionParamsResponse) getBody() interface{} { // RawBlockResponse contains encoded, raw block information // -// swagger:response RawBlockResponse +// swagger:ignore type RawBlockResponse struct { // in: body Body *v1.RawBlock From 000e9ce0344078f92abb34434f33606fc56b23b7 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy Date: Tue, 23 Mar 2021 12:55:38 -0400 Subject: [PATCH 135/215] Improve memory allocations in cloneAssetHoldings before 
mem 1.83GB, allocs 1773556 after mem 1.22GB, allocs 962952 --- ledger/apply/asset.go | 4 ++-- ledger/apply/asset_test.go | 17 +++++++++++++++++ network/limited_reader_slurper.go | 2 +- 3 files changed, 20 insertions(+), 3 deletions(-) diff --git a/ledger/apply/asset.go b/ledger/apply/asset.go index 7f745bda03..26d50acf2b 100644 --- a/ledger/apply/asset.go +++ b/ledger/apply/asset.go @@ -24,7 +24,7 @@ import ( ) func cloneAssetHoldings(m map[basics.AssetIndex]basics.AssetHolding) map[basics.AssetIndex]basics.AssetHolding { - res := make(map[basics.AssetIndex]basics.AssetHolding) + res := make(map[basics.AssetIndex]basics.AssetHolding, len(m)) for id, val := range m { res[id] = val } @@ -32,7 +32,7 @@ func cloneAssetHoldings(m map[basics.AssetIndex]basics.AssetHolding) map[basics. } func cloneAssetParams(m map[basics.AssetIndex]basics.AssetParams) map[basics.AssetIndex]basics.AssetParams { - res := make(map[basics.AssetIndex]basics.AssetParams) + res := make(map[basics.AssetIndex]basics.AssetParams, len(m)) for id, val := range m { res[id] = val } diff --git a/ledger/apply/asset_test.go b/ledger/apply/asset_test.go index 44b6ab665b..4b1b58d1e8 100644 --- a/ledger/apply/asset_test.go +++ b/ledger/apply/asset_test.go @@ -17,6 +17,7 @@ package apply import ( + "math/rand" "testing" "github.com/stretchr/testify/require" @@ -97,3 +98,19 @@ func TestAssetTransfer(t *testing.T) { require.Equal(t, dstAmount-toSend, addrs[cls].Assets[1].Amount) } } + +var benchTotal int = 0 + +func BenchmarkAssetCloning(b *testing.B) { + const numAssets = 800 + assets := make(map[basics.AssetIndex]basics.AssetHolding, numAssets) + for j := 0; j < numAssets; j++ { + aidx := basics.AssetIndex(rand.Int63n(100000000)) + assets[aidx] = basics.AssetHolding{Amount: uint64(aidx)} + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + clone := cloneAssetHoldings(assets) + benchTotal += len(clone) // make sure the compiler does not optimize out cloneAssetHoldings call + } +} diff --git 
a/network/limited_reader_slurper.go b/network/limited_reader_slurper.go index 0b7f519d24..7caff67308 100644 --- a/network/limited_reader_slurper.go +++ b/network/limited_reader_slurper.go @@ -77,7 +77,7 @@ func (s *LimitedReaderSlurper) Read(reader io.Reader) error { // if we received err == nil and n == 0, we should retry calling the Read function. continue default: - // if we recieved a non-io.EOF error, return it. + // if we received a non-io.EOF error, return it. return err } } From de7647f3931a21418402acbe1adb5ec56cca9930 Mon Sep 17 00:00:00 2001 From: John Lee Date: Tue, 23 Mar 2021 19:42:10 -0400 Subject: [PATCH 136/215] Add comments to releases page script. (#2003) --- .../releases_page/generate_releases_page.py | 120 ++++++++++++++++-- 1 file changed, 109 insertions(+), 11 deletions(-) diff --git a/scripts/release/mule/deploy/releases_page/generate_releases_page.py b/scripts/release/mule/deploy/releases_page/generate_releases_page.py index b5f76bbfa5..af5fbd865a 100755 --- a/scripts/release/mule/deploy/releases_page/generate_releases_page.py +++ b/scripts/release/mule/deploy/releases_page/generate_releases_page.py @@ -1,6 +1,16 @@ #!/usr/bin/env python3 -# This script builds https://releases.algorand.com/index.html. +# This script builds https://releases.algorand.com/index.html +# +# For each channel (stable, beta, indexer), we download the file information +# from the staging_bucket. Information from this bucket is used to create an +# html block for each channel which includes all versions found. +# +# The releases_bucket is also read, and if the file exists there, then the +# releases_bucket URL is used instead of the staging_bucket URL. +# +# All the HTML for the channels is combined to form one large release page, +# which can then be published on our releases page. 
import sys import boto3 @@ -11,10 +21,15 @@ releases_bucket = "algorand-releases" releases_prefix = "https://releases.algorand.com/" html_tpl = "html.tpl" +# Nit: should be styles_file styles_url = "releases_page.css" +# May want to call these channels instead tokens = ["stable", "beta", "indexer"] + def get_stage_release_set(response): + # Loop through contents of STAGING_BUCKET/releases/CHANNEL/ and return + # all[prefix] = [file_obj1, file_obj2...] prefix = None all = {} they = [] @@ -31,10 +46,15 @@ def get_stage_release_set(response): else: all[prefix] = they prefix = None + # Why do the following instead of emptying 'they' altogether? they = [x] return all + def release_set_files(rset): + # Take list of file_objs, and return a files dict, keyed by filename + # value is a dict with keys "file" (full path), "Size", and if + # present, ".asc" or ".sig" files = {} for x in rset: path = x["Key"] @@ -43,17 +63,23 @@ def release_set_files(rset): continue didsuf = False for suffix in (".asc", ".sig"): + # Check if signature file, e.g. node_beta_linux-amd64_2.5.2.tar.gz.sig if fname.endswith(suffix): + # Get base filename, e.g. without '.sig' froot = fname[:-len(suffix)] + fd = files.get(froot) if fd is None: fd = {} files[froot] = fd + # key file dict by suffix, attach whole file object fd[suffix] = x didsuf = True - break + break # end suffixes loop if didsuf: - continue + continue # go to next file in rset + + # At this point we are not a sig file, so just attach raw information fd = files.get(fname) if fd is None: fd = {} @@ -62,23 +88,34 @@ def release_set_files(rset): fd["Size"] = x["Size"] return files + def get_hashes_data(s3, rset): + # Read all hashes files for a version and return text string text = "" for x in rset: + # x here are objects under a specific prefix path = x["Key"] pre, fname = path.rsplit("/", 1) if fname.endswith(".asc"): continue if fname.endswith(".sig"): continue + + # We skip signature files and only process hashes files + # e.g. 
hashes_beta_linux_amd64_2.5.2 + # We read and append all of this data in the 'text' string and return + # it if fname.startswith("hashes"): ob = s3.get_object(Bucket=staging_bucket, Key=path) text += ob["Body"].read().decode() return text + def read_hashes(fin): + # Read the output of get_hashes_data by_fname = {} for line in fin: + # Ignore blanks and comments if not line: continue line = line.strip() @@ -86,11 +123,21 @@ def read_hashes(fin): continue if line[0] == "#": continue + + # E.g.: + # 7e19496802ca7f3bec68ba580ccb7042 + # algorand-beta-2.5.2-1.x86_64.rpm hashstr, fname = line.split() ob = by_fname.get(fname) + + # If the filename is not in by_fname, create an empty dict and assign + # it if not ob: ob = {} by_fname[fname] = ob + + # if 32 chars, it's md5; 64 is sha256, 128 is sha512. Asign to dict + # under those keys if len(hashstr) == 32: ob["md5"] = hashstr elif len(hashstr) == 64: @@ -99,6 +146,7 @@ def read_hashes(fin): ob["sha512"] = hashstr return by_fname + def objects_by_fname(they): out = {} for x in they: @@ -110,53 +158,103 @@ def objects_by_fname(they): out[fname] = x return out + def getContent(url): with open(url, "r") as reader: content = reader.read() return content + def build_page(channels): + # read html_tpl and styles_url, make substitutions html = getContent(html_tpl).replace("{styles}", getContent(styles_url)) + # Replace each token (channel) from channels for n in tokens: html = html.replace("".join(["{", n, "}"]), "".join(channels[n])) sys.stdout.write(html) + def get_furl(release_files, fname, skey): + # Pass s3://algorand-releases/ file objects; also the filename and path + # from s3://algorand-dev-deb-repo. + # + # If the filename is in the algorand-releases bucket, use the url from the + # releases bucket. Otherwise, use the URL from the + # s3://algorand-dev-deb-repo bucket. 
+ # + # algorand-releases and algorand-dev-deb-repo match: + # https://releases.algorand.com/[rpath] + # Else: + # http://algorand-dev-deb-repo.s3-website-us-east-1.amazonaws.com/[spath] rfpath = release_files.get(fname) if rfpath is not None: return releases_prefix + rfpath["Key"] else: return staging_prefix + skey + def main(): s3 = boto3.client("s3") channels = {} + # Should use tokens array instead for channel in ["stable", "beta", "indexer"]: - staging_response = s3.list_objects_v2(Bucket=staging_bucket, Prefix="releases/" + channel + "/", MaxKeys=100) + # Fetch contents of e.g. s3://algorand-dev-deb-repo/releases/beta/ + # Note: MaxKeys will limit to last 100 releases, which is more than + # enough. Consider dropping this to 2. + staging_response = s3.list_objects_v2( + Bucket=staging_bucket, + Prefix="releases/" + channel + "/", MaxKeys=100) + + # Populate release_sets, e.g.: + # 'releases/beta/f9fa9a084_2.5.2' => [file_obj1, file_obj2, ...] release_sets = get_stage_release_set(staging_response) + + # List everything from the releases bucket s3://algorand-releases/ releases_response = s3.list_objects_v2(Bucket=releases_bucket) + + # Return dict keyed by filename of file_objs from + # s3://algorand-releases/ release_files = objects_by_fname(releases_response["Contents"]) table = [] + # Loop through all the releases in e.g. + # s3://algorand-dev-deb-repo/releases/beta/ for key, rset in release_sets.items(): + # key: releases/beta/f9fa9a084_2.5.2 + # rset: [file_obj1, file_obj2, ...] + + # Scan rset objs and return all the hashes data as a string hashftext = get_hashes_data(s3, rset) + + # Create a dict of fhashes[filename] = hash_obj + # hash_obj[CHECKSUM] = HASH_STRING + # E.g. 
hash_obj['md5'] = '7e19496802ca7f3bec68ba580ccb7042' fhashes = read_hashes(hashftext.splitlines()) + + # Build a dict keyed by filename with value of a dict, keyed by + # "file" (full path) and "Size" files = release_set_files(rset) for fname, info in files.items(): if "file" not in info: continue + + # Use algorand-releases URL if avail; otherwise + # algorand-dev-deb-repo URL furl = get_furl(release_files, fname, info['file']) + ftext = ''.format(furl, fname) + # sig file obj from algorand-dev-deb-repo sig = info.get(".sig") stext = "" if sig is not None: - sfname = sig["Key"].rsplit("/", 1)[-1] + sfname = sig["Key"].rsplit("/", 1)[-1] # filename + # Use algorand-releases URL if available surl = get_furl(release_files, sfname, sig["Key"]) stext = '.sig'.format(surl) size = info.get("Size", "") @@ -172,12 +270,12 @@ def main(): table.append("".join(tbody)) # Only add the spacer *after* every set. - # It's not readily apparent to me why `indexer` would have a dict with a single - # item. This needs additional investigation. + # It's not readily apparent to me why `indexer` would have a dict + # with a single item. This needs additional investigation. # - # For instance, when creating the "indexer" table, the first line was empty b/c - # it added a spacer. This was b/c there were two dicts and the first only - # contained one item, which was useless. + # For instance, when creating the "indexer" table, the first line + # was empty b/c it added a spacer. This was b/c there were two + # dicts and the first only contained one item, which was useless. # # For now, just ignore those dicts. 
if len(files.items()) > 1: @@ -187,6 +285,6 @@ def main(): build_page(channels) + if __name__ == "__main__": main() - From b6d47148a454c25d0a82b15477556e8cc5845618 Mon Sep 17 00:00:00 2001 From: John Lee Date: Tue, 23 Mar 2021 19:42:57 -0400 Subject: [PATCH 137/215] Add pushing docker tag to betanet (#2004) --- scripts/release/mule/deploy/docker/docker.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/scripts/release/mule/deploy/docker/docker.sh b/scripts/release/mule/deploy/docker/docker.sh index a81a26306c..f0b0ffc503 100755 --- a/scripts/release/mule/deploy/docker/docker.sh +++ b/scripts/release/mule/deploy/docker/docker.sh @@ -38,7 +38,8 @@ then ./build_releases.sh --tagname "$VERSION" elif [ "$NETWORK" = betanet ] then - ./build_releases.sh --network betanet + ./build_releases.sh --network betanet + ./build_releases.sh --network betanet --tagname "$VERSION" fi popd From 07b8b8f722a0b7936453d0a043cb22d4c98b8aae Mon Sep 17 00:00:00 2001 From: "shiqi.zheng@algorand.com" Date: Wed, 24 Mar 2021 09:17:19 -0400 Subject: [PATCH 138/215] update swagger annotation for APIV1RequestEnvelope --- daemon/kmd/lib/kmdapi/requests.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/daemon/kmd/lib/kmdapi/requests.go b/daemon/kmd/lib/kmdapi/requests.go index 0411dddbff..7e18c1219a 100644 --- a/daemon/kmd/lib/kmdapi/requests.go +++ b/daemon/kmd/lib/kmdapi/requests.go @@ -27,7 +27,7 @@ type APIV1Request interface{} // we need to tell swagger to ignore due to bug (g // APIV1RequestEnvelope is a common envelope that all API V1 requests must embed // -// swagger:ignore +// swagger:model VersionsRequest type APIV1RequestEnvelope struct { // we need to tell swagger to ignore due to bug (go-swagger/issues/1436) _struct struct{} `codec:",omitempty,omitemptyarray"` } @@ -165,7 +165,7 @@ type APIV1POSTTransactionSignRequest struct { // Note: SDK and goal usually generate `SignedTxn` objects // in that case, the field `txn` / `Transaction` of the // 
generated `SignedTxn` object needs to be used - // + // // swagger:strfmt byte Transaction []byte `json:"transaction"` PublicKey crypto.PublicKey `json:"public_key"` From dc3d83604fd3f09c688ecac00b006c02c17de99c Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 25 Mar 2021 15:02:19 -0400 Subject: [PATCH 139/215] Upgrade the websocket library to the latest one. --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index b76929bcee..ca6088c6cb 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/algorand/graphtrace v0.0.0-20201117160756-e524ed1a6f64 github.com/algorand/msgp v1.1.47 github.com/algorand/oapi-codegen v1.3.5-algorand5 - github.com/algorand/websocket v1.4.1 + github.com/algorand/websocket v1.4.2 github.com/aws/aws-sdk-go v1.16.5 github.com/cpuguy83/go-md2man v1.0.8 // indirect github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018 diff --git a/go.sum b/go.sum index 37e5fb82da..0ea720a6ce 100644 --- a/go.sum +++ b/go.sum @@ -35,8 +35,8 @@ github.com/algorand/msgp v1.1.47 h1:xeU6G/Mb1iudJe4L5X38YrOY+VHhvHQDZXxyXYHTzOw= github.com/algorand/msgp v1.1.47/go.mod h1:LtOntbYiCHj/Sl/Sqxtf8CZOrDt2a8Dv3tLaS6mcnUE= github.com/algorand/oapi-codegen v1.3.5-algorand5 h1:y576Ca2/guQddQrQA7dtL5KcOx5xQgPeIupiuFMGyCI= github.com/algorand/oapi-codegen v1.3.5-algorand5/go.mod h1:/k0Ywn0lnt92uBMyE+yiRf/Wo3/chxHHsAfenD09EbY= -github.com/algorand/websocket v1.4.1 h1:FPoNHI8i2VZWZzhCscY8JTzsAE7Vv73753cMbzb3udk= -github.com/algorand/websocket v1.4.1/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8= +github.com/algorand/websocket v1.4.2 h1:zMB7ukz+c7tcef8rVqmKQTv6KQtxXtCFuiAqKaE7n9I= +github.com/algorand/websocket v1.4.2/go.mod h1:0nFSn+xppw/GZS9hgWPS3b8/4FcA3Pj7XQxm+wqHGx8= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics 
v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= From 6bc8aa515a6647fd6806ce12e2a89c7316be308b Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 25 Mar 2021 17:54:08 -0400 Subject: [PATCH 140/215] Adjust the outgoing buffer sizes according to realistic agreement service needs. --- network/wsNetwork.go | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/network/wsNetwork.go b/network/wsNetwork.go index ce07d24457..c939997dc1 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -598,6 +598,17 @@ func (wn *WebsocketNetwork) GetPeers(options ...PeerOption) []Peer { return outPeers } +// find the max value across the given uint64 numbers. +func max(numbers ...uint64) (maxNum uint64) { + maxNum = 0 // this is the lowest uint64 value. + for _, num := range numbers { + if num > maxNum { + maxNum = num + } + } + return +} + func (wn *WebsocketNetwork) setup() { var preferredResolver dnssec.ResolverIf if wn.config.DNSSecurityRelayAddrEnforced() { @@ -626,14 +637,18 @@ func (wn *WebsocketNetwork) setup() { wn.server.MaxHeaderBytes = httpServerMaxHeaderBytes wn.ctx, wn.ctxCancel = context.WithCancel(context.Background()) wn.relayMessages = wn.config.NetAddress != "" || wn.config.ForceRelayMessages - // roughly estimate the number of messages that could be sent over the lifespan of a single round. 
- wn.outgoingMessagesBufferSize = int(config.Consensus[protocol.ConsensusCurrentVersion].NumProposers*2 + - config.Consensus[protocol.ConsensusCurrentVersion].SoftCommitteeSize + - config.Consensus[protocol.ConsensusCurrentVersion].CertCommitteeSize + - config.Consensus[protocol.ConsensusCurrentVersion].NextCommitteeSize + - config.Consensus[protocol.ConsensusCurrentVersion].LateCommitteeSize + - config.Consensus[protocol.ConsensusCurrentVersion].RedoCommitteeSize + - config.Consensus[protocol.ConsensusCurrentVersion].DownCommitteeSize) + // roughly estimate the number of messages that could be seen at any given moment. + // For the late/redo/down committee, which happen in parallel, we need to allocate + // extra space there. + wn.outgoingMessagesBufferSize = int( + max(config.Consensus[protocol.ConsensusCurrentVersion].NumProposers, + config.Consensus[protocol.ConsensusCurrentVersion].SoftCommitteeSize, + config.Consensus[protocol.ConsensusCurrentVersion].CertCommitteeSize, + config.Consensus[protocol.ConsensusCurrentVersion].NextCommitteeSize) + + max(config.Consensus[protocol.ConsensusCurrentVersion].LateCommitteeSize, + config.Consensus[protocol.ConsensusCurrentVersion].RedoCommitteeSize, + config.Consensus[protocol.ConsensusCurrentVersion].DownCommitteeSize), + ) wn.broadcastQueueHighPrio = make(chan broadcastRequest, wn.outgoingMessagesBufferSize) wn.broadcastQueueBulk = make(chan broadcastRequest, 100) From a4d33818928fa5bf80ba9c790f8b6e3fb37e33fd Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy Date: Thu, 25 Mar 2021 16:20:42 -0400 Subject: [PATCH 141/215] Presice calculation of paysetHint for block eval --- data/pools/transactionPool.go | 18 +++++++++++------- ledger/appcow.go | 2 +- ledger/appcow_test.go | 2 +- ledger/cow.go | 4 ++-- ledger/cow_test.go | 4 ++-- ledger/eval.go | 4 ++-- ledger/ledgercore/statedelta.go | 11 +++++++---- 7 files changed, 26 insertions(+), 19 deletions(-) diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go 
index 489a29e6de..58cf837c7e 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -109,7 +109,7 @@ func MakeTransactionPool(ledger *ledger.Ledger, cfg config.Local, log logging.Lo } pool.cond.L = &pool.mu pool.assemblyCond.L = &pool.assemblyMu - pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round)) + pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round), 0) return &pool } @@ -162,7 +162,7 @@ func (pool *TransactionPool) Reset() { pool.numPendingWholeBlocks = 0 pool.pendingBlockEvaluator = nil pool.statusCache.reset() - pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round)) + pool.recomputeBlockEvaluator(make(map[transactions.Txid]basics.Round), 0) } // NumExpired returns the number of transactions that expired at the @@ -468,10 +468,10 @@ func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledgercor var knownCommitted uint var unknownCommitted uint - commitedTxids := delta.Txids + committedTxids := delta.Txids if pool.logProcessBlockStats { pool.pendingMu.RLock() - for txid := range commitedTxids { + for txid := range committedTxids { if _, ok := pool.pendingTxids[txid]; ok { knownCommitted++ } else { @@ -512,7 +512,7 @@ func (pool *TransactionPool) OnNewBlock(block bookkeeping.Block, delta ledgercor // Recompute the pool by starting from the new latest block. // This has the side-effect of discarding transactions that // have been committed (or that are otherwise no longer valid). - stats = pool.recomputeBlockEvaluator(commitedTxids) + stats = pool.recomputeBlockEvaluator(committedTxids, knownCommitted) } stats.KnownCommittedCount = knownCommitted @@ -625,7 +625,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluator(txgroup []transactions.S // recomputeBlockEvaluator constructs a new BlockEvaluator and feeds all // in-pool transactions to it (removing any transactions that are rejected // by the BlockEvaluator). 
Expects that the pool.mu mutex would be already taken. -func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transactions.Txid]basics.Round) (stats telemetryspec.ProcessBlockMetrics) { +func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transactions.Txid]basics.Round, knownCommitted uint) (stats telemetryspec.ProcessBlockMetrics) { pool.pendingBlockEvaluator = nil latest := pool.ledger.Latest() @@ -665,7 +665,11 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact next := bookkeeping.MakeBlock(prev) pool.numPendingWholeBlocks = 0 - pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, pendingCount) + hint := pendingCount - int(knownCommitted) + if hint < 0 || int(knownCommitted) < 0 { + hint = 0 + } + pool.pendingBlockEvaluator, err = pool.ledger.StartEvaluator(next.BlockHeader, hint) if err != nil { pool.log.Warnf("TransactionPool.recomputeBlockEvaluator: cannot start evaluator: %v", err) return diff --git a/ledger/appcow.go b/ledger/appcow.go index f3742b8c21..aa0519defd 100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -410,7 +410,7 @@ func MakeDebugBalances(l ledgerForCowBase, round basics.Round, proto protocol.Co // Execution happens in a child cow and all modifications are merged into parent if the program passes func (cb *roundCowState) StatefulEval(params logic.EvalParams, aidx basics.AppIndex, program []byte) (pass bool, evalDelta basics.EvalDelta, err error) { // Make a child cow to eval our program in - calf := cb.child() + calf := cb.child(1) params.Ledger, err = newLogicLedger(calf, aidx) if err != nil { return false, basics.EvalDelta{}, err diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go index fce2491516..df2affb017 100644 --- a/ledger/appcow_test.go +++ b/ledger/appcow_test.go @@ -305,7 +305,7 @@ func TestCowStorage(t *testing.T) { // Make a child if childDepth < maxChildDepth && rand.Float32() < 0.1 { lastParent = cow - cow = cow.child() 
+ cow = cow.child(1) childDepth++ } diff --git a/ledger/cow.go b/ledger/cow.go index c351bb0edc..7f5edcfc2d 100644 --- a/ledger/cow.go +++ b/ledger/cow.go @@ -195,12 +195,12 @@ func (cb *roundCowState) setCompactCertNext(rnd basics.Round) { cb.mods.CompactCertNext = rnd } -func (cb *roundCowState) child() *roundCowState { +func (cb *roundCowState) child(hint int) *roundCowState { return &roundCowState{ lookupParent: cb, commitParent: cb, proto: cb.proto, - mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, 1, cb.mods.CompactCertNext), + mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, hint, cb.mods.CompactCertNext), sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta), } } diff --git a/ledger/cow_test.go b/ledger/cow_test.go index d70e92ab87..307f4f3583 100644 --- a/ledger/cow_test.go +++ b/ledger/cow_test.go @@ -105,7 +105,7 @@ func TestCowBalance(t *testing.T) { c0 := makeRoundCowState(&ml, bookkeeping.BlockHeader{}, 0, 0) checkCow(t, c0, accts0) - c1 := c0.child() + c1 := c0.child(0) checkCow(t, c0, accts0) checkCow(t, c1, accts0) @@ -114,7 +114,7 @@ func TestCowBalance(t *testing.T) { checkCow(t, c0, accts0) checkCow(t, c1, accts1) - c2 := c1.child() + c2 := c1.child(0) checkCow(t, c0, accts0) checkCow(t, c1, accts1) checkCow(t, c2, accts1) diff --git a/ledger/eval.go b/ledger/eval.go index 8432138e9d..398777782c 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -579,7 +579,7 @@ func (eval *BlockEvaluator) TestTransactionGroup(txgroup []transactions.SignedTx return fmt.Errorf("group size %d exceeds maximum %d", len(txgroup), eval.proto.MaxTxGroupSize) } - cow := eval.state.child() + cow := eval.state.child(len(txgroup)) var group transactions.TxGroup for gi, txn := range txgroup { @@ -713,7 +713,7 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit var group transactions.TxGroup var groupTxBytes int - cow := eval.state.child() + cow := eval.state.child(len(txgroup)) // Prepare eval 
params for any ApplicationCall transactions in the group evalParams := eval.prepareEvalParams(txgroup) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 9bff85b7cb..5ed0fdb969 100644 --- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -90,16 +90,19 @@ type AccountDeltas struct { acctsCache map[basics.Address]int } -// MakeStateDelta creates a new instance of StateDelta +// MakeStateDelta creates a new instance of StateDelta. +// hint is amount of transactions for evaluation, 2 * hint is for sender and receiver balance records. +// This does not play well for AssetConfig and ApplicationCall transactions on scale func MakeStateDelta(hdr *bookkeeping.BlockHeader, prevTimestamp int64, hint int, compactCertNext basics.Round) StateDelta { return StateDelta{ Accts: AccountDeltas{ accts: make([]basics.BalanceRecord, 0, hint*2), acctsCache: make(map[basics.Address]int, hint*2), }, - Txids: make(map[transactions.Txid]basics.Round, hint), - Txleases: make(map[Txlease]basics.Round, hint), - Creatables: make(map[basics.CreatableIndex]ModifiedCreatable, hint), + Txids: make(map[transactions.Txid]basics.Round, hint), + Txleases: make(map[Txlease]basics.Round, hint), + // asset or application creation are considered as rare events so do not pre-allocate space for them + Creatables: make(map[basics.CreatableIndex]ModifiedCreatable), Hdr: hdr, PrevTimestamp: prevTimestamp, initialTransactionsCount: hint, From 4686b15eaa0183998534170f624cd4a3f6142f46 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy Date: Fri, 26 Mar 2021 11:36:35 -0400 Subject: [PATCH 142/215] Adjust OptimizeAllocatedMemory * Correct Creatables reallocation * Fix other reallocation conditions for a case when actual length * greater than capacity --- ledger/ledgercore/statedelta.go | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/ledger/ledgercore/statedelta.go b/ledger/ledgercore/statedelta.go index 5ed0fdb969..f2d2e3feb7 100644 
--- a/ledger/ledgercore/statedelta.go +++ b/ledger/ledgercore/statedelta.go @@ -171,14 +171,16 @@ func (ad *AccountDeltas) upsert(br basics.BalanceRecord) { // For each data structure, reallocate if it would save us at least 50MB aggregate func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { // accts takes up 232 bytes per entry, and is saved for 320 rounds - if uint64(2*sd.initialTransactionsCount-len(sd.Accts.accts))*accountArrayEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { + if uint64(cap(sd.Accts.accts)-len(sd.Accts.accts))*accountArrayEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { accts := make([]basics.BalanceRecord, len(sd.Accts.acctsCache)) copy(accts, sd.Accts.accts) sd.Accts.accts = accts } // acctsCache takes up 64 bytes per entry, and is saved for 320 rounds - if uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { + // realloc if original allocation capacity greater than length of data, and space difference is significant + if 2*sd.initialTransactionsCount > len(sd.Accts.acctsCache) && + uint64(2*sd.initialTransactionsCount-len(sd.Accts.acctsCache))*accountMapCacheEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { acctsCache := make(map[basics.Address]int, len(sd.Accts.acctsCache)) for k, v := range sd.Accts.acctsCache { acctsCache[k] = v @@ -187,7 +189,8 @@ func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { } // TxLeases takes up 112 bytes per entry, and is saved for 1000 rounds - if uint64(sd.initialTransactionsCount-len(sd.Txleases))*txleasesEntrySize*proto.MaxTxnLife > stateDeltaTargetOptimizationThreshold { + if sd.initialTransactionsCount > len(sd.Txleases) && + uint64(sd.initialTransactionsCount-len(sd.Txleases))*txleasesEntrySize*proto.MaxTxnLife > stateDeltaTargetOptimizationThreshold { txLeases := 
make(map[Txlease]basics.Round, len(sd.Txleases)) for k, v := range sd.Txleases { txLeases[k] = v @@ -196,7 +199,7 @@ func (sd *StateDelta) OptimizeAllocatedMemory(proto config.ConsensusParams) { } // Creatables takes up 100 bytes per entry, and is saved for 320 rounds - if uint64(sd.initialTransactionsCount-len(sd.Creatables))*creatablesEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { + if uint64(len(sd.Creatables))*creatablesEntrySize*proto.MaxBalLookback > stateDeltaTargetOptimizationThreshold { creatableDeltas := make(map[basics.CreatableIndex]ModifiedCreatable, len(sd.Creatables)) for k, v := range sd.Creatables { creatableDeltas[k] = v From 492e89811a151bc47970023ab0f9f6b5a5f1d415 Mon Sep 17 00:00:00 2001 From: algonautshant <55754073+algonautshant@users.noreply.github.com> Date: Fri, 26 Mar 2021 15:47:17 -0400 Subject: [PATCH 143/215] BlockService redirects when does not have the round (#2002) Block service will redirect the http block request to another http peer if it does not have the round. 
--- catchup/catchpointService.go | 32 ++-- catchup/pref_test.go | 10 +- catchup/service.go | 74 ++++++--- catchup/service_test.go | 16 +- catchup/universalFetcher.go | 7 +- catchup/universalFetcher_test.go | 7 +- config/config.go | 11 ++ config/local_defaults.go | 2 + installer/config.json.example | 2 + network/wsNetwork.go | 2 +- node/node.go | 2 +- rpcs/blockService.go | 129 +++++++++++++--- rpcs/blockService_test.go | 208 ++++++++++++++++++++++++++ rpcs/txService_test.go | 5 + test/testdata/configs/config-v16.json | 2 + 15 files changed, 430 insertions(+), 79 deletions(-) diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index 043471a9a5..6969c35f3d 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -101,12 +101,6 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat net: net, ledger: l, config: cfg, - blocksDownloadPeerSelector: makePeerSelector( - net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, - }), } service.lastBlockHeader, err = l.BlockHdr(l.Latest()) if err != nil { @@ -116,7 +110,7 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat if err != nil { return nil, err } - + service.initDownloadPeerSelector() return service, nil } @@ -138,17 +132,12 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo net: net, ledger: l, config: cfg, - blocksDownloadPeerSelector: makePeerSelector( - net, - []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, - }), } service.lastBlockHeader, err = l.BlockHdr(l.Latest()) if err != nil { return nil, err } + service.initDownloadPeerSelector() return service, nil } @@ -713,3 +702,20 @@ func (cs 
*CatchpointCatchupService) updateBlockRetrievalStatistics(aquiredBlocks cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + aquiredBlocksDelta) cs.stats.VerifiedBlocks = uint64(int64(cs.stats.VerifiedBlocks) + verifiedBlocksDelta) } + +func (cs *CatchpointCatchupService) initDownloadPeerSelector() { + if cs.config.EnableCatchupFromArchiveServers { + cs.blocksDownloadPeerSelector = makePeerSelector( + cs.net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + }) + } else { + cs.blocksDownloadPeerSelector = makePeerSelector( + cs.net, + []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookRelays}, + }) + } +} diff --git a/catchup/pref_test.go b/catchup/pref_test.go index 7337bec783..a3222683c3 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -47,7 +47,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { // Create a network and block service net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(config.GetDefaultLocal(), remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.TestingLog(b), config.GetDefaultLocal(), remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) nodeA.start() @@ -60,11 +60,11 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { for i := 0; i < b.N; i++ { inMem := true - local, err := data.LoadLedger(logging.Base(), b.Name()+"empty"+strconv.Itoa(i), inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) + local, err := data.LoadLedger(logging.TestingLog(b), b.Name()+"empty"+strconv.Itoa(i), inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) require.NoError(b, err) // Make Service - syncer := MakeService(logging.Base(), defaultConfig, net, local, new(mockedAuthenticator), nil) + syncer := 
MakeService(logging.TestingLog(b), defaultConfig, net, local, new(mockedAuthenticator), nil) b.StartTimer() syncer.Start() for w := 0; w < 1000; w++ { @@ -146,10 +146,10 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true - emptyLedger, err = data.LoadLedger(logging.Base(), t.Name()+"empty", inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) + emptyLedger, err = data.LoadLedger(logging.TestingLog(t), t.Name()+"empty", inMem, protocol.ConsensusCurrentVersion, genesisBalances, "", crypto.Digest{}, nil, cfg) require.NoError(t, err) - ledger, err = datatest.FabricateLedger(logging.Base(), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks)) + ledger, err = datatest.FabricateLedger(logging.TestingLog(t), t.Name(), parts, genesisBalances, emptyLedger.LastRound()+basics.Round(numBlocks)) require.NoError(t, err) require.Equal(t, ledger.LastRound(), emptyLedger.LastRound()+basics.Round(numBlocks)) return ledger, emptyLedger, release, genesisBalances diff --git a/catchup/service.go b/catchup/service.go index 2b3a222e78..0674e8440a 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -559,7 +559,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy peer, getPeerErr := peerSelector.GetNextPeer() if getPeerErr != nil { s.log.Debugf("fetchRound: was unable to obtain a peer to retrieve the block from") - s.net.RequestConnectOutgoing(true, s.ctx.Done()) + s.net.RequestConnectOutgoing(true, s.ctx.Done()) continue } @@ -657,34 +657,64 @@ func (s *Service) handleUnsupportedRound(nextUnsupportedRound basics.Round) { func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector { var peerClasses []peerClass - if pipelineFetch { - if s.cfg.NetAddress != "" { // Relay node - peerClasses = []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: 
network.PeersConnectedOut}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}, + if s.cfg.EnableCatchupFromArchiveServers { + if pipelineFetch { + if s.cfg.NetAddress != "" { // Relay node + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersConnectedIn}, + } + } else { + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + } } } else { - peerClasses = []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + if s.cfg.NetAddress != "" { // Relay node + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers}, + } + } else { + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, 
peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}, + } } } } else { - if s.cfg.NetAddress != "" { // Relay node - peerClasses = []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: peerRankInitialFourthPriority, peerClass: network.PeersPhonebookArchivers}, + if pipelineFetch { + if s.cfg.NetAddress != "" { // Relay node + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersConnectedIn}, + } + } else { + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, + } } } else { - peerClasses = []peerClass{ - {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, - {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}, - {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookArchivers}, + if s.cfg.NetAddress != "" { // Relay node + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn}, + {initialRank: peerRankInitialThirdPriority, peerClass: network.PeersPhonebookRelays}, + } + } else { + peerClasses = []peerClass{ + {initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut}, + {initialRank: peerRankInitialSecondPriority, peerClass: 
network.PeersPhonebookRelays}, + } } } } diff --git a/catchup/service_test.go b/catchup/service_test.go index 09afbcedf2..8d641070a2 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -140,7 +140,7 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -173,7 +173,7 @@ func TestPeriodicSync(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -226,7 +226,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -285,7 +285,7 @@ func TestAbruptWrites(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -341,7 +341,7 @@ func 
TestServiceFetchBlocksMultiBlocks(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -394,7 +394,7 @@ func TestServiceFetchBlocksMalformed(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -541,7 +541,7 @@ func helperTestOnSwitchToUnSupportedProtocol( // Create a network and block service net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(config, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), config, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) @@ -729,7 +729,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { // Create a network and block service blockServiceConfig := config.GetDefaultLocal() net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, remote, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, remote, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 844b3a2d00..9d33d1f20f 100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -22,8 +22,6 @@ import ( "errors" "fmt" "net/http" - "path" - "strconv" "time" "github.com/algorand/go-deadlock" @@ -80,7 +78,7 @@ 
func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro } else { return nil, nil, time.Duration(0), fmt.Errorf("fetchBlock: UniversalFetcher only supports HTTPPeer and UnicastPeer") } - downloadDuration = time.Now().Sub(blockDownloadStartTime) + downloadDuration = time.Now().Sub(blockDownloadStartTime) if err != nil { return nil, nil, time.Duration(0), err } @@ -211,7 +209,7 @@ func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data return nil, err } - parsedURL.Path = hf.net.SubstituteGenesisID(path.Join(parsedURL.Path, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(r), 36))) + parsedURL.Path = rpcs.FormatBlockQuery(uint64(r), parsedURL.Path, hf.net) blockURL := parsedURL.String() hf.log.Debugf("block GET %#v peer %#v %T", blockURL, hf.peer, hf.peer) request, err := http.NewRequest("GET", blockURL, nil) @@ -272,4 +270,3 @@ func (hf *HTTPFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data func (hf *HTTPFetcher) address() string { return hf.rootURL } - diff --git a/catchup/universalFetcher_test.go b/catchup/universalFetcher_test.go index f8ca52c792..14180625da 100644 --- a/catchup/universalFetcher_test.go +++ b/catchup/universalFetcher_test.go @@ -34,7 +34,6 @@ import ( func TestUGetBlockWs(t *testing.T) { cfg := config.GetDefaultLocal() - cfg.EnableCatchupFromArchiveServers = true ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { @@ -48,7 +47,7 @@ func TestUGetBlockWs(t *testing.T) { net := &httpTestPeerSource{} up := makeTestUnicastPeer(net, t) - ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID") ls.Start() fetcher := makeUniversalBlockFetcher(logging.TestingLog(t), net, cfg) @@ -76,7 +75,6 @@ func TestUGetBlockWs(t *testing.T) { func TestUGetBlockHttp(t *testing.T) { cfg := config.GetDefaultLocal() - cfg.EnableCatchupFromArchiveServers = true 
ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{}) if err != nil { @@ -86,9 +84,10 @@ func TestUGetBlockHttp(t *testing.T) { blockServiceConfig := config.GetDefaultLocal() blockServiceConfig.EnableBlockService = true + blockServiceConfig.EnableBlockServiceFallbackToArchiver = false net := &httpTestPeerSource{} - ls := rpcs.MakeBlockService(blockServiceConfig, ledger, net, "test genesisID") + ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID") nodeA := basicRPCNode{} nodeA.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls) diff --git a/config/config.go b/config/config.go index 92aa673deb..f9740aab6a 100644 --- a/config/config.go +++ b/config/config.go @@ -377,6 +377,17 @@ type Local struct { // connections that are originating from the local machine. Setting this to "true", allow to create large // local-machine networks that won't trip the incoming connection limit observed by relays. DisableLocalhostConnectionRateLimit bool `version[16]:"true"` + + // BlockServiceCustomFallbackEndpoints is a comma delimited list of endpoints which the block service uses to + // redirect the http requests to in case it does not have the round. If it is not specified, will check + // EnableBlockServiceFallbackToArchiver. + BlockServiceCustomFallbackEndpoints string `version[16]:""` + + // EnableBlockServiceFallbackToArchiver controls whether the block service redirects the http requests to + // an archiver or return StatusNotFound (404) when in does not have the requested round, and + // BlockServiceCustomFallbackEndpoints is empty. + // The archiver is randomly selected, if none is available, will return StatusNotFound (404). + EnableBlockServiceFallbackToArchiver bool `version[16]:"true"` } // Filenames of config files within the configdir (e.g. 
~/.algorand) diff --git a/config/local_defaults.go b/config/local_defaults.go index 4bf1b8f140..f8b3f9ce41 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -25,6 +25,7 @@ var defaultLocal = Local{ AnnounceParticipationKey: true, Archival: false, BaseLoggerDebugLevel: 4, + BlockServiceCustomFallbackEndpoints: "", BroadcastConnectionsLimit: -1, CadaverSizeTarget: 1073741824, CatchpointFileHistoryLength: 365, @@ -47,6 +48,7 @@ var defaultLocal = Local{ EnableAgreementTimeMetrics: false, EnableAssembleStats: false, EnableBlockService: false, + EnableBlockServiceFallbackToArchiver: true, EnableCatchupFromArchiveServers: false, EnableDeveloperAPI: false, EnableGossipBlockService: true, diff --git a/installer/config.json.example b/installer/config.json.example index 39c96b11a6..e7201905da 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -4,6 +4,7 @@ "AnnounceParticipationKey": true, "Archival": false, "BaseLoggerDebugLevel": 4, + "BlockServiceCustomFallbackEndpoints": "", "BroadcastConnectionsLimit": -1, "CadaverSizeTarget": 1073741824, "CatchpointFileHistoryLength": 365, @@ -26,6 +27,7 @@ "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, "EnableBlockService": false, + "EnableBlockServiceFallbackToArchiver": true, "EnableCatchupFromArchiveServers": false, "EnableDeveloperAPI": false, "EnableGossipBlockService": true, diff --git a/network/wsNetwork.go b/network/wsNetwork.go index c939997dc1..38f961e754 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -1785,7 +1785,7 @@ func (wn *WebsocketNetwork) getDNSAddrs(dnsBootstrap string) (relaysAddresses [] } relaysAddresses = nil } - if wn.config.EnableCatchupFromArchiveServers { + if wn.config.EnableCatchupFromArchiveServers || wn.config.EnableBlockServiceFallbackToArchiver { archiverAddresses, err = tools_network.ReadFromSRV("archive", "tcp", dnsBootstrap, wn.config.FallbackDNSResolverAddress, wn.config.DNSSecuritySRVEnforced()) if err != nil 
{ // only log this warning on testnet or devnet diff --git a/node/node.go b/node/node.go index 851afd8e90..57784f487a 100644 --- a/node/node.go +++ b/node/node.go @@ -227,7 +227,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd } } - node.blockService = rpcs.MakeBlockService(cfg, node.ledger, p2pNode, node.genesisID) + node.blockService = rpcs.MakeBlockService(node.log, cfg, node.ledger, p2pNode, node.genesisID) node.ledgerService = rpcs.MakeLedgerService(cfg, node.ledger, p2pNode, node.genesisID) rpcs.RegisterTxService(node.transactionPool, p2pNode, node.genesisID, cfg.TxPoolSize, cfg.TxSyncServeResponseSize) diff --git a/rpcs/blockService.go b/rpcs/blockService.go index fec1fe4021..677446a7fa 100644 --- a/rpcs/blockService.go +++ b/rpcs/blockService.go @@ -20,7 +20,9 @@ import ( "context" "encoding/binary" "net/http" + "path" "strconv" + "strings" "github.com/gorilla/mux" @@ -28,6 +30,7 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -66,6 +69,9 @@ type BlockService struct { net network.GossipNode enableService bool enableServiceOverGossip bool + fallbackEndpoints fallbackEndpoints + enableArchiverFallback bool + log logging.Logger } // EncodedBlockCert defines how GetBlockBytes encodes a block and its certificate @@ -84,8 +90,13 @@ type PreEncodedBlockCert struct { Certificate codec.Raw `codec:"cert"` } +type fallbackEndpoints struct { + endpoints []string + lastUsed int +} + // MakeBlockService creates a BlockService around the provider Ledger and registers it for HTTP callback on the block serving path -func MakeBlockService(config config.Local, ledger *data.Ledger, net network.GossipNode, genesisID string) *BlockService { +func MakeBlockService(log logging.Logger, config 
config.Local, ledger *data.Ledger, net network.GossipNode, genesisID string) *BlockService { service := &BlockService{ ledger: ledger, genesisID: genesisID, @@ -93,6 +104,9 @@ func MakeBlockService(config config.Local, ledger *data.Ledger, net network.Goss net: net, enableService: config.EnableBlockService, enableServiceOverGossip: config.EnableGossipBlockService, + fallbackEndpoints: makeFallbackEndpoints(log, config.BlockServiceCustomFallbackEndpoints), + enableArchiverFallback: config.EnableBlockServiceFallbackToArchiver, + log: log, } if service.enableService { net.RegisterHTTPHandler(BlockServiceBlockPath, service) @@ -129,19 +143,19 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re genesisID, hasGenesisID := pathVars["genesisID"] if hasVersionStr { if versionStr != "1" { - logging.Base().Debug("http block bad version", versionStr) + bs.log.Debug("http block bad version", versionStr) response.WriteHeader(http.StatusBadRequest) return } } if hasGenesisID { if bs.genesisID != genesisID { - logging.Base().Debugf("http block bad genesisID mine=%#v theirs=%#v", bs.genesisID, genesisID) + bs.log.Debugf("http block bad genesisID mine=%#v theirs=%#v", bs.genesisID, genesisID) response.WriteHeader(http.StatusBadRequest) return } } else { - logging.Base().Debug("http block no genesisID") + bs.log.Debug("http block no genesisID") response.WriteHeader(http.StatusBadRequest) return } @@ -150,13 +164,13 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re request.Body = http.MaxBytesReader(response, request.Body, blockServerMaxBodyLength) err := request.ParseForm() if err != nil { - logging.Base().Debug("http block parse form err", err) + bs.log.Debug("http block parse form err", err) response.WriteHeader(http.StatusBadRequest) return } roundStrs, ok := request.Form["b"] if !ok || len(roundStrs) != 1 { - logging.Base().Debug("http block bad block id form arg") + bs.log.Debug("http block bad block id form arg") 
response.WriteHeader(http.StatusBadRequest) return } @@ -165,12 +179,12 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re if ok { if len(versionStrs) == 1 { if versionStrs[0] != "1" { - logging.Base().Debug("http block bad version", versionStr) + bs.log.Debug("http block bad version", versionStr) response.WriteHeader(http.StatusBadRequest) return } } else { - logging.Base().Debug("http block wrong number of v args", len(versionStrs)) + bs.log.Debug("http block wrong number of v args", len(versionStrs)) response.WriteHeader(http.StatusBadRequest) return } @@ -180,7 +194,7 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re } round, err := strconv.ParseUint(roundStr, 36, 64) if err != nil { - logging.Base().Debug("http block round parse fail", roundStr, err) + bs.log.Debug("http block round parse fail", roundStr, err) response.WriteHeader(http.StatusBadRequest) return } @@ -189,12 +203,15 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re switch err.(type) { case ledgercore.ErrNoEntry: // entry cound not be found. - response.Header().Set("Cache-Control", blockResponseMissingBlockCacheControl) - response.WriteHeader(http.StatusNotFound) + ok := bs.redirectRequest(round, response, request) + if !ok { + response.Header().Set("Cache-Control", blockResponseMissingBlockCacheControl) + response.WriteHeader(http.StatusNotFound) + } return default: // unexpected error. 
- logging.Base().Warnf("ServeHTTP : failed to retrieve block %d %v", round, err) + bs.log.Warnf("ServeHTTP : failed to retrieve block %d %v", round, err) response.WriteHeader(http.StatusInternalServerError) return } @@ -206,7 +223,7 @@ func (bs *BlockService) ServeHTTP(response http.ResponseWriter, request *http.Re response.WriteHeader(http.StatusOK) _, err = response.Write(encodedBlockCert) if err != nil { - logging.Base().Warn("http block write failed ", err) + bs.log.Warn("http block write failed ", err) } } @@ -251,14 +268,14 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc topics, err := network.UnmarshallTopics(reqMsg.Data) if err != nil { - logging.Base().Infof("BlockService handleCatchupReq: %s", err.Error()) + bs.log.Infof("BlockService handleCatchupReq: %s", err.Error()) respTopics = network.Topics{ network.MakeTopic(network.ErrorKey, []byte(err.Error()))} return } roundBytes, found := topics.GetValue(RoundKey) if !found { - logging.Base().Infof("BlockService handleCatchupReq: %s", noRoundNumberErrMsg) + bs.log.Infof("BlockService handleCatchupReq: %s", noRoundNumberErrMsg) respTopics = network.Topics{ network.MakeTopic(network.ErrorKey, []byte(noRoundNumberErrMsg))} @@ -266,7 +283,7 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc } requestType, found := topics.GetValue(RequestDataTypeKey) if !found { - logging.Base().Infof("BlockService handleCatchupReq: %s", noDataTypeErrMsg) + bs.log.Infof("BlockService handleCatchupReq: %s", noDataTypeErrMsg) respTopics = network.Topics{ network.MakeTopic(network.ErrorKey, []byte(noDataTypeErrMsg))} @@ -275,23 +292,74 @@ func (bs *BlockService) handleCatchupReq(ctx context.Context, reqMsg network.Inc round, read := binary.Uvarint(roundBytes) if read <= 0 { - logging.Base().Infof("BlockService handleCatchupReq: %s", roundNumberParseErrMsg) + bs.log.Infof("BlockService handleCatchupReq: %s", roundNumberParseErrMsg) respTopics = network.Topics{ 
network.MakeTopic(network.ErrorKey, []byte(roundNumberParseErrMsg))} return } - respTopics = topicBlockBytes(bs.ledger, basics.Round(round), string(requestType)) + respTopics = topicBlockBytes(bs.log, bs.ledger, basics.Round(round), string(requestType)) return } -func topicBlockBytes(dataLedger *data.Ledger, round basics.Round, requestType string) network.Topics { +// redirectRequest redirects the request to the next round robin fallback endpoing if available, otherwise, +// if EnableBlockServiceFallbackToArchiver is enabled, redirects to a random archiver. +func (bs *BlockService) redirectRequest(round uint64, response http.ResponseWriter, request *http.Request) (ok bool) { + peerAddress := bs.getNextCustomFallbackEndpoint() + if peerAddress == "" && bs.enableArchiverFallback { + peerAddress = bs.getRandomArchiver() + } + if peerAddress == "" { + return false + } + + parsedURL, err := network.ParseHostOrURL(peerAddress) + if err != nil { + bs.log.Debugf("redirectRequest: %s", err.Error()) + return false + } + parsedURL.Path = FormatBlockQuery(round, parsedURL.Path, bs.net) + http.Redirect(response, request, parsedURL.String(), http.StatusTemporaryRedirect) + bs.log.Debugf("redirectRequest: redirected block request to %s", parsedURL.String()) + return true +} + +// getNextCustomFallbackEndpoint returns the next custorm fallback endpoint in RR ordering +func (bs *BlockService) getNextCustomFallbackEndpoint() (endpointAddress string) { + if len(bs.fallbackEndpoints.endpoints) == 0 { + return + } + endpointAddress = bs.fallbackEndpoints.endpoints[bs.fallbackEndpoints.lastUsed] + bs.fallbackEndpoints.lastUsed = (bs.fallbackEndpoints.lastUsed + 1) % len(bs.fallbackEndpoints.endpoints) + return +} + +// getRandomArchiver returns a random archiver address +func (bs *BlockService) getRandomArchiver() (endpointAddress string) { + peers := bs.net.GetPeers(network.PeersPhonebookArchivers) + httpPeers := make([]network.HTTPPeer, 0, len(peers)) + + for _, peer := range peers { 
+ httpPeer, validHTTPPeer := peer.(network.HTTPPeer) + if validHTTPPeer { + httpPeers = append(httpPeers, httpPeer) + } + } + if len(httpPeers) == 0 { + return + } + randIndex := crypto.RandUint64() % uint64(len(httpPeers)) + endpointAddress = httpPeers[randIndex].GetAddress() + return +} + +func topicBlockBytes(log logging.Logger, dataLedger *data.Ledger, round basics.Round, requestType string) network.Topics { blk, cert, err := dataLedger.EncodedBlockCert(round) if err != nil { switch err.(type) { case ledgercore.ErrNoEntry: default: - logging.Base().Infof("BlockService topicBlockBytes: %s", err) + log.Infof("BlockService topicBlockBytes: %s", err) } return network.Topics{ network.MakeTopic(network.ErrorKey, []byte(blockNotAvailabeErrMsg))} @@ -326,3 +394,24 @@ func RawBlockBytes(l *data.Ledger, round basics.Round) ([]byte, error) { Certificate: cert, }), nil } + +// FormatBlockQuery formats a block request query for the given network and round number +func FormatBlockQuery(round uint64, parsedURL string, net network.GossipNode) string { + return net.SubstituteGenesisID(path.Join(parsedURL, "/v1/{genesisID}/block/"+strconv.FormatUint(uint64(round), 36))) +} + +func makeFallbackEndpoints(log logging.Logger, customFallbackEndpoints string) (fe fallbackEndpoints) { + if customFallbackEndpoints == "" { + return + } + endpoints := strings.Split(customFallbackEndpoints, ",") + for _, ep := range endpoints { + parsed, err := network.ParseHostOrURL(ep) + if err != nil { + log.Warnf("makeFallbackEndpoints: error parsing %s %s", ep, err.Error()) + continue + } + fe.endpoints = append(fe.endpoints, parsed.String()) + } + return +} diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go index 042988eff9..343e5571cc 100644 --- a/rpcs/blockService_test.go +++ b/rpcs/blockService_test.go @@ -18,10 +18,19 @@ package rpcs import ( "context" + "fmt" + "net/http" "testing" + "time" "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/agreement" 
+ "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" ) @@ -56,6 +65,7 @@ func TestHandleCatchupReqNegative(t *testing.T) { } ls := BlockService{ ledger: nil, + log: logging.TestingLog(t), } // case where topics is nil @@ -99,3 +109,201 @@ func TestHandleCatchupReqNegative(t *testing.T) { require.Equal(t, true, found) require.Equal(t, roundNumberParseErrMsg, string(val)) } + +// TestRedirectBasic tests the case when the block service redirects the request to elsewhere +func TestRedirectFallbackArchiver(t *testing.T) { + log := logging.TestingLog(t) + + ledger1 := makeLedger(t, "l1") + defer ledger1.Close() + ledger2 := makeLedger(t, "l2") + defer ledger2.Close() + addBlock(t, ledger1) + addBlock(t, ledger2) + addBlock(t, ledger2) + + net1 := &httpTestPeerSource{} + net2 := &httpTestPeerSource{} + + config := config.GetDefaultLocal() + bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}") + bs2 := MakeBlockService(log, config, ledger2, net2, "{genesisID}") + + nodeA := &basicRPCNode{} + nodeB := &basicRPCNode{} + + nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) + nodeA.start() + defer nodeA.stop() + + nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) + nodeB.start() + defer nodeB.stop() + + net1.addPeer(nodeB.rootURL()) + + parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + require.NoError(t, err) + + client := http.Client{} + + ctx := context.Background() + parsedURL.Path = FormatBlockQuery(uint64(2), parsedURL.Path, net1) + blockURL := parsedURL.String() + request, err := http.NewRequest("GET", blockURL, nil) + require.NoError(t, err) + requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second) + defer 
requestCancel() + request = request.WithContext(requestCtx) + network.SetUserAgentHeader(request.Header) + response, err := client.Do(request) + require.NoError(t, err) + + require.Equal(t, http.StatusOK, response.StatusCode) +} + +// TestRedirectBasic tests the case when the block service redirects the request to elsewhere +func TestRedirectFallbackEndpoints(t *testing.T) { + log := logging.TestingLog(t) + + ledger1 := makeLedger(t, "l1") + defer ledger1.Close() + ledger2 := makeLedger(t, "l2") + defer ledger2.Close() + addBlock(t, ledger2) + + net1 := &httpTestPeerSource{} + net2 := &httpTestPeerSource{} + + nodeA := &basicRPCNode{} + nodeB := &basicRPCNode{} + nodeA.start() + defer nodeA.stop() + nodeB.start() + defer nodeB.stop() + + config := config.GetDefaultLocal() + // Set the first to a bad address, the second to self, and the third to the one that has the block. + // If RR is right, should succeed. + config.BlockServiceCustomFallbackEndpoints = fmt.Sprintf("://badaddress,%s,%s", nodeA.rootURL(), nodeB.rootURL()) + bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}") + bs2 := MakeBlockService(log, config, ledger2, net2, "{genesisID}") + + nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) + nodeB.RegisterHTTPHandler(BlockServiceBlockPath, bs2) + + parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + require.NoError(t, err) + + client := http.Client{} + + ctx := context.Background() + parsedURL.Path = FormatBlockQuery(uint64(1), parsedURL.Path, net1) + blockURL := parsedURL.String() + request, err := http.NewRequest("GET", blockURL, nil) + require.NoError(t, err) + requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second) + defer requestCancel() + request = request.WithContext(requestCtx) + network.SetUserAgentHeader(request.Header) + response, err := client.Do(request) + require.NoError(t, err) + + require.Equal(t, http.StatusOK, response.StatusCode) +} + +// 
TestRedirectExceptions tests exception cases: +// - the case when the peer is not a valid http peer +// - the case when the block service keeps redirecting and cannot get a block +func TestRedirectExceptions(t *testing.T) { + log := logging.TestingLog(t) + + ledger1 := makeLedger(t, "l1") + defer ledger1.Close() + addBlock(t, ledger1) + + net1 := &httpTestPeerSource{} + + config := config.GetDefaultLocal() + bs1 := MakeBlockService(log, config, ledger1, net1, "{genesisID}") + + nodeA := &basicRPCNode{} + + nodeA.RegisterHTTPHandler(BlockServiceBlockPath, bs1) + nodeA.start() + defer nodeA.stop() + + net1.peers = append(net1.peers, "invalidPeer") + + parsedURL, err := network.ParseHostOrURL(nodeA.rootURL()) + require.NoError(t, err) + + client := http.Client{} + + ctx := context.Background() + parsedURL.Path = FormatBlockQuery(uint64(2), parsedURL.Path, net1) + blockURL := parsedURL.String() + request, err := http.NewRequest("GET", blockURL, nil) + require.NoError(t, err) + requestCtx, requestCancel := context.WithTimeout(ctx, time.Duration(config.CatchupHTTPBlockFetchTimeoutSec)*time.Second) + defer requestCancel() + request = request.WithContext(requestCtx) + network.SetUserAgentHeader(request.Header) + + response, err := client.Do(request) + require.NoError(t, err) + require.Equal(t, response.StatusCode, http.StatusNotFound) + + net1.addPeer(nodeA.rootURL()) + _, err = client.Do(request) + require.Error(t, err) + require.Contains(t, err.Error(), "stopped after 10 redirects") +} + +var poolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} +var sinkAddr = basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} + +func makeLedger(t *testing.T, namePostfix 
string) *data.Ledger { + proto := config.Consensus[protocol.ConsensusCurrentVersion] + genesis := make(map[basics.Address]basics.AccountData) + genesis[sinkAddr] = basics.AccountData{ + Status: basics.Online, + MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000}, + } + genesis[poolAddr] = basics.AccountData{ + Status: basics.Online, + MicroAlgos: basics.MicroAlgos{Raw: proto.MinBalance * 2000000}, + } + + log := logging.TestingLog(t) + genBal := data.MakeGenesisBalances(genesis, sinkAddr, poolAddr) + genHash := crypto.Digest{0x42} + cfg := config.GetDefaultLocal() + const inMem = true + + ledger, err := data.LoadLedger( + log, t.Name()+namePostfix, inMem, protocol.ConsensusCurrentVersion, genBal, "", genHash, + nil, cfg, + ) + require.NoError(t, err) + return ledger +} + +func addBlock(t *testing.T, ledger *data.Ledger) { + blk, err := ledger.Block(ledger.LastRound()) + require.NoError(t, err) + blk.BlockHeader.Round++ + blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000) + blk.TxnRoot, err = blk.PaysetCommit() + require.NoError(t, err) + + var cert agreement.Certificate + cert.Proposal.BlockDigest = blk.Digest() + + err = ledger.AddBlock(blk, cert) + require.NoError(t, err) + + hdr, err := ledger.BlockHdr(blk.BlockHeader.Round) + require.NoError(t, err) + require.Equal(t, blk.BlockHeader, hdr) +} diff --git a/rpcs/txService_test.go b/rpcs/txService_test.go index 8e984bf488..b43ea7f911 100644 --- a/rpcs/txService_test.go +++ b/rpcs/txService_test.go @@ -51,6 +51,11 @@ func (s *httpTestPeerSource) GetPeers(options ...network.PeerOption) []network.P return s.peers } +func (s *httpTestPeerSource) addPeer(rootURL string) { + peer := testHTTPPeer(rootURL) + s.peers = append(s.peers, &peer) +} + // implement network.HTTPPeer type testHTTPPeer string diff --git a/test/testdata/configs/config-v16.json b/test/testdata/configs/config-v16.json index 39c96b11a6..e7201905da 100644 --- a/test/testdata/configs/config-v16.json +++ 
b/test/testdata/configs/config-v16.json @@ -4,6 +4,7 @@ "AnnounceParticipationKey": true, "Archival": false, "BaseLoggerDebugLevel": 4, + "BlockServiceCustomFallbackEndpoints": "", "BroadcastConnectionsLimit": -1, "CadaverSizeTarget": 1073741824, "CatchpointFileHistoryLength": 365, @@ -26,6 +27,7 @@ "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, "EnableBlockService": false, + "EnableBlockServiceFallbackToArchiver": true, "EnableCatchupFromArchiveServers": false, "EnableDeveloperAPI": false, "EnableGossipBlockService": true, From 253946dec292fe24d46c02d0b45b0e63fae6ddd4 Mon Sep 17 00:00:00 2001 From: John Lee Date: Fri, 26 Mar 2021 16:14:20 -0400 Subject: [PATCH 144/215] Cache docker images in Travis and re-load them (#2012) --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 6912858958..d4da7339b0 100644 --- a/.travis.yml +++ b/.travis.yml @@ -165,6 +165,7 @@ jobs: cache: directories: - crypto/lib + - $HOME/docker_cache before_install: - |- @@ -189,6 +190,7 @@ before_install: export MAKE=mingw32-make # so that Autotools can find it ;; esac + docker load -i $HOME/docker_cache/images.tar || true before_cache: - |- @@ -198,6 +200,7 @@ before_cache: $msys2 pacman --sync --clean --noconfirm ;; esac + docker save -o $HOME/docker_cache/images.tar $(docker images -a -q) addons: apt: From 3a4d94b5a4619f976212e3874401abcfecc01142 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 26 Mar 2021 20:43:31 -0400 Subject: [PATCH 145/215] fix typo (#2021) The word received was misspelled multiple times in our codebase as recieved. This PR replaces all the typos with it's correct spelling. 
--- agreement/demux.go | 2 +- network/connPerfMon.go | 2 +- network/wsNetwork_test.go | 2 +- test/commandandcontrol/cc_agent/component/agent.go | 2 +- test/commandandcontrol/cc_service/main.go | 4 ++-- util/metrics/counter_test.go | 6 +++--- util/metrics/gauge_test.go | 2 +- util/metrics/segment_test.go | 2 +- 8 files changed, 11 insertions(+), 11 deletions(-) diff --git a/agreement/demux.go b/agreement/demux.go index ebad85a9c2..a65e5552c7 100644 --- a/agreement/demux.go +++ b/agreement/demux.go @@ -241,7 +241,7 @@ func (d *demux) next(s *Service, deadline time.Duration, fastDeadline time.Durat fastPartitionRecoveryEnabled := false if proto, err := d.ledger.ConsensusVersion(ParamsRound(currentRound)); err != nil { logging.Base().Warnf("demux: could not get consensus parameters for round %d: %v", ParamsRound(currentRound), err) - // this might happen during catchup, since the Ledger.Wait fires as soon as a new block is recieved by the ledger, which could be + // this might happen during catchup, since the Ledger.Wait fires as soon as a new block is received by the ledger, which could be // far before it's being committed. In these cases, it should be safe to default to the current consensus version. On subsequent // iterations, it will get "corrected" since the ledger would finish flushing the blocks to disk. fastPartitionRecoveryEnabled = config.Consensus[protocol.ConsensusCurrentVersion].FastPartitionRecovery diff --git a/network/connPerfMon.go b/network/connPerfMon.go index 4a1355056f..79a1bf5642 100644 --- a/network/connPerfMon.go +++ b/network/connPerfMon.go @@ -233,7 +233,7 @@ func (pm *connectionPerformanceMonitor) notifyPresync(msg *IncomingMessage) { return } pm.lastIncomingMsgTime = msg.Received - // otherwise, once we recieved a message from each of the peers, move to the sync stage. + // otherwise, once we received a message from each of the peers, move to the sync stage. 
pm.advanceStage(pmStageSync, msg.Received) } diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 78b2f1471c..1e37d06619 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -771,7 +771,7 @@ func TestDupFilter(t *testing.T) { waitReady(t, netC, readyTimeout.C) t.Log("c ready") - // TODO: this test has two halves that exercise inbound de-dup and outbound non-send due to recieved hash. But it doesn't properly _test_ them as it doesn't measure _why_ it receives each message exactly once. The second half below could actualy be because of the same inbound de-dup as this first half. You can see the actions of either in metrics. + // TODO: this test has two halves that exercise inbound de-dup and outbound non-send due to received hash. But it doesn't properly _test_ them as it doesn't measure _why_ it receives each message exactly once. The second half below could actualy be because of the same inbound de-dup as this first half. You can see the actions of either in metrics. // algod_network_duplicate_message_received_total{} 2 // algod_outgoing_network_message_filtered_out_total{} 2 // Maybe we should just .Set(0) those counters and use them in this test? 
diff --git a/test/commandandcontrol/cc_agent/component/agent.go b/test/commandandcontrol/cc_agent/component/agent.go index 64143992a9..f467c93035 100644 --- a/test/commandandcontrol/cc_agent/component/agent.go +++ b/test/commandandcontrol/cc_agent/component/agent.go @@ -138,7 +138,7 @@ func (status CommandStatus) String() string { // ProcessRequest processes the command received via the CC Service func (agent *Agent) ProcessRequest(managementServiceRequest lib.CCServiceRequest) (err error) { - log.Infof("recieved command for %s\n", managementServiceRequest.Component) + log.Infof("received command for %s\n", managementServiceRequest.Component) err = agent.ServiceConnection.WriteMessage(websocket.TextMessage, []byte(fmt.Sprintf("received request %+v ", managementServiceRequest))) if err != nil { log.Errorf("problem sending ack to client , %v", err) diff --git a/test/commandandcontrol/cc_service/main.go b/test/commandandcontrol/cc_service/main.go index 06e3ac4226..e42c16933d 100644 --- a/test/commandandcontrol/cc_service/main.go +++ b/test/commandandcontrol/cc_service/main.go @@ -103,11 +103,11 @@ func monitorAgent(ws *websocket.Conn) { } switch messageType { case websocket.TextMessage: - log.Infof("recieved text from agent: %s", message) + log.Infof("received text from agent: %s", message) clientBroadcast <- message break default: - log.Infof("recieved other from agent: %s", message) + log.Infof("received other from agent: %s", message) break } } diff --git a/util/metrics/counter_test.go b/util/metrics/counter_test.go index 473ac10481..a6c5b3ead5 100644 --- a/util/metrics/counter_test.go +++ b/util/metrics/counter_test.go @@ -53,7 +53,7 @@ func TestMetricCounter(t *testing.T) { // wait half-a cycle time.Sleep(test.sampleRate / 2) } - // wait two reporting cycles to ensure we recieved all the messages. + // wait two reporting cycles to ensure we received all the messages. 
time.Sleep(test.sampleRate * 2) metricService.Shutdown() @@ -98,7 +98,7 @@ func TestMetricCounterFastInts(t *testing.T) { time.Sleep(test.sampleRate / 2) } counter.AddUint64(2, nil) - // wait two reporting cycles to ensure we recieved all the messages. + // wait two reporting cycles to ensure we received all the messages. time.Sleep(test.sampleRate * 2) metricService.Shutdown() @@ -145,7 +145,7 @@ func TestMetricCounterMixed(t *testing.T) { time.Sleep(test.sampleRate / 2) } counter.AddUint64(2, nil) - // wait two reporting cycles to ensure we recieved all the messages. + // wait two reporting cycles to ensure we received all the messages. time.Sleep(test.sampleRate * 2) metricService.Shutdown() diff --git a/util/metrics/gauge_test.go b/util/metrics/gauge_test.go index 47de020bd5..e2262ce7b3 100644 --- a/util/metrics/gauge_test.go +++ b/util/metrics/gauge_test.go @@ -54,7 +54,7 @@ func TestMetricGauge(t *testing.T) { time.Sleep(test.sampleRate / 2) } - // wait two reporting cycles to ensure we recieved all the messages. + // wait two reporting cycles to ensure we received all the messages. time.Sleep(test.sampleRate * 2) metricService.Shutdown() diff --git a/util/metrics/segment_test.go b/util/metrics/segment_test.go index 0d12269043..3bd07eaa9b 100644 --- a/util/metrics/segment_test.go +++ b/util/metrics/segment_test.go @@ -57,7 +57,7 @@ func TestMetricSegment(t *testing.T) { } segmentTest() segmentTest() - // wait two reporting cycles to ensure we recieved all the messages. + // wait two reporting cycles to ensure we received all the messages. time.Sleep(test.sampleRate * 2) metricService.Shutdown() From f1e03f5d07595c708e4dba6b21989e96cd87d016 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 29 Mar 2021 10:23:53 -0400 Subject: [PATCH 146/215] Better error testing. (#2025) Improve TestCompactCerts e2e test error handling. 
--- test/e2e-go/features/compactcert/compactcert_test.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go index e36fe36a56..24b821b798 100644 --- a/test/e2e-go/features/compactcert/compactcert_test.go +++ b/test/e2e-go/features/compactcert/compactcert_test.go @@ -80,7 +80,9 @@ func TestCompactCerts(t *testing.T) { _, err = node0Client.SendPaymentFromUnencryptedWallet(node0Account, node1Account, minTxnFee, rnd, nil) r.NoError(err) - fixture.WaitForRound(rnd, 30*time.Second) + err = fixture.WaitForRound(rnd, 30*time.Second) + r.NoError(err) + blk, err := libgoal.Block(rnd) r.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd) From 85f7f5ce5e0ef5d52a891ee2789431178200b18f Mon Sep 17 00:00:00 2001 From: algonautshant <55754073+algonautshant@users.noreply.github.com> Date: Mon, 29 Mar 2021 22:43:55 -0400 Subject: [PATCH 147/215] clear unused test code (#2026) Remove testing tools which are no longer used & fix bug in catchup unit test. 
--- catchup/fetcher_test.go | 129 ---------------------------------------- catchup/service_test.go | 5 +- 2 files changed, 4 insertions(+), 130 deletions(-) diff --git a/catchup/fetcher_test.go b/catchup/fetcher_test.go index 7140ca0848..fd48d0902d 100644 --- a/catchup/fetcher_test.go +++ b/catchup/fetcher_test.go @@ -18,14 +18,11 @@ package catchup import ( "context" - "errors" "net" "net/http" - "net/rpc" "net/url" "strings" "testing" - "time" "github.com/gorilla/mux" "github.com/stretchr/testify/require" @@ -41,134 +38,8 @@ import ( "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/rpcs" - "github.com/algorand/go-algorand/util/bloom" ) -type mockRunner struct { - ran bool - done chan *rpc.Call - failWithNil bool - failWithError bool - txgroups [][]transactions.SignedTxn -} - -type mockRPCClient struct { - client *mockRunner - closed bool - rootURL string - log logging.Logger -} - -func (client *mockRPCClient) Close() error { - client.closed = true - return nil -} - -func (client *mockRPCClient) Address() string { - return "mock.address." 
-} -func (client *mockRPCClient) Sync(ctx context.Context, bloom *bloom.Filter) (txgroups [][]transactions.SignedTxn, err error) { - client.log.Info("MockRPCClient.Sync") - select { - case <-ctx.Done(): - return nil, errors.New("cancelled") - default: - } - if client.client.failWithNil { - return nil, errors.New("old failWithNil") - } - if client.client.failWithError { - return nil, errors.New("failing call") - } - return client.client.txgroups, nil -} -func (client *mockRPCClient) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { - return nil, nil -} - -// network.HTTPPeer interface -func (client *mockRPCClient) GetAddress() string { - return client.rootURL -} -func (client *mockRPCClient) GetHTTPClient() *http.Client { - return nil -} - -type mockClientAggregator struct { - mocks.MockNetwork - peers []network.Peer -} - -func (mca *mockClientAggregator) GetPeers(options ...network.PeerOption) []network.Peer { - return mca.peers -} - -const numberOfPeers = 10 - -func makeMockClientAggregator(t *testing.T, failWithNil bool, failWithError bool) *mockClientAggregator { - clients := make([]network.Peer, 0) - for i := 0; i < numberOfPeers; i++ { - runner := mockRunner{failWithNil: failWithNil, failWithError: failWithError, done: make(chan *rpc.Call)} - clients = append(clients, &mockRPCClient{client: &runner, log: logging.TestingLog(t)}) - } - t.Logf("len(mca.clients) = %d", len(clients)) - return &mockClientAggregator{peers: clients} -} - -type dummyFetcher struct { - failWithNil bool - failWithError bool - fetchTimeout time.Duration -} - -// FetcherClient interface -func (df *dummyFetcher) getBlockBytes(ctx context.Context, r basics.Round) (data []byte, err error) { - if df.failWithNil { - return nil, nil - } - if df.failWithError { - return nil, errors.New("failing call") - } - - timer := time.NewTimer(df.fetchTimeout) - defer timer.Stop() - - // Fill in the dummy response with the correct round - dummyBlock := rpcs.EncodedBlockCert{ - 
Block: bookkeeping.Block{ - BlockHeader: bookkeeping.BlockHeader{ - Round: r, - }, - }, - Certificate: agreement.Certificate{ - Round: r, - }, - } - - encodedData := protocol.Encode(&dummyBlock) - - select { - case <-timer.C: - case <-ctx.Done(): - return nil, ctx.Err() - } - - return encodedData, nil -} - -// FetcherClient interface -func (df *dummyFetcher) Address() string { - //logging.Base().Debug("dummyFetcher Address") - return "dummyFetcher address" -} - -// FetcherClient interface -func (df *dummyFetcher) Close() error { - //logging.Base().Debug("dummyFetcher Close") - return nil -} - func buildTestLedger(t *testing.T, blk bookkeeping.Block) (ledger *data.Ledger, next basics.Round, b bookkeeping.Block, err error) { var user basics.Address user[0] = 123 diff --git a/catchup/service_test.go b/catchup/service_test.go index 8d641070a2..6ed0d814fe 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -262,6 +262,9 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { require.Equal(t, *block, localBlock) } +// TestAbruptWrites emulates the fact that the agreement can also generate new rounds +// When caught up, and the agreement service is taking the lead, the sync() stops and +// yields to the agreement. 
Agreement is emulated by the go func() loop in the test func TestAbruptWrites(t *testing.T) { numberOfBlocks := 100 @@ -299,7 +302,6 @@ func TestAbruptWrites(t *testing.T) { var wg sync.WaitGroup wg.Add(1) - defer wg.Wait() go func() { defer wg.Done() for i := basics.Round(lastRound + 1); i <= basics.Round(numberOfBlocks); i++ { @@ -317,6 +319,7 @@ func TestAbruptWrites(t *testing.T) { s.testStart() s.sync() + wg.Wait() require.Equal(t, remote.LastRound(), local.LastRound()) } From abaaa80f460e389131223daeee2225bad032f254 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 31 Mar 2021 12:05:23 -0400 Subject: [PATCH 148/215] fix ::AlgorandGoal::Abort (#2035) testing: fix node shutdown on error --- test/e2e-go/cli/goal/expect/goalExpectCommon.exp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index d8fe5d22c8..e7b1df3304 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -55,7 +55,7 @@ proc ::AlgorandGoal::Abort { ERROR } { if { [info exists ::GLOBAL_TEST_ALGO_DIR] } { puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR" - ::AlgorandGoal::StopNode $::GLOBAL_TEST_ROOT_DIR + ::AlgorandGoal::StopNode $::GLOBAL_TEST_ALGO_DIR } exit 1 From 5b441e37cde66c169752c0c35616ce3d09f93706 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Wed, 31 Mar 2021 15:00:42 -0400 Subject: [PATCH 149/215] Add comments to disassembly of constant load lines to show constant (#1970) Add comments to disassembly of constant load lines to show constant --- data/transactions/logic/assembler.go | 286 +++++++++++++--------- data/transactions/logic/assembler_test.go | 112 ++++++--- data/transactions/logic/eval.go | 27 +- data/transactions/logic/opcodes.go | 20 +- 4 files changed, 289 insertions(+), 156 deletions(-) diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go index 
746c5d941b..5cf8860ef4 100644 --- a/data/transactions/logic/assembler.go +++ b/data/transactions/logic/assembler.go @@ -1297,14 +1297,19 @@ func AssembleStringWithVersion(text string, version uint64) (*OpStream, error) { } type disassembleState struct { - program []byte - pc int - out io.Writer - labelCount int - pendingLabels map[int]string + program []byte + pc int + out io.Writer + + numericTargets bool + labelCount int + pendingLabels map[int]string nextpc int err error + + intc []uint64 + bytec [][]byte } func (dis *disassembleState) putLabel(label string, target int) { @@ -1321,29 +1326,22 @@ func (dis *disassembleState) outputLabelIfNeeded() (err error) { return } -type disassembleFunc func(dis *disassembleState, spec *OpSpec) +type disassembleFunc func(dis *disassembleState, spec *OpSpec) (string, error) // Basic disasemble, and extra bytes of opcode are decoded as bytes integers. -func disDefault(dis *disassembleState, spec *OpSpec) { +func disDefault(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + spec.Details.Size - 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + spec.Details.Size - _, dis.err = fmt.Fprintf(dis.out, "%s", spec.Name) - if dis.err != nil { - return - } + out := spec.Name for s := 1; s < spec.Details.Size; s++ { b := uint(dis.program[dis.pc+s]) - _, dis.err = fmt.Fprintf(dis.out, " %d", b) - if dis.err != nil { - return - } + out += fmt.Sprintf(" %d", b) } - _, dis.err = fmt.Fprintf(dis.out, "\n") + return out, nil } var errShortIntcblock = errors.New("intcblock ran past end of program") @@ -1490,76 +1488,149 @@ func checkByteConstBlock(cx *evalContext) int { return 1 } -func disIntcblock(dis *disassembleState, spec *OpSpec) { - var intc []uint64 - intc, dis.nextpc, dis.err = 
parseIntcblock(dis.program, dis.pc) - if dis.err != nil { - return - } - _, dis.err = fmt.Fprintf(dis.out, "intcblock") - if dis.err != nil { - return +func disIntcblock(dis *disassembleState, spec *OpSpec) (string, error) { + intc, nextpc, err := parseIntcblock(dis.program, dis.pc) + if err != nil { + return "", err } + dis.nextpc = nextpc + out := spec.Name for _, iv := range intc { - _, dis.err = fmt.Fprintf(dis.out, " %d", iv) - if dis.err != nil { - return - } + dis.intc = append(dis.intc, iv) + out += fmt.Sprintf(" %d", iv) } - _, dis.err = dis.out.Write([]byte("\n")) + return out, nil } -func disBytecblock(dis *disassembleState, spec *OpSpec) { - var bytec [][]byte - bytec, dis.nextpc, dis.err = parseBytecBlock(dis.program, dis.pc) - if dis.err != nil { - return +func disIntc(dis *disassembleState, spec *OpSpec) (string, error) { + lastIdx := dis.pc + spec.Details.Size - 1 + if len(dis.program) <= lastIdx { + missing := lastIdx - len(dis.program) + 1 + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } - _, dis.err = fmt.Fprintf(dis.out, "bytecblock") - if dis.err != nil { - return + dis.nextpc = dis.pc + spec.Details.Size + var suffix string + var b int + switch spec.Opcode { + case 0x22: + suffix = "_0" + b = 0 + case 0x23: + suffix = "_1" + b = 1 + case 0x24: + suffix = "_2" + b = 2 + case 0x25: + suffix = "_3" + b = 3 + case 0x21: + b = int(dis.program[dis.pc+1]) + suffix = fmt.Sprintf(" %d", b) + default: + return "", fmt.Errorf("disIntc on %v", spec) } + if b < len(dis.intc) { + return fmt.Sprintf("intc%s // %d", suffix, dis.intc[b]), nil + } + return fmt.Sprintf("intc%s", suffix), nil +} + +func disBytecblock(dis *disassembleState, spec *OpSpec) (string, error) { + bytec, nextpc, err := parseBytecBlock(dis.program, dis.pc) + if err != nil { + return "", err + } + dis.nextpc = nextpc + out := spec.Name for _, bv := range bytec { - _, dis.err = fmt.Fprintf(dis.out, " 0x%s", hex.EncodeToString(bv)) - if dis.err != 
nil { - return + dis.bytec = append(dis.bytec, bv) + out += fmt.Sprintf(" 0x%s", hex.EncodeToString(bv)) + } + return out, nil +} + +func allPrintableASCII(bytes []byte) bool { + for _, b := range bytes { + if b < 32 || b > 126 { + return false } } - _, dis.err = dis.out.Write([]byte("\n")) + return true } +func guessByteFormat(bytes []byte) string { + var short basics.Address -func disPushInt(dis *disassembleState, spec *OpSpec) { + if len(bytes) == len(short) { + copy(short[:], bytes[:]) + return fmt.Sprintf("addr %s", short.String()) + } + if allPrintableASCII(bytes) { + return fmt.Sprintf("\"%s\"", string(bytes)) + } + return "0x" + hex.EncodeToString(bytes) +} + +func disBytec(dis *disassembleState, spec *OpSpec) (string, error) { + lastIdx := dis.pc + spec.Details.Size - 1 + if len(dis.program) <= lastIdx { + missing := lastIdx - len(dis.program) + 1 + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) + } + dis.nextpc = dis.pc + spec.Details.Size + var suffix string + var b int + switch spec.Opcode { + case 0x28: + suffix = "_0" + b = 0 + case 0x29: + suffix = "_1" + b = 1 + case 0x2a: + suffix = "_2" + b = 2 + case 0x2b: + suffix = "_3" + b = 3 + case 0x27: + b = int(dis.program[dis.pc+1]) + suffix = fmt.Sprintf(" %d", b) + } + if b < len(dis.bytec) { + return fmt.Sprintf("bytec%s // %s", suffix, guessByteFormat(dis.bytec[b])), nil + } + return fmt.Sprintf("bytec%s", suffix), nil +} + +func disPushInt(dis *disassembleState, spec *OpSpec) (string, error) { pos := dis.pc + 1 val, bytesUsed := binary.Uvarint(dis.program[pos:]) if bytesUsed <= 0 { - dis.err = fmt.Errorf("could not decode int at pc=%d", pos) - return + return "", fmt.Errorf("could not decode int at pc=%d", pos) } - pos += bytesUsed - _, dis.err = fmt.Fprintf(dis.out, "%s %d\n", spec.Name, val) - dis.nextpc = pos + dis.nextpc = pos + bytesUsed + return fmt.Sprintf("%s %d", spec.Name, val), nil } func checkPushInt(cx *evalContext) int { opPushInt(cx) return 1 } 
-func disPushBytes(dis *disassembleState, spec *OpSpec) { +func disPushBytes(dis *disassembleState, spec *OpSpec) (string, error) { pos := dis.pc + 1 length, bytesUsed := binary.Uvarint(dis.program[pos:]) if bytesUsed <= 0 { - dis.err = fmt.Errorf("could not decode bytes length at pc=%d", pos) - return + return "", fmt.Errorf("could not decode bytes length at pc=%d", pos) } pos += bytesUsed end := uint64(pos) + length if end > uint64(len(dis.program)) || end < uint64(pos) { - dis.err = fmt.Errorf("pushbytes too long %d %d", end, pos) - return + return "", fmt.Errorf("pushbytes too long %d %d", end, pos) } bytes := dis.program[pos:end] - _, dis.err = fmt.Fprintf(dis.out, "%s 0x%s\n", spec.Name, hex.EncodeToString(bytes)) dis.nextpc = int(end) + return fmt.Sprintf("%s 0x%s", spec.Name, hex.EncodeToString(bytes)), nil } func checkPushBytes(cx *evalContext) int { opPushBytes(cx) @@ -1567,141 +1638,132 @@ func checkPushBytes(cx *evalContext) int { } // This is also used to disassemble gtxns -func disTxn(dis *disassembleState, spec *OpSpec) { +func disTxn(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 2 txarg := dis.program[dis.pc+1] if int(txarg) >= len(TxnFieldNames) { - dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) - return + return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) } - _, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, TxnFieldNames[txarg]) + return fmt.Sprintf("%s %s", spec.Name, TxnFieldNames[txarg]), nil } // This is also used to disassemble gtxnsa -func disTxna(dis *disassembleState, spec *OpSpec) { +func disTxna(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 2 
if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 3 txarg := dis.program[dis.pc+1] if int(txarg) >= len(TxnFieldNames) { - dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) - return + return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) } arrayFieldIdx := dis.program[dis.pc+2] - _, dis.err = fmt.Fprintf(dis.out, "%s %s %d\n", spec.Name, TxnFieldNames[txarg], arrayFieldIdx) + return fmt.Sprintf("%s %s %d", spec.Name, TxnFieldNames[txarg], arrayFieldIdx), nil } -func disGtxn(dis *disassembleState, spec *OpSpec) { +func disGtxn(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 2 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 3 gi := dis.program[dis.pc+1] txarg := dis.program[dis.pc+2] if int(txarg) >= len(TxnFieldNames) { - dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) - return + return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) } - _, dis.err = fmt.Fprintf(dis.out, "gtxn %d %s\n", gi, TxnFieldNames[txarg]) + return fmt.Sprintf("gtxn %d %s", gi, TxnFieldNames[txarg]), nil } -func disGtxna(dis *disassembleState, spec *OpSpec) { +func disGtxna(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 3 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } 
dis.nextpc = dis.pc + 4 gi := dis.program[dis.pc+1] txarg := dis.program[dis.pc+2] if int(txarg) >= len(TxnFieldNames) { - dis.err = fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) - return + return "", fmt.Errorf("invalid txn arg index %d at pc=%d", txarg, dis.pc) } arrayFieldIdx := dis.program[dis.pc+3] - _, dis.err = fmt.Fprintf(dis.out, "gtxna %d %s %d\n", gi, TxnFieldNames[txarg], arrayFieldIdx) + return fmt.Sprintf("gtxna %d %s %d", gi, TxnFieldNames[txarg], arrayFieldIdx), nil } -func disGlobal(dis *disassembleState, spec *OpSpec) { +func disGlobal(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 2 garg := dis.program[dis.pc+1] if int(garg) >= len(GlobalFieldNames) { - dis.err = fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc) - return + return "", fmt.Errorf("invalid global arg index %d at pc=%d", garg, dis.pc) } - _, dis.err = fmt.Fprintf(dis.out, "global %s\n", GlobalFieldNames[garg]) + return fmt.Sprintf("global %s", GlobalFieldNames[garg]), nil } -func disBranch(dis *disassembleState, spec *OpSpec) { +func disBranch(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 2 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 3 offset := (uint(dis.program[dis.pc+1]) << 8) | uint(dis.program[dis.pc+2]) target := int(offset) + dis.pc + 3 - label, labelExists := dis.pendingLabels[target] - if !labelExists { - dis.labelCount++ - label = fmt.Sprintf("label%d", 
dis.labelCount) - dis.putLabel(label, target) + var label string + if dis.numericTargets { + label = fmt.Sprintf("+%d", offset+3) // +3 so it's easy to calculate destination from current + } else { + if known, ok := dis.pendingLabels[target]; ok { + label = known + } else { + dis.labelCount++ + label = fmt.Sprintf("label%d", dis.labelCount) + dis.putLabel(label, target) + } } - _, dis.err = fmt.Fprintf(dis.out, "%s %s\n", spec.Name, label) + return fmt.Sprintf("%s %s", spec.Name, label), nil } -func disAssetHolding(dis *disassembleState, spec *OpSpec) { +func disAssetHolding(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 2 arg := dis.program[dis.pc+1] if int(arg) >= len(AssetHoldingFieldNames) { - dis.err = fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc) - return + return "", fmt.Errorf("invalid asset holding arg index %d at pc=%d", arg, dis.pc) } - _, dis.err = fmt.Fprintf(dis.out, "asset_holding_get %s\n", AssetHoldingFieldNames[arg]) + return fmt.Sprintf("asset_holding_get %s", AssetHoldingFieldNames[arg]), nil } -func disAssetParams(dis *disassembleState, spec *OpSpec) { +func disAssetParams(dis *disassembleState, spec *OpSpec) (string, error) { lastIdx := dis.pc + 1 if len(dis.program) <= lastIdx { missing := lastIdx - len(dis.program) + 1 - dis.err = fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) - return + return "", fmt.Errorf("unexpected %s opcode end: missing %d bytes", spec.Name, missing) } dis.nextpc = dis.pc + 2 arg := dis.program[dis.pc+1] if int(arg) >= len(AssetParamsFieldNames) { - dis.err = fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc) - return + return "", 
fmt.Errorf("invalid asset params arg index %d at pc=%d", arg, dis.pc) } - _, dis.err = fmt.Fprintf(dis.out, "asset_params_get %s\n", AssetParamsFieldNames[arg]) + return fmt.Sprintf("asset_params_get %s", AssetParamsFieldNames[arg]), nil } type disInfo struct { @@ -1725,7 +1787,7 @@ func disassembleInstrumented(program []byte) (text string, ds disInfo, err error text = out.String() return } - fmt.Fprintf(dis.out, "// version %d\n", version) + fmt.Fprintf(dis.out, "#pragma version %d\n", version) dis.pc = vlen for dis.pc < len(program) { err = dis.outputLabelIfNeeded() @@ -1750,11 +1812,13 @@ func disassembleInstrumented(program []byte) (text string, ds disInfo, err error ds.pcOffset = append(ds.pcOffset, PCOffset{dis.pc, out.Len()}) // Actually do the disassembly - op.dis(&dis, &op) - if dis.err != nil { - err = dis.err + var line string + line, err = op.dis(&dis, &op) + if err != nil { return } + out.WriteString(line) + out.WriteRune('\n') dis.pc = dis.nextpc } err = dis.outputLabelIfNeeded() diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index 0e435cb721..c2d521b79d 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -834,20 +834,23 @@ int 2` func TestAssembleDisassemble(t *testing.T) { // Specifically constructed program text that should be recreated by Disassemble() - // TODO: disassemble to int/byte psuedo-ops instead of raw intcblock/bytecblock/intc/bytec t.Parallel() - text := fmt.Sprintf(`// version %d + text := fmt.Sprintf(`#pragma version %d intcblock 0 1 2 3 4 5 -bytecblock 0xcafed00d 0x1337 0x2001 0xdeadbeef 0x70077007 -intc_1 -intc_0 +bytecblock 0xcafed00d 0x1337 0x68656c6c6f 0xdeadbeef 0x70077007 0x0102030405060708091011121314151617181920212223242526272829303132 +bytec_2 // "hello" +pop +bytec 5 // addr AEBAGBAFAYDQQCIQCEJBGFAVCYLRQGJAEERCGJBFEYTSQKJQGEZHVJ5ZZY +pop +intc_1 // 1 +intc_0 // 0 + -intc 4 +intc 4 // 4 * -bytec_1 -bytec_0 +bytec_1 // 
0x1337 +bytec_0 // 0xcafed00d == -bytec 4 +bytec 4 // 0x70077007 len + arg_0 @@ -944,7 +947,7 @@ gtxn 12 Fee func TestAssembleDisassembleCycle(t *testing.T) { // Test that disassembly re-assembles to the same program bytes. - // It disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes. + // Disassembly won't necessarily perfectly recreate the source text, but assembling the result of Disassemble() should be the same program bytes. t.Parallel() tests := map[uint64]string{ @@ -955,24 +958,56 @@ func TestAssembleDisassembleCycle(t *testing.T) { // This confirms that each program compiles to the same bytes // (except the leading version indicator), when compiled under - // original and max versions. That doesn't *have* to be true, - // as we can introduce optimizations in later versions that - // change the bytecode emitted. But currently it is, so we - // test it for now to catch any suprises. + // original version, unspecified version (so it should pick up + // the pragma) and current version with pragma removed. That + // doesn't *have* to be true, as we can introduce + // optimizations in later versions that change the bytecode + // emitted. But currently it is, so we test it for now to + // catch any suprises. 
for v, source := range tests { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops := testProg(t, source, v) t2, err := Disassemble(ops.Program) require.NoError(t, err) - ops2 := testProg(t, t2, AssemblerMaxVersion) - if err != nil { - t.Log(t2) - } - require.Equal(t, ops.Program[1:], ops2.Program[1:]) + none := testProg(t, t2, assemblerNoVersion) + require.Equal(t, ops.Program[1:], none.Program[1:]) + t3 := "// " + t2 // This comments out the #pragma version + current := testProg(t, t3, AssemblerMaxVersion) + require.Equal(t, ops.Program[1:], current.Program[1:]) }) } } +func TestConstantDisassembly(t *testing.T) { + t.Parallel() + + ops := testProg(t, "int 47", AssemblerMaxVersion) + out, err := Disassemble(ops.Program) + require.NoError(t, err) + require.Contains(t, out, "// 47") + + ops = testProg(t, "byte \"john\"", AssemblerMaxVersion) + out, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Contains(t, out, "// \"john\"") + + ops = testProg(t, "byte \"!&~\"", AssemblerMaxVersion) + out, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Contains(t, out, "// \"!&~\"") + + ops = testProg(t, "byte 0x010720", AssemblerMaxVersion) + out, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Contains(t, out, "// 0x010720") + + ops = testProg(t, "addr AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", AssemblerMaxVersion) + out, err = Disassemble(ops.Program) + require.NoError(t, err) + require.Contains(t, out, "// addr AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ") + +} + func TestAssembleDisassembleErrors(t *testing.T) { t.Parallel() @@ -1124,9 +1159,10 @@ func TestAssembleAsset(t *testing.T) { func TestDisassembleSingleOp(t *testing.T) { t.Parallel() + for v := uint64(1); v <= AssemblerMaxVersion; v++ { // test ensures no double arg_0 entries in disassembly listing - sample := fmt.Sprintf("// version %d\narg_0\n", v) + sample := fmt.Sprintf("#pragma version %d\narg_0\n", v) ops, err := 
AssembleStringWithVersion(sample, v) require.NoError(t, err) require.Equal(t, 2, len(ops.Program)) @@ -1136,25 +1172,41 @@ func TestDisassembleSingleOp(t *testing.T) { } } +func TestDisassembleInt(t *testing.T) { + t.Parallel() + txnSample := fmt.Sprintf("#pragma version %d\nint 17\nint 27\nint 37\nint 47\nint 5\n", AssemblerMaxVersion) + ops := testProg(t, txnSample, AssemblerMaxVersion) + disassembled, err := Disassemble(ops.Program) + require.NoError(t, err) + // Would ne nice to check that these appear in the + // disassembled output in the right order, but I don't want to + // hardcode checks that they are in certain intc slots. + require.Contains(t, disassembled, "// 17") + require.Contains(t, disassembled, "// 27") + require.Contains(t, disassembled, "// 37") + require.Contains(t, disassembled, "// 47") + require.Contains(t, disassembled, "// 5") +} + func TestDisassembleTxna(t *testing.T) { t.Parallel() // txn was 1, but this tests both introduction := OpsByName[LogicVersion]["gtxna"].Version for v := introduction; v <= AssemblerMaxVersion; v++ { // check txn and txna are properly disassembled - txnSample := fmt.Sprintf("// version %d\ntxn Sender\n", v) + txnSample := fmt.Sprintf("#pragma version %d\ntxn Sender\n", v) ops := testProg(t, txnSample, v) disassembled, err := Disassemble(ops.Program) require.NoError(t, err) require.Equal(t, txnSample, disassembled) - txnaSample := fmt.Sprintf("// version %d\ntxna Accounts 0\n", v) + txnaSample := fmt.Sprintf("#pragma version %d\ntxna Accounts 0\n", v) ops = testProg(t, txnaSample, v) disassembled, err = Disassemble(ops.Program) require.NoError(t, err) require.Equal(t, txnaSample, disassembled) - txnSample2 := fmt.Sprintf("// version %d\ntxn Accounts 0\n", v) + txnSample2 := fmt.Sprintf("#pragma version %d\ntxn Accounts 0\n", v) ops = testProg(t, txnSample2, v) disassembled, err = Disassemble(ops.Program) require.NoError(t, err) @@ -1169,19 +1221,19 @@ func TestDisassembleGtxna(t *testing.T) { introduction := 
OpsByName[LogicVersion]["gtxna"].Version for v := introduction; v <= AssemblerMaxVersion; v++ { - gtxnSample := fmt.Sprintf("// version %d\ngtxn 0 Sender\n", v) + gtxnSample := fmt.Sprintf("#pragma version %d\ngtxn 0 Sender\n", v) ops := testProg(t, gtxnSample, v) disassembled, err := Disassemble(ops.Program) require.NoError(t, err) require.Equal(t, gtxnSample, disassembled) - gtxnaSample := fmt.Sprintf("// version %d\ngtxna 0 Accounts 0\n", v) + gtxnaSample := fmt.Sprintf("#pragma version %d\ngtxna 0 Accounts 0\n", v) ops = testProg(t, gtxnaSample, v) disassembled, err = Disassemble(ops.Program) require.NoError(t, err) require.Equal(t, gtxnaSample, disassembled) - gtxnSample2 := fmt.Sprintf("// version %d\ngtxn 0 Accounts 0\n", v) + gtxnSample2 := fmt.Sprintf("#pragma version %d\ngtxn 0 Accounts 0\n", v) ops = testProg(t, gtxnSample2, v) disassembled, err = Disassemble(ops.Program) require.NoError(t, err) @@ -1193,14 +1245,14 @@ func TestDisassembleGtxna(t *testing.T) { func TestDisassemblePushConst(t *testing.T) { t.Parallel() // check pushint and pushbytes are properly disassembled - intSample := fmt.Sprintf("// version %d\npushint 1\n", AssemblerMaxVersion) + intSample := fmt.Sprintf("#pragma version %d\npushint 1\n", AssemblerMaxVersion) ops, err := AssembleStringWithVersion(intSample, AssemblerMaxVersion) require.NoError(t, err) disassembled, err := Disassemble(ops.Program) require.NoError(t, err) require.Equal(t, intSample, disassembled) - bytesSample := fmt.Sprintf("// version %d\npushbytes 0x01\n", AssemblerMaxVersion) + bytesSample := fmt.Sprintf("#pragma version %d\npushbytes 0x01\n", AssemblerMaxVersion) ops, err = AssembleStringWithVersion(bytesSample, AssemblerMaxVersion) require.NoError(t, err) disassembled, err = Disassemble(ops.Program) @@ -1214,9 +1266,9 @@ func TestDisassembleLastLabel(t *testing.T) { // starting from TEAL v2 branching to the last line are legal for v := uint64(2); v <= AssemblerMaxVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), 
func(t *testing.T) { - source := fmt.Sprintf(`// version %d + source := fmt.Sprintf(`#pragma version %d intcblock 1 -intc_0 +intc_0 // 1 bnz label1 label1: `, v) diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 7f4e099fb9..fd69df15cc 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -570,11 +570,28 @@ func (cx *evalContext) step() { cx.cost += deets.Cost spec.op(cx) if cx.Trace != nil { - immArgsString := " " - if spec.Name != "bnz" { - for i := 1; i < spec.Details.Size; i++ { - immArgsString += fmt.Sprintf("0x%02x ", cx.program[cx.pc+i]) + // This code used to do a little disassembly on its + // own, but then it missed out on some nuances like + // getting the field names instead of constants in the + // txn opcodes. To get them, we conjure up a + // disassembleState from the current execution state, + // and use the existing disassembly routines. It + // feels a little funny to make a disassembleState + // right here, rather than build it as we go, or + // perhaps we could have an interface that allows + // disassembly to use the cx directly. 
But for now, + // we don't want to worry about the dissassembly + // routines mucking about in the excution context + // (changing the pc, for example) and this gives a big + // improvement of dryrun readability + dstate := &disassembleState{program: cx.program, pc: cx.pc, numericTargets: true, intc: cx.intc, bytec: cx.bytec} + var sourceLine string + sourceLine, err := spec.dis(dstate, spec) + if err != nil { + if cx.err == nil { // don't override an error from evaluation + cx.err = err } + return } var stackString string if len(cx.stack) == 0 { @@ -596,7 +613,7 @@ func (cx *evalContext) step() { } } } - fmt.Fprintf(cx.Trace, "%3d %s%s=> %s\n", cx.pc, spec.Name, immArgsString, stackString) + fmt.Fprintf(cx.Trace, "%3d %s => %s\n", cx.pc, sourceLine, stackString) } if cx.err != nil { return diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 641dbd4aba..030bca9874 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -153,17 +153,17 @@ var OpSpecs = []OpSpec{ {0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opDefault}, {0x20, "intcblock", opIntConstBlock, assembleIntCBlock, disIntcblock, nil, nil, 1, modeAny, varies(checkIntConstBlock, "uint ...", immInts)}, - {0x21, "intc", opIntConstLoad, assembleIntC, disDefault, nil, oneInt, 1, modeAny, immediates("i")}, - {0x22, "intc_0", opIntConst0, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault}, - {0x23, "intc_1", opIntConst1, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault}, - {0x24, "intc_2", opIntConst2, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault}, - {0x25, "intc_3", opIntConst3, asmDefault, disDefault, nil, oneInt, 1, modeAny, opDefault}, + {0x21, "intc", opIntConstLoad, assembleIntC, disIntc, nil, oneInt, 1, modeAny, immediates("i")}, + {0x22, "intc_0", opIntConst0, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault}, + {0x23, "intc_1", opIntConst1, asmDefault, disIntc, 
nil, oneInt, 1, modeAny, opDefault}, + {0x24, "intc_2", opIntConst2, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault}, + {0x25, "intc_3", opIntConst3, asmDefault, disIntc, nil, oneInt, 1, modeAny, opDefault}, {0x26, "bytecblock", opByteConstBlock, assembleByteCBlock, disBytecblock, nil, nil, 1, modeAny, varies(checkByteConstBlock, "bytes ...", immBytess)}, - {0x27, "bytec", opByteConstLoad, assembleByteC, disDefault, nil, oneBytes, 1, modeAny, immediates("i")}, - {0x28, "bytec_0", opByteConst0, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault}, - {0x29, "bytec_1", opByteConst1, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault}, - {0x2a, "bytec_2", opByteConst2, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault}, - {0x2b, "bytec_3", opByteConst3, asmDefault, disDefault, nil, oneBytes, 1, modeAny, opDefault}, + {0x27, "bytec", opByteConstLoad, assembleByteC, disBytec, nil, oneBytes, 1, modeAny, immediates("i")}, + {0x28, "bytec_0", opByteConst0, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault}, + {0x29, "bytec_1", opByteConst1, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault}, + {0x2a, "bytec_2", opByteConst2, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault}, + {0x2b, "bytec_3", opByteConst3, asmDefault, disBytec, nil, oneBytes, 1, modeAny, opDefault}, {0x2c, "arg", opArg, assembleArg, disDefault, nil, oneBytes, 1, runModeSignature, immediates("n")}, {0x2d, "arg_0", opArg0, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault}, {0x2e, "arg_1", opArg1, asmDefault, disDefault, nil, oneBytes, 1, runModeSignature, opDefault}, From c652a89cc0614a84b214cd79d1fd30758916fb2b Mon Sep 17 00:00:00 2001 From: Ben Guidarelli Date: Wed, 31 Mar 2021 15:28:09 -0400 Subject: [PATCH 150/215] adding extra docs for pushbytes and pushint (#2033) teal documentation: Adding extra docs for pushbytes and pushint --- data/transactions/logic/TEAL_opcodes.md | 4 ++++ data/transactions/logic/doc.go | 2 
++ 2 files changed, 6 insertions(+) diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md index fd92df4988..14ef15102b 100644 --- a/data/transactions/logic/TEAL_opcodes.md +++ b/data/transactions/logic/TEAL_opcodes.md @@ -840,6 +840,8 @@ params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 other - push the following program bytes to the stack - LogicSigVersion >= 3 +pushbytes args are not added to the bytecblock during assembly processes + ## pushint uint - Opcode: 0x81 {varuint int} @@ -847,3 +849,5 @@ params: txn.ForeignAssets offset. Return: did_exist flag (1 if exist and 0 other - Pushes: uint64 - push immediate UINT to the stack as an integer - LogicSigVersion >= 3 + +pushint args are not added to the intcblock during assembly processes diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 22dcdb66b9..22127adc4b 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -164,6 +164,8 @@ var opDocExtras = map[string]string{ "gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.", "btoi": "`btoi` panics if the input is longer than 8 bytes.", "concat": "`concat` panics if the result would be greater than 4096 bytes.", + "pushbytes": "pushbytes args are not added to the bytecblock during assembly processes", + "pushint": "pushint args are not added to the intcblock during assembly processes", "getbit": "see explanation of bit ordering in setbit", "setbit": "bit indexing begins with low-order bits in integers. Setting bit 4 to 1 on the integer 0 yields 16 (`int 0x0010`, or 2^4). Indexing begins in the first bytes of a byte-string (as seen in getbyte and substring). 
Setting bits 0 through 11 to 1 in a 4 byte-array of 0s yields `byte 0xfff00000`", "app_opted_in": "params: account index, application id (top of the stack on opcode entry). Return: 1 if opted in and 0 otherwise.", From b5770613cb9a70f1443ddcc25103889b4d0b3631 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 31 Mar 2021 16:10:22 -0400 Subject: [PATCH 151/215] delete pid file on process killing. (#2032) xisting code wait 30 seconds between sending SIGTERM and SIGKILL. When sending the SIGKILL, the process is deleted, but the pid file remains on disk. Leaving the pid file on disk could cause subsequent failures - and we could easily avoid them by clearing this file if we SIGKILL'ed the process. --- nodecontrol/NodeController.go | 10 +++++----- nodecontrol/algodControl.go | 12 +++++++++--- nodecontrol/kmdControl.go | 12 +++++++++--- 3 files changed, 23 insertions(+), 11 deletions(-) diff --git a/nodecontrol/NodeController.go b/nodecontrol/NodeController.go index 0673dce9ad..ab8f84c417 100644 --- a/nodecontrol/NodeController.go +++ b/nodecontrol/NodeController.go @@ -111,26 +111,26 @@ func (nc NodeController) stopProcesses() (kmdAlreadyStopped bool, err error) { return } -func killPID(pid int) error { +func killPID(pid int) (killed bool, err error) { process, err := util.FindProcess(pid) if process == nil || err != nil { - return err + return false, err } err = util.KillProcess(pid, syscall.SIGTERM) if err != nil { - return err + return false, err } waitLong := time.After(time.Second * 30) for { // Send null signal - if process still exists, it'll return nil // So when we get an error, assume it's gone. 
if err = process.Signal(syscall.Signal(0)); err != nil { - return nil + return false, nil } select { case <-waitLong: - return util.KillProcess(pid, syscall.SIGKILL) + return true, util.KillProcess(pid, syscall.SIGKILL) case <-time.After(time.Millisecond * 100): } } diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go index a0fde4f58c..6ba5413bbb 100644 --- a/nodecontrol/algodControl.go +++ b/nodecontrol/algodControl.go @@ -160,9 +160,15 @@ func (nc *NodeController) StopAlgod() (err error) { algodPID, err := nc.GetAlgodPID() if err == nil { // Kill algod by PID - err = killPID(int(algodPID)) - if err != nil { - return + killed, killErr := killPID(int(algodPID)) + if killErr != nil { + return killErr + } + // if we ended up killing the process, make sure to delete the pid file to avoid + // potential downstream issues. + if killed { + // delete the pid file. + os.Remove(nc.algodPidFile) } } else { return &NodeNotRunningError{algodDataDir: nc.algodDataDir} diff --git a/nodecontrol/kmdControl.go b/nodecontrol/kmdControl.go index a608cac160..08fd8a2c3c 100644 --- a/nodecontrol/kmdControl.go +++ b/nodecontrol/kmdControl.go @@ -121,9 +121,15 @@ func (kc *KMDController) StopKMD() (alreadyStopped bool, err error) { kmdPID, err := kc.GetKMDPID() if err == nil { // Kill kmd by PID - err = killPID(int(kmdPID)) - if err != nil { - return + killed, killErr := killPID(int(kmdPID)) + if killErr != nil { + return false, killErr + } + // if we ended up killing the process, make sure to delete the pid file to avoid + // potential downstream issues. + if killed { + // delete the pid file. + os.Remove(kc.kmdPIDPath) } } else { err = nil From 46ea7e94b50d7ce84fb235d5bf249bd11ecdc48c Mon Sep 17 00:00:00 2001 From: egieseke Date: Wed, 31 Mar 2021 17:59:09 -0400 Subject: [PATCH 152/215] Fix for wrong channel when packaging. 
(#1957) --- scripts/release/mule/package/deb/package.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/release/mule/package/deb/package.sh b/scripts/release/mule/package/deb/package.sh index bf6a5dacfe..6cd9f653ea 100755 --- a/scripts/release/mule/package/deb/package.sh +++ b/scripts/release/mule/package/deb/package.sh @@ -12,7 +12,7 @@ if [ -z "$NETWORK" ]; then exit 1 fi -CHANNEL=$("./scripts/release/mule/common/get_channel.sh" "$NETWORK") +CHANNEL=${CHANNEL:-$(./scripts/release/mule/common/get_channel.sh "$NETWORK")} VERSION=${VERSION:-$(./scripts/compute_build_number.sh -f)} # A make target in Makefile.mule may pass the name as an argument. PACKAGE_NAME="$1" From 920c6ee62440341320aebd2f75d0d0919230f2a0 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 31 Mar 2021 20:05:39 -0400 Subject: [PATCH 153/215] Fix testing bug : avoid datarace on a node crash during e2e test (#2031) ## Overview Synchronize `testing.T` access across go-routines to avoid generating a data race in case of an asynchronous node crash feedback. ## Summary When the node controller notice that the node exits, it reports it back to the test fixture. However, the test fixture cannot report that to the underlying test since doing so would create a data race : the `testing.T` is not meant to support concurrency. To address that, this PR provides an abstraction over the `testing.T` called `TestingTB`, which retain the same functionality, but uses a mutex to synchronize the access. 
--- test/e2e-go/cli/algod/cleanup_test.go | 2 +- test/e2e-go/cli/algod/stdstreams_test.go | 2 +- test/e2e-go/cli/goal/account_test.go | 8 +- test/e2e-go/cli/goal/clerk_test.go | 4 +- test/e2e-go/cli/goal/node_cleanup_test.go | 3 +- test/e2e-go/cli/perf/libgoal_test.go | 7 +- test/e2e-go/cli/perf/payment_test.go | 18 +- .../features/auction/auctionCancel_test.go | 6 +- .../features/auction/auctionErrors_test.go | 10 +- .../features/auction/basicAuction_test.go | 14 +- .../features/catchup/basicCatchup_test.go | 17 +- .../catchup/catchpointCatchup_test.go | 6 +- .../features/compactcert/compactcert_test.go | 2 +- .../e2e-go/features/multisig/multisig_test.go | 8 +- .../onlineOfflineParticipation_test.go | 4 +- .../participationRewards_test.go | 18 +- .../partitionRecovery_test.go | 8 +- test/e2e-go/features/teal/compile_test.go | 2 +- .../features/transactions/accountv2_test.go | 4 +- .../features/transactions/asset_test.go | 12 +- .../transactions/close_account_test.go | 2 +- .../features/transactions/group_test.go | 6 +- .../features/transactions/lease_test.go | 12 +- .../transactions/onlineStatusChange_test.go | 2 +- .../features/transactions/proof_test.go | 2 +- .../features/transactions/sendReceive_test.go | 2 +- .../transactions/transactionPool_test.go | 6 +- test/e2e-go/kmd/e2e_kmd_server_client_test.go | 11 +- test/e2e-go/kmd/e2e_kmd_sqlite_test.go | 8 +- test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go | 181 +++++----- .../kmd/e2e_kmd_wallet_multisig_test.go | 101 +++--- test/e2e-go/kmd/e2e_kmd_wallet_test.go | 78 +++-- test/e2e-go/perf/basic_test.go | 26 +- test/e2e-go/restAPI/restClient_test.go | 330 ++++++++++-------- .../createManyAndGoOnline_test.go | 2 +- .../upgrades/application_support_test.go | 326 ++++++++--------- test/e2e-go/upgrades/rekey_support_test.go | 16 +- .../upgrades/send_receive_upgrade_test.go | 2 +- test/framework/fixtures/auctionFixture.go | 5 +- test/framework/fixtures/expectFixture.go | 17 +- test/framework/fixtures/fixture.go | 133 
++++++- test/framework/fixtures/kmdFixture.go | 28 +- test/framework/fixtures/libgoalFixture.go | 14 +- test/framework/fixtures/restClientFixture.go | 4 +- 44 files changed, 830 insertions(+), 639 deletions(-) diff --git a/test/e2e-go/cli/algod/cleanup_test.go b/test/e2e-go/cli/algod/cleanup_test.go index e893a68e77..310a8e82d7 100644 --- a/test/e2e-go/cli/algod/cleanup_test.go +++ b/test/e2e-go/cli/algod/cleanup_test.go @@ -28,7 +28,7 @@ import ( func TestNodeControllerCleanup(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesPartialPartkeyOnlyWallets.json")) diff --git a/test/e2e-go/cli/algod/stdstreams_test.go b/test/e2e-go/cli/algod/stdstreams_test.go index 0a8f33f475..27b87fcb86 100644 --- a/test/e2e-go/cli/algod/stdstreams_test.go +++ b/test/e2e-go/cli/algod/stdstreams_test.go @@ -44,7 +44,7 @@ func TestAlgodLogsToFile(t *testing.T) { } func testNodeCreatesLogFiles(t *testing.T, nc nodecontrol.NodeController, redirect bool) { - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) stdOutFile := filepath.Join(nc.GetDataDir(), nodecontrol.StdOutFilename) exists := util.FileExists(stdOutFile) diff --git a/test/e2e-go/cli/goal/account_test.go b/test/e2e-go/cli/goal/account_test.go index 7afc124c23..492b4c29dc 100644 --- a/test/e2e-go/cli/goal/account_test.go +++ b/test/e2e-go/cli/goal/account_test.go @@ -29,7 +29,7 @@ const statusOnline = "[online]" func TestAccountNew(t *testing.T) { defer fixture.SetTestContext(t)() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) newAcctName := "new_account" @@ -54,7 +54,7 @@ func TestAccountNew(t *testing.T) { func TestAccountNewDuplicateFails(t *testing.T) { defer fixture.SetTestContext(t)() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) newAcctName := "duplicate_account" @@ -69,7 +69,7 @@ func 
TestAccountNewDuplicateFails(t *testing.T) { func TestAccountRename(t *testing.T) { defer fixture.SetTestContext(t)() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) initialAcctName := "initial" newAcctName := "renamed" @@ -99,7 +99,7 @@ func TestAccountRename(t *testing.T) { // Importing an account multiple times should not be considered an error by goal func TestAccountMultipleImportRootKey(t *testing.T) { defer fixture.SetTestContext(t)() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) walletName := "" createUnencryptedWallet := false diff --git a/test/e2e-go/cli/goal/clerk_test.go b/test/e2e-go/cli/goal/clerk_test.go index 2ee2a618dd..5ddc11d63d 100644 --- a/test/e2e-go/cli/goal/clerk_test.go +++ b/test/e2e-go/cli/goal/clerk_test.go @@ -22,11 +22,13 @@ import ( "time" "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/test/framework/fixtures" ) func TestClerkSendNoteEncoding(t *testing.T) { defer fixture.SetTestContext(t)() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) // wait for consensus on first round prior to sending transactions, time out after 2 minutes err := fixture.WaitForRound(2, time.Duration(2*time.Minute)) diff --git a/test/e2e-go/cli/goal/node_cleanup_test.go b/test/e2e-go/cli/goal/node_cleanup_test.go index 00d06c00bb..857ad76c3d 100644 --- a/test/e2e-go/cli/goal/node_cleanup_test.go +++ b/test/e2e-go/cli/goal/node_cleanup_test.go @@ -22,11 +22,12 @@ import ( "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/nodecontrol" + "github.com/algorand/go-algorand/test/framework/fixtures" ) func TestGoalNodeCleanup(t *testing.T) { defer fixture.SetTestContext(t)() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) primaryDir := fixture.PrimaryDataDir() nc := nodecontrol.MakeNodeController(fixture.GetBinDir(), primaryDir) diff --git a/test/e2e-go/cli/perf/libgoal_test.go 
b/test/e2e-go/cli/perf/libgoal_test.go index 8ebc471df9..817ddeef5d 100644 --- a/test/e2e-go/cli/perf/libgoal_test.go +++ b/test/e2e-go/cli/perf/libgoal_test.go @@ -34,19 +34,20 @@ func BenchmarkLibGoalPerf(b *testing.B) { binDir := fixture.GetBinDir() c, err := libgoal.MakeClientWithBinDir(binDir, fixture.PrimaryDataDir(), fixture.PrimaryDataDir(), libgoal.FullClient) - require.NoError(b, err) + a := require.New(fixtures.SynchronizedTest(b)) + a.NoError(err) b.Run("algod", func(b *testing.B) { for i := 0; i < b.N; i++ { _, err := c.AlgodVersions() - require.NoError(b, err) + a.NoError(err) } }) b.Run("kmd", func(b *testing.B) { for i := 0; i < b.N; i++ { _, err := c.GetUnencryptedWalletHandle() - require.NoError(b, err) + a.NoError(err) } }) } diff --git a/test/e2e-go/cli/perf/payment_test.go b/test/e2e-go/cli/perf/payment_test.go index ed785fdba1..bf5a7e8a14 100644 --- a/test/e2e-go/cli/perf/payment_test.go +++ b/test/e2e-go/cli/perf/payment_test.go @@ -35,21 +35,23 @@ func BenchmarkSendPayment(b *testing.B) { defer fixture.Shutdown() binDir := fixture.GetBinDir() + a := require.New(fixtures.SynchronizedTest(b)) + c, err := libgoal.MakeClientWithBinDir(binDir, fixture.PrimaryDataDir(), fixture.PrimaryDataDir(), libgoal.FullClient) - require.NoError(b, err) + a.NoError(err) wallet, err := c.GetUnencryptedWalletHandle() - require.NoError(b, err) + a.NoError(err) addrs, err := c.ListAddresses(wallet) - require.NoError(b, err) - require.True(b, len(addrs) > 0) + a.NoError(err) + a.True(len(addrs) > 0) addr := addrs[0] b.Run("getwallet", func(b *testing.B) { for i := 0; i < b.N; i++ { _, err = c.GetUnencryptedWalletHandle() - require.NoError(b, err) + a.NoError(err) } }) @@ -59,14 +61,14 @@ func BenchmarkSendPayment(b *testing.B) { var nonce [8]byte crypto.RandBytes(nonce[:]) tx, err = c.ConstructPayment(addr, addr, 1, 1, nonce[:], "", [32]byte{}, 0, 0) - require.NoError(b, err) + a.NoError(err) } }) b.Run("signtxn", func(b *testing.B) { for i := 0; i < b.N; i++ { _, 
err = c.SignTransactionWithWallet(wallet, nil, tx) - require.NoError(b, err) + a.NoError(err) } }) @@ -75,7 +77,7 @@ func BenchmarkSendPayment(b *testing.B) { var nonce [8]byte crypto.RandBytes(nonce[:]) _, err := c.SendPaymentFromWallet(wallet, nil, addr, addr, 1, 1, nonce[:], "", 0, 0) - require.NoError(b, err) + a.NoError(err) } }) } diff --git a/test/e2e-go/features/auction/auctionCancel_test.go b/test/e2e-go/features/auction/auctionCancel_test.go index 2d652b38a6..b7b03d8aac 100644 --- a/test/e2e-go/features/auction/auctionCancel_test.go +++ b/test/e2e-go/features/auction/auctionCancel_test.go @@ -31,7 +31,7 @@ func TestStartAndCancelAuctionNoBids(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "ThreeNodesEvenDist.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -62,7 +62,7 @@ func TestStartAndCancelAuctionOneUserTenBids(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -122,7 +122,7 @@ func TestStartAndCancelAuctionOneUserTenBids(t *testing.T) { func TestStartAndCancelAuctionEarlyOneUserTenBids(t *testing.T) { t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") diff --git a/test/e2e-go/features/auction/auctionErrors_test.go b/test/e2e-go/features/auction/auctionErrors_test.go index 3251223f3f..1ef1ef6a29 100644 --- a/test/e2e-go/features/auction/auctionErrors_test.go +++ b/test/e2e-go/features/auction/auctionErrors_test.go @@ -35,7 +35,7 @@ func 
TestInvalidDeposit(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") @@ -123,7 +123,7 @@ func TestNoDepositAssociatedWithBid(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") @@ -192,7 +192,7 @@ func TestNoDepositAssociatedWithBid(t *testing.T) { func TestDeadbeatBid(t *testing.T) { // an error is expected when an account attempts to overbid t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") @@ -290,7 +290,7 @@ func TestStartAndPartitionAuctionTenUsersTenBidsEach(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -299,7 +299,7 @@ func TestStartAndPartitionAuctionTenUsersTenBidsEach(t *testing.T) { libGoalClient := fixture.GetLibGoalClient() minTxnFee, minAcctBalance, err := fixture.CurrentMinFeeAndBalance() - require.NoError(t, err) + r.NoError(err) // create wallets to bid with, and note their balances before the auction. 
wallets, _ := fixture.GetWalletsSortedByBalance() diff --git a/test/e2e-go/features/auction/basicAuction_test.go b/test/e2e-go/features/auction/basicAuction_test.go index d833ae7d93..4c055763ba 100644 --- a/test/e2e-go/features/auction/basicAuction_test.go +++ b/test/e2e-go/features/auction/basicAuction_test.go @@ -43,7 +43,7 @@ func TestStartAndEndAuctionNoBids(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "ThreeNodesEvenDist.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -84,7 +84,7 @@ func TestStartAndEndAuctionOneUserOneBid(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -153,7 +153,7 @@ func TestStartAndEndAuctionOneUserTenBids(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -222,7 +222,7 @@ func TestStartAndEndAuctionOneUserTenBids(t *testing.T) { func TestStartAndEndAuctionTenUsersOneBidEach(t *testing.T) { t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -317,7 +317,7 @@ func TestStartAndEndAuctionTenUsersTenBidsEach(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := 
filepath.Join("nettemplates", "TwoNodes50Each.json") auctionParamFile := filepath.Join("auctions", "AuctionParams_1.json") @@ -326,7 +326,7 @@ func TestStartAndEndAuctionTenUsersTenBidsEach(t *testing.T) { libGoalClient := fixture.GetLibGoalClient() minTxnFee, minAcctBalance, err := fixture.CurrentMinFeeAndBalance() - require.NoError(t, err) + r.NoError(err) // create wallets to bid with, and note their balances before the auction. wallets, _ := fixture.GetWalletsSortedByBalance() @@ -414,7 +414,7 @@ func TestDecayingPrice(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.AuctionFixture netTemplate := filepath.Join("nettemplates", "TwoNodes50Each.json") // "price goes from 10 to 1, decreasing by 1 each block for 10 blocks." diff --git a/test/e2e-go/features/catchup/basicCatchup_test.go b/test/e2e-go/features/catchup/basicCatchup_test.go index cfe350e298..32370a928c 100644 --- a/test/e2e-go/features/catchup/basicCatchup_test.go +++ b/test/e2e-go/features/catchup/basicCatchup_test.go @@ -35,7 +35,7 @@ func TestBasicCatchup(t *testing.T) { t.Skip() } t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) // Overview of this test: // Start a two-node network (primary has 0%, secondary has 100%) @@ -78,13 +78,14 @@ func TestBasicCatchup(t *testing.T) { func TestCatchupOverGossip(t *testing.T) { t.Parallel() + syncTest := fixtures.SynchronizedTest(t) supportedVersions := network.SupportedProtocolVersions - require.LessOrEqual(t, len(supportedVersions), 3) + require.LessOrEqual(syncTest, len(supportedVersions), 3) // ledger node upgraded version, fetcher node upgraded version // Run with the default values. Instead of "", pass the default value // to exercise loading it from the config file. 
- runCatchupOverGossip(t, supportedVersions[0], supportedVersions[0]) + runCatchupOverGossip(syncTest, supportedVersions[0], supportedVersions[0]) for i := 1; i < len(supportedVersions); i++ { runCatchupOverGossip(t, supportedVersions[i], "") runCatchupOverGossip(t, "", supportedVersions[i]) @@ -92,7 +93,7 @@ func TestCatchupOverGossip(t *testing.T) { } } -func runCatchupOverGossip(t *testing.T, +func runCatchupOverGossip(t fixtures.TestingTB, ledgerNodeDowngradeTo, fetcherNodeDowngradeTo string) { @@ -117,7 +118,7 @@ func runCatchupOverGossip(t *testing.T, a.NoError(err) cfg, err := config.LoadConfigFromDisk(dir) a.NoError(err) - require.Empty(t, cfg.NetworkProtocolVersion) + a.Empty(cfg.NetworkProtocolVersion) cfg.NetworkProtocolVersion = ledgerNodeDowngradeTo cfg.SaveToDisk(dir) } @@ -127,7 +128,7 @@ func runCatchupOverGossip(t *testing.T, dir := fixture.PrimaryDataDir() cfg, err := config.LoadConfigFromDisk(dir) a.NoError(err) - require.Empty(t, cfg.NetworkProtocolVersion) + a.Empty(cfg.NetworkProtocolVersion) cfg.NetworkProtocolVersion = fetcherNodeDowngradeTo cfg.SaveToDisk(dir) } @@ -177,7 +178,7 @@ func runCatchupOverGossip(t *testing.T, if time.Now().Sub(waitStart) > time.Minute { // it's taking too long. 
- require.FailNow(t, "Waiting too long for catchup to complete") + a.FailNow("Waiting too long for catchup to complete") } time.Sleep(50 * time.Millisecond) @@ -198,7 +199,7 @@ func TestStoppedCatchupOnUnsupported(t *testing.T) { t.Skip() } t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) consensus := make(config.ConsensusProtocols) // The following two protocols: testUnupgradedProtocol and testUnupgradedToProtocol diff --git a/test/e2e-go/features/catchup/catchpointCatchup_test.go b/test/e2e-go/features/catchup/catchpointCatchup_test.go index 9297f13a01..38a6ecd40b 100644 --- a/test/e2e-go/features/catchup/catchpointCatchup_test.go +++ b/test/e2e-go/features/catchup/catchpointCatchup_test.go @@ -41,7 +41,7 @@ type nodeExitErrorCollector struct { errors []error messages []string mu deadlock.Mutex - t *testing.T + t fixtures.TestingTB } func (ec *nodeExitErrorCollector) nodeExitWithError(nc *nodecontrol.NodeController, err error) { @@ -82,7 +82,7 @@ func TestBasicCatchpointCatchup(t *testing.T) { if testing.Short() { t.Skip() } - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) log := logging.TestingLog(t) // Overview of this test: @@ -115,7 +115,7 @@ func TestBasicCatchpointCatchup(t *testing.T) { var fixture fixtures.RestClientFixture fixture.SetConsensus(consensus) - errorsCollector := nodeExitErrorCollector{t: t} + errorsCollector := nodeExitErrorCollector{t: fixtures.SynchronizedTest(t)} defer errorsCollector.Print() // Give the second node (which starts up last) all the stake so that its proposal always has better credentials, diff --git a/test/e2e-go/features/compactcert/compactcert_test.go b/test/e2e-go/features/compactcert/compactcert_test.go index 24b821b798..d8148c5bbc 100644 --- a/test/e2e-go/features/compactcert/compactcert_test.go +++ b/test/e2e-go/features/compactcert/compactcert_test.go @@ -36,7 +36,7 @@ import ( func TestCompactCerts(t *testing.T) { t.Parallel() - r := require.New(t) + r := 
require.New(fixtures.SynchronizedTest(t)) configurableConsensus := make(config.ConsensusProtocols) consensusVersion := protocol.ConsensusVersion("test-fast-compactcert") diff --git a/test/e2e-go/features/multisig/multisig_test.go b/test/e2e-go/features/multisig/multisig_test.go index 189af4f06f..c24b4bc5ff 100644 --- a/test/e2e-go/features/multisig/multisig_test.go +++ b/test/e2e-go/features/multisig/multisig_test.go @@ -37,7 +37,7 @@ func TestBasicMultisig(t *testing.T) { fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer fixture.Shutdown() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) // create three addrs client := fixture.LibGoalClient @@ -112,7 +112,7 @@ func TestZeroThreshold(t *testing.T) { fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer fixture.Shutdown() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) client := fixture.LibGoalClient walletHandle, err := client.GetUnencryptedWalletHandle() r.NoError(err, "Getting default wallet handle should not return error") @@ -139,7 +139,7 @@ func TestZeroSigners(t *testing.T) { fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer fixture.Shutdown() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) client := fixture.LibGoalClient walletHandle, err := client.GetUnencryptedWalletHandle() r.NoError(err, "Getting default wallet handle should not return error") @@ -162,7 +162,7 @@ func TestDuplicateKeys(t *testing.T) { fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer fixture.Shutdown() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) // create one addr client := fixture.LibGoalClient diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go index 99a5afb4de..b6e3296ceb 100644 --- 
a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go +++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go @@ -29,7 +29,7 @@ import ( func TestParticipationKeyOnlyAccountParticipatesCorrectly(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesPartialPartkeyOnlyWallets.json")) @@ -105,7 +105,7 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { t.Skip() // temporary disable the test since it's failing. t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodesOneOnline.json")) diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go index 68a540ebe2..055feb407e 100644 --- a/test/e2e-go/features/participation/participationRewards_test.go +++ b/test/e2e-go/features/participation/participationRewards_test.go @@ -42,14 +42,15 @@ func getFirstAccountFromNamedNode(fixture *fixtures.RestClientFixture, r *requir func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round uint64) (uint64, error) { block, err := fixture.AlgodClient.Block(round) - require.NoError(t, err) + a := require.New(fixtures.SynchronizedTest(t)) + a.NoError(err) for { round++ err := fixture.WaitForRoundWithTimeout(round + 1) - require.NoError(t, err) + a.NoError(err) nextBlock, err := fixture.AlgodClient.Block(round) - require.NoError(t, err) + a.NoError(err) if nextBlock.RewardsLevel > block.RewardsLevel { // reward level increased, rewards were granted @@ -64,18 +65,19 @@ func waitUntilRewards(t *testing.T, fixture *fixtures.RestClientFixture, round u } func spendToNonParticipating(t *testing.T, fixture *fixtures.RestClientFixture, lastRound uint64, account string, balance uint64, 
minFee uint64) uint64 { + a := require.New(fixtures.SynchronizedTest(t)) // move a lot of Algos to a non participating account -- the incentive pool poolAddr := basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} // hardcoded; change if the pool address changes pd := poolAddr drainTx, err := fixture.LibGoalClient.SendPaymentFromUnencryptedWallet(account, pd.String(), minFee, balance-balance/100-minFee, nil) - require.NoError(t, err) + a.NoError(err) fixture.WaitForAllTxnsToConfirm(lastRound+uint64(10), map[string]string{drainTx.ID().String(): account}) return balance / 100 } func TestOnlineOfflineRewards(t *testing.T) { t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "FourNodes.json")) @@ -137,7 +139,7 @@ func TestPartkeyOnlyRewards(t *testing.T) { t.Skip() } t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "FourNodes.json")) @@ -180,7 +182,7 @@ func TestPartkeyOnlyRewards(t *testing.T) { func TestRewardUnitThreshold(t *testing.T) { t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "FourNodes.json")) @@ -299,7 +301,7 @@ var defaultPoolAddr = basics.Address{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0 func TestRewardRateRecalculation(t *testing.T) { t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) // consensusTestRapidRewardRecalculation is a version of ConsensusCurrentVersion // that decreases the RewardsRateRefreshInterval greatly. 
diff --git a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go index 961b5489a9..813ee3216f 100644 --- a/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go +++ b/test/e2e-go/features/partitionRecovery/partitionRecovery_test.go @@ -34,7 +34,7 @@ func TestBasicPartitionRecovery(t *testing.T) { t.Skip() } t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) // Overview of this test: // Start a two-node network (with 50% each) @@ -114,7 +114,7 @@ func TestPartitionRecoveryStaggerRestart(t *testing.T) { } func runTestWithStaggeredStopStart(t *testing.T, fixture *fixtures.RestClientFixture) { - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) // Get Node1 so we can wait until it has reached the target round nc1, err := fixture.GetNodeController("Node1") @@ -159,7 +159,7 @@ func TestBasicPartitionRecoveryPartOffline(t *testing.T) { t.Skip() } t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) // Overview of this test: // Start a three-node network capable of making progress. 
@@ -210,7 +210,7 @@ func TestPartitionHalfOffline(t *testing.T) { t.Skip() } t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) // Overview of this test: // Start a TenNodeDistributed network diff --git a/test/e2e-go/features/teal/compile_test.go b/test/e2e-go/features/teal/compile_test.go index 8622ce55c6..16822febd8 100644 --- a/test/e2e-go/features/teal/compile_test.go +++ b/test/e2e-go/features/teal/compile_test.go @@ -31,7 +31,7 @@ func TestTealCompile(t *testing.T) { if testing.Short() { t.Skip() } - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.SetupNoStart(t, filepath.Join("nettemplates", "OneNodeFuture.json")) diff --git a/test/e2e-go/features/transactions/accountv2_test.go b/test/e2e-go/features/transactions/accountv2_test.go index 53cfd19a46..d403ffe20c 100644 --- a/test/e2e-go/features/transactions/accountv2_test.go +++ b/test/e2e-go/features/transactions/accountv2_test.go @@ -34,7 +34,7 @@ import ( ) func checkEvalDelta(t *testing.T, client *libgoal.Client, startRnd, endRnd uint64, gval uint64, lval uint64) { - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) foundGlobal := false foundLocal := false @@ -76,7 +76,7 @@ func checkEvalDelta(t *testing.T, client *libgoal.Client, startRnd, endRnd uint6 func TestAccountInformationV2(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture proto, ok := config.Consensus[protocol.ConsensusFuture] diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go index e0fd11ec5e..4068bf03fa 100644 --- a/test/e2e-go/features/transactions/asset_test.go +++ b/test/e2e-go/features/transactions/asset_test.go @@ -54,7 +54,7 @@ func helperFillSignBroadcast(client libgoal.Client, wh []byte, sender string, tx func TestAssetValidRounds(t *testing.T) { t.Parallel() - a := 
require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) @@ -188,7 +188,7 @@ func TestAssetConfig(t *testing.T) { t.Skip() } t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) @@ -420,7 +420,7 @@ func TestAssetConfig(t *testing.T) { func TestAssetInformation(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) @@ -512,7 +512,7 @@ func TestAssetInformation(t *testing.T) { func TestAssetGroupCreateSendDestroy(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) @@ -653,7 +653,7 @@ func TestAssetGroupCreateSendDestroy(t *testing.T) { func TestAssetSend(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) @@ -1049,7 +1049,7 @@ func setupTestAndNetwork(t *testing.T, networkTemplate string, consensus config. 
Assertions *require.Assertions, Fixture *fixtures.RestClientFixture, Client *libgoal.Client, Account0 string) { t.Parallel() - asser := require.New(t) + asser := require.New(fixtures.SynchronizedTest(t)) if 0 == len(networkTemplate) { // If the networkTemplate is not specified, used the default one networkTemplate = "TwoNodes50Each.json" diff --git a/test/e2e-go/features/transactions/close_account_test.go b/test/e2e-go/features/transactions/close_account_test.go index 77e134717c..66f1a0138b 100644 --- a/test/e2e-go/features/transactions/close_account_test.go +++ b/test/e2e-go/features/transactions/close_account_test.go @@ -27,7 +27,7 @@ import ( func TestAccountsCanClose(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV15.json")) diff --git a/test/e2e-go/features/transactions/group_test.go b/test/e2e-go/features/transactions/group_test.go index 74b71bccfc..b13929f2f3 100644 --- a/test/e2e-go/features/transactions/group_test.go +++ b/test/e2e-go/features/transactions/group_test.go @@ -31,7 +31,7 @@ import ( func TestGroupTransactions(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) @@ -101,7 +101,7 @@ func TestGroupTransactions(t *testing.T) { func TestGroupTransactionsDifferentSizes(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachFuture.json")) @@ -207,7 +207,7 @@ func TestGroupTransactionsDifferentSizes(t *testing.T) { func TestGroupTransactionsSubmission(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture 
fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) diff --git a/test/e2e-go/features/transactions/lease_test.go b/test/e2e-go/features/transactions/lease_test.go index 5c2102cd6c..571776f372 100644 --- a/test/e2e-go/features/transactions/lease_test.go +++ b/test/e2e-go/features/transactions/lease_test.go @@ -28,7 +28,7 @@ import ( func TestLeaseTransactionsSameSender(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) @@ -87,7 +87,7 @@ func TestLeaseTransactionsSameSender(t *testing.T) { func TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachV22.json")) @@ -159,7 +159,7 @@ func TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7(t *testing.T) { func TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) @@ -218,7 +218,7 @@ func TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7(t *testing.T) { func TestLeaseTransactionsSameSenderDifferentLease(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) @@ -279,7 +279,7 @@ func TestLeaseTransactionsSameSenderDifferentLease(t *testing.T) { func TestLeaseTransactionsDifferentSender(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) 
@@ -353,7 +353,7 @@ func TestLeaseTransactionsDifferentSender(t *testing.T) { func TestOverlappingLeases(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go index 8e4b5de6cd..c56c75f185 100644 --- a/test/e2e-go/features/transactions/onlineStatusChange_test.go +++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go @@ -40,7 +40,7 @@ func TestAccountsCanChangeOnlineStateInTheFuture(t *testing.T) { func testAccountsCanChangeOnlineState(t *testing.T, templatePath string) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, templatePath) diff --git a/test/e2e-go/features/transactions/proof_test.go b/test/e2e-go/features/transactions/proof_test.go index 3409191df8..c604c2b2bf 100644 --- a/test/e2e-go/features/transactions/proof_test.go +++ b/test/e2e-go/features/transactions/proof_test.go @@ -30,7 +30,7 @@ import ( func TestTxnMerkleProof(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "OneNodeFuture.json")) diff --git a/test/e2e-go/features/transactions/sendReceive_test.go b/test/e2e-go/features/transactions/sendReceive_test.go index a19ace6768..b91fe8b84b 100644 --- a/test/e2e-go/features/transactions/sendReceive_test.go +++ b/test/e2e-go/features/transactions/sendReceive_test.go @@ -49,7 +49,7 @@ func TestAccountsCanSendMoney(t *testing.T) { func testAccountsCanSendMoney(t *testing.T, templatePath string, numberOfSends int) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture 
fixtures.RestClientFixture fixture.Setup(t, templatePath) diff --git a/test/e2e-go/features/transactions/transactionPool_test.go b/test/e2e-go/features/transactions/transactionPool_test.go index d07d8a1c57..a6d235b182 100644 --- a/test/e2e-go/features/transactions/transactionPool_test.go +++ b/test/e2e-go/features/transactions/transactionPool_test.go @@ -29,7 +29,7 @@ import ( func TestTransactionPoolOrderingAndClearing(t *testing.T) { t.Skip("test is flaky as of 2019-06-18") t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50EachOneOnline.json")) @@ -46,7 +46,7 @@ func TestTransactionPoolOrderingAndClearing(t *testing.T) { stoppedRound := curStatus.LastRound minTxnFee, minAcctBalance, err := fixture.MinFeeAndBalance(curStatus.LastRound) - require.NoError(t, err) + r.NoError(err) // put transactions in the pool - they cannot be removed from the pool while the node is stopped numTransactions := 25 @@ -115,7 +115,7 @@ func TestTransactionPoolExponentialFees(t *testing.T) { t.Skip("new FIFO pool does not have exponential fee txn replacement") t.Parallel() - r := require.New(t) + r := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) diff --git a/test/e2e-go/kmd/e2e_kmd_server_client_test.go b/test/e2e-go/kmd/e2e_kmd_server_client_test.go index b08219f8a2..cb7c13c254 100644 --- a/test/e2e-go/kmd/e2e_kmd_server_client_test.go +++ b/test/e2e-go/kmd/e2e_kmd_server_client_test.go @@ -28,6 +28,7 @@ import ( ) func TestServerStartsStopsSuccessfully(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture f.Setup(t) @@ -37,11 +38,12 @@ func TestServerStartsStopsSuccessfully(t *testing.T) { req := kmdapi.VersionsRequest{} resp := kmdapi.VersionsResponse{} err := f.Client.DoV1Request(req, &resp) - 
require.NoError(t, err) + a.NoError(err) } func TestBadAuthFails(t *testing.T) { t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) var f fixtures.KMDFixture f.Setup(t) defer f.Shutdown() @@ -49,16 +51,17 @@ func TestBadAuthFails(t *testing.T) { // Make a client with a bad token badAPIToken := strings.Repeat("x", 64) client, err := client.MakeKMDClient(f.Sock, badAPIToken) - require.NoError(t, err) + a.NoError(err) // Test that `GET /v1/wallets` fails with the bad token req := kmdapi.APIV1GETWalletsRequest{} resp := kmdapi.APIV1GETWalletsResponse{} err = client.DoV1Request(req, &resp) - require.Error(t, err) + a.Error(err) } func TestGoodAuthSucceeds(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture f.Setup(t) @@ -69,5 +72,5 @@ func TestGoodAuthSucceeds(t *testing.T) { req := kmdapi.APIV1GETWalletsRequest{} resp := kmdapi.APIV1GETWalletsResponse{} err := f.Client.DoV1Request(req, &resp) - require.NoError(t, err) + a.NoError(err) } diff --git a/test/e2e-go/kmd/e2e_kmd_sqlite_test.go b/test/e2e-go/kmd/e2e_kmd_sqlite_test.go index 45cee971eb..63dda845df 100644 --- a/test/e2e-go/kmd/e2e_kmd_sqlite_test.go +++ b/test/e2e-go/kmd/e2e_kmd_sqlite_test.go @@ -26,6 +26,7 @@ import ( ) func TestNonAbsSQLiteWalletConfigFails(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture f.Initialize(t) @@ -35,12 +36,13 @@ func TestNonAbsSQLiteWalletConfigFails(t *testing.T) { cfg := `{"drivers":{"sqlite":{"wallets_dir":"not/absolute"}}}` err := f.TestConfig([]byte(cfg)) // Should return an error - require.NotNil(t, err) + a.NotNil(err) // Should return the correct error - require.Equal(t, err, config.ErrSQLiteWalletNotAbsolute) + a.Equal(err, config.ErrSQLiteWalletNotAbsolute) } func TestAbsSQLiteWalletConfigSucceeds(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture f.Initialize(t) @@ -50,5 +52,5 @@ func 
TestAbsSQLiteWalletConfigSucceeds(t *testing.T) { cfg := `{"drivers":{"sqlite":{"wallets_dir":"/very/absolute"}}}` err := f.TestConfig([]byte(cfg)) // Error should be nil - require.Nil(t, err) + a.Nil(err) } diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go index f10388a446..b119f716f6 100644 --- a/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go +++ b/test/e2e-go/kmd/e2e_kmd_wallet_keyops_test.go @@ -32,6 +32,7 @@ import ( ) func TestGenerateAndListKeys(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -43,10 +44,10 @@ func TestGenerateAndListKeys(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Key should not be empty - require.NotEmpty(t, resp0.Address) + a.NotEmpty(resp0.Address) // List public keys req1 := kmdapi.APIV1POSTKeyListRequest{ @@ -54,13 +55,13 @@ func TestGenerateAndListKeys(t *testing.T) { } resp1 := kmdapi.APIV1POSTKeyListResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // List should have exactly one entry - require.Equal(t, len(resp1.Addresses), 1) + a.Equal(len(resp1.Addresses), 1) // Only entry should equal generated public key - require.Equal(t, resp1.Addresses[0], resp0.Address) + a.Equal(resp1.Addresses[0], resp0.Address) // Generate another key req2 := kmdapi.APIV1POSTKeyRequest{ @@ -68,7 +69,7 @@ func TestGenerateAndListKeys(t *testing.T) { } resp2 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // List public keys req3 := kmdapi.APIV1POSTKeyListRequest{ @@ -76,13 +77,14 @@ func TestGenerateAndListKeys(t *testing.T) { } resp3 := kmdapi.APIV1POSTKeyListResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // List should have exactly two 
entries - require.Equal(t, len(resp3.Addresses), 2) + a.Equal(len(resp3.Addresses), 2) } func TestImportKey(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -100,10 +102,10 @@ func TestImportKey(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyImportResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Public key should be that of the key we imported - require.Equal(t, resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress()) + a.Equal(resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress()) // Try to import the same key req1 := kmdapi.APIV1POSTKeyImportRequest{ @@ -114,7 +116,7 @@ func TestImportKey(t *testing.T) { err = f.Client.DoV1Request(req1, &resp1) // Should fail (duplicate key) - require.Error(t, err) + a.Error(err) // List public keys req2 := kmdapi.APIV1POSTKeyListRequest{ @@ -122,16 +124,17 @@ func TestImportKey(t *testing.T) { } resp2 := kmdapi.APIV1POSTKeyListResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // List should have exactly one entry - require.Equal(t, len(resp2.Addresses), 1) + a.Equal(len(resp2.Addresses), 1) // Only entry should equal generated public key - require.Equal(t, resp2.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress()) + a.Equal(resp2.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress()) } func TestExportKey(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -149,10 +152,10 @@ func TestExportKey(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyImportResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Public key should be that of the key we imported - require.Equal(t, resp0.Address, 
basics.Address(secrets.SignatureVerifier).GetUserAddress()) + a.Equal(resp0.Address, basics.Address(secrets.SignatureVerifier).GetUserAddress()) // List public keys req1 := kmdapi.APIV1POSTKeyListRequest{ @@ -160,13 +163,13 @@ func TestExportKey(t *testing.T) { } resp1 := kmdapi.APIV1POSTKeyListResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // List should have exactly one entry - require.Equal(t, len(resp1.Addresses), 1) + a.Equal(len(resp1.Addresses), 1) // Only entry should equal generated public key - require.Equal(t, resp1.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress()) + a.Equal(resp1.Addresses[0], basics.Address(secrets.SignatureVerifier).GetUserAddress()) // Export the key req2 := kmdapi.APIV1POSTKeyExportRequest{ @@ -176,10 +179,10 @@ func TestExportKey(t *testing.T) { } resp2 := kmdapi.APIV1POSTKeyExportResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // Response should be same secret key - require.Equal(t, resp2.PrivateKey, crypto.PrivateKey(secrets.SK)) + a.Equal(resp2.PrivateKey, crypto.PrivateKey(secrets.SK)) // Export with wrong password should fail req3 := kmdapi.APIV1POSTKeyExportRequest{ @@ -189,10 +192,11 @@ func TestExportKey(t *testing.T) { } resp3 := kmdapi.APIV1POSTKeyExportResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.Error(t, err) + a.Error(err) } func TestDeleteKey(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -204,10 +208,10 @@ func TestDeleteKey(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Token should not be empty - require.NotEqual(t, resp0.Address, crypto.Digest{}) + a.NotEqual(resp0.Address, crypto.Digest{}) // List public keys req1 := kmdapi.APIV1POSTKeyListRequest{ @@ -215,13 +219,13 @@ func 
TestDeleteKey(t *testing.T) { } resp1 := kmdapi.APIV1POSTKeyListResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // List should have exactly one entry - require.Equal(t, len(resp1.Addresses), 1) + a.Equal(len(resp1.Addresses), 1) // Only entry should equal generated public key - require.Equal(t, resp1.Addresses[0], resp0.Address) + a.Equal(resp1.Addresses[0], resp0.Address) // Delete with wrong password should fail req2 := kmdapi.APIV1DELETEKeyRequest{ @@ -231,7 +235,7 @@ func TestDeleteKey(t *testing.T) { } resp2 := kmdapi.APIV1DELETEKeyResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.Error(t, err) + a.Error(err) // Try to delete the key req3 := kmdapi.APIV1DELETEKeyRequest{ @@ -241,7 +245,7 @@ func TestDeleteKey(t *testing.T) { } resp3 := kmdapi.APIV1DELETEKeyResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // List public keys req4 := kmdapi.APIV1POSTKeyListRequest{ @@ -249,13 +253,14 @@ func TestDeleteKey(t *testing.T) { } resp4 := kmdapi.APIV1POSTKeyListResponse{} err = f.Client.DoV1Request(req4, &resp4) - require.NoError(t, err) + a.NoError(err) // List should have exactly zero entries - require.Equal(t, len(resp4.Addresses), 0) + a.Equal(len(resp4.Addresses), 0) } func TestSignTransaction(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -274,7 +279,7 @@ func TestSignTransaction(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyImportResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Make a transaction tx := transactions.Transaction{ @@ -300,19 +305,20 @@ func TestSignTransaction(t *testing.T) { } resp1 := kmdapi.APIV1POSTTransactionSignResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // SignedTxn signature should not be empty var stx transactions.SignedTxn err = 
protocol.Decode(resp1.SignedTransaction, &stx) - require.NoError(t, err) - require.NotEqual(t, stx.Sig, crypto.Signature{}) + a.NoError(err) + a.NotEqual(stx.Sig, crypto.Signature{}) // TODO The SignedTxn should actually verify - // require.NoError(t, stx.Verify()) + // a.NoError(stx.Verify()) } func TestSignProgram(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -331,7 +337,7 @@ func TestSignProgram(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyImportResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) program := []byte("blah blah blah, not a real program, just some bytes to sign, kmd does not have a program interpreter to know if the program is legitimate, but it _does_ prefix the program with protocol.Program and we can verify that here below") @@ -346,19 +352,20 @@ func TestSignProgram(t *testing.T) { } resp1 := kmdapi.APIV1POSTProgramSignResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // SignedTxn signature should not be empty - require.NotEmpty(t, len(resp1.Signature), 0) + a.NotEmpty(len(resp1.Signature), 0) var sig crypto.Signature copy(sig[:], resp1.Signature) - require.NotEqual(t, sig, crypto.Signature{}) + a.NotEqual(sig, crypto.Signature{}) ph := logic.Program(program) - require.True(t, secrets.SignatureVerifier.Verify(ph, sig)) + a.True(secrets.SignatureVerifier.Verify(ph, sig)) } func BenchmarkSignTransaction(b *testing.B) { + a := require.New(fixtures.SynchronizedTest(b)) var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(b) defer f.Shutdown() @@ -376,7 +383,7 @@ func BenchmarkSignTransaction(b *testing.B) { } resp0 := kmdapi.APIV1POSTKeyImportResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(b, err) + a.NoError(err) // Make a transaction tx := transactions.Transaction{ @@ -404,12 +411,13 @@ func BenchmarkSignTransaction(b 
*testing.B) { } resp1 := kmdapi.APIV1POSTTransactionSignResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(b, err) + a.NoError(err) } }) } func TestMasterKeyImportExport(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -421,11 +429,11 @@ func TestMasterKeyImportExport(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Key should not be empty key0 := resp0.Address - require.NotEqual(t, key0, crypto.Digest{}) + a.NotEqual(key0, crypto.Digest{}) // Generate another key req1 := kmdapi.APIV1POSTKeyRequest{ @@ -433,11 +441,11 @@ func TestMasterKeyImportExport(t *testing.T) { } resp1 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Key should not be empty key1 := resp1.Address - require.NotEqual(t, key1, crypto.Digest{}) + a.NotEqual(key1, crypto.Digest{}) // Export master key with incorrect password should fail req2 := kmdapi.APIV1POSTMasterKeyExportRequest{ @@ -446,7 +454,7 @@ func TestMasterKeyImportExport(t *testing.T) { } resp2 := kmdapi.APIV1POSTMasterKeyExportResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.Error(t, err) + a.Error(err) // Export master key with correct password should succeed req3 := kmdapi.APIV1POSTMasterKeyExportRequest{ @@ -455,11 +463,11 @@ func TestMasterKeyImportExport(t *testing.T) { } resp3 := kmdapi.APIV1POSTMasterKeyExportResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // MDK should not be blank mdk0 := resp3.MasterDerivationKey - require.NotEqual(t, mdk0, crypto.MasterDerivationKey{}) + a.NotEqual(mdk0, crypto.MasterDerivationKey{}) // Create another wallet, don't import the MDK pw := "unrelated-password" @@ -470,11 +478,11 @@ func TestMasterKeyImportExport(t *testing.T) { } resp4 := 
kmdapi.APIV1POSTWalletResponse{} err = f.Client.DoV1Request(req4, &resp4) - require.NoError(t, err) + a.NoError(err) // Get the new wallet ID unrelatedWalletID := resp4.Wallet.ID - require.NotEmpty(t, unrelatedWalletID) + a.NotEmpty(unrelatedWalletID) // Get a wallet token req5 := kmdapi.APIV1POSTWalletInitRequest{ @@ -483,7 +491,7 @@ func TestMasterKeyImportExport(t *testing.T) { } resp5 := kmdapi.APIV1POSTWalletInitResponse{} err = f.Client.DoV1Request(req5, &resp5) - require.NoError(t, err) + a.NoError(err) // Generate a key for the unrelated wallet req6 := kmdapi.APIV1POSTKeyRequest{ @@ -491,15 +499,15 @@ func TestMasterKeyImportExport(t *testing.T) { } resp6 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req6, &resp6) - require.NoError(t, err) + a.NoError(err) // Key should not be empty key2 := resp6.Address - require.NotEqual(t, key2, crypto.Digest{}) + a.NotEqual(key2, crypto.Digest{}) // Key should not be equal to either of the keys from the first wallet - require.NotEqual(t, key2, key0) - require.NotEqual(t, key2, key1) + a.NotEqual(key2, key0) + a.NotEqual(key2, key1) // Create another wallet, import the MDK pw = "related-password" @@ -511,11 +519,11 @@ func TestMasterKeyImportExport(t *testing.T) { } resp7 := kmdapi.APIV1POSTWalletResponse{} err = f.Client.DoV1Request(req7, &resp7) - require.NoError(t, err) + a.NoError(err) // Get the new wallet ID relatedWalletID := resp7.Wallet.ID - require.NotEmpty(t, relatedWalletID) + a.NotEmpty(relatedWalletID) // Get a wallet token req8 := kmdapi.APIV1POSTWalletInitRequest{ @@ -524,7 +532,7 @@ func TestMasterKeyImportExport(t *testing.T) { } resp8 := kmdapi.APIV1POSTWalletInitResponse{} err = f.Client.DoV1Request(req8, &resp8) - require.NoError(t, err) + a.NoError(err) relatedWalletHandleToken := resp8.WalletHandleToken @@ -534,11 +542,11 @@ func TestMasterKeyImportExport(t *testing.T) { } resp9 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req9, &resp9) - require.NoError(t, err) + 
a.NoError(err) // Key should not be empty key3 := resp9.Address - require.NotEqual(t, key3, crypto.Digest{}) + a.NotEqual(key3, crypto.Digest{}) // Generate another key req10 := kmdapi.APIV1POSTKeyRequest{ @@ -546,17 +554,17 @@ func TestMasterKeyImportExport(t *testing.T) { } resp10 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req10, &resp10) - require.NoError(t, err) + a.NoError(err) // Key should not be empty key4 := resp1.Address - require.NotEqual(t, key4, crypto.Digest{}) + a.NotEqual(key4, crypto.Digest{}) // key3 should be the same as key0 - require.Equal(t, key3, key0) + a.Equal(key3, key0) // key4 should be the same as key1 - require.Equal(t, key4, key1) + a.Equal(key4, key1) // Export master key for related wallet req11 := kmdapi.APIV1POSTMasterKeyExportRequest{ @@ -565,17 +573,18 @@ func TestMasterKeyImportExport(t *testing.T) { } resp11 := kmdapi.APIV1POSTMasterKeyExportResponse{} err = f.Client.DoV1Request(req11, &resp11) - require.NoError(t, err) + a.NoError(err) // MDK should not be blank mdk1 := resp11.MasterDerivationKey - require.NotEqual(t, mdk1, crypto.MasterDerivationKey{}) + a.NotEqual(mdk1, crypto.MasterDerivationKey{}) // MDK should be the same as the first mdk - require.Equal(t, mdk0, mdk1) + a.Equal(mdk0, mdk1) } func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -589,11 +598,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp := kmdapi.APIV1POSTKeyResponse{} err := f.Client.DoV1Request(req, &resp) - require.NoError(t, err) + a.NoError(err) // Key should not be empty addr := resp.Address - require.NotEmpty(t, addr) + a.NotEmpty(addr) addrs = append(addrs, addr) } @@ -604,11 +613,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp0 := kmdapi.APIV1POSTMasterKeyExportResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + 
a.NoError(err) // MDK should not be blank mdk := resp0.MasterDerivationKey - require.NotEqual(t, mdk, crypto.MasterDerivationKey{}) + a.NotEqual(mdk, crypto.MasterDerivationKey{}) // Create another wallet, import the MDK pw := "related-password" @@ -620,11 +629,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp1 := kmdapi.APIV1POSTWalletResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Get the new wallet ID relatedWalletID := resp1.Wallet.ID - require.NotEmpty(t, relatedWalletID) + a.NotEmpty(relatedWalletID) // Get a wallet token req2 := kmdapi.APIV1POSTWalletInitRequest{ @@ -633,7 +642,7 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp2 := kmdapi.APIV1POSTWalletInitResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) relatedWalletHandleToken := resp2.WalletHandleToken @@ -643,14 +652,14 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp3 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // Key should not be empty addr0 := resp3.Address - require.NotEmpty(t, addr0) + a.NotEmpty(addr0) // key0 should be the same as keys[0] - require.Equal(t, addr0, addrs[0]) + a.Equal(addr0, addrs[0]) // Export keys[1]'s secret key from the first wallet req4 := kmdapi.APIV1POSTKeyExportRequest{ @@ -660,11 +669,11 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp4 := kmdapi.APIV1POSTKeyExportResponse{} err = f.Client.DoV1Request(req4, &resp4) - require.NoError(t, err) + a.NoError(err) // Exported secret should not be blank key1Secret := resp4.PrivateKey - require.NotEqual(t, key1Secret, crypto.PrivateKey{}) + a.NotEqual(key1Secret, crypto.PrivateKey{}) // Import keys[1] into the second wallet req5 := kmdapi.APIV1POSTKeyImportRequest{ @@ -673,10 +682,10 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp5 := 
kmdapi.APIV1POSTKeyImportResponse{} err = f.Client.DoV1Request(req5, &resp5) - require.NoError(t, err) + a.NoError(err) // Address should be addrs[1] - require.Equal(t, resp5.Address, addrs[1]) + a.Equal(resp5.Address, addrs[1]) // Generate another key in the second wallet req6 := kmdapi.APIV1POSTKeyRequest{ @@ -684,12 +693,12 @@ func TestMasterKeyGeneratePastImportedKeys(t *testing.T) { } resp6 := kmdapi.APIV1POSTKeyResponse{} err = f.Client.DoV1Request(req6, &resp6) - require.NoError(t, err) + a.NoError(err) // Address should not be empty addr1 := resp6.Address - require.NotEmpty(t, addr1) + a.NotEmpty(addr1) // Address should be equal to addrs[2] - require.Equal(t, addr1, addrs[2]) + a.Equal(addr1, addrs[2]) } diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go index e8fa47930e..3a5576a6ee 100644 --- a/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go +++ b/test/e2e-go/kmd/e2e_kmd_wallet_multisig_test.go @@ -32,12 +32,14 @@ import ( ) func addrToPK(t *testing.T, addr string) crypto.PublicKey { + req := require.New(fixtures.SynchronizedTest(t)) a, err := basics.UnmarshalChecksumAddress(addr) - require.NoError(t, err) + req.NoError(err) return crypto.PublicKey(a) } func TestMultisigImportList(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -49,12 +51,12 @@ func TestMultisigImportList(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) addr0 := resp0.Address pk0 := addrToPK(t, addr0) err = f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) addr1 := resp0.Address pk1 := addrToPK(t, addr1) @@ -67,7 +69,7 @@ func TestMultisigImportList(t *testing.T) { } resp1 := kmdapi.APIV1POSTMultisigImportResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) addr := 
resp1.Address // List multisig addresses and make sure it's there @@ -76,14 +78,15 @@ func TestMultisigImportList(t *testing.T) { } resp2 := kmdapi.APIV1POSTMultisigListResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // Make sure the imported multisig address is there - require.Equal(t, len(resp2.Addresses), 1) - require.Equal(t, resp2.Addresses[0], addr) + a.Equal(len(resp2.Addresses), 1) + a.Equal(resp2.Addresses[0], addr) } func TestMultisigExportDelete(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -95,12 +98,12 @@ func TestMultisigExportDelete(t *testing.T) { } resp0 := kmdapi.APIV1POSTKeyResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) addr0 := resp0.Address pk0 := addrToPK(t, addr0) err = f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) addr1 := resp0.Address pk1 := addrToPK(t, addr1) @@ -113,7 +116,7 @@ func TestMultisigExportDelete(t *testing.T) { } resp1 := kmdapi.APIV1POSTMultisigImportResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) addr := resp1.Address // Export the multisig preimage @@ -123,12 +126,12 @@ func TestMultisigExportDelete(t *testing.T) { } resp2 := kmdapi.APIV1POSTMultisigExportResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // Make sure the exported preimage is correct - require.Equal(t, req1.Version, resp2.Version) - require.Equal(t, req1.Threshold, resp2.Threshold) - require.Equal(t, req1.PKs, resp2.PKs) + a.Equal(req1.Version, resp2.Version) + a.Equal(req1.Threshold, resp2.Threshold) + a.Equal(req1.PKs, resp2.PKs) // Delete the multisig preimage req3 := kmdapi.APIV1DELETEMultisigRequest{ @@ -138,7 +141,7 @@ func TestMultisigExportDelete(t *testing.T) { } resp3 := kmdapi.APIV1DELETEMultisigResponse{} err = 
f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // List multisig addresses and make sure it's empty req4 := kmdapi.APIV1POSTMultisigListRequest{ @@ -146,30 +149,31 @@ func TestMultisigExportDelete(t *testing.T) { } resp4 := kmdapi.APIV1POSTMultisigListResponse{} err = f.Client.DoV1Request(req4, &resp4) - require.NoError(t, err) + a.NoError(err) // Make sure the imported multisig address is gone - require.Equal(t, len(resp4.Addresses), 0) + a.Equal(len(resp4.Addresses), 0) } func TestMultisigSign(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) defer f.Shutdown() resp, err := f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk1 := addrToPK(t, resp.Address) resp, err = f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk2 := addrToPK(t, resp.Address) pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported // Create a 2-of-3 multisig account from the three public keys resp1, err := f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3}) - require.NoError(t, err) + a.NoError(err) msigAddr := addrToPK(t, resp1.Address) // Make a transaction spending from the multisig address @@ -197,11 +201,11 @@ func TestMultisigSign(t *testing.T) { } resp2 := kmdapi.APIV1POSTMultisigTransactionSignResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) var msig crypto.MultisigSig err = protocol.Decode(resp2.Multisig, &msig) - require.NoError(t, err) + a.NoError(err) // Try to add another signature req3 := kmdapi.APIV1POSTMultisigTransactionSignRequest{ @@ -213,39 +217,40 @@ func TestMultisigSign(t *testing.T) { } resp3 := kmdapi.APIV1POSTMultisigTransactionSignResponse{} err = 
f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // Assemble them into a signed transaction and see if it verifies _, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, msig) - require.NoError(t, err) + a.NoError(err) // TODO See if the signature verifies // err = stxn.Verify() - // require.NoError(t, err) + // a.NoError(err) } func TestMultisigSignWithSigner(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) defer f.Shutdown() resp, err := f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk1 := addrToPK(t, resp.Address) resp, err = f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk2 := addrToPK(t, resp.Address) pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported sender, err := f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pkSender := addrToPK(t, sender.Address) // Create a 2-of-3 multisig account from the three public keys resp1, err := f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3}) - require.NoError(t, err) + a.NoError(err) msigAddr := addrToPK(t, resp1.Address) // Make a transaction spending from the multisig address @@ -278,11 +283,11 @@ func TestMultisigSignWithSigner(t *testing.T) { } resp2 := kmdapi.APIV1POSTMultisigTransactionSignResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) var msig crypto.MultisigSig err = protocol.Decode(resp2.Multisig, &msig) - require.NoError(t, err) + a.NoError(err) // Try to add another signature req3 := kmdapi.APIV1POSTMultisigTransactionSignRequest{ @@ -295,35 +300,36 @@ func TestMultisigSignWithSigner(t *testing.T) { } resp3 := 
kmdapi.APIV1POSTMultisigTransactionSignResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) // Assemble them into a signed transaction and see if it verifies _, err = transactions.AssembleSignedTxn(tx, crypto.Signature{}, msig) - require.NoError(t, err) + a.NoError(err) } func TestMultisigSignWithWrongSigner(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) defer f.Shutdown() resp, err := f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk1 := addrToPK(t, resp.Address) resp, err = f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk2 := addrToPK(t, resp.Address) pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported sender, err := f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pkSender := addrToPK(t, sender.Address) // Create a 2-of-3 multisig account from the three public keys _, err = f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3}) - require.NoError(t, err) + a.NoError(err) // Make a transaction spending from the multisig address tx := transactions.Transaction{ @@ -355,28 +361,29 @@ func TestMultisigSignWithWrongSigner(t *testing.T) { resp2 := kmdapi.APIV1POSTMultisigTransactionSignResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.Error(t, err) + a.Error(err) } func TestMultisigSignProgram(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) defer f.Shutdown() resp, err := f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk1 := addrToPK(t, resp.Address) resp, err = 
f.Client.GenerateKey([]byte(walletHandleToken)) - require.NoError(t, err) + a.NoError(err) pk2 := addrToPK(t, resp.Address) pk3 := crypto.PublicKey{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1} // some public key we haven't imported // Create a 2-of-3 multisig account from the three public keys resp1, err := f.Client.ImportMultisigAddr([]byte(walletHandleToken), 1, 2, []crypto.PublicKey{pk1, pk2, pk3}) - require.NoError(t, err) + a.NoError(err) msigAddr := addrToPK(t, resp1.Address) program := []byte("blah blah blah, not a real program, just some bytes to sign, kmd does not have a program interpreter to know if the program is legitimate, but it _does_ prefix the program with protocol.Program and we can verify that here below") @@ -392,11 +399,11 @@ func TestMultisigSignProgram(t *testing.T) { } resp2 := kmdapi.APIV1POSTMultisigProgramSignResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) var msig crypto.MultisigSig err = protocol.Decode(resp2.Multisig, &msig) - require.NoError(t, err) + a.NoError(err) // Try to add another signature req3 := kmdapi.APIV1POSTMultisigProgramSignRequest{ @@ -409,12 +416,12 @@ func TestMultisigSignProgram(t *testing.T) { } resp3 := kmdapi.APIV1POSTMultisigProgramSignResponse{} err = f.Client.DoV1Request(req3, &resp3) - require.NoError(t, err) + a.NoError(err) err = protocol.Decode(resp3.Multisig, &msig) - require.NoError(t, err) + a.NoError(err) ok, err := crypto.MultisigVerify(logic.Program(program), crypto.Digest(msigAddr), msig) - require.NoError(t, err) - require.True(t, ok) + a.NoError(err) + a.True(ok) } diff --git a/test/e2e-go/kmd/e2e_kmd_wallet_test.go b/test/e2e-go/kmd/e2e_kmd_wallet_test.go index d263dba907..e0c7ccd9bf 100644 --- a/test/e2e-go/kmd/e2e_kmd_wallet_test.go +++ b/test/e2e-go/kmd/e2e_kmd_wallet_test.go @@ -27,6 +27,7 @@ import ( ) func TestWalletCreation(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) 
t.Parallel() var f fixtures.KMDFixture f.Setup(t) @@ -36,10 +37,10 @@ func TestWalletCreation(t *testing.T) { req0 := kmdapi.APIV1GETWalletsRequest{} resp0 := kmdapi.APIV1GETWalletsResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Shouldn't be any wallets yet - require.Equal(t, len(resp0.Wallets), 0) + a.Equal(len(resp0.Wallets), 0) // Create a wallet walletName := "default" @@ -51,16 +52,16 @@ func TestWalletCreation(t *testing.T) { } resp1 := kmdapi.APIV1POSTWalletResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Test that `GET /v1/wallets` returns the new wallet req2 := kmdapi.APIV1GETWalletsRequest{} resp2 := kmdapi.APIV1GETWalletsResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // Should be one wallet - require.Equal(t, len(resp2.Wallets), 1) + a.Equal(len(resp2.Wallets), 1) // Try to create a wallet with the same name req3 := kmdapi.APIV1POSTWalletRequest{ @@ -72,10 +73,11 @@ func TestWalletCreation(t *testing.T) { err = f.Client.DoV1Request(req3, &resp3) // Should be an error - require.Error(t, err) + a.Error(err) } func TestBlankWalletCreation(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture f.Setup(t) @@ -89,25 +91,26 @@ func TestBlankWalletCreation(t *testing.T) { } resp0 := kmdapi.APIV1POSTWalletResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Test that `GET /v1/wallets` returns the new wallet req1 := kmdapi.APIV1GETWalletsRequest{} resp1 := kmdapi.APIV1GETWalletsResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Should be one wallet - require.Equal(t, len(resp1.Wallets), 1) + a.Equal(len(resp1.Wallets), 1) // Name should not be blank - require.NotEmpty(t, resp1.Wallets[0].Name) + a.NotEmpty(resp1.Wallets[0].Name) // Name should be equal to ID - require.Equal(t, 
resp1.Wallets[0].Name, resp1.Wallets[0].ID) + a.Equal(resp1.Wallets[0].Name, resp1.Wallets[0].ID) } func TestWalletRename(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture f.Setup(t) @@ -123,19 +126,19 @@ func TestWalletRename(t *testing.T) { } resp0 := kmdapi.APIV1POSTWalletResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Test that `GET /v1/wallets` returns the new wallet req1 := kmdapi.APIV1GETWalletsRequest{} resp1 := kmdapi.APIV1GETWalletsResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Should be one wallet - require.Equal(t, len(resp1.Wallets), 1) + a.Equal(len(resp1.Wallets), 1) // Name should be correct - require.Equal(t, resp1.Wallets[0].Name, walletName) + a.Equal(resp1.Wallets[0].Name, walletName) // Try to rename the wallet with the wrong password newWalletName := "newWallet4u" @@ -148,7 +151,7 @@ func TestWalletRename(t *testing.T) { err = f.Client.DoV1Request(req2, &resp2) // Should be an error - require.Error(t, err) + a.Error(err) // Try to rename the wallet with the correct password req3 := kmdapi.APIV1POSTWalletRenameRequest{ @@ -160,31 +163,32 @@ func TestWalletRename(t *testing.T) { err = f.Client.DoV1Request(req3, &resp3) // Should succeed - require.NoError(t, err) + a.NoError(err) // Returned wallet should have the new name - require.Equal(t, newWalletName, resp3.Wallet.Name) + a.Equal(newWalletName, resp3.Wallet.Name) // Returned wallet should have the correct ID - require.Equal(t, resp1.Wallets[0].ID, resp3.Wallet.ID) + a.Equal(resp1.Wallets[0].ID, resp3.Wallet.ID) // Test that `GET /v1/wallets` returns the new wallet req4 := kmdapi.APIV1GETWalletsRequest{} resp4 := kmdapi.APIV1GETWalletsResponse{} err = f.Client.DoV1Request(req4, &resp4) - require.NoError(t, err) + a.NoError(err) // Should be one wallet - require.Equal(t, len(resp4.Wallets), 1) + a.Equal(len(resp4.Wallets), 1) // Returned 
wallet should have the new name - require.Equal(t, newWalletName, resp4.Wallets[0].Name) + a.Equal(newWalletName, resp4.Wallets[0].Name) // Returned wallet should have the correct ID - require.Equal(t, resp1.Wallets[0].ID, resp4.Wallets[0].ID) + a.Equal(resp1.Wallets[0].ID, resp4.Wallets[0].ID) } func TestWalletSessionRelease(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -196,10 +200,10 @@ func TestWalletSessionRelease(t *testing.T) { } resp0 := kmdapi.APIV1POSTWalletInfoResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Should return the wallet we created - require.Equal(t, resp0.WalletHandle.Wallet.Name, f.WalletName) + a.Equal(resp0.WalletHandle.Wallet.Name, f.WalletName) // Test that `POST /v1/wallet/release` succeeds req1 := kmdapi.APIV1POSTWalletReleaseRequest{ @@ -207,7 +211,7 @@ func TestWalletSessionRelease(t *testing.T) { } resp1 := kmdapi.APIV1POSTWalletReleaseResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Test that `POST /v1/wallet/info` no longer works with this token req2 := kmdapi.APIV1POSTWalletInfoRequest{ @@ -217,13 +221,14 @@ func TestWalletSessionRelease(t *testing.T) { err = f.Client.DoV1Request(req2, &resp2) // Error response - require.Error(t, err) + a.Error(err) // Should not return the wallet we created - require.NotEqual(t, resp2.WalletHandle.Wallet.Name, f.WalletName) + a.NotEqual(resp2.WalletHandle.Wallet.Name, f.WalletName) } func TestWalletSessionRenew(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture walletHandleToken := f.SetupWithWallet(t) @@ -235,7 +240,7 @@ func TestWalletSessionRenew(t *testing.T) { } resp0 := kmdapi.APIV1POSTWalletInfoResponse{} err := f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Note # seconds until expiration 
expiresSecsInitial := resp0.WalletHandle.ExpiresSeconds @@ -249,11 +254,11 @@ func TestWalletSessionRenew(t *testing.T) { } resp1 := kmdapi.APIV1POSTWalletInfoResponse{} err = f.Client.DoV1Request(req1, &resp1) - require.NoError(t, err) + a.NoError(err) // Should have decreased expiresSecsLater := resp1.WalletHandle.ExpiresSeconds - require.True(t, expiresSecsLater < expiresSecsInitial) + a.True(expiresSecsLater < expiresSecsInitial) // Renew the handle req2 := kmdapi.APIV1POSTWalletRenewRequest{ @@ -261,14 +266,15 @@ func TestWalletSessionRenew(t *testing.T) { } resp2 := kmdapi.APIV1POSTWalletRenewResponse{} err = f.Client.DoV1Request(req2, &resp2) - require.NoError(t, err) + a.NoError(err) // Should have increased expiresSecsRenewed := resp2.WalletHandle.ExpiresSeconds - require.True(t, expiresSecsRenewed > expiresSecsLater) + a.True(expiresSecsRenewed > expiresSecsLater) } func TestWalletSessionExpiry(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) t.Parallel() var f fixtures.KMDFixture // Write a config for 1 second session expirations @@ -276,7 +282,7 @@ func TestWalletSessionExpiry(t *testing.T) { f.SetupWithConfig(t, cfg) walletHandleToken, err := f.MakeWalletAndHandleToken() defer f.Shutdown() - require.NoError(t, err) + a.NoError(err) // Get deets about this wallet token to confirm the token works req0 := kmdapi.APIV1POSTWalletInfoRequest{ @@ -284,7 +290,7 @@ func TestWalletSessionExpiry(t *testing.T) { } resp0 := kmdapi.APIV1POSTWalletInfoResponse{} err = f.Client.DoV1Request(req0, &resp0) - require.NoError(t, err) + a.NoError(err) // Wait for token to expire time.Sleep(2 * time.Second) @@ -297,5 +303,5 @@ func TestWalletSessionExpiry(t *testing.T) { err = f.Client.DoV1Request(req1, &resp1) // Token should have expired - require.Error(t, err) + a.Error(err) } diff --git a/test/e2e-go/perf/basic_test.go b/test/e2e-go/perf/basic_test.go index 085072ede7..d9748e5cdf 100644 --- a/test/e2e-go/perf/basic_test.go +++ 
b/test/e2e-go/perf/basic_test.go @@ -59,6 +59,7 @@ func queuePayments(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, q <-chan } func signer(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, wh []byte, txnChan <-chan *transactions.Transaction, sigTxnChan chan<- *transactions.SignedTxn) { + a := require.New(fixtures.SynchronizedTest(b)) for { txn := <-txnChan if txn == nil { @@ -69,7 +70,7 @@ func signer(b *testing.B, wg *sync.WaitGroup, c libgoal.Client, wh []byte, txnCh if err != nil { fmt.Printf("Error signing: %v\n", err) } - require.NoError(b, err) + a.NoError(err) sigTxnChan <- &stxn } @@ -84,6 +85,7 @@ func BenchmarkPaymentsThroughput(b *testing.B) { } func doBenchTemplate(b *testing.B, template string, moneynode string) { + a := require.New(fixtures.SynchronizedTest(b)) fmt.Printf("Starting to benchmark template %s\n", template) // consensusTestBigBlocks is a version of ConsensusV0 used for testing @@ -106,15 +108,15 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) { c := fixture.GetLibGoalClientForNamedNode(moneynode) wallet, err := c.GetUnencryptedWalletHandle() - require.NoError(b, err) + a.NoError(err) addrs, err := c.ListAddresses(wallet) - require.NoError(b, err) - require.True(b, len(addrs) > 0) + a.NoError(err) + a.True(len(addrs) > 0) addr := addrs[0] suggest, err := c.SuggestedParams() - require.NoError(b, err) + a.NoError(err) var genesisHash crypto.Digest copy(genesisHash[:], suggest.GenesisHash) @@ -133,7 +135,7 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) { fmt.Printf("Pre-signing %d transactions..\n", numTransactions) wh, err := c.GetUnencryptedWalletHandle() - require.NoError(b, err) + a.NoError(err) var sigWg sync.WaitGroup txnChan := make(chan *transactions.Transaction, 100) @@ -145,13 +147,13 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) { go func() { sender, err := basics.UnmarshalChecksumAddress(addr) - require.NoError(b, err) + a.NoError(err) round, err 
:= c.CurrentRound() - require.NoError(b, err) + a.NoError(err) params, err := c.SuggestedParams() - require.NoError(b, err) + a.NoError(err) proto := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)] for txi := 0; txi < numTransactions; txi++ { @@ -192,11 +194,11 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) { } status, err = c.Status() - require.NoError(b, err) + a.NoError(err) fmt.Printf("Waiting for round %d to start benchmark..\n", status.LastRound+1) status, err = c.WaitForRound(status.LastRound + 1) - require.NoError(b, err) + a.NoError(err) b.StartTimer() @@ -232,7 +234,7 @@ func doBenchTemplate(b *testing.B, template string, moneynode string) { _, err = fixture.WaitForConfirmedTxn(status.LastRound+100, addr, tx.ID().String()) fmt.Printf("Waiting for confirmation transaction to commit..\n") - require.NoError(b, err) + a.NoError(err) } }) diff --git a/test/e2e-go/restAPI/restClient_test.go b/test/e2e-go/restAPI/restClient_test.go index 77d39199f1..a53e808b83 100644 --- a/test/e2e-go/restAPI/restClient_test.go +++ b/test/e2e-go/restAPI/restClient_test.go @@ -106,10 +106,11 @@ func isLetterOrSpace(s string) bool { } func getMaxBalAddr(t *testing.T, testClient libgoal.Client, addresses []string) (someBal uint64, someAddress string) { + a := require.New(fixtures.SynchronizedTest(t)) someBal = 0 for _, addr := range addresses { bal, err := testClient.GetBalance(addr) - require.NoError(t, err) + a.NoError(err) if bal > someBal { someAddress = addr someBal = bal @@ -119,6 +120,7 @@ func getMaxBalAddr(t *testing.T, testClient libgoal.Client, addresses []string) } func getDestAddr(t *testing.T, testClient libgoal.Client, addresses []string, someAddress string, wh []byte) (toAddress string) { + a := require.New(fixtures.SynchronizedTest(t)) if len(addresses) > 1 { for _, addr := range addresses { if addr != someAddress { @@ -129,11 +131,12 @@ func getDestAddr(t *testing.T, testClient libgoal.Client, addresses []string, so } 
var err error toAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) return } func waitForRoundOne(t *testing.T, testClient libgoal.Client) { + a := require.New(fixtures.SynchronizedTest(t)) errchan := make(chan error) quit := make(chan struct{}) go func() { @@ -145,7 +148,7 @@ func waitForRoundOne(t *testing.T, testClient libgoal.Client) { }() select { case err := <-errchan: - require.NoError(t, err) + a.NoError(err) case <-time.After(1 * time.Minute): // Wait 1 minute (same as WaitForRound) close(quit) t.Fatalf("%s: timeout waiting for round 1", t.Name()) @@ -155,8 +158,9 @@ func waitForRoundOne(t *testing.T, testClient libgoal.Client) { var errWaitForTransactionTimeout = errors.New("wait for transaction timed out") func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, txID string, timeout time.Duration) (tx v1.Transaction, err error) { + a := require.New(fixtures.SynchronizedTest(t)) rnd, err := testClient.Status() - require.NoError(t, err) + a.NoError(err) if rnd.LastRound == 0 { t.Fatal("it is currently round 0 but we need to wait for a transaction that might happen this round but we'll never know if that happens because ConfirmedRound==0 is indestinguishable from not having happened") } @@ -167,8 +171,8 @@ func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, tx tx, err = testClient.PendingTransactionInformation(txID) } if err == nil { - require.NotEmpty(t, tx) - require.Empty(t, tx.PoolError) + a.NotEmpty(tx) + a.Empty(tx.PoolError) if tx.ConfirmedRound > 0 { return } @@ -182,31 +186,34 @@ func waitForTransaction(t *testing.T, testClient libgoal.Client, fromAddress, tx } func TestClientCanGetStatus(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient statusResponse, err := testClient.Status() - require.NoError(t, err) - require.NotEmpty(t, statusResponse) + a.NoError(err) + 
a.NotEmpty(statusResponse) testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1) statusResponse2, err := testClient.Status() - require.NoError(t, err) - require.NotEmpty(t, statusResponse2) - require.True(t, statusResponse2.LastRound >= statusResponse.LastRound) + a.NoError(err) + a.NotEmpty(statusResponse2) + a.True(statusResponse2.LastRound >= statusResponse.LastRound) } func TestClientCanGetStatusAfterBlock(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient statusResponse, err := testClient.WaitForRound(1) - require.NoError(t, err) - require.NotEmpty(t, statusResponse) + a.NoError(err) + a.NotEmpty(statusResponse) testClient.SetAPIVersionAffinity(algodclient.APIVersionV2, kmdclient.APIVersionV1) statusResponse, err = testClient.WaitForRound(statusResponse.LastRound + 1) - require.NoError(t, err) - require.NotEmpty(t, statusResponse) + a.NoError(err) + a.NotEmpty(statusResponse) } func TestTransactionsByAddr(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) var localFixture fixtures.RestClientFixture localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer localFixture.Shutdown() @@ -214,185 +221,196 @@ func TestTransactionsByAddr(t *testing.T) { testClient := localFixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) _, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") } toAddress := getDestAddr(t, testClient, addresses, someAddress, wh) tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0) - require.NoError(t, err) + a.NoError(err) txID := tx.ID() rnd, err := testClient.Status() - require.NoError(t, err) + a.NoError(err) 
t.Logf("rnd[%d] created txn %s", rnd.LastRound, txID) _, err = waitForTransaction(t, testClient, someAddress, txID.String(), 15*time.Second) - require.NoError(t, err) + a.NoError(err) // what is my round? rnd, err = testClient.Status() - require.NoError(t, err) + a.NoError(err) t.Logf("rnd %d", rnd.LastRound) // Now let's get the transaction restClient, err := localFixture.NC.AlgodClient() - require.NoError(t, err) + a.NoError(err) res, err := restClient.TransactionsByAddr(toAddress, 0, rnd.LastRound, 100) - require.NoError(t, err) - require.Equal(t, 1, len(res.Transactions)) + a.NoError(err) + a.Equal(1, len(res.Transactions)) for _, tx := range res.Transactions { - require.Equal(t, tx.From, someAddress) - require.Equal(t, tx.Payment.Amount, uint64(100000)) - require.Equal(t, tx.Fee, uint64(10000)) + a.Equal(tx.From, someAddress) + a.Equal(tx.Payment.Amount, uint64(100000)) + a.Equal(tx.Fee, uint64(10000)) } } func TestClientCanGetVersion(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient versionResponse, err := testClient.AlgodVersions() - require.NoError(t, err) - require.NotEmpty(t, versionResponse) + a.NoError(err) + a.NotEmpty(versionResponse) } func TestClientCanGetSuggestedFee(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient suggestedFeeResponse, err := testClient.SuggestedFee() - require.NoError(t, err) + a.NoError(err) _ = suggestedFeeResponse // per-byte-fee is allowed to be zero } func TestClientCanGetMinTxnFee(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient suggestedParamsRes, err := testClient.SuggestedParams() - require.NoError(t, err) - require.Truef(t, suggestedParamsRes.MinTxnFee > 0, "min txn fee not supplied") + a.NoError(err) + a.Truef(suggestedParamsRes.MinTxnFee > 0, "min txn fee not 
supplied") } func TestClientCanGetBlockInfo(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) blockResponse, err := testClient.Block(1) - require.NoError(t, err) - require.NotEmpty(t, blockResponse) + a.NoError(err) + a.NotEmpty(blockResponse) } func TestClientRejectsBadFromAddressWhenSending(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) badAccountAddress := "This is absolutely not a valid account address." goodAccountAddress := addresses[0] _, err = testClient.SendPaymentFromWallet(wh, nil, badAccountAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientRejectsBadToAddressWhenSending(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) badAccountAddress := "This is absolutely not a valid account address." 
goodAccountAddress := addresses[0] _, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, badAccountAddress, 10000, 100000, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientRejectsMutatedFromAddressWhenSending(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) goodAccountAddress := addresses[0] var unmutatedAccountAddress string if len(addresses) > 1 { unmutatedAccountAddress = addresses[1] } else { unmutatedAccountAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } mutatedAccountAddress := mutateStringAtIndex(unmutatedAccountAddress, 0) _, err = testClient.SendPaymentFromWallet(wh, nil, mutatedAccountAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientRejectsMutatedToAddressWhenSending(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) goodAccountAddress := addresses[0] var unmutatedAccountAddress string if len(addresses) > 1 { unmutatedAccountAddress = addresses[1] } else { unmutatedAccountAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } mutatedAccountAddress := mutateStringAtIndex(unmutatedAccountAddress, 0) _, err = testClient.SendPaymentFromWallet(wh, nil, goodAccountAddress, mutatedAccountAddress, 10000, 100000, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey(t *testing.T) { + a := 
require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) goodAccountAddress := addresses[0] nodeDoesNotHaveKeyForThisAddress := "NJY27OQ2ZXK6OWBN44LE4K43TA2AV3DPILPYTHAJAMKIVZDWTEJKZJKO4A" _, err = testClient.SendPaymentFromWallet(wh, nil, nodeDoesNotHaveKeyForThisAddress, goodAccountAddress, 10000, 100000, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientOversizedNote(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) fromAddress := addresses[0] var toAddress string if len(addresses) > 1 { toAddress = addresses[1] } else { toAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } maxTxnNoteBytes := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnNoteBytes note := make([]byte, maxTxnNoteBytes+1) _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, 100000, note, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientCanSendAndGetNote(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) _, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") @@ -401,20 +419,21 @@ func 
TestClientCanSendAndGetNote(t *testing.T) { maxTxnNoteBytes := config.Consensus[protocol.ConsensusCurrentVersion].MaxTxnNoteBytes note := make([]byte, maxTxnNoteBytes) tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, note, "", 0, 0) - require.NoError(t, err) + a.NoError(err) txStatus, err := waitForTransaction(t, testClient, someAddress, tx.ID().String(), 15*time.Second) - require.NoError(t, err) - require.Equal(t, note, txStatus.Note) + a.NoError(err) + a.Equal(note, txStatus.Note) } func TestClientCanGetTransactionStatus(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) _, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") @@ -422,55 +441,57 @@ func TestClientCanGetTransactionStatus(t *testing.T) { toAddress := getDestAddr(t, testClient, addresses, someAddress, wh) tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0) t.Log(string(protocol.EncodeJSON(tx))) - require.NoError(t, err) + a.NoError(err) t.Log(tx.ID().String()) _, err = waitForTransaction(t, testClient, someAddress, tx.ID().String(), 15*time.Second) - require.NoError(t, err) + a.NoError(err) } func TestAccountBalance(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) _, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr 
with funds") } toAddress, err := testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) tx, err := testClient.SendPaymentFromWallet(wh, nil, someAddress, toAddress, 10000, 100000, nil, "", 0, 0) - require.NoError(t, err) + a.NoError(err) _, err = waitForTransaction(t, testClient, someAddress, tx.ID().String(), 15*time.Second) - require.NoError(t, err) + a.NoError(err) account, err := testClient.AccountInformation(toAddress) - require.NoError(t, err) - require.Equal(t, account.AmountWithoutPendingRewards, uint64(100000)) - require.Truef(t, account.Amount >= 100000, "account must have received money, and account information endpoint must print it") + a.NoError(err) + a.Equal(account.AmountWithoutPendingRewards, uint64(100000)) + a.Truef(account.Amount >= 100000, "account must have received money, and account information endpoint must print it") } func TestAccountParticipationInfo(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) _, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") } - require.NoError(t, err) + a.NoError(err) addr, err := basics.UnmarshalChecksumAddress(someAddress) params, err := testClient.SuggestedParams() - require.NoError(t, err) + a.NoError(err) firstRound := basics.Round(params.LastRound + 1) lastRound := basics.Round(params.LastRound + 1000) @@ -501,90 +522,94 @@ func TestAccountParticipationInfo(t *testing.T) { }, } txID, err := testClient.SignAndBroadcastTransaction(wh, nil, tx) - require.NoError(t, err) + a.NoError(err) _, err = waitForTransaction(t, testClient, someAddress, txID, 15*time.Second) - require.NoError(t, err) + a.NoError(err) account, err := 
testClient.AccountInformation(someAddress) - require.NoError(t, err) - require.Equal(t, randomVotePKStr, string(account.Participation.ParticipationPK), "API must print correct root voting key") - require.Equal(t, randomSelPKStr, string(account.Participation.VRFPK), "API must print correct vrf key") - require.Equal(t, uint64(firstRound), account.Participation.VoteFirst, "API must print correct first participation round") - require.Equal(t, uint64(lastRound), account.Participation.VoteLast, "API must print correct last participation round") - require.Equal(t, dilution, account.Participation.VoteKeyDilution, "API must print correct key dilution") + a.NoError(err) + a.Equal(randomVotePKStr, string(account.Participation.ParticipationPK), "API must print correct root voting key") + a.Equal(randomSelPKStr, string(account.Participation.VRFPK), "API must print correct vrf key") + a.Equal(uint64(firstRound), account.Participation.VoteFirst, "API must print correct first participation round") + a.Equal(uint64(lastRound), account.Participation.VoteLast, "API must print correct last participation round") + a.Equal(dilution, account.Participation.VoteKeyDilution, "API must print correct key dilution") } func TestSupply(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient supply, err := testClient.LedgerSupply() - require.NoError(t, err) - require.True(t, supply.TotalMoney > 1e6) - require.True(t, supply.OnlineMoney > 1e6) - require.True(t, supply.TotalMoney >= supply.OnlineMoney) + a.NoError(err) + a.True(supply.TotalMoney > 1e6) + a.True(supply.OnlineMoney > 1e6) + a.True(supply.TotalMoney >= supply.OnlineMoney) } func TestClientCanGetGoRoutines(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.AlgodClient ctx, ctxCancel := context.WithCancel(context.Background()) defer ctxCancel() goRoutines, err := 
testClient.GetGoRoutines(ctx) - require.NoError(t, err) - require.NotEmpty(t, goRoutines) - require.True(t, strings.Index(goRoutines, "goroutine profile:") >= 0) + a.NoError(err) + a.NotEmpty(goRoutines) + a.True(strings.Index(goRoutines, "goroutine profile:") >= 0) } func TestSendingTooMuchFails(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) fromAddress := addresses[0] var toAddress string if len(addresses) > 1 { toAddress = addresses[1] } else { toAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } fromBalance, err := testClient.GetBalance(fromAddress) - require.NoError(t, err) + a.NoError(err) // too much amount _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, fromBalance+100, nil, "", 0, 0) t.Log(err) - require.Error(t, err) + a.Error(err) // waaaay too much amount _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, math.MaxUint64, nil, "", 0, 0) t.Log(err) - require.Error(t, err) + a.Error(err) // too much fee _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, fromBalance+100, 10000, nil, "", 0, 0) t.Log(err) - require.Error(t, err) + a.Error(err) // waaaay too much fee _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, math.MaxUint64, 10000, nil, "", 0, 0) t.Log(err) - require.Error(t, err) + a.Error(err) } func TestSendingFromEmptyAccountFails(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) 
- require.NoError(t, err) + a.NoError(err) var fromAddress string for _, addr := range addresses { bal, err := testClient.GetBalance(addr) - require.NoError(t, err) + a.NoError(err) if bal == 0 { fromAddress = addr break @@ -592,7 +617,7 @@ func TestSendingFromEmptyAccountFails(t *testing.T) { } if fromAddress == "" { fromAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } var toAddress string for _, addr := range addresses { @@ -603,24 +628,25 @@ func TestSendingFromEmptyAccountFails(t *testing.T) { } if toAddress == "" { toAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } _, err = testClient.SendPaymentFromWallet(wh, nil, fromAddress, toAddress, 10000, 100000, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestSendingTooLittleToEmptyAccountFails(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := fixture.LibGoalClient waitForRoundOne(t, testClient) wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) var emptyAddress string for _, addr := range addresses { bal, err := testClient.GetBalance(addr) - require.NoError(t, err) + a.NoError(err) if bal == 0 { emptyAddress = addr break @@ -628,23 +654,24 @@ func TestSendingTooLittleToEmptyAccountFails(t *testing.T) { } if emptyAddress == "" { emptyAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } _, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { t.Error("no addr with funds") } _, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, 10000, 1, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestSendingLowFeeFails(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) defer fixture.SetTestContext(t)() testClient := 
fixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) const sendAmount = 100000 someBal, someAddress := getMaxBalAddr(t, testClient, addresses) if someAddress == "" { @@ -655,35 +682,36 @@ func TestSendingLowFeeFails(t *testing.T) { } toAddress := getDestAddr(t, testClient, addresses, someAddress, wh) utx, err := testClient.ConstructPayment(someAddress, toAddress, 1, sendAmount, nil, "", [32]byte{}, 0, 0) - require.NoError(t, err) + a.NoError(err) utx.Fee.Raw = 1 stx, err := testClient.SignTransactionWithWallet(wh, nil, utx) - require.NoError(t, err) + a.NoError(err) _, err = testClient.BroadcastTransaction(stx) t.Log(err) - require.Error(t, err) + a.Error(err) utx.Fee.Raw = 0 stx, err = testClient.SignTransactionWithWallet(wh, nil, utx) - require.NoError(t, err) + a.NoError(err) _, err = testClient.BroadcastTransaction(stx) t.Log(err) - require.Error(t, err) + a.Error(err) } func TestSendingNotClosingAccountFails(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) // use a local fixture because we might really mess with the balances var localFixture fixtures.RestClientFixture localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer localFixture.Shutdown() testClient := localFixture.LibGoalClient wh, err := testClient.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) addresses, err := testClient.ListAddresses(wh) - require.NoError(t, err) + a.NoError(err) var emptyAddress string for _, addr := range addresses { bal, err := testClient.GetBalance(addr) - require.NoError(t, err) + a.NoError(err) if bal == 0 { emptyAddress = addr break @@ -691,14 +719,14 @@ func TestSendingNotClosingAccountFails(t *testing.T) { } if emptyAddress == "" { emptyAddress, err = testClient.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) } var someAddress string someBal 
:= uint64(0) for _, addr := range addresses { if addr != emptyAddress { bal, err := testClient.GetBalance(addr) - require.NoError(t, err) + a.NoError(err) if bal > someBal { someAddress = addr someBal = bal @@ -710,10 +738,11 @@ func TestSendingNotClosingAccountFails(t *testing.T) { } amt := someBal - 10000 - 1 _, err = testClient.SendPaymentFromWallet(wh, nil, someAddress, emptyAddress, 10000, amt, nil, "", 0, 0) - require.Error(t, err) + a.Error(err) } func TestClientCanGetPendingTransactions(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) var localFixture fixtures.RestClientFixture localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer localFixture.Shutdown() @@ -726,24 +755,25 @@ func TestClientCanGetPendingTransactions(t *testing.T) { // We may not need to kill the other node, but do it anyways to ensure the txn never gets committed nc, _ := localFixture.GetNodeController("Node") err := nc.FullStop() - require.NoError(t, err) + a.NoError(err) minTxnFee, minAcctBalance, err := localFixture.CurrentMinFeeAndBalance() - require.NoError(t, err) + a.NoError(err) // Check that a single pending txn is corectly displayed tx, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee, minAcctBalance, nil) - require.NoError(t, err) + a.NoError(err) statusResponse, err := testClient.GetPendingTransactions(0) - require.NoError(t, err) - require.NotEmpty(t, statusResponse) - require.True(t, statusResponse.TotalTxns == 1) - require.True(t, len(statusResponse.TruncatedTxns.Transactions) == 1) - require.True(t, statusResponse.TruncatedTxns.Transactions[0].TxID == tx.ID().String()) + a.NoError(err) + a.NotEmpty(statusResponse) + a.True(statusResponse.TotalTxns == 1) + a.True(len(statusResponse.TruncatedTxns.Transactions) == 1) + a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == tx.ID().String()) } func TestClientTruncatesPendingTransactions(t *testing.T) { + a := 
require.New(fixtures.SynchronizedTest(t)) var localFixture fixtures.RestClientFixture localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer localFixture.Shutdown() @@ -752,10 +782,10 @@ func TestClientTruncatesPendingTransactions(t *testing.T) { wh, _ := testClient.GetUnencryptedWalletHandle() nc, _ := localFixture.GetNodeController("Node") err := nc.FullStop() - require.NoError(t, err) + a.NoError(err) minTxnFee, minAcctBalance, err := localFixture.CurrentMinFeeAndBalance() - require.NoError(t, err) + a.NoError(err) NumTxns := 10 MaxTxns := 7 @@ -765,25 +795,25 @@ func TestClientTruncatesPendingTransactions(t *testing.T) { for i := 0; i < NumTxns; i++ { toAddress, _ := testClient.GenerateAddress(wh) tx2, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee, minAcctBalance, nil) - require.NoError(t, err) + a.NoError(err) txIDsSeen[tx2.ID().String()] = true } statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns)) - require.NoError(t, err) - require.NotEmpty(t, statusResponse) - require.True(t, int(statusResponse.TotalTxns) == NumTxns) - require.True(t, len(statusResponse.TruncatedTxns.Transactions) == MaxTxns) + a.NoError(err) + a.NotEmpty(statusResponse) + a.True(int(statusResponse.TotalTxns) == NumTxns) + a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns) for _, tx := range statusResponse.TruncatedTxns.Transactions { - require.True(t, txIDsSeen[tx.TxID]) + a.True(txIDsSeen[tx.TxID]) delete(txIDsSeen, tx.TxID) } - require.True(t, len(txIDsSeen) == NumTxns-MaxTxns) + a.True(len(txIDsSeen) == NumTxns-MaxTxns) } func TestClientPrioritizesPendingTransactions(t *testing.T) { t.Skip("new FIFO pool does not have prioritization") - + a := require.New(fixtures.SynchronizedTest(t)) var localFixture fixtures.RestClientFixture localFixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) defer localFixture.Shutdown() @@ -795,28 +825,28 @@ func 
TestClientPrioritizesPendingTransactions(t *testing.T) { toAddress, _ := testClient.GenerateAddress(wh) nc, _ := localFixture.GetNodeController("Node") err := nc.FullStop() - require.NoError(t, err) + a.NoError(err) minTxnFee, minAcctBalance, err := localFixture.CurrentMinFeeAndBalance() - require.NoError(t, err) + a.NoError(err) NumTxns := 5 MaxTxns := 3 for i := 0; i < NumTxns; i++ { toAddress2, _ := testClient.GenerateAddress(wh) _, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress2, minTxnFee, minAcctBalance, nil) - require.NoError(t, err) + a.NoError(err) } // Add a very high fee transaction. This should have first priority // (even if we don't know the encoding length of the underlying signed txn) txHigh, err := testClient.SendPaymentFromUnencryptedWallet(fromAddress, toAddress, minTxnFee*10, minAcctBalance, nil) - require.NoError(t, err) + a.NoError(err) statusResponse, err := testClient.GetPendingTransactions(uint64(MaxTxns)) - require.NoError(t, err) - require.NotEmpty(t, statusResponse) - require.True(t, int(statusResponse.TotalTxns) == NumTxns+1) - require.True(t, len(statusResponse.TruncatedTxns.Transactions) == MaxTxns) - require.True(t, statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String()) + a.NoError(err) + a.NotEmpty(statusResponse) + a.True(int(statusResponse.TotalTxns) == NumTxns+1) + a.True(len(statusResponse.TruncatedTxns.Transactions) == MaxTxns) + a.True(statusResponse.TruncatedTxns.Transactions[0].TxID == txHigh.ID().String()) } diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go index 461e31e7e1..bb09714f90 100644 --- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go +++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go @@ -49,7 +49,7 @@ func cascadeCreateAndFundAccounts(amountToSend, transactionFee uint64, fundingAc // sends them all money, and sends them online func 
TestManyAccountsCanGoOnline(t *testing.T) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) var fixture fixtures.RestClientFixture fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes50Each.json")) diff --git a/test/e2e-go/upgrades/application_support_test.go b/test/e2e-go/upgrades/application_support_test.go index 8514c54fd8..6c27528315 100644 --- a/test/e2e-go/upgrades/application_support_test.go +++ b/test/e2e-go/upgrades/application_support_test.go @@ -41,19 +41,20 @@ const lastProtocolBeforeApplicationSupport = protocol.ConsensusV23 const firstProtocolWithApplicationSupport = protocol.ConsensusV24 func makeApplicationUpgradeConsensus(t *testing.T) (appConsensus config.ConsensusProtocols) { + a := require.New(fixtures.SynchronizedTest(t)) appConsensus = generateFastUpgradeConsensus() // make sure that the "current" version does not support application and that the "future" version *does* support applications. currentProtocolParams, ok := appConsensus[consensusTestFastUpgrade(lastProtocolBeforeApplicationSupport)] - require.True(t, ok) + a.True(ok) futureProtocolParams, ok := appConsensus[consensusTestFastUpgrade(firstProtocolWithApplicationSupport)] - require.True(t, ok) + a.True(ok) // ensure it's disabled. - require.False(t, currentProtocolParams.Application) - require.False(t, currentProtocolParams.SupportRekeying) + a.False(currentProtocolParams.Application) + a.False(currentProtocolParams.SupportRekeying) // verify that the future protocol supports applications. - require.True(t, futureProtocolParams.Application) + a.True(futureProtocolParams.Application) // add an upgrade path from current to future. 
currentProtocolParams.ApprovedUpgrades = make(map[protocol.ConsensusVersion]uint64) @@ -77,36 +78,38 @@ func TestApplicationsUpgradeOverREST(t *testing.T) { fixture.Setup(t, filepath.Join("nettemplates", "TwoNodes100SecondTestUnupgradedProtocol.json")) defer fixture.Shutdown() + a := require.New(fixtures.SynchronizedTest(t)) + client := fixture.GetLibGoalClientForNamedNode("Node") accountList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir()) - require.NoError(t, err) + a.NoError(err) creator := accountList[0].Address wh, err := client.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) user, err := client.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) fee := uint64(1000) round, err := client.CurrentRound() - require.NoError(t, err) + a.NoError(err) // Fund the manager, so it can issue transactions later on _, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil) - require.NoError(t, err) + a.NoError(err) client.WaitForRound(round + 2) // There should be no apps to start with ad, err := client.AccountData(creator) - require.NoError(t, err) - require.Zero(t, len(ad.AppParams)) + a.NoError(err) + a.Zero(len(ad.AppParams)) ad, err = client.AccountData(user) - require.NoError(t, err) - require.Zero(t, len(ad.AppParams)) - require.Equal(t, basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos) + a.NoError(err) + a.Zero(len(ad.AppParams)) + a.Equal(basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos) counter := `#pragma version 2 // a simple global and local calls counter app @@ -133,9 +136,9 @@ app_local_put int 1 ` approvalOps, err := logic.AssembleString(counter) - require.NoError(t, err) + a.NoError(err) clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1") - require.NoError(t, err) + a.NoError(err) schema := basics.StateSchema{ NumUint: 1, } @@ -144,20 +147,20 @@ int 1 tx, err := client.MakeUnsignedAppCreateTx( transactions.OptInOC, approvalOps.Program, clearstateOps.Program, 
schema, schema, nil, nil, nil, nil, ) - require.NoError(t, err) + a.NoError(err) tx, err = client.FillUnsignedTxTemplate(creator, 0, 0, fee, tx) - require.NoError(t, err) + a.NoError(err) signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx) - require.NoError(t, err) + a.NoError(err) round, err = client.CurrentRound() - require.NoError(t, err) + a.NoError(err) _, err = client.BroadcastTransaction(signedTxn) - require.Error(t, err) - require.Contains(t, err.Error(), "application transaction not supported") + a.Error(err) + a.Contains(err.Error(), "application transaction not supported") curStatus, err := client.Status() - require.NoError(t, err) + a.NoError(err) initialStatus := curStatus startLoopTime := time.Now() @@ -165,31 +168,31 @@ int 1 // wait until the network upgrade : this can take a while. for curStatus.LastVersion == initialStatus.LastVersion { curStatus, err = client.Status() - require.NoError(t, err) + a.NoError(err) - require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } // now, that we have upgraded to the new protocol which supports applications, try again. 
_, err = client.BroadcastTransaction(signedTxn) - require.NoError(t, err) + a.NoError(err) curStatus, err = client.Status() - require.NoError(t, err) + a.NoError(err) round = curStatus.LastRound client.WaitForRound(round + 2) pendingTx, err := client.GetPendingTransactions(1) - require.NoError(t, err) - require.Equal(t, uint64(0), pendingTx.TotalTxns) + a.NoError(err) + a.Equal(uint64(0), pendingTx.TotalTxns) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) - require.NoError(t, err) - require.Equal(t, 1, len(ad.AppParams)) + a.NoError(err) + a.Equal(1, len(ad.AppParams)) var appIdx basics.AppIndex var params basics.AppParams for i, p := range ad.AppParams { @@ -197,84 +200,84 @@ int 1 params = p break } - require.Equal(t, approvalOps.Program, params.ApprovalProgram) - require.Equal(t, clearstateOps.Program, params.ClearStateProgram) - require.Equal(t, schema, params.LocalStateSchema) - require.Equal(t, schema, params.GlobalStateSchema) - require.Equal(t, 1, len(params.GlobalState)) + a.Equal(approvalOps.Program, params.ApprovalProgram) + a.Equal(clearstateOps.Program, params.ClearStateProgram) + a.Equal(schema, params.LocalStateSchema) + a.Equal(schema, params.GlobalStateSchema) + a.Equal(1, len(params.GlobalState)) value, ok := params.GlobalState["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) - require.Equal(t, 1, len(ad.AppLocalStates)) + a.Equal(1, len(ad.AppLocalStates)) state, ok := ad.AppLocalStates[appIdx] - require.True(t, ok) - require.Equal(t, schema, state.Schema) - require.Equal(t, 1, len(state.KeyValue)) + a.True(ok) + a.Equal(schema, state.Schema) + a.Equal(1, len(state.KeyValue)) value, ok = state.KeyValue["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) // call the app tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil) 
- require.NoError(t, err) + a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) - require.NoError(t, err) + a.NoError(err) signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx) - require.NoError(t, err) + a.NoError(err) round, err = client.CurrentRound() - require.NoError(t, err) + a.NoError(err) _, err = client.BroadcastTransaction(signedTxn) - require.NoError(t, err) + a.NoError(err) client.WaitForRound(round + 2) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) - require.NoError(t, err) - require.Equal(t, 1, len(ad.AppParams)) + a.NoError(err) + a.Equal(1, len(ad.AppParams)) params, ok = ad.AppParams[appIdx] - require.True(t, ok) - require.Equal(t, approvalOps.Program, params.ApprovalProgram) - require.Equal(t, clearstateOps.Program, params.ClearStateProgram) - require.Equal(t, schema, params.LocalStateSchema) - require.Equal(t, schema, params.GlobalStateSchema) - require.Equal(t, 1, len(params.GlobalState)) + a.True(ok) + a.Equal(approvalOps.Program, params.ApprovalProgram) + a.Equal(clearstateOps.Program, params.ClearStateProgram) + a.Equal(schema, params.LocalStateSchema) + a.Equal(schema, params.GlobalStateSchema) + a.Equal(1, len(params.GlobalState)) value, ok = params.GlobalState["counter"] - require.True(t, ok) - require.Equal(t, uint64(2), value.Uint) + a.True(ok) + a.Equal(uint64(2), value.Uint) - require.Equal(t, 1, len(ad.AppLocalStates)) + a.Equal(1, len(ad.AppLocalStates)) state, ok = ad.AppLocalStates[appIdx] - require.True(t, ok) - require.Equal(t, schema, state.Schema) - require.Equal(t, 1, len(state.KeyValue)) + a.True(ok) + a.Equal(schema, state.Schema) + a.Equal(1, len(state.KeyValue)) value, ok = state.KeyValue["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) - require.Equal(t, uint64(2), ad.TotalAppSchema.NumUint) + a.Equal(uint64(2), ad.TotalAppSchema.NumUint) // check user's 
balance record for the app entry and the state changes ad, err = client.AccountData(user) - require.NoError(t, err) - require.Equal(t, 0, len(ad.AppParams)) + a.NoError(err) + a.Equal(0, len(ad.AppParams)) - require.Equal(t, 1, len(ad.AppLocalStates)) + a.Equal(1, len(ad.AppLocalStates)) state, ok = ad.AppLocalStates[appIdx] - require.True(t, ok) - require.Equal(t, schema, state.Schema) - require.Equal(t, 1, len(state.KeyValue)) + a.True(ok) + a.Equal(schema, state.Schema) + a.Equal(1, len(state.KeyValue)) value, ok = state.KeyValue["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) - require.Equal(t, basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) + a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) app, err := client.ApplicationInformation(uint64(appIdx)) - require.NoError(t, err) - require.Equal(t, uint64(appIdx), app.Id) - require.Equal(t, creator, app.Params.Creator) + a.NoError(err) + a.Equal(uint64(appIdx), app.Id) + a.Equal(creator, app.Params.Creator) return } @@ -282,6 +285,7 @@ int 1 // to a version that supports applications. It verify that prior to supporting applications, the node would not accept // any application transaction and after the upgrade is complete, it would support that. 
func TestApplicationsUpgradeOverGossip(t *testing.T) { + a := require.New(fixtures.SynchronizedTest(t)) smallLambdaMs := 500 consensus := makeApplicationUpgradeConsensus(t) @@ -298,44 +302,44 @@ func TestApplicationsUpgradeOverGossip(t *testing.T) { client := fixture.GetLibGoalClientForNamedNode("Primary") secondary := fixture.GetLibGoalClientForNamedNode("Node") err := config.SaveConfigurableConsensus(client.DataDir(), consensus) - require.NoError(t, err) + a.NoError(err) fixture.Start() defer fixture.Shutdown() accountList, err := fixture.GetNodeWalletsSortedByBalance(client.DataDir()) - require.NoError(t, err) + a.NoError(err) creator := accountList[0].Address wh, err := client.GetUnencryptedWalletHandle() - require.NoError(t, err) + a.NoError(err) user, err := client.GenerateAddress(wh) - require.NoError(t, err) + a.NoError(err) fee := uint64(1000) round, err := client.CurrentRound() - require.NoError(t, err) + a.NoError(err) // Fund the manager, so it can issue transactions later on _, err = client.SendPaymentFromUnencryptedWallet(creator, user, fee, 10000000000, nil) - require.NoError(t, err) + a.NoError(err) client.WaitForRound(round + 2) round, err = client.CurrentRound() - require.NoError(t, err) + a.NoError(err) // There should be no apps to start with ad, err := client.AccountData(creator) - require.NoError(t, err) - require.Zero(t, len(ad.AppParams)) + a.NoError(err) + a.Zero(len(ad.AppParams)) ad, err = client.AccountData(user) - require.NoError(t, err) - require.Zero(t, len(ad.AppParams)) - require.Equal(t, basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos) + a.NoError(err) + a.Zero(len(ad.AppParams)) + a.Equal(basics.MicroAlgos{Raw: 10000000000}, ad.MicroAlgos) counter := `#pragma version 2 // a simple global and local calls counter app @@ -362,9 +366,9 @@ app_local_put int 1 ` approvalOps, err := logic.AssembleString(counter) - require.NoError(t, err) + a.NoError(err) clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1") - 
require.NoError(t, err) + a.NoError(err) schema := basics.StateSchema{ NumUint: 1, } @@ -373,39 +377,39 @@ int 1 tx, err := client.MakeUnsignedAppCreateTx( transactions.OptInOC, approvalOps.Program, clearstateOps.Program, schema, schema, nil, nil, nil, nil, ) - require.NoError(t, err) + a.NoError(err) tx, err = client.FillUnsignedTxTemplate(creator, round, round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds, fee, tx) - require.NoError(t, err) + a.NoError(err) signedTxn, err := client.SignTransactionWithWallet(wh, nil, tx) - require.NoError(t, err) + a.NoError(err) round, err = client.CurrentRound() - require.NoError(t, err) + a.NoError(err) _, err = client.BroadcastTransaction(signedTxn) - require.NoError(t, err) + a.NoError(err) // this transaction is expect to reach the first node ( primary ), but to be rejected by the second node when transmitted over gossip. client.WaitForRound(round + 2) // check that the primary node still has this transaction in it's transaction pool. pendingTx, err := client.GetPendingTransactions(1) - require.NoError(t, err) + a.NoError(err) round, err = client.CurrentRound() - require.NoError(t, err) + a.NoError(err) if round > round+primaryNodeUnupgradedProtocol.DefaultUpgradeWaitRounds { t.Skip("Test platform is too slow for this test") } - require.Equal(t, uint64(1), pendingTx.TotalTxns) + a.Equal(uint64(1), pendingTx.TotalTxns) // check that the secondary node doesn't have that transaction in it's transaction pool. pendingTx, err = secondary.GetPendingTransactions(1) - require.NoError(t, err) - require.Equal(t, uint64(0), pendingTx.TotalTxns) + a.NoError(err) + a.Equal(uint64(0), pendingTx.TotalTxns) curStatus, err := client.Status() - require.NoError(t, err) + a.NoError(err) initialStatus := curStatus startLoopTime := time.Now() @@ -413,35 +417,35 @@ int 1 // wait until the network upgrade : this can take a while. 
for curStatus.LastVersion == initialStatus.LastVersion { curStatus, err = client.Status() - require.NoError(t, err) + a.NoError(err) - require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } // now, that we have upgraded to the new protocol which supports applications, try again. tx, err = client.FillUnsignedTxTemplate(creator, round, round+100, fee, tx) - require.NoError(t, err) + a.NoError(err) signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx) - require.NoError(t, err) + a.NoError(err) _, err = client.BroadcastTransaction(signedTxn) - require.NoError(t, err) + a.NoError(err) curStatus, err = client.Status() - require.NoError(t, err) + a.NoError(err) round = curStatus.LastRound client.WaitForRound(round + 2) pendingTx, err = client.GetPendingTransactions(1) - require.NoError(t, err) - require.Equal(t, uint64(0), pendingTx.TotalTxns) + a.NoError(err) + a.Equal(uint64(0), pendingTx.TotalTxns) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) - require.NoError(t, err) - require.Equal(t, 1, len(ad.AppParams)) + a.NoError(err) + a.Equal(1, len(ad.AppParams)) var appIdx basics.AppIndex var params basics.AppParams for i, p := range ad.AppParams { @@ -449,83 +453,83 @@ int 1 params = p break } - require.Equal(t, approvalOps.Program, params.ApprovalProgram) - require.Equal(t, clearstateOps.Program, params.ClearStateProgram) - require.Equal(t, schema, params.LocalStateSchema) - require.Equal(t, schema, params.GlobalStateSchema) - require.Equal(t, 1, len(params.GlobalState)) + a.Equal(approvalOps.Program, params.ApprovalProgram) + a.Equal(clearstateOps.Program, params.ClearStateProgram) + a.Equal(schema, params.LocalStateSchema) + a.Equal(schema, params.GlobalStateSchema) + a.Equal(1, len(params.GlobalState)) value, ok := 
params.GlobalState["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) - require.Equal(t, 1, len(ad.AppLocalStates)) + a.Equal(1, len(ad.AppLocalStates)) state, ok := ad.AppLocalStates[appIdx] - require.True(t, ok) - require.Equal(t, schema, state.Schema) - require.Equal(t, 1, len(state.KeyValue)) + a.True(ok) + a.Equal(schema, state.Schema) + a.Equal(1, len(state.KeyValue)) value, ok = state.KeyValue["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) // call the app tx, err = client.MakeUnsignedAppOptInTx(uint64(appIdx), nil, nil, nil, nil) - require.NoError(t, err) + a.NoError(err) tx, err = client.FillUnsignedTxTemplate(user, 0, 0, fee, tx) - require.NoError(t, err) + a.NoError(err) signedTxn, err = client.SignTransactionWithWallet(wh, nil, tx) - require.NoError(t, err) + a.NoError(err) round, err = client.CurrentRound() - require.NoError(t, err) + a.NoError(err) _, err = client.BroadcastTransaction(signedTxn) - require.NoError(t, err) + a.NoError(err) client.WaitForRound(round + 2) // check creator's balance record for the app entry and the state changes ad, err = client.AccountData(creator) - require.NoError(t, err) - require.Equal(t, 1, len(ad.AppParams)) + a.NoError(err) + a.Equal(1, len(ad.AppParams)) params, ok = ad.AppParams[appIdx] - require.True(t, ok) - require.Equal(t, approvalOps.Program, params.ApprovalProgram) - require.Equal(t, clearstateOps.Program, params.ClearStateProgram) - require.Equal(t, schema, params.LocalStateSchema) - require.Equal(t, schema, params.GlobalStateSchema) - require.Equal(t, 1, len(params.GlobalState)) + a.True(ok) + a.Equal(approvalOps.Program, params.ApprovalProgram) + a.Equal(clearstateOps.Program, params.ClearStateProgram) + a.Equal(schema, params.LocalStateSchema) + a.Equal(schema, params.GlobalStateSchema) + a.Equal(1, len(params.GlobalState)) value, ok = 
params.GlobalState["counter"] - require.True(t, ok) - require.Equal(t, uint64(2), value.Uint) + a.True(ok) + a.Equal(uint64(2), value.Uint) - require.Equal(t, 1, len(ad.AppLocalStates)) + a.Equal(1, len(ad.AppLocalStates)) state, ok = ad.AppLocalStates[appIdx] - require.True(t, ok) - require.Equal(t, schema, state.Schema) - require.Equal(t, 1, len(state.KeyValue)) + a.True(ok) + a.Equal(schema, state.Schema) + a.Equal(1, len(state.KeyValue)) value, ok = state.KeyValue["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) - require.Equal(t, uint64(2), ad.TotalAppSchema.NumUint) + a.Equal(uint64(2), ad.TotalAppSchema.NumUint) // check user's balance record for the app entry and the state changes ad, err = client.AccountData(user) - require.NoError(t, err) - require.Equal(t, 0, len(ad.AppParams)) + a.NoError(err) + a.Equal(0, len(ad.AppParams)) - require.Equal(t, 1, len(ad.AppLocalStates)) + a.Equal(1, len(ad.AppLocalStates)) state, ok = ad.AppLocalStates[appIdx] - require.True(t, ok) - require.Equal(t, schema, state.Schema) - require.Equal(t, 1, len(state.KeyValue)) + a.True(ok) + a.Equal(schema, state.Schema) + a.Equal(1, len(state.KeyValue)) value, ok = state.KeyValue["counter"] - require.True(t, ok) - require.Equal(t, uint64(1), value.Uint) + a.True(ok) + a.Equal(uint64(1), value.Uint) - require.Equal(t, basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) + a.Equal(basics.MicroAlgos{Raw: 10000000000 - fee}, ad.MicroAlgos) app, err := client.ApplicationInformation(uint64(appIdx)) - require.NoError(t, err) - require.Equal(t, uint64(appIdx), app.Id) - require.Equal(t, creator, app.Params.Creator) + a.NoError(err) + a.Equal(uint64(appIdx), app.Id) + a.Equal(creator, app.Params.Creator) return } diff --git a/test/e2e-go/upgrades/rekey_support_test.go b/test/e2e-go/upgrades/rekey_support_test.go index 2cb85dfa40..e4e56c5bb0 100644 --- a/test/e2e-go/upgrades/rekey_support_test.go +++ 
b/test/e2e-go/upgrades/rekey_support_test.go @@ -29,7 +29,7 @@ import ( // TestRekeyUpgrade tests that we rekey does not work before the upgrade and works well after func TestRekeyUpgrade(t *testing.T) { - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) smallLambdaMs := 500 consensus := makeApplicationUpgradeConsensus(t) @@ -73,7 +73,7 @@ func TestRekeyUpgrade(t *testing.T) { a.NoError(err) _, err = client.BroadcastTransaction(rekey) a.Error(err) - require.Contains(t, err.Error(), "transaction has RekeyTo set but rekeying not yet enable") + a.Contains(err.Error(), "transaction has RekeyTo set but rekeying not yet enable") // use rekeyed key to authorize (AuthAddr check) tx.RekeyTo = basics.Address{} @@ -81,10 +81,10 @@ func TestRekeyUpgrade(t *testing.T) { a.NoError(err) _, err = client.BroadcastTransaction(rekeyed) a.Error(err) - require.Contains(t, err.Error(), "nonempty AuthAddr but rekeying not supported") + a.Contains(err.Error(), "nonempty AuthAddr but rekeying not supported") // go to upgrade curStatus, err := client.Status() - require.NoError(t, err) + a.NoError(err) initialStatus := curStatus startLoopTime := time.Now() @@ -92,21 +92,21 @@ func TestRekeyUpgrade(t *testing.T) { // wait until the network upgrade : this can take a while. for curStatus.LastVersion == initialStatus.LastVersion { curStatus, err = client.Status() - require.NoError(t, err) + a.NoError(err) - require.Less(t, int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) + a.Less(int64(time.Now().Sub(startLoopTime)), int64(3*time.Minute)) time.Sleep(time.Duration(smallLambdaMs) * time.Millisecond) round = curStatus.LastRound } // now, that we have upgraded to the new protocol which supports rekey, try again. 
_, err = client.BroadcastTransaction(rekey) - require.NoError(t, err) + a.NoError(err) round, err = client.CurrentRound() a.NoError(err) client.WaitForRound(round + 1) _, err = client.BroadcastTransaction(rekeyed) - require.NoError(t, err) + a.NoError(err) } diff --git a/test/e2e-go/upgrades/send_receive_upgrade_test.go b/test/e2e-go/upgrades/send_receive_upgrade_test.go index 548d1ff455..6ad81e8ffb 100644 --- a/test/e2e-go/upgrades/send_receive_upgrade_test.go +++ b/test/e2e-go/upgrades/send_receive_upgrade_test.go @@ -98,7 +98,7 @@ func generateFastUpgradeConsensus() (fastUpgradeProtocols config.ConsensusProtoc func testAccountsCanSendMoneyAcrossUpgrade(t *testing.T, templatePath string) { t.Parallel() - a := require.New(t) + a := require.New(fixtures.SynchronizedTest(t)) consensus := generateFastUpgradeConsensus() diff --git a/test/framework/fixtures/auctionFixture.go b/test/framework/fixtures/auctionFixture.go index e5ef6b5d65..2a88a6e1be 100644 --- a/test/framework/fixtures/auctionFixture.go +++ b/test/framework/fixtures/auctionFixture.go @@ -30,7 +30,6 @@ import ( "strings" "sync" "syscall" - "testing" "time" "github.com/stretchr/testify/assert" @@ -164,9 +163,9 @@ func (f *AuctionFixture) GetAuctionConsoleRestClient() auctionClient.ConsoleRest } // Setup is called to initialize the test fixture for the test(s), uses default ports for auction bank and console -func (f *AuctionFixture) Setup(t *testing.T, templateFile string) (err error) { +func (f *AuctionFixture) Setup(t TestingTB, templateFile string) (err error) { - f.t = t + f.t = SynchronizedTest(t) f.bidderSecretKeyCache = make(map[string]crypto.PrivateKey) diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go index 57687f46d3..5d34b0c2cf 100644 --- a/test/framework/fixtures/expectFixture.go +++ b/test/framework/fixtures/expectFixture.go @@ -104,9 +104,9 @@ func MakeExpectTest(t *testing.T) *ExpectFixture { } return nil }) - require.NoError(t, err) + 
require.NoError(SynchronizedTest(t), err) err = ef.initialize(t) - require.NoError(t, err) + require.NoError(SynchronizedTest(t), err) return ef } @@ -115,9 +115,10 @@ func (ef *ExpectFixture) Run() { for testName := range ef.expectFiles { if match, _ := regexp.MatchString(ef.testFilter, testName); match { ef.t.Run(testName, func(t *testing.T) { + syncTest := SynchronizedTest(t) workingDir, algoDir, err := ef.getTestDir(testName) - require.NoError(t, err) - t.Logf("algoDir: %s\ntestDataDir:%s\n", algoDir, ef.testDataDir) + require.NoError(SynchronizedTest(t), err) + syncTest.Logf("algoDir: %s\ntestDataDir:%s\n", algoDir, ef.testDataDir) cmd := exec.Command("expect", testName, algoDir, ef.testDataDir) var outBuf bytes.Buffer cmd.Stdout = &outBuf @@ -131,8 +132,8 @@ func (ef *ExpectFixture) Run() { // Using os.File as stderr does not trigger goroutine creation, instead exec.Cmd relies on os.File implementation. errFile, err := os.OpenFile(path.Join(workingDir, "stderr.txt"), os.O_CREATE|os.O_RDWR, 0) if err != nil { - t.Logf("failed opening stderr temp file: %s\n", err.Error()) - t.Fail() + syncTest.Logf("failed opening stderr temp file: %s\n", err.Error()) + syncTest.Fail() } defer errFile.Close() // Close might error but we Sync it before leaving the scope cmd.Stderr = errFile @@ -154,8 +155,8 @@ func (ef *ExpectFixture) Run() { if ferr != nil { stderr = ferr.Error() } - t.Logf("err running '%s': %s\nstdout: %s\nstderr: %s\n", testName, err, string(outBuf.Bytes()), stderr) - t.Fail() + syncTest.Logf("err running '%s': %s\nstdout: %s\nstderr: %s\n", testName, err, string(outBuf.Bytes()), stderr) + syncTest.Fail() } else { // t.Logf("stdout: %s", string(outBuf.Bytes())) ef.removeTestDir(workingDir) diff --git a/test/framework/fixtures/fixture.go b/test/framework/fixtures/fixture.go index 4bb1cf6254..2775a31d7c 100644 --- a/test/framework/fixtures/fixture.go +++ b/test/framework/fixtures/fixture.go @@ -16,18 +16,30 @@ package fixtures -import "testing" +import ( + 
"testing" -// TestingT captures the common methods of *testing.T and *testing.B -// that we use. -type TestingT interface { - Fatalf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) + "github.com/algorand/go-deadlock" +) + +// TestingTB is identical to testing.TB, beside the private method. +type TestingTB interface { + Cleanup(func()) Error(args ...interface{}) - Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fail() FailNow() Failed() bool + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Helper() + Log(args ...interface{}) + Logf(format string, args ...interface{}) Name() string + Skip(args ...interface{}) + SkipNow() + Skipf(format string, args ...interface{}) + Skipped() bool } // Fixture provides the base interface for all E2E test fixtures @@ -50,3 +62,110 @@ type Fixture interface { // (e.g. shared across all tests in a package) ShutdownImpl(preserveData bool) } + +var synchTestMu deadlock.Mutex +var synchTests = make(map[TestingTB]TestingTB) + +// SynchronizedTest generates a testing.TB compatible test for a given testing.TB interface. +// calling SynchronizedTest with the same tb would return the exact same instance of synchTest +func SynchronizedTest(tb TestingTB) TestingTB { + if st, ok := tb.(*synchTest); ok { + return st + } + synchTestMu.Lock() + defer synchTestMu.Unlock() + if t, have := synchTests[tb]; have { + return t + } + t := &synchTest{ + t: tb, + } + synchTests[tb] = t + return t +} + +type synchTest struct { + deadlock.Mutex + t TestingTB +} + +func (st *synchTest) Cleanup(f func()) { + st.Lock() + defer st.Unlock() + st.t.Cleanup(f) +} +func (st *synchTest) Error(args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Error(args...) +} +func (st *synchTest) Errorf(format string, args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Errorf(format, args...) 
+} +func (st *synchTest) Fail() { + st.Lock() + defer st.Unlock() + st.t.Fail() +} +func (st *synchTest) FailNow() { + st.Lock() + defer st.Unlock() + st.t.FailNow() +} +func (st *synchTest) Failed() bool { + st.Lock() + defer st.Unlock() + return st.t.Failed() +} +func (st *synchTest) Fatal(args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Fatal(args...) +} +func (st *synchTest) Fatalf(format string, args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Fatalf(format, args...) +} +func (st *synchTest) Helper() { + st.Lock() + defer st.Unlock() + st.t.Helper() +} +func (st *synchTest) Log(args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Log(args...) +} +func (st *synchTest) Logf(format string, args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Logf(format, args...) +} +func (st *synchTest) Name() string { + st.Lock() + defer st.Unlock() + return st.t.Name() +} +func (st *synchTest) Skip(args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Skip(args...) +} +func (st *synchTest) SkipNow() { + st.Lock() + defer st.Unlock() + st.t.SkipNow() +} +func (st *synchTest) Skipf(format string, args ...interface{}) { + st.Lock() + defer st.Unlock() + st.t.Skipf(format, args...) 
+} +func (st *synchTest) Skipped() bool { + st.Lock() + defer st.Unlock() + return st.t.Skipped() +} diff --git a/test/framework/fixtures/kmdFixture.go b/test/framework/fixtures/kmdFixture.go index 89cf1bbb73..1226bd090c 100644 --- a/test/framework/fixtures/kmdFixture.go +++ b/test/framework/fixtures/kmdFixture.go @@ -50,7 +50,7 @@ var defaultAPIToken = []byte(strings.Repeat("a", 64)) // KMDFixture is a test fixture for tests requiring interactions with kmd type KMDFixture struct { baseFixture - t TestingT + t TestingTB initialized bool dataDir string kmdDir string @@ -94,39 +94,39 @@ func (f *KMDFixture) ShutdownImpl(preserveData bool) { } // SetupWithWallet starts kmd and creates a wallet, returning a wallet handle -func (f *KMDFixture) SetupWithWallet(t TestingT) (handleToken string) { +func (f *KMDFixture) SetupWithWallet(t TestingTB) (handleToken string) { f.Setup(t) handleToken, _ = f.MakeWalletAndHandleToken() return } // Setup starts kmd with the default config -func (f *KMDFixture) Setup(t TestingT) { +func (f *KMDFixture) Setup(t TestingTB) { f.SetupWithConfig(t, "") } // Initialize initializes the dataDir and TestingT for this test but doesn't start kmd -func (f *KMDFixture) Initialize(t TestingT) { +func (f *KMDFixture) Initialize(t TestingTB) { f.initialize(f) - f.t = t + f.t = SynchronizedTest(t) f.dataDir = filepath.Join(f.testDir, t.Name()) // Remove any existing tests in this dataDir + recreate err := os.RemoveAll(f.dataDir) - require.NoError(t, err) + require.NoError(f.t, err) err = os.Mkdir(f.dataDir, 0750) - require.NoError(t, err) + require.NoError(f.t, err) // Set up the kmd data dir within the main datadir f.kmdDir = filepath.Join(f.dataDir, nodecontrol.DefaultKMDDataDir) err = os.Mkdir(f.kmdDir, nodecontrol.DefaultKMDDataDirPerms) - require.NoError(t, err) + require.NoError(f.t, err) } // SetupWithConfig starts a kmd node with the passed config or default test // config, if the passed config is blank. 
Though internally an error might // occur during setup, we never return one, because we'll still fail the test // for any errors here, and it keeps the test code much cleaner -func (f *KMDFixture) SetupWithConfig(t TestingT, config string) { +func (f *KMDFixture) SetupWithConfig(t TestingTB, config string) { // Setup is called once per test, so it's OK for test to store one particular TestingT f.Initialize(t) @@ -134,14 +134,14 @@ func (f *KMDFixture) SetupWithConfig(t TestingT, config string) { f.APIToken = defaultAPIToken tokenFilepath := filepath.Join(f.kmdDir, "kmd.token") err := ioutil.WriteFile(tokenFilepath, f.APIToken, 0640) - require.NoError(t, err) + require.NoError(f.t, err) if config == "" { config = defaultConfig } configFilepath := filepath.Join(f.kmdDir, "kmd_config.json") err = ioutil.WriteFile(configFilepath, []byte(config), 0640) - require.NoError(t, err) + require.NoError(f.t, err) // Start kmd nc := nodecontrol.MakeNodeController(f.binDir, f.dataDir) @@ -149,17 +149,17 @@ func (f *KMDFixture) SetupWithConfig(t TestingT, config string) { _, err = nc.StartKMD(nodecontrol.KMDStartArgs{ TimeoutSecs: defaultTimeoutSecs, }) - require.NoError(t, err) + require.NoError(f.t, err) // Mark ourselves as initialized so we know to shut down server f.initialized = true // Build a client sock, err := util.GetFirstLineFromFile(filepath.Join(f.kmdDir, "kmd.net")) - require.NoError(t, err) + require.NoError(f.t, err) f.Sock = sock client, err := client.MakeKMDClient(f.Sock, string(f.APIToken)) - require.NoError(t, err) + require.NoError(f.t, err) f.Client = &client } diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index 60bd6fc969..7adb8cd26e 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -50,7 +50,7 @@ type LibGoalFixture struct { rootDir string Name string network netdeploy.Network - t TestingT + t TestingTB tMu deadlock.RWMutex clientPartKeys 
map[string][]account.Participation consensus config.ConsensusProtocols @@ -63,13 +63,13 @@ func (f *RestClientFixture) SetConsensus(consensus config.ConsensusProtocols) { } // Setup is called to initialize the test fixture for the test(s) -func (f *LibGoalFixture) Setup(t TestingT, templateFile string) { +func (f *LibGoalFixture) Setup(t TestingTB, templateFile string) { f.setup(t, t.Name(), templateFile, true) } // SetupNoStart is called to initialize the test fixture for the test(s) // but does not start the network before returning. Call NC.Start() to start later. -func (f *LibGoalFixture) SetupNoStart(t TestingT, templateFile string) { +func (f *LibGoalFixture) SetupNoStart(t TestingTB, templateFile string) { f.setup(t, t.Name(), templateFile, false) } @@ -83,10 +83,10 @@ func (f *LibGoalFixture) Genesis() gen.GenesisData { return f.network.Genesis() } -func (f *LibGoalFixture) setup(test TestingT, testName string, templateFile string, startNetwork bool) { +func (f *LibGoalFixture) setup(test TestingTB, testName string, templateFile string, startNetwork bool) { // Call initialize for our base implementation f.initialize(f) - f.t = test + f.t = SynchronizedTest(test) f.rootDir = filepath.Join(f.testDir, testName) // In case we're running tests against the same rootDir, purge it to avoid errors from already-exists @@ -273,10 +273,10 @@ func (f *LibGoalFixture) Start() { // SetTestContext should be called within each test using a shared fixture. 
// It ensures the current test context is set and then reset after the test ends // It should be called in the form of "defer fixture.SetTestContext(t)()" -func (f *LibGoalFixture) SetTestContext(t TestingT) func() { +func (f *LibGoalFixture) SetTestContext(t TestingTB) func() { f.tMu.Lock() defer f.tMu.Unlock() - f.t = t + f.t = SynchronizedTest(t) return func() { f.tMu.Lock() defer f.tMu.Unlock() diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go index 9f57cd0469..a27a1a5b1e 100644 --- a/test/framework/fixtures/restClientFixture.go +++ b/test/framework/fixtures/restClientFixture.go @@ -39,14 +39,14 @@ type RestClientFixture struct { } // Setup is called to initialize the test fixture for the test(s) -func (f *RestClientFixture) Setup(t TestingT, templateFile string) { +func (f *RestClientFixture) Setup(t TestingTB, templateFile string) { f.LibGoalFixture.Setup(t, templateFile) f.AlgodClient = f.GetAlgodClientForController(f.NC) } // SetupNoStart is called to initialize the test fixture for the test(s) // but does not start the network before returning. Call NC.Start() to start later. -func (f *RestClientFixture) SetupNoStart(t TestingT, templateFile string) { +func (f *RestClientFixture) SetupNoStart(t TestingTB, templateFile string) { f.LibGoalFixture.SetupNoStart(t, templateFile) } From 0797ad1d8093a2998f56df9db2cf0987068e3016 Mon Sep 17 00:00:00 2001 From: nicholasguoalgorand <67928479+nicholasguoalgorand@users.noreply.github.com> Date: Wed, 31 Mar 2021 17:09:03 -0700 Subject: [PATCH 154/215] Add networking infrastructure for cancelling sends (#1966) ## Summary When we send a large message (ie a proposal payload), sometimes we realize after starting sending the message that we don't need to send the message (ie the proposal isn't the best one, the payload is malformed). Thus we wish to have support to be able to cancel sending the message after it is enqueued / starting to be sent. 
This pr adds the functionality of broadcasting an array of messages, and the ability to cancel sending after any subset of messages in the array is sent. --- agreement/gossip/networkFull_test.go | 26 +++--- network/ping.go | 3 +- network/wsNetwork.go | 69 ++++++++++---- network/wsNetwork_test.go | 130 +++++++++++++++++++++++++-- network/wsPeer.go | 103 ++++++++++++++++----- 5 files changed, 273 insertions(+), 58 deletions(-) diff --git a/agreement/gossip/networkFull_test.go b/agreement/gossip/networkFull_test.go index 2a2a0a84bd..54b7899b67 100644 --- a/agreement/gossip/networkFull_test.go +++ b/agreement/gossip/networkFull_test.go @@ -202,12 +202,12 @@ func testNetworkImplMixed(t *testing.T, nodesCount int) { nets, counters := spinNetwork(t, nodesCount) defer shutdownNetwork(nets, counters) - nets[0].broadcastTimeout(protocol.AgreementVoteTag, []byte{1}, testNetTimeout) - nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{1}, testNetTimeout) - nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{1}, testNetTimeout) - nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{1}, testNetTimeout) - nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{1}, testNetTimeout) - nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{1}, testNetTimeout) + nets[0].Broadcast(protocol.AgreementVoteTag, []byte{1}) + nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{1}) + nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{1}) + nets[0].Broadcast(protocol.VoteBundleTag, []byte{1}) + nets[0].Broadcast(protocol.VoteBundleTag, []byte{1}) + nets[0].Broadcast(protocol.VoteBundleTag, []byte{1}) for i, counter := range counters { if i != 0 { if !counter.verify(t, 1, 2, 3) { @@ -228,14 +228,14 @@ func testNetworkImplMixed2(t *testing.T, nodesCount int) { const loadSize = 12 for i := byte(0); i < loadSize; i++ { - ok := nets[0].broadcastTimeout(protocol.AgreementVoteTag, []byte{i}, testNetTimeout) + ok := nets[0].Broadcast(protocol.AgreementVoteTag, []byte{i}) 
assert.NoError(t, ok) if i%2 == 0 { - ok = nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{i}, testNetTimeout) + ok = nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{i}) assert.NoError(t, ok) } if i%4 == 0 { - ok = nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{i}, testNetTimeout) + ok = nets[0].Broadcast(protocol.VoteBundleTag, []byte{i}) assert.NoError(t, ok) } } @@ -266,14 +266,14 @@ func testNetworkImplReordered(t *testing.T, nodesCount int) { wg.Add(loadSize) for i := byte(0); i < loadSize; i++ { go func(i byte) { - ok := nets[0].broadcastTimeout(protocol.AgreementVoteTag, []byte{i}, testNetTimeout) + ok := nets[0].Broadcast(protocol.AgreementVoteTag, []byte{i}) assert.NoError(t, ok) if i%2 == 0 { - ok = nets[0].broadcastTimeout(protocol.ProposalPayloadTag, []byte{i}, testNetTimeout) + ok = nets[0].Broadcast(protocol.ProposalPayloadTag, []byte{i}) assert.NoError(t, ok) } if i%4 == 0 { - ok = nets[0].broadcastTimeout(protocol.VoteBundleTag, []byte{i}, testNetTimeout) + ok = nets[0].Broadcast(protocol.VoteBundleTag, []byte{i}) assert.NoError(t, ok) } wg.Done() @@ -323,7 +323,7 @@ func testNetworkImplRebroadcast(t *testing.T, nodesCount int) { rebroadcastNodes = 3 } for i := byte(0); i < byte(rebroadcastNodes); i++ { - ok := nets[i].broadcastTimeout(protocol.AgreementVoteTag, []byte{i, i + 1}, testNetTimeout) + ok := nets[i].Broadcast(protocol.AgreementVoteTag, []byte{i, i + 1}) assert.NoError(t, ok) } diff --git a/network/ping.go b/network/ping.go index 2fafb970f8..064bb5ff94 100644 --- a/network/ping.go +++ b/network/ping.go @@ -18,6 +18,7 @@ package network import ( "bytes" + "context" "time" "github.com/algorand/go-algorand/crypto" @@ -35,7 +36,7 @@ func pingHandler(message IncomingMessage) OutgoingMessage { copy(mbytes, tbytes) copy(mbytes[len(tbytes):], message.Data) var digest crypto.Digest // leave blank, ping message too short - peer.writeNonBlock(mbytes, false, digest, time.Now()) + peer.writeNonBlock(context.Background(), 
mbytes, false, digest, time.Now()) return OutgoingMessage{} } diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 38f961e754..4fa96d21f8 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -151,7 +151,9 @@ const ( type GossipNode interface { Address() (string, bool) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error + BroadcastArray(ctx context.Context, tag []protocol.Tag, data [][]byte, wait bool, except Peer) error Relay(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error + RelayArray(ctx context.Context, tag []protocol.Tag, data [][]byte, wait bool, except Peer) error Disconnect(badnode Peer) DisconnectPeers() Ready() chan struct{} @@ -231,8 +233,13 @@ type IncomingMessage struct { // Tag is a short string (2 bytes) marking a type of message type Tag = protocol.Tag -func highPriorityTag(tag protocol.Tag) bool { - return tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag +func highPriorityTag(tags []protocol.Tag) bool { + for _, tag := range tags { + if tag == protocol.AgreementVoteTag || tag == protocol.ProposalPayloadTag { + return true + } + } + return false } // OutgoingMessage represents a message we want to send. @@ -398,11 +405,12 @@ type WebsocketNetwork struct { } type broadcastRequest struct { - tag Tag - data []byte + tags []Tag + data [][]byte except *wsPeer done chan struct{} enqueueTime time.Time + ctx context.Context } // Address returns a string and whether that is a 'final' address or guessed. @@ -434,15 +442,30 @@ func (wn *WebsocketNetwork) PublicAddress() string { // Broadcast sends a message. // If except is not nil then we will not send it to that neighboring Peer. // if wait is true then the call blocks until the packet has actually been sent to all neighbors. 
-// TODO: add `priority` argument so that we don't have to guess it based on tag func (wn *WebsocketNetwork) Broadcast(ctx context.Context, tag protocol.Tag, data []byte, wait bool, except Peer) error { - request := broadcastRequest{tag: tag, data: data, enqueueTime: time.Now()} + dataArray := make([][]byte, 1, 1) + dataArray[0] = data + tagArray := make([]protocol.Tag, 1, 1) + tagArray[0] = tag + return wn.BroadcastArray(ctx, tagArray, dataArray, wait, except) +} + +// BroadcastArray sends an array of messages. +// If except is not nil then we will not send it to that neighboring Peer. +// if wait is true then the call blocks until the packet has actually been sent to all neighbors. +// TODO: add `priority` argument so that we don't have to guess it based on tag +func (wn *WebsocketNetwork) BroadcastArray(ctx context.Context, tags []protocol.Tag, data [][]byte, wait bool, except Peer) error { + if len(tags) != len(data) { + return errBcastInvalidArray + } + + request := broadcastRequest{tags: tags, data: data, enqueueTime: time.Now(), ctx: ctx} if except != nil { request.except = except.(*wsPeer) } broadcastQueue := wn.broadcastQueueBulk - if highPriorityTag(tag) { + if highPriorityTag(tags) { broadcastQueue = wn.broadcastQueueHighPrio } if wait { @@ -487,6 +510,14 @@ func (wn *WebsocketNetwork) Relay(ctx context.Context, tag protocol.Tag, data [] return nil } +// RelayArray relays array of messages +func (wn *WebsocketNetwork) RelayArray(ctx context.Context, tags []protocol.Tag, data [][]byte, wait bool, except Peer) error { + if wn.relayMessages { + return wn.BroadcastArray(ctx, tags, data, wait, except) + } + return nil +} + func (wn *WebsocketNetwork) disconnectThread(badnode Peer, reason disconnectReason) { defer wn.wg.Done() wn.disconnect(badnode, reason) @@ -1336,14 +1367,18 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool, } start := time.Now() - tbytes := []byte(request.tag) - mbytes := make([]byte, 
len(tbytes)+len(request.data)) - copy(mbytes, tbytes) - copy(mbytes[len(tbytes):], request.data) - var digest crypto.Digest - if request.tag != protocol.MsgDigestSkipTag && len(request.data) >= messageFilterSize { - digest = crypto.Hash(mbytes) + digests := make([]crypto.Digest, len(request.data), len(request.data)) + data := make([][]byte, len(request.data), len(request.data)) + for i, d := range request.data { + tbytes := []byte(request.tags[i]) + mbytes := make([]byte, len(tbytes)+len(d)) + copy(mbytes, tbytes) + copy(mbytes[len(tbytes):], d) + data[i] = mbytes + if request.tags[i] != protocol.MsgDigestSkipTag && len(d) >= messageFilterSize { + digests[i] = crypto.Hash(mbytes) + } } // first send to all the easy outbound peers who don't block, get them started. @@ -1355,7 +1390,7 @@ func (wn *WebsocketNetwork) innerBroadcast(request broadcastRequest, prio bool, if peer == request.except { continue } - ok := peer.writeNonBlock(mbytes, prio, digest, request.enqueueTime) + ok := peer.writeNonBlockMsgs(request.ctx, data, prio, digests, request.enqueueTime) if ok { sentMessageCount++ continue @@ -1846,6 +1881,8 @@ var errNetworkClosing = errors.New("WebsocketNetwork shutting down") var errBcastCallerCancel = errors.New("caller cancelled broadcast") +var errBcastInvalidArray = errors.New("invalid broadcast array") + var errBcastQFull = errors.New("broadcast queue full") // HostColonPortPattern matches "^[^:]+:\\d+$" e.g. "foo.com.:1234" @@ -2052,7 +2089,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) { resp := wn.prioScheme.MakePrioResponse(challenge) if resp != nil { mbytes := append([]byte(protocol.NetPrioResponseTag), resp...) 
- sent := peer.writeNonBlock(mbytes, true, crypto.Digest{}, time.Now()) + sent := peer.writeNonBlock(context.Background(), mbytes, true, crypto.Digest{}, time.Now()) if !sent { wn.log.With("remote", addr).With("local", localAddr).Warnf("could not send priority response to %v", addr) } diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go index 1e37d06619..a97293e4ba 100644 --- a/network/wsNetwork_test.go +++ b/network/wsNetwork_test.go @@ -325,6 +325,122 @@ func TestWebsocketPeerData(t *testing.T) { require.Equal(t, nil, netA.GetPeerData(peerB, "foo")) } +// Test sending array of messages +func TestWebsocketNetworkArray(t *testing.T) { + netA := makeTestWebsocketNode(t) + netA.config.GossipFanout = 1 + netA.Start() + defer func() { t.Log("stopping A"); netA.Stop(); t.Log("A done") }() + netB := makeTestWebsocketNode(t) + netB.config.GossipFanout = 1 + addrA, postListen := netA.Address() + require.True(t, postListen) + t.Log(addrA) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.Start() + defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() + counter := newMessageCounter(t, 3) + counterDone := counter.done + netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) + + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") + + tags := []protocol.Tag{protocol.TxnTag, protocol.TxnTag, protocol.TxnTag} + data := [][]byte{[]byte("foo"), []byte("bar"), []byte("algo")} + netA.BroadcastArray(context.Background(), tags, data, false, nil) + + select { + case <-counterDone: + case <-time.After(2 * time.Second): + t.Errorf("timeout, count=%d, wanted 2", counter.count) + } +} + +// Test cancelling message sends +func TestWebsocketNetworkCancel(t *testing.T) { + netA := makeTestWebsocketNode(t) + netA.config.GossipFanout = 1 + netA.Start() + defer func() { 
t.Log("stopping A"); netA.Stop(); t.Log("A done") }() + netB := makeTestWebsocketNode(t) + netB.config.GossipFanout = 1 + addrA, postListen := netA.Address() + require.True(t, postListen) + t.Log(addrA) + netB.phonebook.ReplacePeerList([]string{addrA}, "default", PhoneBookEntryRelayRole) + netB.Start() + defer func() { t.Log("stopping B"); netB.Stop(); t.Log("B done") }() + counter := newMessageCounter(t, 100) + counterDone := counter.done + netB.RegisterHandlers([]TaggedMessageHandler{{Tag: protocol.TxnTag, MessageHandler: counter}}) + + readyTimeout := time.NewTimer(2 * time.Second) + waitReady(t, netA, readyTimeout.C) + t.Log("a ready") + waitReady(t, netB, readyTimeout.C) + t.Log("b ready") + + tags := make([]protocol.Tag, 100) + data := make([][]byte, 100) + for i := range data { + tags[i] = protocol.TxnTag + data[i] = []byte(string(i)) + } + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + + // try calling BroadcastArray + netA.BroadcastArray(ctx, tags, data, true, nil) + + select { + case <-counterDone: + t.Errorf("All messages were sent, send not cancelled") + case <-time.After(2 * time.Second): + } + assert.Equal(t, 0, counter.Count()) + + // try calling innerBroadcast + request := broadcastRequest{tags: tags, data: data, enqueueTime: time.Now(), ctx: ctx} + peers, _ := netA.peerSnapshot([]*wsPeer{}) + netA.innerBroadcast(request, true, peers) + + select { + case <-counterDone: + t.Errorf("All messages were sent, send not cancelled") + case <-time.After(2 * time.Second): + } + assert.Equal(t, 0, counter.Count()) + + // try calling writeLoopSend + msgs := make([]sendMessage, 0, len(data)) + enqueueTime := time.Now() + for i, msg := range data { + tbytes := []byte(tags[i]) + mbytes := make([]byte, len(tbytes)+len(msg)) + copy(mbytes, tbytes) + copy(mbytes[len(tbytes):], msg) + msgs = append(msgs, sendMessage{data: mbytes, enqueued: time.Now(), peerEnqueued: enqueueTime, hash: crypto.Hash(mbytes), ctx: context.Background()}) + } + + 
msgs[50].ctx = ctx + + for _, peer := range peers { + peer.sendBufferHighPrio <- sendMessages{msgs} + } + + select { + case <-counterDone: + t.Errorf("All messages were sent, send not cancelled") + case <-time.After(2 * time.Second): + } + assert.Equal(t, 50, counter.Count()) +} + // Set up two nodes, test that a.Broadcast is received by B, when B has no address. func TestWebsocketNetworkNoAddress(t *testing.T) { netA := makeTestWebsocketNode(t) @@ -657,8 +773,8 @@ func TestSlowOutboundPeer(t *testing.T) { for i := range destPeers { destPeers[i].closing = make(chan struct{}) destPeers[i].net = node - destPeers[i].sendBufferHighPrio = make(chan sendMessage, sendBufferLength) - destPeers[i].sendBufferBulk = make(chan sendMessage, sendBufferLength) + destPeers[i].sendBufferHighPrio = make(chan sendMessages, sendBufferLength) + destPeers[i].sendBufferBulk = make(chan sendMessages, sendBufferLength) destPeers[i].conn = &nopConnSingleton destPeers[i].rootURL = fmt.Sprintf("fake %d", i) node.addPeer(&destPeers[i]) @@ -795,7 +911,7 @@ func TestDupFilter(t *testing.T) { rand.Read(msg) t.Log("A send, C non-dup-send") netA.Broadcast(context.Background(), debugTag2, msg, true, nil) - // B should broadcast its non-desire to recieve the message again + // B should broadcast its non-desire to receive the message again time.Sleep(500 * time.Millisecond) // C should now not send these @@ -1179,10 +1295,14 @@ func TestWebsocketNetwork_checkServerResponseVariables(t *testing.T) { } func (wn *WebsocketNetwork) broadcastWithTimestamp(tag protocol.Tag, data []byte, when time.Time) error { - request := broadcastRequest{tag: tag, data: data, enqueueTime: when} + msgArr := make([][]byte, 1, 1) + msgArr[0] = data + tagArr := make([]protocol.Tag, 1, 1) + tagArr[0] = tag + request := broadcastRequest{tags: tagArr, data: msgArr, enqueueTime: when, ctx: context.Background()} broadcastQueue := wn.broadcastQueueBulk - if highPriorityTag(tag) { + if highPriorityTag(tagArr) { broadcastQueue = 
wn.broadcastQueueHighPrio } // no wait diff --git a/network/wsPeer.go b/network/wsPeer.go index 27e109e44e..9e665a2c77 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -96,7 +96,9 @@ type sendMessage struct { data []byte enqueued time.Time // the time at which the message was first generated peerEnqueued time.Time // the time at which the peer was attempting to enqueue the message - msgTags map[protocol.Tag]bool // when msgTags is speficied ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data would be accompanied to this message. + msgTags map[protocol.Tag]bool // when msgTags is specified ( i.e. non-nil ), the send goroutine is to replace the message tag filter with this one. No data would be accompanied to this message. + hash crypto.Digest + ctx context.Context } // wsPeerCore also works for non-connected peers we want to do HTTP GET from @@ -126,6 +128,10 @@ type Response struct { Topics Topics } +type sendMessages struct { + msgs []sendMessage +} + type wsPeer struct { // lastPacketTime contains the UnixNano at the last time a successful communication was made with the peer. 
// "successful communication" above refers to either reading from or writing to a connection without receiving any @@ -147,8 +153,8 @@ type wsPeer struct { closing chan struct{} - sendBufferHighPrio chan sendMessage - sendBufferBulk chan sendMessage + sendBufferHighPrio chan sendMessages + sendBufferBulk chan sendMessages wg sync.WaitGroup @@ -278,7 +284,7 @@ func (wp *wsPeer) Unicast(ctx context.Context, msg []byte, tag protocol.Tag) err digest = crypto.Hash(mbytes) } - ok := wp.writeNonBlock(mbytes, false, digest, time.Now()) + ok := wp.writeNonBlock(ctx, mbytes, false, digest, time.Now()) if !ok { networkBroadcastsDropped.Inc(nil) err = fmt.Errorf("wsPeer failed to unicast: %v", wp.GetAddress()) @@ -302,11 +308,16 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseT serializedMsg := responseTopics.MarshallTopics() // Send serializedMsg - select { - case wp.sendBufferBulk <- sendMessage{ + msg := make([]sendMessage, 1, 1) + msg[0] = sendMessage{ data: append([]byte(protocol.TopicMsgRespTag), serializedMsg...), enqueued: time.Now(), - peerEnqueued: time.Now()}: + peerEnqueued: time.Now(), + ctx: context.Background(), + } + + select { + case wp.sendBufferBulk <- sendMessages{msgs: msg}: case <-wp.closing: wp.net.log.Debugf("peer closing %s", wp.conn.RemoteAddr().String()) return @@ -320,8 +331,8 @@ func (wp *wsPeer) Respond(ctx context.Context, reqMsg IncomingMessage, responseT func (wp *wsPeer) init(config config.Local, sendBufferLength int) { wp.net.log.Debugf("wsPeer init outgoing=%v %#v", wp.outgoing, wp.rootURL) wp.closing = make(chan struct{}) - wp.sendBufferHighPrio = make(chan sendMessage, sendBufferLength) - wp.sendBufferBulk = make(chan sendMessage, sendBufferLength) + wp.sendBufferHighPrio = make(chan sendMessages, sendBufferLength) + wp.sendBufferBulk = make(chan sendMessages, sendBufferLength) atomic.StoreInt64(&wp.lastPacketTime, time.Now().UnixNano()) wp.responseChannels = make(map[uint64]chan *Response) 
wp.sendMessageTag = defaultSendMessageTags @@ -498,12 +509,15 @@ func (wp *wsPeer) handleMessageOfInterest(msg IncomingMessage) (shutdown bool) { wp.net.log.Warnf("wsPeer handleMessageOfInterest: could not unmarshall message from: %s %v", wp.conn.RemoteAddr().String(), err) return } - sm := sendMessage{ + msgs := make([]sendMessage, 1, 1) + msgs[0] = sendMessage{ data: nil, enqueued: time.Now(), peerEnqueued: time.Now(), msgTags: msgTagsMap, + ctx: context.Background(), } + sm := sendMessages{msgs: msgs} // try to send the message to the send loop. The send loop will store the message locally and would use it. // the rationale here is that this message is rarely sent, and we would benefit from having it being lock-free. @@ -546,7 +560,24 @@ func (wp *wsPeer) handleFilterMessage(msg IncomingMessage) { wp.outgoingMsgFilter.CheckDigest(digest, true, true) } -func (wp *wsPeer) writeLoopSend(msg sendMessage) disconnectReason { +func (wp *wsPeer) writeLoopSend(msgs sendMessages) disconnectReason { + for _, msg := range msgs.msgs { + select { + case <-msg.ctx.Done(): + //logging.Base().Infof("cancelled large send, msg %v out of %v", i, len(msgs.msgs)) + return disconnectReasonNone + default: + } + + if err := wp.writeLoopSendMsg(msg); err != disconnectReasonNone { + return err + } + } + + return disconnectReasonNone +} + +func (wp *wsPeer) writeLoopSendMsg(msg sendMessage) disconnectReason { if len(msg.data) > maxMessageLength { wp.net.log.Errorf("trying to send a message longer than we would recieve: %d > %d tag=%s", len(msg.data), maxMessageLength, string(msg.data[0:2])) // just drop it, don't break the connection @@ -630,24 +661,47 @@ func (wp *wsPeer) writeLoopCleanup(reason disconnectReason) { wp.wg.Done() } +func (wp *wsPeer) writeNonBlock(ctx context.Context, data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time) bool { + msgs := make([][]byte, 1, 1) + digests := make([]crypto.Digest, 1, 1) + msgs[0] = data + digests[0] = digest + return 
wp.writeNonBlockMsgs(ctx, msgs, highPrio, digests, msgEnqueueTime) +} + // return true if enqueued/sent -func (wp *wsPeer) writeNonBlock(data []byte, highPrio bool, digest crypto.Digest, msgEnqueueTime time.Time) bool { - if wp.outgoingMsgFilter != nil && len(data) > messageFilterSize && wp.outgoingMsgFilter.CheckDigest(digest, false, false) { - //wp.net.log.Debugf("msg drop as outbound dup %s(%d) %v", string(data[:2]), len(data)-2, digest) - // peer has notified us it doesn't need this message - outgoingNetworkMessageFilteredOutTotal.Inc(nil) - outgoingNetworkMessageFilteredOutBytesTotal.AddUint64(uint64(len(data)), nil) +func (wp *wsPeer) writeNonBlockMsgs(ctx context.Context, data [][]byte, highPrio bool, digest []crypto.Digest, msgEnqueueTime time.Time) bool { + includeIndices := make([]int, 0, len(data)) + for i := range data { + if wp.outgoingMsgFilter != nil && len(data[i]) > messageFilterSize && wp.outgoingMsgFilter.CheckDigest(digest[i], false, false) { + //wp.net.log.Debugf("msg drop as outbound dup %s(%d) %v", string(data[:2]), len(data)-2, digest) + // peer has notified us it doesn't need this message + outgoingNetworkMessageFilteredOutTotal.Inc(nil) + outgoingNetworkMessageFilteredOutBytesTotal.AddUint64(uint64(len(data)), nil) + } else { + includeIndices = append(includeIndices, i) + } + } + if len(includeIndices) == 0 { // returning true because it is as good as sent, the peer already has it. 
return true } - var outchan chan sendMessage + + var outchan chan sendMessages + + msgs := make([]sendMessage, 0, len(includeIndices)) + enqueueTime := time.Now() + for _, index := range includeIndices { + msgs = append(msgs, sendMessage{data: data[index], enqueued: msgEnqueueTime, peerEnqueued: enqueueTime, hash: digest[index], ctx: ctx}) + } + if highPrio { outchan = wp.sendBufferHighPrio } else { outchan = wp.sendBufferBulk } select { - case outchan <- sendMessage{data: data, enqueued: msgEnqueueTime, peerEnqueued: time.Now()}: + case outchan <- sendMessages{msgs: msgs}: return true default: } @@ -672,7 +726,7 @@ func (wp *wsPeer) sendPing() bool { copy(mbytes, tagBytes) crypto.RandBytes(mbytes[len(tagBytes):]) wp.pingData = mbytes[len(tagBytes):] - sent := wp.writeNonBlock(mbytes, false, crypto.Digest{}, time.Now()) + sent := wp.writeNonBlock(context.Background(), mbytes, false, crypto.Digest{}, time.Now()) if sent { wp.pingInFlight = true @@ -759,11 +813,14 @@ func (wp *wsPeer) Request(ctx context.Context, tag Tag, topics Topics) (resp *Re defer wp.getAndRemoveResponseChannel(hash) // Send serializedMsg - select { - case wp.sendBufferBulk <- sendMessage{ + msg := make([]sendMessage, 1, 1) + msg[0] = sendMessage{ data: append([]byte(tag), serializedMsg...), enqueued: time.Now(), - peerEnqueued: time.Now()}: + peerEnqueued: time.Now(), + ctx: context.Background()} + select { + case wp.sendBufferBulk <- sendMessages{msgs: msg}: case <-wp.closing: e = fmt.Errorf("peer closing %s", wp.conn.RemoteAddr().String()) return From 653cef270cf23b0d73f1ccf4f86fe5a4bb167893 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Wed, 31 Mar 2021 20:36:53 -0400 Subject: [PATCH 155/215] Pingpong nft (#2007) Add pingpong mode to create lots of assets each with total amount 1 (aka NFTs) --- cmd/pingpong/runCmd.go | 10 +- shared/pingpong/accounts.go | 6 +- shared/pingpong/config.go | 8 + shared/pingpong/pingpong.go | 257 +++++++++++++++--- .../cc_agent/component/pingPongComponent.go | 8 
+- 5 files changed, 248 insertions(+), 41 deletions(-) diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go index c1104d5d6f..ed834f4add 100644 --- a/cmd/pingpong/runCmd.go +++ b/cmd/pingpong/runCmd.go @@ -66,6 +66,7 @@ var appProgGlobKeys uint32 var appProgLocalKeys uint32 var duration uint32 var rekey bool +var nftAsaPerSecond uint32 func init() { rootCmd.AddCommand(runCmd) @@ -105,6 +106,7 @@ func init() { runCmd.Flags().BoolVar(&randomLease, "randomlease", false, "set the lease to contain a random value") runCmd.Flags().BoolVar(&rekey, "rekey", false, "Create RekeyTo transactions. Requires groupsize=2 and any of random flags exc random dst") runCmd.Flags().Uint32Var(&duration, "duration", 0, "The number of seconds to run the pingpong test, forever if 0") + runCmd.Flags().Uint32Var(&nftAsaPerSecond, "nftasapersecond", 0, "The number of NFT-style ASAs to create per second") } @@ -302,11 +304,15 @@ var runCmd = &cobra.Command{ } } + cfg.NftAsaPerSecond = nftAsaPerSecond + reportInfof("Preparing to initialize PingPong with config:\n") cfg.Dump(os.Stdout) + pps := pingpong.NewPingpong(cfg) + // Initialize accounts if necessary - accounts, cinfo, cfg, err := pingpong.PrepareAccounts(ac, cfg) + err = pps.PrepareAccounts(ac) if err != nil { reportErrorf("Error preparing accounts for transfers: %v\n", err) } @@ -319,7 +325,7 @@ var runCmd = &cobra.Command{ cfg.Dump(os.Stdout) // Kick off the real processing - pingpong.RunPingPong(context.Background(), ac, accounts, cinfo, cfg) + pps.RunPingPong(context.Background(), ac) }, } diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go index 877994f9d1..a58aa4ce6a 100644 --- a/shared/pingpong/accounts.go +++ b/shared/pingpong/accounts.go @@ -130,7 +130,9 @@ func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64 // Step 1) Create X assets for each of the participant accounts // Step 2) For each participant account, opt-in to assets of all other participant accounts // Step 3) 
Evenly distribute the assets across all participant accounts -func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConfig) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) { +func (pps *WorkerState) prepareAssets(assetAccounts map[string]uint64, client libgoal.Client) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) { + accounts := assetAccounts + cfg := pps.cfg proto, err := getProto(client) if err != nil { return @@ -353,7 +355,7 @@ func prepareAssets(accounts map[string]uint64, client libgoal.Client, cfg PpConf fmt.Printf("Distributing assets from %v to %v \n", creator, addr) } - tx, sendErr := constructTxn(creator, addr, cfg.MaxFee, assetAmt, k, CreatablesInfo{}, client, cfg) + tx, sendErr := pps.constructTxn(creator, addr, cfg.MaxFee, assetAmt, k, client) if sendErr != nil { fmt.Printf("Cannot transfer asset %v from account %v\n", k, creator) err = sendErr diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go index 48b3586e4e..21841ed206 100644 --- a/shared/pingpong/config.go +++ b/shared/pingpong/config.go @@ -61,6 +61,11 @@ type PpConfig struct { AppLocalKeys uint32 Rekey bool MaxRuntime time.Duration + + // asset spam; make lots of NFT ASAs + NftAsaPerSecond uint32 // e.g. 
100 + NftAsaPerAccount uint32 // 0..999 + NftAsaAccountInFlight uint32 } // DefaultConfig object for Ping Pong @@ -88,6 +93,9 @@ var DefaultConfig = PpConfig{ AppProgHashSize: "sha256", Rekey: false, MaxRuntime: 0, + + NftAsaAccountInFlight: 5, + NftAsaPerAccount: 900, } // LoadConfigFromFile reads and loads Ping Pong configuration diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go index 0ffc97af29..81feac1cf1 100644 --- a/shared/pingpong/pingpong.go +++ b/shared/pingpong/pingpong.go @@ -24,6 +24,7 @@ import ( "os" "time" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1" "github.com/algorand/go-algorand/data/basics" @@ -38,14 +39,25 @@ type CreatablesInfo struct { OptIns map[uint64][]string } +// WorkerState object holds a running pingpong worker +type WorkerState struct { + cfg PpConfig + accounts map[string]uint64 + cinfo CreatablesInfo + + nftStartTime int64 + localNftIndex uint64 + nftHolders map[string]int +} + // PrepareAccounts to set up accounts and asset accounts required for Ping Pong run -func PrepareAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]uint64, cinfo CreatablesInfo, cfg PpConfig, err error) { - cfg = initCfg - accounts, cfg, err = ensureAccounts(ac, cfg) +func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) { + pps.accounts, pps.cfg, err = ensureAccounts(ac, pps.cfg) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "ensure accounts failed %v\n", err) return } + cfg := pps.cfg wallet, walletErr := ac.GetUnencryptedWalletHandle() if err != nil { @@ -58,48 +70,49 @@ func PrepareAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]u cfg.MaxAmt = 0 var assetAccounts map[string]uint64 - assetAccounts, err = prepareNewAccounts(ac, cfg, wallet, accounts) + assetAccounts, err = prepareNewAccounts(ac, cfg, wallet, pps.accounts) if err != nil { _, _ = fmt.Fprintf(os.Stderr, 
"prepare new accounts failed: %v\n", err) return } - cinfo.AssetParams, cinfo.OptIns, err = prepareAssets(assetAccounts, ac, cfg) + pps.cinfo.AssetParams, pps.cinfo.OptIns, err = pps.prepareAssets(assetAccounts, ac) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "prepare assets failed %v\n", err) return } if !cfg.Quiet { - for addr := range accounts { - fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, accounts[addr]) + for addr := range pps.accounts { + fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr]) } } } else if cfg.NumApp > 0 { var appAccounts map[string]uint64 - appAccounts, err = prepareNewAccounts(ac, cfg, wallet, accounts) + appAccounts, err = prepareNewAccounts(ac, cfg, wallet, pps.accounts) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err) return } - cinfo.AppParams, cinfo.OptIns, err = prepareApps(appAccounts, ac, cfg) + pps.cinfo.AppParams, pps.cinfo.OptIns, err = prepareApps(appAccounts, ac, cfg) if err != nil { return } if !cfg.Quiet { - for addr := range accounts { - fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, accounts[addr]) + for addr := range pps.accounts { + fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr]) } } } else { - err = fundAccounts(accounts, ac, cfg) + err = fundAccounts(pps.accounts, ac, cfg) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err) return } } + pps.cfg = cfg return } @@ -268,8 +281,10 @@ func listSufficientAccounts(accounts map[string]uint64, minimumAmount uint64, ex return out } +var logPeriod = 5 * time.Second + // RunPingPong starts ping pong process -func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uint64, cinfo CreatablesInfo, cfg PpConfig) { +func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) { // Infinite loop given: // - accounts -> map of accounts to 
include in transfers (including src account, which we don't want to use) // - cfg -> configuration for how to proceed @@ -286,6 +301,7 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin // error = fundAccounts() // } + cfg := pps.cfg var runTime time.Duration if cfg.RunTime > 0 { runTime = cfg.RunTime @@ -299,6 +315,14 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin restTime := cfg.RestTime refreshTime := time.Now().Add(cfg.RefreshTime) + var nftThrottler *throttler + if pps.cfg.NftAsaPerSecond > 0 { + nftThrottler = newThrottler(20, float64(pps.cfg.NftAsaPerSecond)) + } + + lastLog := time.Now() + nextLog := lastLog.Add(logPeriod) + for { if ctx.Err() != nil { _, _ = fmt.Fprintf(os.Stderr, "error bad context in RunPingPong: %v\n", ctx.Err()) @@ -307,22 +331,46 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin startTime := time.Now() stopTime := startTime.Add(runTime) - var totalSent, totalSucceeded uint64 - for !time.Now().After(stopTime) { + var totalSent, totalSucceeded, lastTotalSent uint64 + for { + now := time.Now() + if now.After(stopTime) { + break + } + if now.After(nextLog) { + dt := now.Sub(lastLog) + fmt.Printf("%d sent, %0.2f/s (%d total)\n", totalSent-lastTotalSent, float64(totalSent-lastTotalSent)/dt.Seconds(), totalSent) + lastTotalSent = totalSent + for now.After(nextLog) { + nextLog = nextLog.Add(logPeriod) + } + lastLog = now + } + if cfg.MaxRuntime > 0 && time.Now().After(endTime) { fmt.Printf("Terminating after max run time of %.f seconds\n", cfg.MaxRuntime.Seconds()) return } + if pps.cfg.NftAsaPerSecond > 0 { + sent, err := pps.makeNftTraffic(ac) + if err != nil { + fmt.Fprintf(os.Stderr, "error sending nft transactions: %v\n", err) + } + nftThrottler.maybeSleep(int(sent)) + totalSent += sent + continue + } + minimumAmount := cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*2 - fromList := listSufficientAccounts(accounts, minimumAmount, 
cfg.SrcAccount) + fromList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount) // in group tests txns are sent back and forth, so both parties need funds if cfg.GroupSize == 1 { minimumAmount = 0 } - toList := listSufficientAccounts(accounts, minimumAmount, cfg.SrcAccount) + toList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount) - sent, succeeded, err := sendFromTo(fromList, toList, accounts, cinfo, ac, cfg) + sent, succeeded, err := pps.sendFromTo(fromList, toList, ac) totalSent += sent totalSucceeded += succeeded if err != nil { @@ -330,7 +378,7 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin } if cfg.RefreshTime > 0 && time.Now().After(refreshTime) { - err = refreshAccounts(accounts, ac, cfg) + err = refreshAccounts(pps.accounts, ac, cfg) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "error refreshing: %v\n", err) } @@ -350,6 +398,11 @@ func RunPingPong(ctx context.Context, ac libgoal.Client, accounts map[string]uin } } +// NewPingpong creates a new pingpong WorkerState +func NewPingpong(cfg PpConfig) *WorkerState { + return &WorkerState{cfg: cfg, nftHolders: make(map[string]int)} +} + func getCreatableID(cfg PpConfig, cinfo CreatablesInfo) (aidx uint64) { if cfg.NumAsset > 0 { rindex := rand.Intn(len(cinfo.AssetParams)) @@ -375,14 +428,96 @@ func getCreatableID(cfg PpConfig, cinfo CreatablesInfo) (aidx uint64) { return } -func sendFromTo( - fromList, toList []string, accounts map[string]uint64, - cinfo CreatablesInfo, - client libgoal.Client, cfg PpConfig, +func (pps *WorkerState) fee() uint64 { + cfg := pps.cfg + fee := cfg.MaxFee + if cfg.RandomizeFee { + fee = rand.Uint64()%(cfg.MaxFee-cfg.MinFee) + cfg.MinFee + } + return fee +} + +func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64, err error) { + fee := pps.fee() + if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / 
float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) { + var addr string + var wallet []byte + wallet, err = client.GetUnencryptedWalletHandle() + if err != nil { + return + } + addr, err = client.GenerateAddress(wallet) + if err != nil { + return + } + fmt.Printf("new NFT holder %s\n", addr) + var proto config.ConsensusParams + proto, err = getProto(client) + if err != nil { + return + } + // enough for the per-asa minbalance and more than enough for the txns to create them + toSend := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2 + pps.nftHolders[addr] = 0 + _, err = sendPaymentFromUnencryptedWallet(client, pps.cfg.SrcAccount, addr, fee, toSend, nil) + if err != nil { + return + } + sentCount++ + // we ran one txn above already to fund the new addr, + // we'll run a second txn below + } + // pick a random sender from nft holder sub accounts + pick := rand.Intn(len(pps.nftHolders)) + pos := 0 + var sender string + var senderNftCount int + for addr, nftCount := range pps.nftHolders { + sender = addr + senderNftCount = nftCount + if pos == pick { + break + } + pos++ + + } + var meta [32]byte + rand.Read(meta[:]) + assetName := pps.nftSpamAssetName() + const totalSupply = 1 + txn, err := client.MakeUnsignedAssetCreateTx(totalSupply, false, sender, sender, sender, sender, "ping", assetName, "", meta[:], 0) + if err != nil { + fmt.Printf("Cannot make asset create txn with meta %v\n", meta) + return + } + txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, pps.cfg.MaxFee, txn) + if err != nil { + fmt.Printf("Cannot fill asset creation txn\n") + return + } + if senderNftCount+1 >= int(pps.cfg.NftAsaPerAccount) { + delete(pps.nftHolders, sender) + } else { + pps.nftHolders[sender] = senderNftCount + 1 + } + stxn, err := signTxn(sender, txn, client, pps.cfg) + if err != nil { + return + } + sentCount++ + _, err = client.BroadcastTransaction(stxn) + return +} + +func (pps *WorkerState) sendFromTo( + fromList, toList []string, + client libgoal.Client, ) 
(sentCount, successCount uint64, err error) { + accounts := pps.accounts + cinfo := pps.cinfo + cfg := pps.cfg amt := cfg.MaxAmt - fee := cfg.MaxFee assetsByCreator := make(map[string][]*v1.AssetParams) for _, p := range cinfo.AssetParams { @@ -394,9 +529,7 @@ func sendFromTo( amt = rand.Uint64()%cfg.MaxAmt + 1 } - if cfg.RandomizeFee { - fee = rand.Uint64()%(cfg.MaxFee-cfg.MinFee) + cfg.MinFee - } + fee := pps.fee() to := toList[i] if cfg.RandomizeDst { @@ -417,7 +550,7 @@ func sendFromTo( // generate random assetID or appId if we send asset/app txns aidx := getCreatableID(cfg, cinfo) // Construct single txn - txn, consErr := constructTxn(from, to, fee, amt, aidx, cinfo, client, cfg) + txn, consErr := pps.constructTxn(from, to, fee, amt, aidx, client) if consErr != nil { err = consErr _, _ = fmt.Fprintf(os.Stderr, "constructTxn failed: %v\n", err) @@ -460,17 +593,17 @@ func sendFromTo( var txn transactions.Transaction var signer string if j%2 == 0 { - txn, err = constructTxn(from, to, fee, amt, 0, cinfo, client, cfg) + txn, err = pps.constructTxn(from, to, fee, amt, 0, client) fromBalanceChange -= int64(txn.Fee.Raw + amt) toBalanceChange += int64(amt) signer = from } else if cfg.GroupSize == 2 && cfg.Rekey { - txn, err = constructTxn(from, to, fee, amt, 0, cinfo, client, cfg) + txn, err = pps.constructTxn(from, to, fee, amt, 0, client) fromBalanceChange -= int64(txn.Fee.Raw + amt) toBalanceChange += int64(amt) signer = to } else { - txn, err = constructTxn(to, from, fee, amt, 0, cinfo, client, cfg) + txn, err = pps.constructTxn(to, from, fee, amt, 0, client) toBalanceChange -= int64(txn.Fee.Raw + amt) fromBalanceChange += int64(amt) signer = to @@ -553,7 +686,17 @@ func sendFromTo( return } -func constructTxn(from, to string, fee, amt, aidx uint64, cinfo CreatablesInfo, client libgoal.Client, cfg PpConfig) (txn transactions.Transaction, err error) { +func (pps *WorkerState) nftSpamAssetName() string { + if pps.nftStartTime == 0 { + pps.nftStartTime = 
time.Now().Unix() + } + pps.localNftIndex++ + return fmt.Sprintf("nft%d_%d", pps.nftStartTime, pps.localNftIndex) +} + +func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, client libgoal.Client) (txn transactions.Transaction, err error) { + cfg := pps.cfg + cinfo := pps.cinfo var noteField []byte const pingpongTag = "pingpong" const tagLen = uint32(len(pingpongTag)) @@ -675,3 +818,53 @@ func signTxn(signer string, txn transactions.Transaction, client libgoal.Client, } return } + +type timeCount struct { + when time.Time + count int +} + +type throttler struct { + times []timeCount + + next int + + // target x per-second + xps float64 + + // rough proportional + integral control + iterm float64 +} + +func newThrottler(windowSize int, targetPerSecond float64) *throttler { + return &throttler{times: make([]timeCount, windowSize), xps: targetPerSecond, iterm: 0.0} +} + +func (t *throttler) maybeSleep(count int) { + now := time.Now() + t.times[t.next].when = now + t.times[t.next].count = count + nn := (t.next + 1) % len(t.times) + t.next = nn + if t.times[nn].when.IsZero() { + return + } + dt := now.Sub(t.times[nn].when) + countsum := 0 + for i, tc := range t.times { + if i != nn { + countsum += tc.count + } + } + rate := float64(countsum) / dt.Seconds() + if rate > t.xps { + // rate too high, slow down + desiredSeconds := float64(countsum) / t.xps + extraSeconds := desiredSeconds - dt.Seconds() + t.iterm += 0.1 * extraSeconds / float64(len(t.times)) + time.Sleep(time.Duration(int64(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times))))) + + } else { + t.iterm *= 0.95 + } +} diff --git a/test/commandandcontrol/cc_agent/component/pingPongComponent.go b/test/commandandcontrol/cc_agent/component/pingPongComponent.go index 07aa1a5f85..155060c97f 100644 --- a/test/commandandcontrol/cc_agent/component/pingPongComponent.go +++ b/test/commandandcontrol/cc_agent/component/pingPongComponent.go @@ -120,13 +120,11 @@ func (componentInstance 
*PingPongComponentInstance) startPingPong(cfg *pingpong. log.Infof("Preparing to initialize PingPong with config: %+v\n", cfg) - var accounts map[string]uint64 - var cinfo pingpong.CreatablesInfo - var resultCfg pingpong.PpConfig + pps := pingpong.NewPingpong(*cfg) // Initialize accounts if necessary, this may take several attempts while previous transactions to settle for i := 0; i < 10; i++ { - accounts, cinfo, resultCfg, err = pingpong.PrepareAccounts(ac, *cfg) + err = pps.PrepareAccounts(ac) if err == nil { break } else { @@ -145,7 +143,7 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong. componentInstance.ctx, componentInstance.cancelFunc = context.WithCancel(context.Background()) // Kick off the real processing - go pingpong.RunPingPong(componentInstance.ctx, ac, accounts, cinfo, resultCfg) + go pps.RunPingPong(componentInstance.ctx, ac) return } From 93bb7db1fcd00c6bdad93047518e8eadb150fddb Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Wed, 31 Mar 2021 20:37:40 -0400 Subject: [PATCH 156/215] script to watch algod heap (#2011) Script for repeatedly snapshotting algod heap profile, creates snapshot svg reports and differential svg reports. --- test/heapwatch/README.md | 43 ++++++++ test/heapwatch/heapWatch.py | 208 ++++++++++++++++++++++++++++++++++++ test/heapwatch/start.sh | 33 ++++++ test/heapwatch/stop.sh | 29 +++++ 4 files changed, 313 insertions(+) create mode 100644 test/heapwatch/README.md create mode 100644 test/heapwatch/heapWatch.py create mode 100755 test/heapwatch/start.sh create mode 100755 test/heapwatch/stop.sh diff --git a/test/heapwatch/README.md b/test/heapwatch/README.md new file mode 100644 index 0000000000..27cb54d316 --- /dev/null +++ b/test/heapwatch/README.md @@ -0,0 +1,43 @@ +# Heap Watch + +Tools for checking if algod has memory leaks. + +Run a local private network of three nodes and two pingpongs. + +Periodically sample pprof memory profiles. 
+ +Watch memory usage from `ps` and write to a CSV file for each algod. + +# Usage + +To start: + +```sh +bash test/heapwatch/start.sh /tmp/todaysTest +``` + +To stop: + +```sh +bash test/heapwatch/stop.sh /tmp/todaysTest +``` + +Results: + +Snapshot usage plots and inter-snapshot delta plots. + +```sh +ls /tmp/todaysTest/heaps/*.svg +``` + +The raw files for analysis with `go tool pprof` + +```sh +ls /tmp/todaysTest/heaps/*.heap +``` + +CSV files of memory usage according to `ps`: + +```sh +ls /tmp/todaysTest/heaps/*.csv +``` \ No newline at end of file diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py new file mode 100644 index 0000000000..24bbf2ff5c --- /dev/null +++ b/test/heapwatch/heapWatch.py @@ -0,0 +1,208 @@ +#!/usr/bin/python3 +# +# repeatedly snapshot heap profiles for one or more algod +# +# usage: +# mkdir -p /tmp/heaps +# python3 test/scripts/heapWatch.py -o /tmp/heaps --period 60s private_network_root/* + +import argparse +import logging +import os +import signal +import subprocess +import sys +import time +import urllib.request + +logger = logging.getLogger(__name__) + + + +def read_algod_dir(algorand_data): + with open(os.path.join(algorand_data, 'algod.net')) as fin: + net = fin.read().strip() + with open(os.path.join(algorand_data, 'algod.token')) as fin: + token = fin.read().strip() + with open(os.path.join(algorand_data, 'algod.admin.token')) as fin: + admin_token = fin.read().strip() + return net, token, admin_token + +# data from /debug/pprof/* is already gzipped + +# curl -o /tmp/algod.pprof.heap "http://`cat ${ALGORAND_DATA}/algod.net`/urlAuth/`cat ${ALGORAND_DATA}/algod.admin.token`/debug/pprof/heap" +# both reports can be generated from one heap profile snapshot +# go tool pprof -sample_index=inuse_space -svg -output /tmp/algod.heap.svg /tmp/algod.pprof.heap +# go tool pprof -sample_index=alloc_space -svg -output /tmp/algod.alloc.svg /tmp/algod.pprof.heap + +## curl -o /tmp/algod.pprof.allocs "http://`cat 
${ALGORAND_DATA}/algod.net`/urlAuth/`cat ${ALGORAND_DATA}/algod.admin.token`/debug/pprof/allocs" +# go tool pprof -svg -output /tmp/algod.allocs.svg /tmp/algod.pprof.allocs + +# http://localhost:6060/debug/pprof/allocs?debug=1 + +# -inuse_space Same as -sample_index=inuse_space +# -inuse_objects Same as -sample_index=inuse_objects +# -alloc_space Same as -sample_index=alloc_space +# -alloc_objects Same as -sample_index=alloc_objects + +graceful_stop = False + +def do_graceful_stop(signum, frame): + global graceful_stop + if graceful_stop: + sys.stderr.write("second signal, quitting\n") + sys.exit(1) + sys.stderr.write("graceful stop...\n") + graceful_stop = True + +signal.signal(signal.SIGTERM, do_graceful_stop) +signal.signal(signal.SIGINT, do_graceful_stop) + + +class algodDir: + def __init__(self, path): + self.path = path + self.nick = os.path.basename(self.path) + net, token, admin_token = read_algod_dir(self.path) + self.net = net + self.token = token + self.admin_token = admin_token + self.headers = {} + self._pid = None + + def pid(self): + if self._pid is None: + with open(os.path.join(self.path, 'algod.pid')) as fin: + self._pid = int(fin.read()) + return self._pid + + def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None): + url = 'http://' + self.net + '/urlAuth/' + self.admin_token + '/debug/pprof/' + name + response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers)) + if response.code != 200: + logger.error('could not fetch %s from %s via %r', name, self.path. url) + return + blob = response.read() + if snapshot_name is None: + snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime()) + outpath = os.path.join(outdir or '.', self.nick + '.' + snapshot_name + '.' 
+ name) + with open(outpath, 'wb') as fout: + fout.write(blob) + logger.debug('%s -> %s', self.nick, outpath) + return outpath + + def get_heap_snapshot(self, snapshot_name=None, outdir=None): + return self.get_pprof_snapshot('heap', snapshot_name, outdir) + + def get_goroutine_snapshot(self, snapshot_name=None, outdir=None): + return self.get_pprof_snapshot('goroutine', snapshot_name, outdir) + + def psHeap(self): + # return rss, vsz + # ps -o rss,vsz $(cat ${ALGORAND_DATA}/algod.pid) + subp = subprocess.Popen(['ps', '-o', 'rss,vsz', str(self.pid())], stdout=subprocess.PIPE) + try: + outs, errs = subp.communicate(timeout=2) + for line in outs.decode().splitlines(): + try: + rss,vsz = [int(x) for x in line.strip().split()] + return rss,vsz + except: + pass + except: + return None, None + +class watcher: + def __init__(self, args): + self.args = args + self.prevsnapshots = {} + self.they = [] + for path in args.data_dirs: + if os.path.exists(os.path.join(path, 'algod.net')): + try: + ad = algodDir(path) + logger.debug('found "%s" at %r', ad.nick, ad.path) + self.they.append(ad) + except: + logger.error('bad algod: %r', path, exc_info=True) + + def do_snap(self, now): + snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime(now)) + snapshot_isotime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now)) + logger.debug('begin snapshot %s', snapshot_name) + psheaps = {} + newsnapshots = {} + for ad in self.they: + snappath = ad.get_heap_snapshot(snapshot_name, outdir=self.args.out) + newsnapshots[ad.path] = snappath + rss, vsz = ad.psHeap() + if rss and vsz: + psheaps[ad.nick] = (rss, vsz) + for nick, rssvsz in psheaps.items(): + rss, vsz = rssvsz + with open(os.path.join(self.args.out, nick + '.heap.csv'), 'at') as fout: + fout.write('{},{},{},{}\n'.format(snapshot_name,snapshot_isotime,rss, vsz)) + if self.args.goroutine: + for ad in self.they: + ad.get_goroutine_snapshot(snapshot_name, outdir=self.args.out) + logger.debug('snapped, processing...') + # make 
absolute and differential plots + for path, snappath in newsnapshots.items(): + subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse.svg', snappath]) + subprocess.call(['go', 'tool', 'pprof', '-sample_index=alloc_space', '-svg', '-output', snappath + '.alloc.svg', snappath]) + prev = self.prevsnapshots.get(path) + if prev: + subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse_diff.svg', '-base='+prev, snappath]) + subprocess.call(['go', 'tool', 'pprof', '-sample_index=alloc_space', '-svg', '-output', snappath + '.alloc_diff.svg', '-diff_base='+prev, snappath]) + self.prevsnapshots = newsnapshots + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('data_dirs', nargs='*', help='list paths to algorand datadirs to grab heap profile from') + ap.add_argument('--goroutine', default=False, action='store_true', help='also capture goroutine profile') + ap.add_argument('--period', default=None, help='seconds between automatically capturing') + ap.add_argument('-o', '--out', default=None, help='directory to write to') + ap.add_argument('--verbose', default=False, action='store_true') + args = ap.parse_args() + + if args.verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + app = watcher(args) + + # get a first snapshot immediately + start = time.time() + now = start + + app.do_snap(now) + + if args.period: + lastc = args.period.lower()[-1:] + if lastc == 's': + periodSecs = int(args.period[:-1]) + elif lastc == 'm': + periodSecs = int(args.period[:-1]) * 60 + elif lastc == 'h': + periodSecs = int(args.period[:-1]) * 3600 + else: + periodSecs = int(args.period) + + periodi = 1 + nextt = start + (periodi * periodSecs) + while not graceful_stop: + while nextt < now: + nextt = start + (periodi * periodSecs) + while now < nextt - (periodSecs * 0.05): + logger.debug('sleep %f', nextt - now) + time.sleep(nextt - 
now) + if graceful_stop: + return + now = time.time() + periodi += 1 + nextt += periodSecs + app.do_snap(now) + +if __name__ == '__main__': + sys.exit(main()) diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh new file mode 100755 index 0000000000..ee04ed853a --- /dev/null +++ b/test/heapwatch/start.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e +set -o pipefail +set -x +export SHELLOPTS + +TESTDIR=$1 +if [ -z "${TESTDIR}" ]; then + TESTDIR=/tmp/heap_testnetwork +fi + +REPO_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"/../.. + +goal network create -r "${TESTDIR}" -t "${REPO_ROOT}/test/testdata/nettemplates/ThreeNodesEvenDist.json" -n tbd + +goal network start -r "${TESTDIR}" + +# give all the algod a moment... +sleep 2 + +mkdir -p "${TESTDIR}/heaps" +python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --period 10m "${TESTDIR}/"* & + +echo "$!" > .heapWatch.pid + +pingpong run -d "${TESTDIR}/Node1" --tps 10 --rest 0 --run 0 --nftasapersecond 200 & + +echo "$!" > .pingpong1.pid + +pingpong run -d "${TESTDIR}/Node2" --tps 10 --rest 0 --run 0 --nftasapersecond 200 & + +echo "$!" 
> .pingpong2.pid diff --git a/test/heapwatch/stop.sh b/test/heapwatch/stop.sh new file mode 100755 index 0000000000..146d8adcca --- /dev/null +++ b/test/heapwatch/stop.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# +# clean up what was started by heapstart.sh + +set -e +set -o pipefail +set -x +export SHELLOPTS + +if [ -f .heapWatch.pid ]; then + kill $(cat .heapWatch.pid) || true +fi + +for i in .pingpong*.pid; do + kill $(cat $i) || true + rm -f "${i}" +done + +TESTDIR=$1 +if [ -z "${TESTDIR}" ]; then + TESTDIR=/tmp/heap_testnetwork +fi + +goal network stop -r "${TESTDIR}" + +if [ -f .heapWatch.pid ]; then + kill -9 $(cat .heapWatch.pid) || true + rm -f .heapWatch.pid +fi From 310e05a092284becbb5b761152c25e344860aba8 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 1 Apr 2021 00:12:03 -0400 Subject: [PATCH 157/215] ledger: split committed round range by consensus protocol version (#2027) ledger: split committed round range by consensus protocol version --- ledger/accountdb.go | 6 +- ledger/accountdb_test.go | 2 +- ledger/acctupdates.go | 44 +++++-- ledger/acctupdates_test.go | 255 +++++++++++++++++++++++++++++++++++++ 4 files changed, 290 insertions(+), 17 deletions(-) diff --git a/ledger/accountdb.go b/ledger/accountdb.go index 1c37bb8132..a2079895e5 100644 --- a/ledger/accountdb.go +++ b/ledger/accountdb.go @@ -1136,7 +1136,7 @@ func accountsNewRound(tx *sql.Tx, updates compactAccountDeltas, creatables map[b } // totalsNewRounds updates the accountsTotals by applying series of round changes -func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpdates compactAccountDeltas, accountTotals []ledgercore.AccountTotals, protos []config.ConsensusParams) (err error) { +func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpdates compactAccountDeltas, accountTotals []ledgercore.AccountTotals, proto config.ConsensusParams) (err error) { var ot basics.OverflowTracker totals, err := accountsTotals(tx, false) if err != nil { @@ 
-1157,13 +1157,13 @@ func totalsNewRounds(tx *sql.Tx, updates []ledgercore.AccountDeltas, compactUpda addr, data := updates[i].GetByIdx(j) if oldAccountData, has := accounts[addr]; has { - totals.DelAccount(protos[i], oldAccountData, &ot) + totals.DelAccount(proto, oldAccountData, &ot) } else { err = fmt.Errorf("missing old account data") return } - totals.AddAccount(protos[i], data, &ot) + totals.AddAccount(proto, data, &ot) accounts[addr] = data } } diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go index 0907fa897e..707117ca48 100644 --- a/ledger/accountdb_test.go +++ b/ledger/accountdb_test.go @@ -552,7 +552,7 @@ func TestAccountDBRound(t *testing.T) { updatesCnt := makeCompactAccountDeltas([]ledgercore.AccountDeltas{updates}, baseAccounts) err = updatesCnt.accountsLoadOld(tx) require.NoError(t, err) - err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, []config.ConsensusParams{proto}) + err = totalsNewRounds(tx, []ledgercore.AccountDeltas{updates}, updatesCnt, []ledgercore.AccountTotals{{}}, proto) require.NoError(t, err) _, err = accountsNewRound(tx, updatesCnt, ctbsWithDeletes, proto, basics.Round(i)) require.NoError(t, err) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 1e7daef2ab..a5decac61a 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -127,7 +127,7 @@ type accountUpdates struct { // initAccounts specifies initial account values for database. initAccounts map[basics.Address]basics.AccountData - // initProto specifies the initial consensus parameters. + // initProto specifies the initial consensus parameters at the genesis block. 
initProto config.ConsensusParams // dbDirectory is the directory where the ledger and block sql file resides as well as the parent directroy for the catchup files to be generated @@ -172,9 +172,9 @@ type accountUpdates struct { // appears in creatableDeltas creatables map[basics.CreatableIndex]ledgercore.ModifiedCreatable - // protos stores consensus parameters dbRound and every - // round after it; i.e., protos is one longer than deltas. - protos []config.ConsensusParams + // versions stores consensus version dbRound and every + // round after it; i.e., versions is one longer than deltas. + versions []protocol.ConsensusVersion // totals stores the totals for dbRound and every round after it; // i.e., totals is one longer than deltas. @@ -651,11 +651,10 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b au.committedOffset <- dc } }() - retRound = basics.Round(0) var pendingDeltas int - lookback := basics.Round(au.protos[len(au.protos)-1].MaxBalLookback) + lookback := basics.Round(config.Consensus[au.versions[len(au.versions)-1]].MaxBalLookback) if committedRound < lookback { return } @@ -712,6 +711,19 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b offset = uint64(newBase - au.dbRound) + // check if this update chunk spans across multiple consensus versions. If so, break it so that each update would tackle only a single + // consensus version. + if au.versions[1] != au.versions[offset] { + // find the tip point. + tipPoint := sort.Search(int(offset), func(i int) bool { + // we're going to search here for version inequality, with the assumption that consensus versions won't repeat. + // that allow us to support [ver1, ver1, ..., ver2, ver2, ..., ver3, ver3] but not [ver1, ver1, ..., ver2, ver2, ..., ver1, ver3]. + return au.versions[1] != au.versions[1+i] + }) + // no need to handle the case of "no found", or tipPoint==int(offset), since we already know that it's there. 
+ offset = uint64(tipPoint) + } + // check to see if this is a catchpoint round isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval)) @@ -1134,7 +1146,8 @@ func (au *accountUpdates) initializeFromDisk(l ledgerForTracker) (lastBalancesRo if err != nil { return } - au.protos = []config.ConsensusParams{config.Consensus[hdr.CurrentProtocol]} + + au.versions = []protocol.ConsensusVersion{hdr.CurrentProtocol} au.deltas = nil au.creatableDeltas = nil au.accounts = make(map[basics.Address]modifiedAccount) @@ -1572,7 +1585,7 @@ func (au *accountUpdates) newBlockImpl(blk bookkeeping.Block, delta ledgercore.S au.log.Panicf("accountUpdates: newBlockImpl %d too far in the future, dbRound %d, deltas %d", rnd, au.dbRound, len(au.deltas)) } au.deltas = append(au.deltas, delta.Accts) - au.protos = append(au.protos, proto) + au.versions = append(au.versions, blk.CurrentProtocol) au.creatableDeltas = append(au.creatableDeltas, delta.Creatables) au.roundDigest = append(au.roundDigest, blk.Digest()) au.deltasAccum = append(au.deltasAccum, delta.Accts.Len()+au.deltasAccum[len(au.deltasAccum)-1]) @@ -1662,7 +1675,7 @@ func (au *accountUpdates) lookupWithRewards(rnd basics.Round, addr basics.Addres return } - rewardsProto = au.protos[offset] + rewardsProto = config.Consensus[au.versions[offset]] rewardsLevel = au.roundTotals[offset].RewardsLevel // we're testing the withRewards here and setting the defer function only once, and only if withRewards is true. 
@@ -1981,11 +1994,16 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb deltas := make([]ledgercore.AccountDeltas, offset, offset) creatableDeltas := make([]map[basics.CreatableIndex]ledgercore.ModifiedCreatable, offset, offset) roundTotals := make([]ledgercore.AccountTotals, offset+1, offset+1) - protos := make([]config.ConsensusParams, offset+1, offset+1) copy(deltas, au.deltas[:offset]) copy(creatableDeltas, au.creatableDeltas[:offset]) copy(roundTotals, au.roundTotals[:offset+1]) - copy(protos, au.protos[:offset+1]) + + // verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that. + if au.versions[1] != au.versions[offset] { + au.log.Errorf("attempted to commit series of rounds with non-uniform consensus versions") + return + } + consensusVersion := au.versions[1] var committedRoundDigest crypto.Digest @@ -2045,7 +2063,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb return err } - err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], protos[1:offset+1]) + err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], config.Consensus[consensusVersion]) if err != nil { return err } @@ -2145,7 +2163,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb au.deltas = au.deltas[offset:] au.deltasAccum = au.deltasAccum[offset:] au.roundDigest = au.roundDigest[offset:] - au.protos = au.protos[offset:] + au.versions = au.versions[offset:] au.roundTotals = au.roundTotals[offset:] au.creatableDeltas = au.creatableDeltas[offset:] au.dbRound = newBase diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index 3e9fb05702..f6b422e5ec 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -1637,3 +1637,258 @@ func TestCachesInitialization(t *testing.T) { require.Equal(t, 
int(proto.MaxBalLookback), len(au.deltas)) require.Equal(t, recoveredLedgerRound-basics.Round(proto.MaxBalLookback), au.dbRound) } + +// TestSplittingConsensusVersionCommits tests the a sequence of commits that spans over multiple consensus versions works correctly. +func TestSplittingConsensusVersionCommits(t *testing.T) { + initProtocolVersion := protocol.ConsensusV20 + initialProtoParams := config.Consensus[initProtocolVersion] + + initialRounds := uint64(1) + + ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion) + ml.log.SetLevel(logging.Warn) + defer ml.Close() + + accountsCount := 5 + accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)} + rewardsLevels := []uint64{0} + + pooldata := basics.AccountData{} + pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 * 1000 * 1000 + pooldata.Status = basics.NotParticipating + accts[0][testPoolAddr] = pooldata + + sinkdata := basics.AccountData{} + sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 + sinkdata.Status = basics.NotParticipating + accts[0][testSinkAddr] = sinkdata + + au := &accountUpdates{} + au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0]) + err := au.loadFromDisk(ml) + require.NoError(t, err) + defer au.close() + + // cover initialRounds genesis blocks + rewardLevel := uint64(0) + for i := 1; i < int(initialRounds); i++ { + accts = append(accts, accts[0]) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + extraRounds := uint64(39) + + // write the extraRounds rounds so that we will fill up the queue. 
+ for i := basics.Round(initialRounds); i < basics.Round(initialRounds+extraRounds); i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + accountChanges := 2 + + updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) + prevTotals, err := au.Totals(basics.Round(i - 1)) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = initProtocolVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.addMockBlock(blockEntry{block: blk}, delta) + au.newBlock(blk, delta) + accts = append(accts, totals) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + newVersionBlocksCount := uint64(47) + newVersion := protocol.ConsensusV21 + // add 47 more rounds that contains blocks using a newer consensus version, and stuff it with MaxBalLookback + lastRoundToWrite := basics.Round(initialRounds + initialProtoParams.MaxBalLookback + extraRounds + newVersionBlocksCount) + for i := basics.Round(initialRounds + extraRounds); i < lastRoundToWrite; i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + accountChanges := 2 + + updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) + prevTotals, err := au.Totals(basics.Round(i - 1)) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol 
= newVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.addMockBlock(blockEntry{block: blk}, delta) + au.newBlock(blk, delta) + accts = append(accts, totals) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + // now, commit and verify that the committedUpTo method broken the range correctly. + au.committedUpTo(lastRoundToWrite) + au.waitAccountsWriting() + require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound) + +} + +// TestSplittingConsensusVersionCommitsBoundry tests the a sequence of commits that spans over multiple consensus versions works correctly, and +// in particular, complements TestSplittingConsensusVersionCommits by testing the commit boundry. +func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) { + initProtocolVersion := protocol.ConsensusV20 + initialProtoParams := config.Consensus[initProtocolVersion] + + initialRounds := uint64(1) + + ml := makeMockLedgerForTracker(t, true, int(initialRounds), initProtocolVersion) + ml.log.SetLevel(logging.Warn) + defer ml.Close() + + accountsCount := 5 + accts := []map[basics.Address]basics.AccountData{randomAccounts(accountsCount, true)} + rewardsLevels := []uint64{0} + + pooldata := basics.AccountData{} + pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 * 1000 * 1000 + pooldata.Status = basics.NotParticipating + accts[0][testPoolAddr] = pooldata + + sinkdata := basics.AccountData{} + sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 + sinkdata.Status = basics.NotParticipating + accts[0][testSinkAddr] = sinkdata + + au := &accountUpdates{} + au.initialize(config.GetDefaultLocal(), ".", initialProtoParams, accts[0]) + err := au.loadFromDisk(ml) + require.NoError(t, err) + defer au.close() + + // cover initialRounds genesis blocks + rewardLevel := uint64(0) + for i := 1; i < int(initialRounds); i++ { + accts = append(accts, accts[0]) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + 
extraRounds := uint64(39) + + // write extraRounds rounds so that we will fill up the queue. + for i := basics.Round(initialRounds); i < basics.Round(initialRounds+extraRounds); i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + accountChanges := 2 + + updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) + prevTotals, err := au.Totals(basics.Round(i - 1)) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = initProtocolVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.addMockBlock(blockEntry{block: blk}, delta) + au.newBlock(blk, delta) + accts = append(accts, totals) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + + newVersion := protocol.ConsensusV21 + // add MaxBalLookback-extraRounds more rounds that contains blocks using a newer consensus version. 
+ endOfFirstNewProtocolSegment := basics.Round(initialRounds + extraRounds + initialProtoParams.MaxBalLookback) + for i := basics.Round(initialRounds + extraRounds); i <= endOfFirstNewProtocolSegment; i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + accountChanges := 2 + + updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) + prevTotals, err := au.Totals(basics.Round(i - 1)) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = newVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.addMockBlock(blockEntry{block: blk}, delta) + au.newBlock(blk, delta) + accts = append(accts, totals) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + // now, commit and verify that the committedUpTo method broken the range correctly. + au.committedUpTo(endOfFirstNewProtocolSegment) + au.waitAccountsWriting() + require.Equal(t, basics.Round(initialRounds+extraRounds)-1, au.dbRound) + + // write additional extraRounds elements and verify these can be flushed. 
+ for i := endOfFirstNewProtocolSegment + 1; i <= basics.Round(initialRounds+2*extraRounds+initialProtoParams.MaxBalLookback); i++ { + rewardLevelDelta := crypto.RandUint64() % 5 + rewardLevel += rewardLevelDelta + accountChanges := 2 + + updates, totals := randomDeltasBalanced(accountChanges, accts[i-1], rewardLevel) + prevTotals, err := au.Totals(basics.Round(i - 1)) + require.NoError(t, err) + + newPool := totals[testPoolAddr] + newPool.MicroAlgos.Raw -= prevTotals.RewardUnits() * rewardLevelDelta + updates.Upsert(testPoolAddr, newPool) + totals[testPoolAddr] = newPool + + blk := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + Round: basics.Round(i), + }, + } + blk.RewardsLevel = rewardLevel + blk.CurrentProtocol = newVersion + + delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0) + delta.Accts.MergeAccounts(updates) + ml.addMockBlock(blockEntry{block: blk}, delta) + au.newBlock(blk, delta) + accts = append(accts, totals) + rewardsLevels = append(rewardsLevels, rewardLevel) + } + au.committedUpTo(endOfFirstNewProtocolSegment + basics.Round(extraRounds)) + au.waitAccountsWriting() + require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.dbRound) +} From d1a3fe4d9b4797313b93952a01b7c50fea53be14 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 2 Apr 2021 10:50:51 -0400 Subject: [PATCH 158/215] Fix ClearState program applying when it errs (#2038) ## Summary * Apps logic needs to ignore result and error from logic evaluator but but fail on other errors * The bug happened in app refactor PR * Added unit and e2e test --- ledger/appcow.go | 3 +- ledger/apply/application.go | 16 +++++--- ledger/apply/application_test.go | 51 +++++++++++++++++++++---- ledger/ledgercore/error.go | 10 +++++ test/scripts/e2e_subs/e2e-app-simple.sh | 20 ++++++++++ 5 files changed, 86 insertions(+), 14 deletions(-) diff --git a/ledger/appcow.go b/ledger/appcow.go index aa0519defd..f5ca8ae5c3 
100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -25,6 +25,7 @@ import ( "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/ledger/apply" + "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" ) @@ -419,7 +420,7 @@ func (cb *roundCowState) StatefulEval(params logic.EvalParams, aidx basics.AppIn // Eval the program pass, err = logic.EvalStateful(program, params) if err != nil { - return false, basics.EvalDelta{}, err + return false, basics.EvalDelta{}, ledgercore.LogicEvalError{Err: err} } // If program passed, build our eval delta, and commit to state changes diff --git a/ledger/apply/application.go b/ledger/apply/application.go index d593eed6e5..e3665e1a7e 100644 --- a/ledger/apply/application.go +++ b/ledger/apply/application.go @@ -22,6 +22,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/ledgercore" ) // Allocate the map of basics.AppParams if it is nil, and return a copy. 
We do *not* @@ -368,14 +369,19 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio if exists { pass, evalDelta, err := balances.StatefulEval(*evalParams, appIdx, params.ClearStateProgram) if err != nil { - return err + // Fail on non-logic eval errors and ignore LogicEvalError errors + if _, ok := err.(ledgercore.LogicEvalError); !ok { + return err + } } - // Fill in applyData, so that consumers don't have to implement a - // stateful TEAL interpreter to apply state changes - if pass { - // We will have applied any changes if and only if we passed + // We will have applied any changes if and only if we passed + if err == nil && pass { + // Fill in applyData, so that consumers don't have to implement a + // stateful TEAL interpreter to apply state changes ad.EvalDelta = evalDelta + } else { + // Ignore logic eval errors and rejections from the ClearStateProgram } } diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go index 65d74cae7a..23f1a49090 100644 --- a/ledger/apply/application_test.go +++ b/ledger/apply/application_test.go @@ -28,6 +28,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/protocol" ) @@ -110,6 +111,7 @@ type testBalances struct { // logic evaluator control pass bool delta basics.EvalDelta + err error } type testBalancesPass struct { @@ -182,7 +184,7 @@ func (b *testBalances) Deallocate(addr basics.Address, aidx basics.AppIndex, glo } func (b *testBalances) StatefulEval(params logic.EvalParams, aidx basics.AppIndex, program []byte) (passed bool, evalDelta basics.EvalDelta, err error) { - return b.pass, b.delta, nil + return b.pass, b.delta, b.err } func (b *testBalancesPass) Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) { @@ -665,7 +667,7 
@@ func TestAppCallClearState(t *testing.T) { ac := transactions.ApplicationCallTxnFields{ ApplicationID: appIdx, - OnCompletion: transactions.CloseOutOC, + OnCompletion: transactions.ClearStateOC, } params := basics.AppParams{ ApprovalProgram: []byte{1}, @@ -689,7 +691,7 @@ func TestAppCallClearState(t *testing.T) { b.balances[sender] = basics.AccountData{} err := ApplicationCall(ac, h, &b, ad, &ep, txnCounter) a.Error(err) - a.Contains(err.Error(), "is not opted in to app") + a.Contains(err.Error(), "is not currently opted in to app") a.Equal(0, b.put) a.Equal(0, b.putWith) @@ -741,8 +743,8 @@ func TestAppCallClearState(t *testing.T) { b.appCreators[appIdx] = creator // one put: to opt out - gd := basics.StateDelta{"uint": {Action: basics.SetUintAction, Uint: 1}} - b.delta = basics.EvalDelta{GlobalDelta: gd} + b.pass = false + b.delta = basics.EvalDelta{GlobalDelta: nil} err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter) a.NoError(err) a.Equal(1, b.put) @@ -750,13 +752,46 @@ func TestAppCallClearState(t *testing.T) { br = b.putBalances[sender] a.Equal(0, len(br.AppLocalStates)) a.Equal(basics.StateSchema{}, br.TotalAppSchema) - a.Equal(gd, ad.EvalDelta.GlobalDelta) + a.Equal(basics.StateDelta(nil), ad.EvalDelta.GlobalDelta) b.ResetWrites() - // check existing application with successful ClearStateProgram. two - // one to opt out, one deallocate + // check existing application with logic err ClearStateProgram. 
+ // one to opt out, one deallocate, no error from ApplicationCall b.pass = true + b.delta = basics.EvalDelta{GlobalDelta: nil} + b.err = ledgercore.LogicEvalError{Err: fmt.Errorf("test error")} + err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter) + a.NoError(err) + a.Equal(1, b.put) + a.Equal(0, b.putWith) + br = b.putBalances[sender] + a.Equal(0, len(br.AppLocalStates)) + a.Equal(basics.StateSchema{}, br.TotalAppSchema) + a.Equal(basics.StateDelta(nil), ad.EvalDelta.GlobalDelta) + + b.ResetWrites() + + // check existing application with non-logic err ClearStateProgram. + // ApplicationCall must fail + b.pass = true + b.delta = basics.EvalDelta{GlobalDelta: nil} + b.err = fmt.Errorf("test error") + err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter) + a.Error(err) + br = b.putBalances[sender] + a.Equal(0, len(br.AppLocalStates)) + a.Equal(basics.StateSchema{}, br.TotalAppSchema) + a.Equal(basics.StateDelta(nil), ad.EvalDelta.GlobalDelta) + + b.ResetWrites() + + // check existing application with successful ClearStateProgram. 
+ // one to opt out, one deallocate, no error from ApplicationCall + b.pass = true + b.err = nil + gd := basics.StateDelta{"uint": {Action: basics.SetUintAction, Uint: 1}} + b.delta = basics.EvalDelta{GlobalDelta: gd} err = ApplicationCall(ac, h, &b, ad, &ep, txnCounter) a.NoError(err) a.Equal(1, b.put) diff --git a/ledger/ledgercore/error.go b/ledger/ledgercore/error.go index ae53934416..65cbcff023 100644 --- a/ledger/ledgercore/error.go +++ b/ledger/ledgercore/error.go @@ -76,3 +76,13 @@ type ErrNoEntry struct { func (err ErrNoEntry) Error() string { return fmt.Sprintf("ledger does not have entry %d (latest %d, committed %d)", err.Round, err.Latest, err.Committed) } + +// LogicEvalError indicates TEAL evaluation failure +type LogicEvalError struct { + Err error +} + +// Error satisfies builtin interface `error` +func (err LogicEvalError) Error() string { + return fmt.Sprintf("logic eval error: %v", err.Err) +} diff --git a/test/scripts/e2e_subs/e2e-app-simple.sh b/test/scripts/e2e_subs/e2e-app-simple.sh index d67486c10d..e770ee872e 100755 --- a/test/scripts/e2e_subs/e2e-app-simple.sh +++ b/test/scripts/e2e_subs/e2e-app-simple.sh @@ -101,3 +101,23 @@ if [[ $RES != *"${EXPERROR}"* ]]; then date '+app-create-test FAIL clearing state twice should fail %Y%m%d_%H%M%S' false fi + +# Create an application with clear program always errs +# Ensure clear still works +APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog <(printf '#pragma version 2\nint 1') --clear-prog <(printf '#pragma version 2\nerr') --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }') + +# Should succeed to opt in +${gcmd} app optin --app-id $APPID --from $ACCOUNT + +# Succeed in clearing state for the app +${gcmd} app clear --app-id $APPID --from $ACCOUNT + +# Create an application with clear program always fails +# Ensure clear still works +APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog <(printf '#pragma version 
2\nint 1') --clear-prog <(printf '#pragma version 2\nint 0') --global-byteslices 0 --global-ints 0 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }') + +# Should succeed to opt in +${gcmd} app optin --app-id $APPID --from $ACCOUNT + +# Succeed in clearing state for the app +${gcmd} app clear --app-id $APPID --from $ACCOUNT From a21ba27b3ad8b9ec85c348dd184d32419484878f Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 2 Apr 2021 16:37:19 -0400 Subject: [PATCH 159/215] Fix for empty local state key value map assignment (#2037) ## Summary * Opting in does not allocate key value map * State delta merging code assumed the map in old AD was allocated * Added that missed allocation, and a test verifying combinations * Inspected and removed TODOs from applyStorageDelta --- ledger/appcow.go | 18 +-- ledger/applications_test.go | 235 ++++++++++++++++++++++++++++++++++++ 2 files changed, 246 insertions(+), 7 deletions(-) diff --git a/ledger/appcow.go b/ledger/appcow.go index f5ca8ae5c3..0286bb6126 100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -567,16 +567,18 @@ func applyStorageDelta(data basics.AccountData, aapp storagePtr, store *storageD case deallocAction: delete(owned, aapp.aidx) case allocAction, remainAllocAction: - // TODO verify this assertion // note: these should always exist because they were - // at least preceded by a call to PutWithCreatable? + // at least preceded by a call to PutWithCreatable params, ok := owned[aapp.aidx] if !ok { return basics.AccountData{}, fmt.Errorf("could not find existing params for %v", aapp.aidx) } params = params.Clone() - if store.action == allocAction { - // TODO does this ever accidentally clobber? 
+ if (store.action == allocAction && len(store.kvCow) > 0) || + (store.action == remainAllocAction && params.GlobalState == nil) { + // allocate KeyValue for + // 1) app creation and global write in the same app call + // 2) global state writing into empty global state params.GlobalState = make(basics.TealKeyValue) } // note: if this is an allocAction, there will be no @@ -603,7 +605,6 @@ func applyStorageDelta(data basics.AccountData, aapp storagePtr, store *storageD case deallocAction: delete(owned, aapp.aidx) case allocAction, remainAllocAction: - // TODO verify this assertion // note: these should always exist because they were // at least preceded by a call to Put? states, ok := owned[aapp.aidx] @@ -611,8 +612,11 @@ func applyStorageDelta(data basics.AccountData, aapp storagePtr, store *storageD return basics.AccountData{}, fmt.Errorf("could not find existing states for %v", aapp.aidx) } states = states.Clone() - if store.action == allocAction { - // TODO does this ever accidentally clobber? 
+ if (store.action == allocAction && len(store.kvCow) > 0) || + (store.action == remainAllocAction && states.KeyValue == nil) { + // allocate KeyValue for + // 1) opting in and local state write in the same app call + // 2) local state writing into empty local state (opted in) states.KeyValue = make(basics.TealKeyValue) } // note: if this is an allocAction, there will be no diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 8e4358f76f..0e827940e9 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -18,14 +18,19 @@ package ledger import ( "crypto/rand" + "encoding/hex" "fmt" "testing" "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" ) func getRandomAddress(a *require.Assertions) basics.Address { @@ -332,3 +337,233 @@ func TestLogicLedgerDelKey(t *testing.T) { err = l.DelLocal(addr1, "lkey") a.NoError(err) } + +// test ensures that +// 1) app's GlobalState and local state's KeyValue are stored in the same way +// before and after application code refactoring +// 2) writing into empty (opted-in) local state's KeyValue works after reloading +// Hardcoded values are from commit 9a0b439 (pre app refactor commit) +func TestAppAccountDataStorage(t *testing.T) { + a := require.New(t) + source := `#pragma version 2 +// do not write local key on opt in or on app create +txn ApplicationID +int 0 +== +bnz success +txn OnCompletion +int NoOp +== +bnz writetostate +txn OnCompletion +int OptIn +== +bnz checkargs +int 0 +return +checkargs: +// if no args the success +// otherwise write data +txn NumAppArgs +int 0 +== +bnz success +// write local or global key depending on arg1 +writetostate: 
+txna ApplicationArgs 0 +byte "local" +== +bnz writelocal +txna ApplicationArgs 0 +byte "global" +== +bnz writeglobal +int 0 +return +writelocal: +int 0 +byte "lk" +byte "local" +app_local_put +b success +writeglobal: +byte "gk" +byte "global" +app_global_put +success: +int 1 +return` + + ops, err := logic.AssembleString(source) + a.NoError(err) + a.Greater(len(ops.Program), 1) + program := ops.Program + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + + creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") + a.NoError(err) + userOptin, err := basics.UnmarshalChecksumAddress("6S6UMUQ4462XRGNON5GKBHW55RUJGJ5INIRDFVFD6KSPHGWGRKPC6RK2O4") + a.NoError(err) + userLocal, err := basics.UnmarshalChecksumAddress("UL5C6SRVLOROSB5FGAE6TY34VXPXVR7GNIELUB3DD5KTA4VT6JGOZ6WFAY") + a.NoError(err) + userLocal2, err := basics.UnmarshalChecksumAddress("XNOGOJECWDOMVENCDJHNMOYVV7PIVIJXRWTSZUA3GSKYTVXH3VVGOXP7CU") + a.NoError(err) + + a.Contains(genesisInitState.Accounts, creator) + a.Contains(genesisInitState.Accounts, userOptin) + a.Contains(genesisInitState.Accounts, userLocal) + a.Contains(genesisInitState.Accounts, userLocal2) + + expectedCreator, err := hex.DecodeString("84a4616c676fce009d2290a461707070810184a6617070726f76c45602200200012604056c6f63616c06676c6f62616c026c6b02676b3118221240003331192212400010311923124000022243311b221240001c361a00281240000a361a0029124000092243222a28664200032b29672343a6636c65617270c40102a46773636881a36e627304a46c73636881a36e627301a36f6e6c01a47473636881a36e627304") + a.NoError(err) + expectedUserOptIn, err := hex.DecodeString("84a4616c676fce00a02fd0a46170706c810181a46873636881a36e627301a36f6e6c01a47473636881a36e627301") + a.NoError(err) + expectedUserLocal, err := 
hex.DecodeString("84a4616c676fce00a33540a46170706c810182a46873636881a36e627301a3746b7681a26c6b82a27462a56c6f63616ca2747401a36f6e6c01a47473636881a36e627301") + a.NoError(err) + + cfg := config.GetDefaultLocal() + l, err := OpenLedger(logging.Base(), "TestAppAccountData", true, genesisInitState, cfg) + a.NoError(err) + defer l.Close() + + txHeader := transactions.Header{ + Sender: creator, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: t.Name(), + GenesisHash: genesisInitState.GenesisHash, + } + + // create application + approvalProgram := program + clearStateProgram := []byte("\x02") // empty + appCreateFields := transactions.ApplicationCallTxnFields{ + ApprovalProgram: approvalProgram, + ClearStateProgram: clearStateProgram, + GlobalStateSchema: basics.StateSchema{NumByteSlice: 4}, + LocalStateSchema: basics.StateSchema{NumByteSlice: 1}, + } + appCreate := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCreateFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{}) + a.NoError(err) + + appIdx := basics.AppIndex(1) // first tnx => idx = 1 + + // opt-in, do no write + txHeader.Sender = userOptin + appCallFields := transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.OptInOC, + ApplicationID: appIdx, + } + appCall := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, transactions.ApplyData{}) + a.NoError(err) + + // opt-in + write + txHeader.Sender = userLocal + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, 
transactions.ApplyData{}) + a.NoError(err) + + // save data into DB and write into local state + l.accts.accountsWriting.Add(1) + l.accts.commitRound(3, 0, 0) + l.reloadLedger() + + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: 0, + ApplicationID: appIdx, + ApplicationArgs: [][]byte{[]byte("local")}, + } + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, + transactions.ApplyData{EvalDelta: basics.EvalDelta{ + LocalDeltas: map[uint64]basics.StateDelta{0: {"lk": basics.ValueDelta{Action: basics.SetBytesAction, Bytes: "local"}}}}, + }) + a.NoError(err) + + // save data into DB + l.accts.accountsWriting.Add(1) + l.accts.commitRound(1, 3, 0) + l.reloadLedger() + + // dump accounts + var rowid int64 + var dbRound basics.Round + var buf []byte + err = l.accts.accountsq.lookupStmt.QueryRow(creator[:]).Scan(&rowid, &dbRound, &buf) + a.NoError(err) + a.Equal(expectedCreator, buf) + + err = l.accts.accountsq.lookupStmt.QueryRow(userOptin[:]).Scan(&rowid, &dbRound, &buf) + a.NoError(err) + a.Equal(expectedUserOptIn, buf) + pad, err := l.accts.accountsq.lookup(userOptin) + a.Nil(pad.accountData.AppLocalStates[appIdx].KeyValue) + ad, err := l.Lookup(dbRound, userOptin) + a.Nil(ad.AppLocalStates[appIdx].KeyValue) + + err = l.accts.accountsq.lookupStmt.QueryRow(userLocal[:]).Scan(&rowid, &dbRound, &buf) + a.NoError(err) + a.Equal(expectedUserLocal, buf) + + ad, err = l.Lookup(dbRound, userLocal) + a.NoError(err) + a.Equal("local", ad.AppLocalStates[appIdx].KeyValue["lk"].Bytes) + + // ensure writing into empty global state works as well + l.reloadLedger() + txHeader.Sender = creator + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: 0, + ApplicationID: appIdx, + ApplicationArgs: [][]byte{[]byte("global")}, + } + appCall = transactions.Transaction{ + Type: 
protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, + transactions.ApplyData{EvalDelta: basics.EvalDelta{ + GlobalDelta: basics.StateDelta{"gk": basics.ValueDelta{Action: basics.SetBytesAction, Bytes: "global"}}}, + }) + a.NoError(err) + + // opt-in + write by during opt-in + txHeader.Sender = userLocal2 + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.OptInOC, + ApplicationID: appIdx, + ApplicationArgs: [][]byte{[]byte("local")}, + } + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, + transactions.ApplyData{EvalDelta: basics.EvalDelta{ + LocalDeltas: map[uint64]basics.StateDelta{0: {"lk": basics.ValueDelta{Action: basics.SetBytesAction, Bytes: "local"}}}}, + }) + a.NoError(err) +} From 76c251895a0fc79cf53fc071094eb04d64063f6b Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Tue, 6 Apr 2021 09:30:59 -0400 Subject: [PATCH 160/215] Fix node panicing during shutdown. (#2043) Fix node panicing during shutdown due to unsynchronized compactcert database access. --- node/node.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index 57784f487a..760ea6b509 100644 --- a/node/node.go +++ b/node/node.go @@ -410,6 +410,10 @@ func (node *AlgorandFullNode) Stop() { defer func() { node.mu.Unlock() node.waitMonitoringRoutines() + // we want to shut down the compactCert last, since the oldKeyDeletionThread might depend on it when making the + // call to LatestSigsFromThisNode. 
+ node.compactCert.Shutdown() + node.compactCert = nil }() node.net.ClearHandlers() @@ -429,7 +433,6 @@ func (node *AlgorandFullNode) Stop() { node.lowPriorityCryptoVerificationPool.Shutdown() node.cryptoPool.Shutdown() node.cancelCtx() - node.compactCert.Shutdown() if node.indexer != nil { node.indexer.Shutdown() } From a2c8993484056368875a9e6ef8223e41f01fc8a5 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 8 Apr 2021 13:21:10 -0400 Subject: [PATCH 161/215] bugfix: ensure loading of a merkle trie deferred page during commit (#2049) When using the MerkleTrie, the trie avoid making loads from disk for pages that aren't needed. In particular, it won't load the latest page ( known as the deferred page ) until it needs to commit it. The implementation had a bug in the `loadPage` method, where it would reset the loading page flag incorrectly. --- crypto/merkletrie/cache.go | 7 ++++--- crypto/merkletrie/cache_test.go | 33 +++++++++++++++++++++++++++++++++ ledger/accountdb.go | 4 ++-- 3 files changed, 39 insertions(+), 5 deletions(-) diff --git a/crypto/merkletrie/cache.go b/crypto/merkletrie/cache.go index 02e3dac0bf..e4dc387dcf 100644 --- a/crypto/merkletrie/cache.go +++ b/crypto/merkletrie/cache.go @@ -109,8 +109,9 @@ func (mtc *merkleTrieCache) initialize(mt *Trie, committer Committer, memoryConf mtc.targetPageFillFactor = memoryConfig.PageFillFactor mtc.maxChildrenPagesThreshold = memoryConfig.MaxChildrenPagesThreshold if mt.nextNodeID != storedNodeIdentifierBase { - // if the next node is going to be on a new page, no need to reload the last page. - if (int64(mtc.mt.nextNodeID) / mtc.nodesPerPage) == (int64(mtc.mt.nextNodeID-1) / mtc.nodesPerPage) { + // If the next node would reside on a page that already has a few entries in it, make sure to mark it for late loading. + // Otherwise, the next node is going to be the first node on this page, we don't need to reload that page ( since it doesn't exist! ). 
+ if (int64(mtc.mt.nextNodeID) % mtc.nodesPerPage) > 0 { mtc.deferedPageLoad = uint64(mtc.mt.nextNodeID) / uint64(mtc.nodesPerPage) } } @@ -262,7 +263,7 @@ func (mtc *merkleTrieCache) loadPage(page uint64) (err error) { } // if we've just loaded a deferred page, no need to reload it during the commit. - if mtc.deferedPageLoad != page { + if mtc.deferedPageLoad == page { mtc.deferedPageLoad = storedNodeIdentifierNull } return diff --git a/crypto/merkletrie/cache_test.go b/crypto/merkletrie/cache_test.go index f54354aa93..8b50d8ea75 100644 --- a/crypto/merkletrie/cache_test.go +++ b/crypto/merkletrie/cache_test.go @@ -454,3 +454,36 @@ func TestCachePagedOutTip(t *testing.T) { page = uint64(mt1.root) / uint64(memConfig.NodesCountPerPage) require.NotNil(t, mt1.cache.pageToNIDsPtr[page]) } + +// TestCacheLoadingDeferedPage verifies that the loadPage +// method correcly resets the mtc.deferedPageLoad on the correct page. +func TestCacheLoadingDeferedPage(t *testing.T) { + var memoryCommitter1 InMemoryCommitter + mt1, _ := MakeTrie(&memoryCommitter1, defaultTestMemoryConfig) + // create 100000 hashes. + leafsCount := 100000 + hashes := make([]crypto.Digest, leafsCount) + for i := 0; i < len(hashes); i++ { + hashes[i] = crypto.Hash([]byte{byte(i % 256), byte((i / 256) % 256), byte(i / 65536)}) + } + + for i := 0; i < len(hashes); i++ { + mt1.Add(hashes[i][:]) + } + _, err := mt1.Commit() + require.NoError(t, err) + + // verify that the cache doesn't reset the mtc.deferedPageLoad on loading a non-defered page. + dupMem := memoryCommitter1.Duplicate() + mt2, _ := MakeTrie(dupMem, defaultTestMemoryConfig) + lastPage := int64(mt2.nextNodeID) / defaultTestMemoryConfig.NodesCountPerPage + require.Equal(t, uint64(lastPage), mt2.cache.deferedPageLoad) + err = mt2.cache.loadPage(uint64(lastPage - 1)) + require.NoError(t, err) + require.Equal(t, uint64(lastPage), mt2.cache.deferedPageLoad) + + // verify that the cache does reset the mtc.deferedPageLoad on loading a defered page. 
+ err = mt2.cache.loadPage(uint64(lastPage)) + require.NoError(t, err) + require.Equal(t, uint64(0), mt2.cache.deferedPageLoad) +} diff --git a/ledger/accountdb.go b/ledger/accountdb.go index a2079895e5..e0aa74025b 100644 --- a/ledger/accountdb.go +++ b/ledger/accountdb.go @@ -1341,7 +1341,7 @@ func makeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err err return mc, nil } -// StorePage stores a single page in an in-memory persistence. +// StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqllite database table. func (mc *merkleCommitter) StorePage(page uint64, content []byte) error { if len(content) == 0 { _, err := mc.deleteStmt.Exec(page) @@ -1351,7 +1351,7 @@ func (mc *merkleCommitter) StorePage(page uint64, content []byte) error { return err } -// LoadPage load a single page from an in-memory persistence. +// LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqllite database table. func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) { err = mc.selectStmt.QueryRow(page).Scan(&content) if err == sql.ErrNoRows { From a971305b65ff8d7dfdd9adfe36c48c97b69d2d19 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 8 Apr 2021 13:21:33 -0400 Subject: [PATCH 162/215] Fix requestNonce alignment on arm32 (#2051) On ARM32, all 64 bit atomic operations need to be using a 64-bit aligned address. This PR moves the requestNonce to a 64 bit aligned address. --- network/wsPeer.go | 6 +++--- network/wsPeer_test.go | 11 +++++++++++ 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/network/wsPeer.go b/network/wsPeer.go index 9e665a2c77..a3f2489788 100644 --- a/network/wsPeer.go +++ b/network/wsPeer.go @@ -143,6 +143,9 @@ type wsPeer struct { // peer, or zero if no message is being written. 
intermittentOutgoingMessageEnqueueTime int64 + // Nonce used to uniquely identify requests + requestNonce uint64 + wsPeerCore // conn will be *websocket.Conn (except in testing) @@ -191,9 +194,6 @@ type wsPeer struct { // peer version ( this is one of the version supported by the current node and listed in SupportedProtocolVersions ) version string - // Nonce used to uniquely identify requests - requestNonce uint64 - // responseChannels used by the client to wait on the response of the request responseChannels map[uint64]chan *Response diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go index b4165a41c3..fabfe21b39 100644 --- a/network/wsPeer_test.go +++ b/network/wsPeer_test.go @@ -20,6 +20,7 @@ import ( "encoding/binary" "testing" "time" + "unsafe" "github.com/stretchr/testify/require" ) @@ -77,3 +78,13 @@ func TestDefaultMessageTagsLength(t *testing.T) { require.Equal(t, 2, len(tag)) } } + +// TestAtomicVariablesAligment ensures that the 64-bit atomic variables +// offsets are 64-bit aligned. This is required due to go atomic library +// limitation. +func TestAtomicVariablesAligment(t *testing.T) { + p := wsPeer{} + require.True(t, (unsafe.Offsetof(p.requestNonce)%8) == 0) + require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0) + require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0) +} From 7f518b4bbe753e8475bf053478dcd712387d2e71 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 8 Apr 2021 13:51:58 -0400 Subject: [PATCH 163/215] Filter out automated testing useless verbosed output (#2028) Filter out automated testing useless verbosed output. 
--- Makefile | 6 +- debug/logfilter/example1.in | 31 + debug/logfilter/example1.out.expected | 2 + debug/logfilter/example2.in | 98 + debug/logfilter/example2.out.expected | 22 + debug/logfilter/example3.in | 417 ++++ debug/logfilter/example3.out.expected | 206 ++ debug/logfilter/example4.in | 21 + debug/logfilter/example4.out.expected | 2 + debug/logfilter/example5.in | 128 ++ debug/logfilter/example5.out.expected | 33 + debug/logfilter/example6.in | 2619 +++++++++++++++++++++++++ debug/logfilter/example6.out.expected | 385 ++++ debug/logfilter/main.go | 137 ++ debug/logfilter/main_test.go | 59 + test/scripts/e2e_go_tests.sh | 11 +- 16 files changed, 4169 insertions(+), 8 deletions(-) create mode 100644 debug/logfilter/example1.in create mode 100644 debug/logfilter/example1.out.expected create mode 100644 debug/logfilter/example2.in create mode 100644 debug/logfilter/example2.out.expected create mode 100644 debug/logfilter/example3.in create mode 100644 debug/logfilter/example3.out.expected create mode 100644 debug/logfilter/example4.in create mode 100644 debug/logfilter/example4.out.expected create mode 100644 debug/logfilter/example5.in create mode 100644 debug/logfilter/example5.out.expected create mode 100644 debug/logfilter/example6.in create mode 100644 debug/logfilter/example6.out.expected create mode 100644 debug/logfilter/main.go create mode 100644 debug/logfilter/main_test.go diff --git a/Makefile b/Makefile index 7bfbebc2e8..0bca14bf43 100644 --- a/Makefile +++ b/Makefile @@ -210,17 +210,17 @@ $(GOPATH1)/bin/%: cp -f $< $@ test: build - go test $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 3600s + go test $(GOTAGS) -race $(UNIT_TEST_SOURCES) -timeout 3600s | logfilter fulltest: build-race for PACKAGE_DIRECTORY in $(UNIT_TEST_SOURCES) ; do \ - go test $(GOTAGS) -timeout 2500s -race $$PACKAGE_DIRECTORY; \ + go test $(GOTAGS) -timeout 2500s -race $$PACKAGE_DIRECTORY | logfilter; \ done shorttest: build-race $(addprefix short_test_target_, 
$(UNIT_TEST_SOURCES)) $(addprefix short_test_target_, $(UNIT_TEST_SOURCES)): build - @go test $(GOTAGS) -short -timeout 2500s -race $(subst short_test_target_,,$@) + @go test $(GOTAGS) -short -timeout 2500s -race $(subst short_test_target_,,$@) | logfilter integration: build-race ./test/scripts/run_integration_tests.sh diff --git a/debug/logfilter/example1.in b/debug/logfilter/example1.in new file mode 100644 index 0000000000..aba0820130 --- /dev/null +++ b/debug/logfilter/example1.in @@ -0,0 +1,31 @@ +=== RUN TestAccountsCanSendMoney +=== PAUSE TestAccountsCanSendMoney +=== CONT TestAccountsCanSendMoney +Created new rootkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet2.rootkey +Created new rootkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet1.rootkey +Created new partkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet2.0.3000000.partkey +Created new partkey: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(57402) : Telemetry configured from '/Users/tsachi/.algorand/logging.config' +algod(57402) : No REST API Token found. Generated token: ed061c29ef14bd560d7ca7b591ce118842d949dd42d1f473c4202ca72b7d5ff9 +algod(57402) : No Admin REST API Token found. Generated token: 9102a9b56b543456d0f11527097dd8a14a4c76a5f196168d98b7f1384404fd86 +algod(57402) : Logging to: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Primary/node.log +algod(57402) : Deadlock detection is set to: disabled (Default state is 'disable') +algod(57402) : Initializing the Algorand node... +algod(57402) : Success! +algod(57402) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. 
Press Ctrl-C to exit +algod(57402) : ⇨ http server started on 127.0.0.1:8080 +algod(57403) : Telemetry configured from '/Users/tsachi/.algorand/logging.config' +algod(57403) : No REST API Token found. Generated token: e752c98368436d74c4074a1cd2b15f31ea7d7661768764018d51b42b2b53fba7 +algod(57403) : No Admin REST API Token found. Generated token: 5bcc51fabb5247bb8a839a18e7ac3f7914138aea92e19257ec13765015649483 +algod(57403) : Logging to: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp317215351/TestAccountsCanSendMoney/Node/node.log +algod(57403) : Deadlock detection is set to: disabled (Default state is 'disable') +algod(57403) : Initializing the Algorand node... +algod(57403) : Success! +algod(57403) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:64818. Press Ctrl-C to exit +algod(57403) : ⇨ http server started on 127.0.0.1:64818 +algod(57403) : Exiting on terminated +algod(57402) : Exiting on terminated +--- PASS: TestAccountsCanSendMoney (112.79s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 113.107s diff --git a/debug/logfilter/example1.out.expected b/debug/logfilter/example1.out.expected new file mode 100644 index 0000000000..7bafe7b35f --- /dev/null +++ b/debug/logfilter/example1.out.expected @@ -0,0 +1,2 @@ +--- PASS: TestAccountsCanSendMoney (112.79s) +ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 113.107s diff --git a/debug/logfilter/example2.in b/debug/logfilter/example2.in new file mode 100644 index 0000000000..fbf835f590 --- /dev/null +++ b/debug/logfilter/example2.in @@ -0,0 +1,98 @@ +=== RUN TestVet +=== PAUSE TestVet +=== RUN TestVetAsm +=== PAUSE TestVetAsm +=== RUN TestVetDirs +=== PAUSE TestVetDirs +=== RUN TestTags +=== PAUSE TestTags +=== RUN TestVetVerbose +=== PAUSE TestVetVerbose +=== CONT TestVet +=== CONT TestTags +=== CONT TestVetVerbose +=== RUN TestTags/testtag +=== PAUSE TestTags/testtag +=== CONT TestVetDirs +=== CONT TestVetAsm +=== RUN TestVet/0 
+=== PAUSE TestVet/0 +=== RUN TestVet/1 +=== PAUSE TestVet/1 +=== RUN TestVet/2 +=== PAUSE TestVet/2 +=== RUN TestVet/3 +=== PAUSE TestVet/3 +=== RUN TestVet/4 +=== RUN TestTags/x_testtag_y +=== PAUSE TestVet/4 +=== RUN TestVet/5 +=== PAUSE TestVet/5 +=== PAUSE TestTags/x_testtag_y +=== RUN TestVet/6 +=== RUN TestTags/x,testtag,y +=== PAUSE TestTags/x,testtag,y +=== RUN TestVetDirs/testingpkg +=== PAUSE TestVet/6 +=== CONT TestTags/x,testtag,y +=== PAUSE TestVetDirs/testingpkg +=== RUN TestVetDirs/divergent +=== RUN TestVet/7 +=== PAUSE TestVet/7 +=== PAUSE TestVetDirs/divergent +=== CONT TestTags/x_testtag_y +=== CONT TestTags/testtag +=== RUN TestVetDirs/buildtag +=== PAUSE TestVetDirs/buildtag +=== CONT TestVet/0 +=== CONT TestVet/4 +=== RUN TestVetDirs/incomplete +=== PAUSE TestVetDirs/incomplete +=== RUN TestVetDirs/cgo +=== PAUSE TestVetDirs/cgo +=== CONT TestVet/7 +=== CONT TestVet/6 +--- PASS: TestVetVerbose (0.04s) +=== CONT TestVet/5 +=== CONT TestVet/3 +=== CONT TestVet/2 +--- PASS: TestTags (0.00s) + --- PASS: TestTags/x_testtag_y (0.04s) + vet_test.go:187: -tags=x testtag y + --- PASS: TestTags/x,testtag,y (0.04s) + vet_test.go:187: -tags=x,testtag,y + --- PASS: TestTags/testtag (0.04s) + vet_test.go:187: -tags=testtag +=== CONT TestVet/1 +=== CONT TestVetDirs/testingpkg +=== CONT TestVetDirs/buildtag +=== CONT TestVetDirs/divergent +=== CONT TestVetDirs/incomplete +=== CONT TestVetDirs/cgo +--- PASS: TestVet (0.39s) + --- PASS: TestVet/5 (0.07s) + vet_test.go:114: files: ["testdata/copylock_func.go" "testdata/rangeloop.go"] + --- PASS: TestVet/3 (0.07s) + vet_test.go:114: files: ["testdata/composite.go" "testdata/nilfunc.go"] + --- PASS: TestVet/6 (0.07s) + vet_test.go:114: files: ["testdata/copylock_range.go" "testdata/shadow.go"] + --- PASS: TestVet/2 (0.07s) + vet_test.go:114: files: ["testdata/bool.go" "testdata/method.go" "testdata/unused.go"] + --- PASS: TestVet/0 (0.13s) + vet_test.go:114: files: ["testdata/assign.go" "testdata/httpresponse.go" 
"testdata/structtag.go"] + --- PASS: TestVet/4 (0.16s) + vet_test.go:114: files: ["testdata/copylock.go" "testdata/print.go"] + --- PASS: TestVet/1 (0.07s) + vet_test.go:114: files: ["testdata/atomic.go" "testdata/lostcancel.go" "testdata/unsafeptr.go"] + --- PASS: TestVet/7 (0.19s) + vet_test.go:114: files: ["testdata/deadcode.go" "testdata/shift.go"] +--- PASS: TestVetDirs (0.01s) + --- PASS: TestVetDirs/testingpkg (0.06s) + --- PASS: TestVetDirs/divergent (0.05s) + --- PASS: TestVetDirs/buildtag (0.06s) + --- PASS: TestVetDirs/incomplete (0.05s) + --- PASS: TestVetDirs/cgo (0.04s) +--- PASS: TestVetAsm (0.75s) +PASS +ok cmd/vet (cached) + diff --git a/debug/logfilter/example2.out.expected b/debug/logfilter/example2.out.expected new file mode 100644 index 0000000000..767f8b1d1b --- /dev/null +++ b/debug/logfilter/example2.out.expected @@ -0,0 +1,22 @@ +--- PASS: TestVetVerbose (0.04s) +--- PASS: TestTags (0.00s) + --- PASS: TestTags/x_testtag_y (0.04s) + --- PASS: TestTags/x,testtag,y (0.04s) + --- PASS: TestTags/testtag (0.04s) +--- PASS: TestVet (0.39s) + --- PASS: TestVet/5 (0.07s) + --- PASS: TestVet/3 (0.07s) + --- PASS: TestVet/6 (0.07s) + --- PASS: TestVet/2 (0.07s) + --- PASS: TestVet/0 (0.13s) + --- PASS: TestVet/4 (0.16s) + --- PASS: TestVet/1 (0.07s) + --- PASS: TestVet/7 (0.19s) +--- PASS: TestVetDirs (0.01s) + --- PASS: TestVetDirs/testingpkg (0.06s) + --- PASS: TestVetDirs/divergent (0.05s) + --- PASS: TestVetDirs/buildtag (0.06s) + --- PASS: TestVetDirs/incomplete (0.05s) + --- PASS: TestVetDirs/cgo (0.04s) +--- PASS: TestVetAsm (0.75s) +ok cmd/vet (cached) diff --git a/debug/logfilter/example3.in b/debug/logfilter/example3.in new file mode 100644 index 0000000000..76b7f7ec5f --- /dev/null +++ b/debug/logfilter/example3.in @@ -0,0 +1,417 @@ +=== RUN TestGoalWithExpect +=== RUN TestGoalWithExpect/catchpointCatchupTest.exp + expectFixture.go:120: algoDir: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/catchpointCatchupTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/createWalletTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/createWalletTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/doubleSpendingTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/doubleSpendingTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalFormattingTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalFormattingTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeSystemdTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeSystemdTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealAppInfoTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/statefulTealAppInfoTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/corsTest.exp + expectFixture.go:120: 
algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/corsTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAppAccountAddressTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAppAccountAddressTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/listExpiredParticipationKeyTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/listExpiredParticipationKeyTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeStatusTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeStatusTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalTxValidityTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalTxValidityTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/ledgerTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/ledgerTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/tealAndStatefulTealTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + expectFixture.go:157: err 
running 'tealAndStatefulTealTest.exp': exit status 1 + stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod + TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + network create test_net_expect_1617038539 + spawn goal network create --network test_net_expect_1617038539 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.rootkey + + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.rootkey + + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey + + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey + + future 100000 + + Network test_net_expect_1617038539 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + network start test_net_expect_1617038539 + spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + Network Started under 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + network status test_net_expect_1617038539 + spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + + + [Primary] + + Last committed block: 0 + + Time since last block: 0.0s + + Sync Time: 0.0s + + Last consensus protocol: future + + Next consensus protocol: future + + Round for next consensus protocol: 1 + + Next consensus protocol supported: true + + + + [Node] + + Last committed block: 0 + + Time since last block: 0.0s + + Sync Time: 0.2s + + Last consensus protocol: future + + Next consensus protocol: future + + Round for next consensus protocol: 1 + + Next consensus protocol supported: true + + + + StartNetwork complete + Primary node address is: 127.0.0.1:34369 + Primary Node Address: 127.0.0.1:34369 + spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + [online] IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A 5000000000000000 microAlgos + + Account Address: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A Balance: 5000000000000000 + spawn goal account balance -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + 5000000000000000 microAlgos + + Wallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Balance: 5000000000000000 + Primary Account Balance: 5000000000000000 + spawn goal account rewards -w unencrypted-default-wallet -a 
IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + 0 microAlgosWallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Rewards: 0 + Primary Account Rewards: 0 + spawn goal wallet new Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Please choose a password for wallet 'Wallet_1_1617038539': + + Please confirm the password: + + Creating wallet... + + Created wallet 'Wallet_1_1617038539' + + Your new wallet has a backup phrase that can be used for recovery. + + Keeping this backup phrase safe is extremely important. + + Would you like to see it now? (Y/n): y + + Your backup phrase is printed below. + + Keep this information safe -- never share it with anyone! + + + + One or more non-printable characters were ommited from the subsequent line: + + [32mtest faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue[0m + + WALLET_1_PASSPHRASE: test faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + ################################################## + + Wallet: Wallet_1_1617038539 + + ID: f6c68ab0105dccf477e2dc3de44dda18 + + ################################################## + + Wallet: unencrypted-default-wallet + + ID: 2bc05b49cc7176c389a384a28d622f90 + + spawn goal account new -w Wallet_1_1617038539 -d 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Please enter the password for wallet 'Wallet_1_1617038539': + + Created new account with address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI + + Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI + spawn goal account list -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + [offline] Unnamed-0 UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI 0 microAlgos *Default + + Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI + spawn goal wallet new Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Please choose a password for wallet 'Wallet_2_1617038539': + + Please confirm the password: + + Creating wallet... + + Created wallet 'Wallet_2_1617038539' + + Your new wallet has a backup phrase that can be used for recovery. + + Keeping this backup phrase safe is extremely important. + + Would you like to see it now? (Y/n): y + + Your backup phrase is printed below. + + Keep this information safe -- never share it with anyone! 
+ + + + One or more non-printable characters were ommited from the subsequent line: + + [32mpowder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect[0m + + WALLET_2_PASSPHRASE: powder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + ################################################## + + Wallet: Wallet_1_1617038539 + + ID: f6c68ab0105dccf477e2dc3de44dda18 + + ################################################## + + Wallet: Wallet_2_1617038539 + + ID: d3768bb0e3c128910cc02e0bc2b357de + + spawn goal account new -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Please enter the password for wallet 'Wallet_2_1617038539': + + Created new account with address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI + + Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI + spawn goal account list -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + [offline] Unnamed-1 OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI 0 microAlgos + + Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + 
Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI, transaction ID: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ. Fee set to 1000 + + Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ still pending as of round 4 + + Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ committed in round 6 + + TRANSACTION_ID 1: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ + spawn goal account balance -a UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + 1000000000 microAlgos + + Account Balance: 1000000000 + Account balance OK: 1000000000 + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI, transaction ID: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ. 
Fee set to 1000 + + Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ still pending as of round 6 + + Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ committed in round 8 + + TRANSACTION_ID 2: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ + spawn goal account balance -a OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + 1000000000 microAlgos + + Account Balance: 1000000000 + Account balance OK: 1000000000 + setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work + + writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal' + reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal + #pragma version 2 + int 1 + + spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal + + /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to 
address YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ. Fee set to 1000 + + Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ still pending as of round 8 + + Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ committed in round 10 + + TRANSACTION_ID_APP: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + 1000000000 microAlgos + + Account Balance: 1000000000 + Account balance OK: 1000000000 + calling app create + calling goal app create + spawn goal app create --creator IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + + Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A) + + Issued transaction from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, txid FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ (fee 1000) + + Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ still pending as of round 11 + + 
Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ committed in round 13 + + Created app with app index 4 + + App ID 4 + spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i unsginedtransaction1.tx -o sginedtransaction1.tx + + Please enter the password for wallet 'Wallet_1_1617038539': + + form combined transaction + create group transaction + spawn goal clerk group -i combinedtransactions.tx -o groupedtransactions.tx + + split transaction + spawn goal clerk split -i groupedtransactions.tx -o split.tx + + Wrote transaction 0 to split-0.tx + + Wrote transaction 1 to split-1.tx + + sign the split transaction + spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i split-0.tx -o signout-0.tx + + Aborting with Error: Timed out signing transaction + GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + GLOBAL_NETWORK_NAME test_net_expect_1617038539 + Stopping network: test_net_expect_1617038539 + spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + stderr: +=== RUN TestGoalWithExpect/goalAccountTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAccountTest/algod + 
testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealAppReadTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/statefulTealAppReadTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealCreateAppTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/statefulTealCreateAppTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/basicExpectTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/basicExpectTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/basicGoalTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/basicGoalTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAssetTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAssetTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalClerkGroupTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalClerkGroupTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeConnectionTest.exp + expectFixture.go:120: algoDir: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalNodeConnectionTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/multisigCreationDeletionTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/multisigCreationDeletionTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/pingpongTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/pingpongTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAccountInfoTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalAccountInfoTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalDryrunRestTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalDryrunRestTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/limitOrderTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/limitOrderTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/reportTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/reportTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/tealConsensusTest.exp + expectFixture.go:120: 
algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealConsensusTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalCmdFlagsTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/goalCmdFlagsTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/testInfraTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/testInfraTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +--- FAIL: TestGoalWithExpect (1991.52s) + --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (131.36s) + --- PASS: TestGoalWithExpect/createWalletTest.exp (103.91s) + --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (63.10s) + --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.25s) + --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.24s) + --- PASS: TestGoalWithExpect/goalNodeTest.exp (16.24s) + --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (23.72s) + --- PASS: TestGoalWithExpect/corsTest.exp (8.28s) + --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (90.79s) + --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (74.16s) + --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (19.26s) + --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (9.42s) + --- PASS: TestGoalWithExpect/ledgerTest.exp (7.30s) + --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (83.90s) + --- PASS: TestGoalWithExpect/goalAccountTest.exp (91.38s) + --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (100.50s) + --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (276.31s) + --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.03s) + --- PASS: 
TestGoalWithExpect/basicGoalTest.exp (52.36s) + --- PASS: TestGoalWithExpect/goalAssetTest.exp (24.90s) + --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (18.57s) + --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (7.81s) + --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (11.85s) + --- PASS: TestGoalWithExpect/pingpongTest.exp (516.38s) + --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (111.10s) + --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (25.73s) + --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.48s) + --- PASS: TestGoalWithExpect/reportTest.exp (5.93s) + --- PASS: TestGoalWithExpect/tealConsensusTest.exp (5.96s) + --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.76s) + --- PASS: TestGoalWithExpect/testInfraTest.exp (2.52s) +FAIL +FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1991.565s + diff --git a/debug/logfilter/example3.out.expected b/debug/logfilter/example3.out.expected new file mode 100644 index 0000000000..c2f2f58188 --- /dev/null +++ b/debug/logfilter/example3.out.expected @@ -0,0 +1,206 @@ + +--- FAIL: TestGoalWithExpect (1991.52s) + --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (131.36s) + --- PASS: TestGoalWithExpect/createWalletTest.exp (103.91s) + --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (63.10s) + --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.25s) + --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.24s) + --- PASS: TestGoalWithExpect/goalNodeTest.exp (16.24s) + --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (23.72s) + --- PASS: TestGoalWithExpect/corsTest.exp (8.28s) + --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (90.79s) + --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (74.16s) + --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (19.26s) + --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (9.42s) + --- PASS: TestGoalWithExpect/ledgerTest.exp (7.30s) + + expectFixture.go:120: algoDir: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1 + stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod + TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + network create test_net_expect_1617038539 + spawn goal network create --network test_net_expect_1617038539 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.rootkey + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.rootkey + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey + future 100000 + Network test_net_expect_1617038539 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + network start test_net_expect_1617038539 + spawn goal network start -r 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + network status test_net_expect_1617038539 + spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + [Primary] + Last committed block: 0 + Time since last block: 0.0s + Sync Time: 0.0s + Last consensus protocol: future + Next consensus protocol: future + Round for next consensus protocol: 1 + Next consensus protocol supported: true + + [Node] + Last committed block: 0 + Time since last block: 0.0s + Sync Time: 0.2s + Last consensus protocol: future + Next consensus protocol: future + Round for next consensus protocol: 1 + Next consensus protocol supported: true + + StartNetwork complete + Primary node address is: 127.0.0.1:34369 + Primary Node Address: 127.0.0.1:34369 + spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + [online] IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A 5000000000000000 microAlgos + Account Address: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A Balance: 5000000000000000 + spawn goal account balance -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + 5000000000000000 microAlgos + Wallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Balance: 5000000000000000 + Primary Account Balance: 5000000000000000 + spawn goal 
account rewards -w unencrypted-default-wallet -a IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + 0 microAlgosWallet: unencrypted-default-wallet, Account: IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, Rewards: 0 + Primary Account Rewards: 0 + spawn goal wallet new Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Please choose a password for wallet 'Wallet_1_1617038539': + Please confirm the password: + Creating wallet... + Created wallet 'Wallet_1_1617038539' + Your new wallet has a backup phrase that can be used for recovery. + Keeping this backup phrase safe is extremely important. + Would you like to see it now? (Y/n): y + Your backup phrase is printed below. + Keep this information safe -- never share it with anyone! 
+ + One or more non-printable characters were ommited from the subsequent line: + [32mtest faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue[0m + WALLET_1_PASSPHRASE: test faculty trash brick need involve stand run jelly genius clap business zero ticket head grief rib fox ladder soap injury thrive balance able tissue + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + ################################################## + Wallet: Wallet_1_1617038539 + ID: f6c68ab0105dccf477e2dc3de44dda18 + ################################################## + Wallet: unencrypted-default-wallet + ID: 2bc05b49cc7176c389a384a28d622f90 + spawn goal account new -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Please enter the password for wallet 'Wallet_1_1617038539': + Created new account with address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI + Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI + spawn goal account list -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + [offline] Unnamed-0 UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI 0 microAlgos *Default + Account Address: UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI + spawn goal wallet new Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Please choose a password for wallet 'Wallet_2_1617038539': + Please confirm the password: + Creating wallet... 
+ Created wallet 'Wallet_2_1617038539' + Your new wallet has a backup phrase that can be used for recovery. + Keeping this backup phrase safe is extremely important. + Would you like to see it now? (Y/n): y + Your backup phrase is printed below. + Keep this information safe -- never share it with anyone! + + One or more non-printable characters were ommited from the subsequent line: + [32mpowder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect[0m + WALLET_2_PASSPHRASE: powder sing write danger match cabin order oblige shrug slide tragic select true rule gym celery wool vendor salon goat summer rule dove able collect + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + ################################################## + Wallet: Wallet_1_1617038539 + ID: f6c68ab0105dccf477e2dc3de44dda18 + ################################################## + Wallet: Wallet_2_1617038539 + ID: d3768bb0e3c128910cc02e0bc2b357de + spawn goal account new -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Please enter the password for wallet 'Wallet_2_1617038539': + Created new account with address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI + Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI + spawn goal account list -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + [offline] Unnamed-1 OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI 0 microAlgos + Account Address: OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 
--from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI, transaction ID: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ. Fee set to 1000 + Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ still pending as of round 4 + Transaction YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ committed in round 6 + TRANSACTION_ID 1: YXFXNI3MOJEUHPAC2SLZTHLA5ENKBHNHH6C65J5734SK4RC5NVMQ + spawn goal account balance -a UFEBEPVGFZKIFJFM5DJ5S462YKQGXPKOC5BVM3BW5JCWVHQS64COI4W7GI -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + 1000000000 microAlgos + Account Balance: 1000000000 + Account balance OK: 1000000000 + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI, transaction ID: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ. 
Fee set to 1000 + Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ still pending as of round 6 + Transaction JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ committed in round 8 + TRANSACTION_ID 2: JC2HLHKP37M6O5675MWIRNA6PSTL34ALHTTAXDJQZY2GTKKXBEJQ + spawn goal account balance -a OMG5A7QINNBSJSCJQMQ5ARZ2H44Z5KNNDT7FQ2JMCZ45HSEOYVTFTQNHFI -w Wallet_2_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + 1000000000 microAlgos + Account Balance: 1000000000 + Account balance OK: 1000000000 + setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work + + writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal' + reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal + #pragma version 2 + int 1 + + spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal + /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Sent 1000000000 MicroAlgos from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A to address 
YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ. Fee set to 1000 + Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ still pending as of round 8 + Transaction WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ committed in round 10 + TRANSACTION_ID_APP: WP3XSFI2IEK23AOQ5IVCIJ4DYE7JGUFALYWR3YBRVYVME5F6UJUQ, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617038539 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + 1000000000 microAlgos + Account Balance: 1000000000 + Account balance OK: 1000000000 + calling app create + calling goal app create + spawn goal app create --creator IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ + Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A) + Issued transaction from account IX34TQYLCJI2AZ4GCXL2Q3DWIWGQXEYN77ILPH4B2FCVTNKNTXGUO6YK3A, txid FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ (fee 1000) + Transaction FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ still pending as of round 11 + Transaction 
FRAKID57TZRVVQIM342JHZFMEJ7LTEZAJM6CCMCRDUUN5F564LJQ committed in round 13 + Created app with app index 4 + App ID 4 + spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i unsginedtransaction1.tx -o sginedtransaction1.tx + Please enter the password for wallet 'Wallet_1_1617038539': + form combined transaction + create group transaction + spawn goal clerk group -i combinedtransactions.tx -o groupedtransactions.tx + split transaction + spawn goal clerk split -i groupedtransactions.tx -o split.tx + Wrote transaction 0 to split-0.tx + Wrote transaction 1 to split-1.tx + sign the split transaction + spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617038539 -i split-0.tx -o signout-0.tx + Aborting with Error: Timed out signing transaction + GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + GLOBAL_NETWORK_NAME test_net_expect_1617038539 + Stopping network: test_net_expect_1617038539 + spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/103916-1617037129265/tealAndStatefulTealTest/algod/root + + stderr: + --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (83.90s) + --- PASS: TestGoalWithExpect/goalAccountTest.exp (91.38s) + --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (100.50s) + --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (276.31s) + --- PASS: 
TestGoalWithExpect/basicExpectTest.exp (0.03s) + --- PASS: TestGoalWithExpect/basicGoalTest.exp (52.36s) + --- PASS: TestGoalWithExpect/goalAssetTest.exp (24.90s) + --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (18.57s) + --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (7.81s) + --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (11.85s) + --- PASS: TestGoalWithExpect/pingpongTest.exp (516.38s) + --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (111.10s) + --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (25.73s) + --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.48s) + --- PASS: TestGoalWithExpect/reportTest.exp (5.93s) + --- PASS: TestGoalWithExpect/tealConsensusTest.exp (5.96s) + --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.76s) + --- PASS: TestGoalWithExpect/testInfraTest.exp (2.52s) +FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1991.565s diff --git a/debug/logfilter/example4.in b/debug/logfilter/example4.in new file mode 100644 index 0000000000..fb04351de7 --- /dev/null +++ b/debug/logfilter/example4.in @@ -0,0 +1,21 @@ +=== RUN TestAlgodLogsToFile +=== PAUSE TestAlgodLogsToFile +=== CONT TestAlgodLogsToFile +Created new rootkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet2.rootkey +Created new rootkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet1.rootkey +Created new partkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet1.0.3000000.partkey +Created new partkey: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(3523) : No REST API Token found. 
Generated token: 48ccf1649eeda3dcac44b414e134f5fad0cd12fbd01097386a44e6569cfe1404 +algod(3523) : No Admin REST API Token found. Generated token: 1427e9e6cb62beeabb825265ccf2d07d57ad4ca904275f83cb5fc28a539589cb +algod(3523) : Logging to: /home/ubuntu/go/src/github.com/algorand/go-algorand/tmp/out/e2e/103921-1617052919657/TestAlgodLogsToFile/Primary/node.log +algod(3523) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3523) : Initializing the Algorand node... +algod(3523) : Success! +algod(3523) : ⇨ http server started on 127.0.0.1:8080 +algod(3523) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(3523) : Exiting on terminated +--- PASS: TestAlgodLogsToFile (6.64s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 6.744s + diff --git a/debug/logfilter/example4.out.expected b/debug/logfilter/example4.out.expected new file mode 100644 index 0000000000..b859601348 --- /dev/null +++ b/debug/logfilter/example4.out.expected @@ -0,0 +1,2 @@ +--- PASS: TestAlgodLogsToFile (6.64s) +ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 6.744s diff --git a/debug/logfilter/example5.in b/debug/logfilter/example5.in new file mode 100644 index 0000000000..599b6e0ba6 --- /dev/null +++ b/debug/logfilter/example5.in @@ -0,0 +1,128 @@ +=== RUN TestGoalWithExpect +=== RUN TestGoalWithExpect/basicExpectTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/basicExpectTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/corsTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/corsTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalClerkGroupTest.exp + expectFixture.go:120: algoDir: 
/var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalClerkGroupTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/listExpiredParticipationKeyTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/listExpiredParticipationKeyTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/testInfraTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/testInfraTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/doubleSpendingTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/doubleSpendingTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeStatusTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeStatusTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeSystemdTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeSystemdTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAssetTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAssetTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalFormattingTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalFormattingTest/algod + 
testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeConnectionTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeConnectionTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealCreateAppTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/statefulTealCreateAppTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalNodeTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/reportTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/reportTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealAppInfoTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/statefulTealAppInfoTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalDryrunRestTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalDryrunRestTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalTxValidityTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalTxValidityTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/limitOrderTest.exp + expectFixture.go:120: algoDir: 
/var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/limitOrderTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/basicGoalTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/basicGoalTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/catchpointCatchupTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/catchpointCatchupTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/createWalletTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/createWalletTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAccountInfoTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAccountInfoTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAppAccountAddressTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAppAccountAddressTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/pingpongTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/pingpongTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/tealConsensusTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/tealConsensusTest/algod + 
testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAccountTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalAccountTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealAppReadTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/statefulTealAppReadTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/tealAndStatefulTealTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/tealAndStatefulTealTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalCmdFlagsTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/goalCmdFlagsTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/ledgerTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/ledgerTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/multisigCreationDeletionTest.exp + expectFixture.go:120: algoDir: /var/folders/2s/vtjjqkk14j12_mld26p260mh0000gn/T/tmp616213338/expect/multisigCreationDeletionTest/algod + testDataDir:/Users/tsachi/go/src/github.com/algorand/go-algorand/test/testdata +--- PASS: TestGoalWithExpect (1412.29s) + --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.05s) + --- PASS: TestGoalWithExpect/corsTest.exp (10.75s) + --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (11.14s) + --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (73.87s) + --- PASS: 
TestGoalWithExpect/testInfraTest.exp (2.15s) + --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (48.80s) + --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (21.91s) + --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.09s) + --- PASS: TestGoalWithExpect/goalAssetTest.exp (19.00s) + --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.51s) + --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (9.62s) + --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (179.12s) + --- PASS: TestGoalWithExpect/goalNodeTest.exp (14.21s) + --- PASS: TestGoalWithExpect/reportTest.exp (10.75s) + --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (20.44s) + --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (16.22s) + --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (11.10s) + --- PASS: TestGoalWithExpect/limitOrderTest.exp (87.35s) + --- PASS: TestGoalWithExpect/basicGoalTest.exp (30.76s) + --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (88.84s) + --- PASS: TestGoalWithExpect/createWalletTest.exp (87.54s) + --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (91.66s) + --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (74.24s) + --- PASS: TestGoalWithExpect/pingpongTest.exp (356.50s) + --- PASS: TestGoalWithExpect/tealConsensusTest.exp (11.29s) + --- PASS: TestGoalWithExpect/goalAccountTest.exp (23.35s) + --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (34.37s) + --- PASS: TestGoalWithExpect/tealAndStatefulTealTest.exp (52.53s) + --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.38s) + --- PASS: TestGoalWithExpect/ledgerTest.exp (10.54s) + --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (13.22s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1412.538s diff --git a/debug/logfilter/example5.out.expected b/debug/logfilter/example5.out.expected new file mode 100644 index 0000000000..046e2dd2e1 --- /dev/null +++ b/debug/logfilter/example5.out.expected @@ -0,0 +1,33 @@ +--- PASS: 
TestGoalWithExpect (1412.29s) + --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.05s) + --- PASS: TestGoalWithExpect/corsTest.exp (10.75s) + --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (11.14s) + --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (73.87s) + --- PASS: TestGoalWithExpect/testInfraTest.exp (2.15s) + --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (48.80s) + --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (21.91s) + --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.09s) + --- PASS: TestGoalWithExpect/goalAssetTest.exp (19.00s) + --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.51s) + --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (9.62s) + --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (179.12s) + --- PASS: TestGoalWithExpect/goalNodeTest.exp (14.21s) + --- PASS: TestGoalWithExpect/reportTest.exp (10.75s) + --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (20.44s) + --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (16.22s) + --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (11.10s) + --- PASS: TestGoalWithExpect/limitOrderTest.exp (87.35s) + --- PASS: TestGoalWithExpect/basicGoalTest.exp (30.76s) + --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (88.84s) + --- PASS: TestGoalWithExpect/createWalletTest.exp (87.54s) + --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (91.66s) + --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (74.24s) + --- PASS: TestGoalWithExpect/pingpongTest.exp (356.50s) + --- PASS: TestGoalWithExpect/tealConsensusTest.exp (11.29s) + --- PASS: TestGoalWithExpect/goalAccountTest.exp (23.35s) + --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (34.37s) + --- PASS: TestGoalWithExpect/tealAndStatefulTealTest.exp (52.53s) + --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.38s) + --- PASS: TestGoalWithExpect/ledgerTest.exp (10.54s) + --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (13.22s) +ok 
github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1412.538s diff --git a/debug/logfilter/example6.in b/debug/logfilter/example6.in new file mode 100644 index 0000000000..ab22b4c545 --- /dev/null +++ b/debug/logfilter/example6.in @@ -0,0 +1,2619 @@ +=== RUN TestNodeControllerCleanup +=== PAUSE TestNodeControllerCleanup +=== RUN TestAlgodLogsToFile +=== PAUSE TestAlgodLogsToFile +=== CONT TestAlgodLogsToFile +=== CONT TestNodeControllerCleanup +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Offline.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Rich.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Online.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Partkey.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Online.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Partkey.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet2.0.3000000.partkey +Created new partkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Rich.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(24836) : No REST API Token found. Generated token: 51a8e4dededbe29ed75d6c150b5476386046f063de4da4339799e045a8ab59a4 +algod(24836) : No Admin REST API Token found. Generated token: 5747d77952ad65ab4b88fb4e3f4a804d69753fb96c3eaf09c55ad17ab58cef39 +algod(24836) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Primary/node.log +algod(24841) : No REST API Token found. Generated token: 7d99abec3e49d3d0daea14a6d107bbc17de8c60f4784c4abf9910bca248ee1c3 +algod(24841) : No Admin REST API Token found. Generated token: e73fc1607e8c0bc14b5559ef61ac551c3facdd1cd4bf7df0566ffcdfe5158873 +algod(24841) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAlgodLogsToFile/Primary/node.log +algod(24836) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(24836) : Initializing the Algorand node... +algod(24841) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(24841) : Initializing the Algorand node... +algod(24841) : Success! +algod(24836) : Success! +algod(24836) : ⇨ http server started on 127.0.0.1:8080 +algod(24841) : ⇨ http server started on 127.0.0.1:46195 +algod(24841) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46195. Press Ctrl-C to exit +algod(24836) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. 
Press Ctrl-C to exit +algod(24867) : No REST API Token found. Generated token: 385d64cae2bd97c5635171413b0353dab92c13d810979de5ad5e7b3e7323efcb +algod(24867) : No Admin REST API Token found. Generated token: c28e99b75ebad7597b9568ae940c8e5de27f06a65d6b9f5c91c8b40d0bd29cbf +algod(24867) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestNodeControllerCleanup/Node/node.log +algod(24867) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(24867) : Initializing the Algorand node... +algod(24867) : Success! +algod(24867) : ⇨ http server started on 127.0.0.1:45295 +algod(24867) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45295. Press Ctrl-C to exit +algod(24841) : Exiting on terminated +--- PASS: TestAlgodLogsToFile (1.25s) +algod(24867) : Exiting on terminated +algod(24836) : Exiting on terminated +--- PASS: TestNodeControllerCleanup (5.44s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 5.507s +=== RUN TestAlgodWithExpect +=== RUN TestAlgodWithExpect/algodTelemetryLocationTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/algodTelemetryLocationTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +--- PASS: TestAlgodWithExpect (0.07s) + --- PASS: TestAlgodWithExpect/algodTelemetryLocationTest.exp (0.07s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/cli/algod/expect 0.150s +=== RUN TestAlgohWithExpect +=== RUN TestAlgohWithExpect/algohTimeoutTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/algohTimeoutTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +--- PASS: TestAlgohWithExpect (213.98s) + --- PASS: TestAlgohWithExpect/algohTimeoutTest.exp (213.98s) +PASS +ok 
github.com/algorand/go-algorand/test/e2e-go/cli/algoh/expect 214.091s +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Wallet1.0.3000000.partkey +algod(25012) : No REST API Token found. Generated token: d707b0702703c34a98760aed697b86af287935b85617880b358980aa41787399 +algod(25012) : No Admin REST API Token found. Generated token: 619b762ae4744299fd76b696b3d2980328b792035ac6be23e7a5977775712e33 +algod(25012) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Primary/node.log +algod(25012) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(25012) : Initializing the Algorand node... +algod(25012) : Success! +algod(25012) : ⇨ http server started on 127.0.0.1:8080 +algod(25012) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(25019) : No REST API Token found. Generated token: 0ed2f171733f8c77c8e70171a1db302871f3c831081ec593a2c0ff9a5197069d +algod(25019) : No Admin REST API Token found. 
Generated token: 43b07c23bae76ae7fbf5bbfb0c7785654fa68fc83910af3e3e32ff6ff13f86d1 +algod(25019) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/GoalTests/Node/node.log +algod(25019) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(25019) : Initializing the Algorand node... +algod(25019) : Success! +algod(25019) : ⇨ http server started on 127.0.0.1:44729 +algod(25019) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44729. Press Ctrl-C to exit +=== RUN TestAccountNew +--- PASS: TestAccountNew (1.75s) +=== RUN TestAccountNewDuplicateFails +--- PASS: TestAccountNewDuplicateFails (0.30s) +=== RUN TestAccountRename +--- PASS: TestAccountRename (0.85s) +=== RUN TestAccountMultipleImportRootKey +--- PASS: TestAccountMultipleImportRootKey (0.49s) +=== RUN TestClerkSendNoteEncoding +--- PASS: TestClerkSendNoteEncoding (18.65s) +=== RUN TestGoalNodeCleanup +algod(25012) : Exiting on terminated +--- PASS: TestGoalNodeCleanup (0.44s) +PASS +algod(25019) : Exiting on terminated +ok github.com/algorand/go-algorand/test/e2e-go/cli/goal 26.354s +=== RUN TestGoalWithExpect +=== RUN TestGoalWithExpect/basicGoalTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/basicGoalTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAppAccountAddressTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAppAccountAddressTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeConnectionTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeConnectionTest/algod + 
testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeSystemdTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeSystemdTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/multisigCreationDeletionTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/multisigCreationDeletionTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/reportTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/reportTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealAppInfoTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/statefulTealAppInfoTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/basicExpectTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/basicExpectTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/listExpiredParticipationKeyTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/listExpiredParticipationKeyTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeTest.exp + expectFixture.go:120: algoDir: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/doubleSpendingTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/doubleSpendingTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAccountTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAccountTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalCmdFlagsTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalCmdFlagsTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalDryrunRestTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalDryrunRestTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalFormattingTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalFormattingTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/limitOrderTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/limitOrderTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/tealConsensusTest.exp + expectFixture.go:120: algoDir: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealConsensusTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAssetTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAssetTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/ledgerTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/ledgerTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/tealAndStatefulTealTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1 + stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod + TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + network create test_net_expect_1617230151 + spawn goal network create --network test_net_expect_1617230151 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + Created new rootkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.rootkey + + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.rootkey + + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey + + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey + + future 100000 + + Network test_net_expect_1617230151 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + network start test_net_expect_1617230151 + spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + network status test_net_expect_1617230151 + spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + + + [Primary] + + Last committed block: 0 + + Time since last block: 0.0s + + Sync Time: 0.0s + + Last consensus protocol: future + + Next consensus protocol: future + + Round for next consensus protocol: 1 + + Next consensus protocol supported: true + + + + [Node] + + Last committed block: 0 + + Time since last block: 0.0s + + Sync Time: 0.6s + + Last consensus protocol: future + + Next consensus protocol: future + + Round for next consensus protocol: 1 + + Next consensus protocol supported: true + + + + StartNetwork complete + Primary node address is: 
127.0.0.1:43613 + Primary Node Address: 127.0.0.1:43613 + spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + [online] W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE 5000000000000000 microAlgos + + Account Address: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE Balance: 5000000000000000 + spawn goal account balance -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + 5000000000000000 microAlgos + + Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Balance: 5000000000000000 + Primary Account Balance: 5000000000000000 + spawn goal account rewards -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + 0 microAlgos + + Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Rewards: 0 + Primary Account Rewards: 0 + spawn goal wallet new Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Please choose a password for wallet 'Wallet_1_1617230151': + + Please confirm the password: + + Creating wallet... + + Created wallet 'Wallet_1_1617230151' + + Your new wallet has a backup phrase that can be used for recovery. + + Keeping this backup phrase safe is extremely important. + + Would you like to see it now? (Y/n): y + + Your backup phrase is printed below. 
+ + Keep this information safe -- never share it with anyone! + + + + One or more non-printable characters were ommited from the subsequent line: + + [32mattract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret[0mWALLET_1_PASSPHRASE: attract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + ################################################## + + Wallet: Wallet_1_1617230151 + + ID: 12dd4a15929ae17827788883ca77479d + + ################################################## + + Wallet: unencrypted-default-wallet + + ID: ec9a33b376e4635705e1339deb6e799b + + spawn goal account new -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Please enter the password for wallet 'Wallet_1_1617230151': + + Created new account with address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ + + Account Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ + spawn goal account list -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + [offline] Unnamed-0 GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ 0 microAlgosAccount Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ + spawn goal wallet new Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Please choose a password for wallet 'Wallet_2_1617230151': + + Please confirm the 
password: + + Creating wallet... + + Created wallet 'Wallet_2_1617230151' + + Your new wallet has a backup phrase that can be used for recovery. + + Keeping this backup phrase safe is extremely important. + + Would you like to see it now? (Y/n): y + + Your backup phrase is printed below. + + Keep this information safe -- never share it with anyone! + + + + One or more non-printable characters were ommited from the subsequent line: + + [32mcasual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump[0m + + WALLET_2_PASSPHRASE: casual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + ################################################## + + Wallet: Wallet_1_1617230151 + + ID: 12dd4a15929ae17827788883ca77479d + + ################################################## + + Wallet: Wallet_2_1617230151 + + ID: 2edbca9e4d78d43556f46cc991415da5 + + spawn goal account new -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Please enter the password for wallet 'Wallet_2_1617230151': 12345678 + + + + Created new account with address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UAAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA + spawn goal account list -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + [offline] Unnamed-1 F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA 0 microAlgosAccount Address: 
F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ, transaction ID: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA. Fee set to 1000 + + Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA still pending as of round 8 + + Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA committed in round 10 + + TRANSACTION_ID 1: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA + spawn goal account balance -a GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + 1000000000 microAlgos + + Account Balance: 1000000000 + Account balance OK: 1000000000 + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA, transaction ID: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ. 
Fee set to 1000 + + Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ still pending as of round 11 + + Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ committed in round 13 + + TRANSACTION_ID 2: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ + spawn goal account balance -a F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + 1000000000 microAlgos + + Account Balance: 1000000000 + Account balance OK: 1000000000 + setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work + + writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal' + reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal + #pragma version 2 + int 1 + + spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal + + /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to 
address YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA. Fee set to 1000 + + Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA still pending as of round 14 + + Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA committed in round 16 + + TRANSACTION_ID_APP: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + 1000000000 microAlgos + + Account Balance: 1000000000 + Account balance OK: 1000000000 + calling app create + calling goal app create + spawn goal app create --creator W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + + Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A) + + Issued transaction from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, txid JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA (fee 1000) + + Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA still pending as of round 19 + + 
Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA committed in round 21 + + Created app with app index 4App ID 4 + spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617230151 -i unsginedtransaction1.tx -o sginedtransaction1.tx + + Aborting with Error: Timed out signing transaction + GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + GLOBAL_NETWORK_NAME test_net_expect_1617230151 + Stopping network: test_net_expect_1617230151 + spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + + + stderr: +=== RUN TestGoalWithExpect/testInfraTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/testInfraTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/createWalletTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/createWalletTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalTxValidityTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalTxValidityTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== 
RUN TestGoalWithExpect/pingpongTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + expectFixture.go:157: err running 'pingpongTest.exp': exit status 1 + stdout: starting pinpongTest + TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod + TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + network create test_net_expect_1617230521 + spawn goal network create --network test_net_expect_1617230521 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.rootkey + + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.rootkey + + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.0.3000000.partkey + + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.0.3000000.partkey + + future 100000 + + Network test_net_expect_1617230521 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + network start 
test_net_expect_1617230521 + spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + network status test_net_expect_1617230521 + spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + + + [Primary] + + Last committed block: 0 + + Time since last block: 0.0s + + Sync Time: 0.0s + + Last consensus protocol: future + + Next consensus protocol: future + + Round for next consensus protocol: 1 + + Next consensus protocol supported: true + + + + [Node] + + Last committed block: 0 + + Time since last block: 0.0s + + Sync Time: 0.7s + + Last consensus protocol: future + + Next consensus protocol: future + + Round for next consensus protocol: 1 + + Next consensus protocol supported: true + + + + StartNetwork complete + Primary node address is: 127.0.0.1:37299 + Primary Node Address: 127.0.0.1:37299 + spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/ + + [online] 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 5000000000000000 microAlgos + + Account Address: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U Balance: 5000000000000000 + spawn goal account balance -w unencrypted-default-wallet -a 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/ + + 5000000000000000 microAlgos + + Wallet: unencrypted-default-wallet, Account: 
3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U, Balance: 5000000000000000 + Primary Account Balance: 5000000000000000 + node status waiting for Round 1 + spawn node status + node status check complete, current round is 0 + Current Round: '0' is less than wait for round: '1' + sleep time 0 + spawn node status + node status check complete, current round is 0 + Current Round: '0' is less than wait for round: '1' + sleep time 1 + spawn node status + Aborting with Error: goal node status timed out + GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + GLOBAL_NETWORK_NAME test_net_expect_1617230521 + Stopping network: test_net_expect_1617230521 + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + stderr: +=== RUN TestGoalWithExpect/catchpointCatchupTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/catchpointCatchupTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalClerkGroupTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalClerkGroupTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalNodeStatusTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalNodeStatusTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealAppReadTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/statefulTealAppReadTest/algod 
+ testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/corsTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/corsTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/goalAccountInfoTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/goalAccountInfoTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestGoalWithExpect/statefulTealCreateAppTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/statefulTealCreateAppTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +--- FAIL: TestGoalWithExpect (1538.34s) + --- PASS: TestGoalWithExpect/basicGoalTest.exp (37.79s) + --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (75.84s) + --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (4.91s) + --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.19s) + --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (8.93s) + --- PASS: TestGoalWithExpect/reportTest.exp (6.20s) + --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (21.86s) + --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.01s) + --- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (67.63s) + --- PASS: TestGoalWithExpect/goalNodeTest.exp (15.93s) + --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (59.13s) + --- PASS: TestGoalWithExpect/goalAccountTest.exp (24.57s) + --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.98s) + --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (37.95s) + --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.16s) + --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.89s) 
+ --- PASS: TestGoalWithExpect/tealConsensusTest.exp (12.31s) + --- PASS: TestGoalWithExpect/goalAssetTest.exp (41.96s) + --- PASS: TestGoalWithExpect/ledgerTest.exp (9.53s) + --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (115.77s) + --- PASS: TestGoalWithExpect/testInfraTest.exp (3.41s) + --- PASS: TestGoalWithExpect/createWalletTest.exp (243.76s) + --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (6.89s) + --- FAIL: TestGoalWithExpect/pingpongTest.exp (26.30s) + --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (207.24s) + --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (21.29s) + --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (18.58s) + --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (47.93s) + --- PASS: TestGoalWithExpect/corsTest.exp (9.63s) + --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (102.64s) + --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (201.07s) +FAIL +FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1538.381s +? github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy [no test files] +testing: warning: no tests to run +PASS +ok github.com/algorand/go-algorand/test/e2e-go/cli/perf 0.131s [no tests to run] +? 
github.com/algorand/go-algorand/test/e2e-go/cli/tealdbg/cdtmock [no test files] +=== RUN TestTealdbgWithExpect +=== RUN TestTealdbgWithExpect/tealdbgSpinoffTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealdbgSpinoffTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +=== RUN TestTealdbgWithExpect/tealdbgTest.exp + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealdbgTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata +--- PASS: TestTealdbgWithExpect (3.29s) + --- PASS: TestTealdbgWithExpect/tealdbgSpinoffTest.exp (1.17s) + --- PASS: TestTealdbgWithExpect/tealdbgTest.exp (2.12s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/cli/tealdbg/expect 3.352s +=== RUN TestStartAndCancelAuctionNoBids + auctionCancel_test.go:31: +--- SKIP: TestStartAndCancelAuctionNoBids (0.00s) +=== RUN TestStartAndCancelAuctionOneUserTenBids +=== PAUSE TestStartAndCancelAuctionOneUserTenBids +=== RUN TestStartAndCancelAuctionEarlyOneUserTenBids +=== PAUSE TestStartAndCancelAuctionEarlyOneUserTenBids +=== RUN TestInvalidDeposit + auctionErrors_test.go:32: +--- SKIP: TestInvalidDeposit (0.00s) +=== RUN TestNoDepositAssociatedWithBid + auctionErrors_test.go:123: +--- SKIP: TestNoDepositAssociatedWithBid (0.00s) +=== RUN TestDeadbeatBid +=== PAUSE TestDeadbeatBid +=== RUN TestStartAndPartitionAuctionTenUsersTenBidsEach + auctionErrors_test.go:290: +--- SKIP: TestStartAndPartitionAuctionTenUsersTenBidsEach (0.00s) +=== RUN TestStartAndEndAuctionNoBids + basicAuction_test.go:43: +--- SKIP: TestStartAndEndAuctionNoBids (0.00s) +=== RUN TestStartAndEndAuctionOneUserOneBid + basicAuction_test.go:84: +--- SKIP: TestStartAndEndAuctionOneUserOneBid (0.00s) +=== RUN TestStartAndEndAuctionOneUserTenBids + basicAuction_test.go:153: +--- 
SKIP: TestStartAndEndAuctionOneUserTenBids (0.00s) +=== RUN TestStartAndEndAuctionTenUsersOneBidEach +=== PAUSE TestStartAndEndAuctionTenUsersOneBidEach +=== RUN TestStartAndEndAuctionTenUsersTenBidsEach +=== PAUSE TestStartAndEndAuctionTenUsersTenBidsEach +=== RUN TestDecayingPrice +=== PAUSE TestDecayingPrice +=== CONT TestDeadbeatBid +=== CONT TestStartAndEndAuctionTenUsersTenBidsEach +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(26744) : No REST API Token found. 
Generated token: 3a2ccc019ef899d0f74fc682edaae0fc21ebab457cc6d2124f6461b1fdbf4fce +algod(26744) : No Admin REST API Token found. Generated token: 1e83a756b672c783104a82c4eb2c069b1210b1ed17d9e957d60a20a1b2a82c8f +algod(26744) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Primary/node.log +algod(26750) : No REST API Token found. Generated token: 3f0ea30738cb6ec35d5c22675c966f8d1650dac5a7f0c5c6e17db3db8319708d +algod(26750) : No Admin REST API Token found. Generated token: 9c7928831784c226e9ca13227c002226b22198b589eb1161d1a8617f78390f11 +algod(26750) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Primary/node.log +algod(26750) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(26750) : Initializing the Algorand node... +algod(26744) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(26744) : Initializing the Algorand node... +algod(26744) : Success! +algod(26744) : ⇨ http server started on 127.0.0.1:44131 +algod(26744) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44131. Press Ctrl-C to exit +algod(26750) : Success! +algod(26750) : ⇨ http server started on 127.0.0.1:41477 +algod(26750) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41477. Press Ctrl-C to exit +algod(26758) : No REST API Token found. Generated token: 8439e808f8a9d54e492b0cdd6933db1b7b30a8ade373e7120659df2c4394af16 +algod(26758) : No Admin REST API Token found. Generated token: 0007bda5dbf7a5e3474357bc8b05895d1f082fd7653611dd068000437b8d1a32 +algod(26758) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDeadbeatBid/Node/node.log +algod(26763) : No REST API Token found. 
Generated token: 20ed80f6d65be56990c195ace475be18b45cace788c57e06e45871626fce9472 +algod(26763) : No Admin REST API Token found. Generated token: 0b2752e214fbefc5badd32ad0f31867c4c053410aad7b304d59496914b87c562 +algod(26763) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersTenBidsEach/Node/node.log +algod(26758) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(26758) : Initializing the Algorand node... +algod(26758) : Success! +algod(26758) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:32923. Press Ctrl-C to exit +algod(26758) : ⇨ http server started on 127.0.0.1:32923 +algod(26763) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(26763) : Initializing the Algorand node... +algod(26763) : Success! +algod(26763) : ⇨ http server started on 127.0.0.1:37463 +algod(26763) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37463. 
Press Ctrl-C to exit +=== CONT TestDeadbeatBid + auctionFixture.go:994: found a nonzero auctionID {AuctionKey:RHJM6E4PXICDIRFL6XZWMVSDEOOCXZHOMSCPW33EQNBTZOF3RD2DIJAC2U AuctionID:1} +time="2021-03-31T22:31:19.888841 +0000" level=error msg="[Stack] goroutine 207 [running]:\nruntime/debug.Stack(0xc000130930, 0xc0000a84f8, 0xc000124690)\n\t/home/travis/.gimme/versions/go1.14.7.linux.amd64/src/runtime/debug/stack.go:24 +0xab\ngithub.com/algorand/go-algorand/logging.logger.Error(0xc000130930, 0xc0000a84f8, 0xc000170ae0, 0x1, 0x1)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/logging/log.go:219 +0x56\ngithub.com/algorand/go-algorand/auction.(*RunningAuction).PlaceBid(0xc00045d2b0, 0x82d637ead87e5090, 0xcb2674832468e080, 0x1ba05b89ed224192, 0x78baeeefb7240cb2, 0x186a0, 0x88, 0x0, 0x3404ba8f13cfd289, 0x23435666f3f5ab44, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/logic.go:334 +0x149b\ngithub.com/algorand/go-algorand/auction.(*SerializedRunningAuction).PlaceBid(0xc000494930, 0x82d637ead87e5090, 0xcb2674832468e080, 0x1ba05b89ed224192, 0x78baeeefb7240cb2, 0x186a0, 0x88, 0x0, 0x3404ba8f13cfd289, 0x23435666f3f5ab44, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/serializedLogic.go:82 +0x125\ngithub.com/algorand/go-algorand/auction.(*Tracker).placeBid(0xc000089d10, 0x82d637ead87e5090, 0xcb2674832468e080, 0x1ba05b89ed224192, 0x78baeeefb7240cb2, 0x186a0, 0x88, 0x0, 0x3404ba8f13cfd289, 0x23435666f3f5ab44, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/tracker.go:224 +0x326\ngithub.com/algorand/go-algorand/auction.(*Tracker).ProcessMessage(0xc000089d10, 0xc0001c3630, 0x3, 0xc00002f540, 0x34, 0xc00002f580, 0x3a, 0x3e8, 0xf, 0x3f7, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/tracker.go:167 +0xc46\ngithub.com/algorand/go-algorand/auction.(*Tracker).LiveUpdateWithContext(0xc000089d10, 0x1a50320, 0xc000084540, 0xc00027da20, 0x16fea81, 0x4, 0x0, 0x0, 0x0, 
0xc0003136a0, ...)\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/auction/tracker.go:311 +0x1ce\ncreated by github.com/algorand/go-algorand/test/framework/fixtures.(*AuctionFixture).GetAuctionTracker\n\t/home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/auctionFixture.go:415 +0x3d6\n" file=logic.go function="github.com/algorand/go-algorand/auction.(*RunningAuction).PlaceBid" line=334 +time="2021-03-31T22:31:19.893756 +0000" level=error msg="the amount of bid currency 100000 exceeds the deposited amount 10000, dropping message" file=logic.go function="github.com/algorand/go-algorand/auction.(*RunningAuction).PlaceBid" line=334 +time="2021-03-31T22:31:19.893986 +0000" level=warning msg="Placing bid failed, dropping message, err: the amount of bid currency 100000 exceeds the deposited amount 10000, dropping message" file=tracker.go function="github.com/algorand/go-algorand/auction.(*Tracker).ProcessMessage" line=168 +=== CONT TestStartAndEndAuctionTenUsersTenBidsEach + auctionFixture.go:994: found a nonzero auctionID {AuctionKey:3KGU5ICONSLT4V2WD4QY3CEKOFLN7DIJC7UKJAH2RHIPXWAZOYMXONMQUQ AuctionID:1} + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 15 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CGB7TUNMBWIPUNAOBKGKIK3O6TSUBCEV7SMML2WZW25MENCXFY2Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 15 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id NU2G3SKFNG6T7FDNG44JD6ARGU6OBJPGRMGNG5KBP4VGON24KBPA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 15 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id UD4Z443BP2EZ6ERC5U47YGMDQJD5ADNXOKKJ2UPDRM6OXOG5TJLQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 
LDTYDFSKGGFKKCQCVYQCMZW3GON2A3LRUK5G7NIK7N3R5HH7F5SQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id WYT4QUV7KXUCNFR3CE4AY7STC6FXMHVJUHHWLXUBKEYPNF3LNBXQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 65GCUE6VI5GDOZMUKE3QK2PV6XYKKAZAC6FQ64K66DBQSSL3N6BA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RXP4ZBP4JST7S6F2F2FKTMQ4TXUXRPUSP5BZNVZYLP4B6VXPQI4Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 7EWNH4D25CGJWU6QLL3F5ALEQ5Z4ZP4R4YPNRHKZRRARSEZJZVAA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id QU5ILT2U4LIXONDARZQ4J3EV2L5FHWWWNCVJVNMTRHJP4UW6AEOA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PWIKAVTNVUFDLAJKTF34BIRD5VZDJ5IQKAC7BKCTGFCQ4XGM7W7A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id AKCY4RQ2PTOVCMNSB4IJ6RTXEKQDLFAHBIA6JYGRXP7D3Z6H7YOA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id QWXPLTSGGSHKXMRX3SYLLAZUNOKUT5VYI7GG3SZ67UDH7IKTHFDA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id LP673HDNDP2AJAXM4ASOMWK4I4X7CLNSUR6KW4OSWEUEGHMWKOHA + 
auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id YRR53AJYGRWKTA4UGIBZRQVWDJ2YHI7MTFOGXDLI2RAIWSGVGDPQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 5IMV4JLCADFHB2FLS7S4COZRS5NXZIGNRDR5A5S6PREWYGFCATTA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id OYD6IYRWNHGR7PUZILJMBD7AM7SKFL33IER5FV4IGTGSPG2CFK5Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id DK65RHFW3VA373QNGK3FS3ZFWZ7XS2B25P52Z3XOYD2AG6VE5ZUQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id UB52J7STY76ZO4NO2TW5FMZOEKUGLT3ZETS56RRPIEGBG75OVKXQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id X644QZAFMREINVUEIANHG26DYQZ7LEH3MTO4WGCFDLV57TBSZCAA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id HQJGQDDB5KB6HGJWTLTBB2ZAJIYKYM4A6Q3CPVONV3X7CD2WOP5Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id T3BFYRKQDLCVWXUC4H345R7W4CWPRDO7Q33J57XFXHI72I3YD3SQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id SUSPQOTYBWDD57O67PHYNJ7ZIKTZAMFMB35PF6LDJNOSOWCSQUFA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound 
was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 4IG5JEPFUBMZOD6RHLMPQKDAYXWTAQP7GLSOV5UQPXR4VKEBHD6A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id V5LIOKNO6K345DZGJJTWT7LNWMDON5DFIGIFUNW5XZ5QFBMSZHTQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 366F6G54WMJFW6NX5JKUZLMQAHHY7KNKK5KV637WHONMCQI56MUQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ZVUIYLVP4UWXTHCKBRAYMZBKC46Y7DBTQGIRGEHKNIOI4YPCEKHQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CXCTZGQWZCCVLQ4NA4TBDJW2XSGN5CHH44E7XO2FZ4E7GL23C55A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id BY6FOP5LIFYDAPMFQDLXJT5O2IRUZTY5TEVUQI2UEXKYRWPEZVWA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id GBMV2ZE5N5EDBI2E773BB6CFRLJRTMAPYQGPEDMBYI4WEUKF54TA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PEW5FWPBA2ERKQSUIUY73V3C4GE7OB4ZKRW7F6AC5BWKE5FQ2STQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2ZHHYXL3ZLMQKIDJCGGRNIFFXN3O7D33EMFHAWOMVCULWKVNOIQA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned 
transaction id LHSERBR7WD5BXI6RLT65CJ5FT3PXSNDM2RYX5NJYADZ6Q4YVGSIA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id GBSXFR32HVP7EYENSQKMTAKH6TONDRV5ELLPFC2PYX7ZK3I6DWLA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id YDVA45GPEPMBFNGBNDDBX5NFKV2QQK3ENBVDZ5EPRFPSGYOMB3TA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id TEWZ5HFEVJ65Z42CQTLIVISAHKKEF3A5VA7F5K2DKDVEYVTHBM6A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id OZJTB5H7DD7GAHCQLKNM3KZI6V75GVNXIY2UVFGMOVG24TZZEGBQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CYRNDRKS74LGAE444UVPBDDSA6V5CFNGWD775HHKIP2YWGTOYOVQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id U2MSW2Z3QJWAPSZHNO7ZNSKDHSQT45QDSATSHSLDANMD6XQVLZHQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id DJSAIJXZTHPALA6KL3W3S2NYDEIZ2462MZEOLO3KIH6ABJ6NII4Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id S52YCAX4LVMRIWTLDIYYENQDQ4FCVQDP36XK5MKWXFB65RB5FHPA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id DAO35U7DOCK4LO4GA5DUHRL6USDQLQ4UWW4Y7XFFJP4E2Q3AN26A + 
auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id HVKFQM5LZMKPKDOLA4QY3W6K3WAMWWL7IS6U2NTU6QOAB2XHIZTA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id L2FSE2EDMDYGN44RZS6JDZCN7ZI6JLJRYX36JN53FGXORLLLVM4A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id O7SSQIF4GGGR4R4TGKVBLASIT2LMDYJCGP7CJSKZMFOLHFN7F32A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id JULDLQLYYV2RBO6DXESQCZE3RQSTZUZLHCOUAQAX5E5QGINKP3RA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2G62PRT5UW2EMMQVJRXAGLWBKMXV4VOCLB4PJAQSZOX4NC4HG5AA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RJ3U3RUFFITOXIXZQH4SR6XTJOB5HMXFOIAJKXXLPU5RYUKBEHJA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id U44JE4RH5LATJNN7CK5Z3VPWUWBFW7LREDL2YGIRII34FWXIL2IQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id BC6MJPVNI6J2MOPM5VHMDBF6QR6BBH45DLGM5KL3WQG32XQU4BCA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 6D4HH2BEFTPCN3WKFSZCOWKHEGMD65M7ETBFGL376ICZWUER53AA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound 
was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 4CHWXVGXGYSVSPTET342JVGZKGOQTIEJRVBKXYGHEYOVECMJN5IA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id J7AYF27HWS5OM7BWMX43JFYSFPX6FXKPQKJ5AMNR4ZBKZQVMLTOQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ICMNIXYDQPZKMXVA3N4ZVNYC2LL7W2UF36RGWJG4E2KUGJPTNSUQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id BBVLO3LRN4M5TVSKB3MNB2JNK2E5ZMGQB3E23NQNWTHTLTDAWL7A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CEM2RBZSBAFVYMLXUITY7VHCUUJBXLIJEBQ4LJORVA2DT3V5GIEQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id SXQU5J6X6U4IZSZYJSFUZQCBDEMNX6YKLIVSCSYILWLZZPQ4QFQA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id AKASPK4ZUCLDJU7PPONCB2OYUUPB7TYBMJBAF5DIHLSQ36QB6MXA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2AGH7ADWOCAMAIB7MDT5KDDS7JA53NYRI6IMBSCORD4WZ5KDXZPQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id WBDMLAVORJTSVDHGZTDTXNI3GQ2X4GHWQGO3KT4AKZE3OYQF2ESQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned 
transaction id WFV3XUJG337PTWV7XNM3PUPVQOQXVHYAOZY4RXU4PIVLQLG7RXBA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 37TZLJPRSADL6K4V3DLDESUA4MVWAZ7U3XRLDDUUDPPE2GFIFDLA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id JLT7IJAL4SQFDZEZUHFVK3MSLV7RNH2Y3V4FHW3JI2IW5I6SRRUQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id LAKSTB4LZVCJYC7S2YEU2VGCF4BBURPWAYITWQCL3WZ5SXBWZIVQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 7HDMRVY7O3EVIIQ3K6NFFRZXX5PZJSBCS6ESVNEGNUY3PNANLWZA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id GZVNBDVAWHH525CZZCMUW5JJZIGM7BCDXSUEOZOFI2OXSP5LCZPA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id HGPBR35AZHRS7WWXTYU264ULMGYBLMPXX6LHTO2G5425U5BLJZ7A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id Z3HDAIJGLK2QYTO43W2MFOG62CITZKF7AZVXDCCGBYEKC3ZE4EKA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 63TLUX2UAO6Q27B7D4FEDVQUNCIP4AQMOZEOQXL6A7KAID3YSDYQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ONY3H754A7V7G7MUETMXKCM3SXLIAPAF5RMBGMXUSJ2FNLGMSJZA + 
auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id E5QWBRBP3DEVNIA4TTFM3BX6ZJOHD2R4SB7HJUBXHLYRNNVBAZ6Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 5PW3OPK3WXEIEBFJYFQB4VSYODY2ZDBYFPY4MU7RHI3BLZHXBCQQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id XGHVKYBOOHYYR3O5LQ5HZYNJMUWCY3HILNWWYBJ4TOQ4DDYRIJAA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 3F76YEBNYVSVXUMOVL5KG64Q7I3FX4L4VWFFTYW4ZHGE4W5YNSKA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 4J32COCVUKGCTFHRXHI2JCXZNQL5L7UJN6V2AIXWX4DHCLLMMWLQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RRPW3LVIYVGCN6NOVC57F3SSEP2YO2HDY6A7JMRCVRXN5FRESL5Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PXGETO7VS6LITF4BUX6I2VZTACV54VNNU7HVVTX7ZXFVNBUYW63A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RQ2YKODV3MSMZIE7EFDVWSH72JTDLJFV72P24DIVJ6LZWXET7NMQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id IHYQ4LHYT2UTJF6QHRTMEST2EEQPDLI533CJEA6T2MUQ2OTPHIYA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound 
was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id SOAUDXGXXPMD4XDM56MDOZHWKEWPROP45BONZ5IXGJ42OHT5T57A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 62XBOLGXSBITGTRWJ2LSG6EIIQUV544FWNR52RV5UIHLAORAYC7A + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PLSJK2C2FUTFLKQQSO3G3QT3Y4VDDQZK67ZIC4NVKSZKHO3ZCXEA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id H2AB2X4VWXV2AO6VL4LYHPJNCT77LS3VRZV6LTYOZYQY3HU5P7ZA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 54C36BZBPGD5H2MQ6MWZPZJSGEJ2EJWXIQ4MLGZ3MKBJN2MT52XQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 7PUP2AAPT2G644Y6VEI4Q5OGTGJANH7PPWTYBCKXB5LJORLVC66Q + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id ROXLZA6LKUTOREI4DQLSESGH77EIEHWFFFDBIR2J3ICFBH6UKQTQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id NRPC2KPHMSRHDZDOYSKH5QAY2JQVMIVNBXF4VHNGZI3SF37M5LCQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 475GRHPZV52BHDGRLIH7ZLMRTUAERUHW7Y2KL3XHOAHE5CGNKJWA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned 
transaction id ECIWDHV2QELLN6UREXXMDWZOFGF3JHKTHJ4VTPSPZDGQCJ7B7OHA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id UH66VKFZIFSAP5237TPW5ND5U67LBQR4HUJRJ2FD7PSNNFJTWGUQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id IN3AQ5OHFUEXC6PDV534QJV5C2EC7X6QVDH5O3DNA3HSNYGCFPWQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id H5SGUVB2HWP5BN7N6E27YW5MQWKJ6H476DQFJKIM3PJ7G656FQEQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id TLDHT7EYAIIDJSJK56PCCII6OPRX2EPFSBGQGVP564TARTOJLGXQ + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2CUL4G2T7ERYYC7VPKIAYV4D5ZKPJTNQ7BAL5HLBEBLOLQHIRZAA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id PXATHVGK7JNMUUMPTUGM6M6UNYKWUTSNMVT7YWIP52L5X4RWMXDA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id NDMFOWC3I3TBCQQG576DF2ZSIUBLTYSRRQJDLBBCEI7ZLL5XQJSA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id CEU2E2VWUAHUGI6343K3U4WFYKY67VZX5CB2LCVBUGRW4D7GTFWA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id JQZCOCSUMG6MXHRJRXTZAU7RQCQRC6UDHEQCXU7APKUBYKMSCLGA + 
auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id RXS5GZITGCS3B3YEJPZDPD6HENWSJXT7GQFS6ZUTFYMIZLXIZ6XA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id 2BCUY4DGZM2PGBZZ7NJ4X64V3P5ED7ZUSAW7GTHBT2FI5Z4APCKA + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 17 + basicAuction_test.go:387: MakeAndPostBidAndDeposit() returned transaction id C4YIVO653QEPI3T4KHUZRK5BAOABIUO7NSNWCA2JMBEMLM7N5ZBA +algod(26758) : Exiting on terminated +algod(26744) : Exiting on terminated +=== CONT TestDecayingPrice +--- PASS: TestDeadbeatBid (142.06s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(28489) : No REST API Token found. Generated token: 65802ac34482efb6c1a8a71eed19b406fcb9573b3648ca84b4e25b25525b3a9e +algod(28489) : No Admin REST API Token found. 
Generated token: 7c3d27f385e1ae2395d7206931a5b586bdd8e08c4750bb6584d6f40bbbcb6d25 +algod(28489) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Primary/node.log +algod(28489) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(28489) : Initializing the Algorand node... +algod(28489) : Success! +algod(28489) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38547. Press Ctrl-C to exit +algod(28489) : ⇨ http server started on 127.0.0.1:38547 +algod(28497) : No REST API Token found. Generated token: 20e5cd50647585f93dced7555bd8f635daa7c61ba00c1cd465e9f68ff62be7d9 +algod(28497) : No Admin REST API Token found. Generated token: b406afd1c57c77fe72b9cba3c8b69a3d786047bce8f450edb4b67334b92bdcf9 +algod(28497) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDecayingPrice/Node/node.log +algod(28497) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(28497) : Initializing the Algorand node... +algod(28497) : Success! +algod(28497) : ⇨ http server started on 127.0.0.1:39333 +algod(28497) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39333. 
Press Ctrl-C to exit +algod(26750) : Exiting on terminated +algod(26763) : Exiting on terminated +=== CONT TestStartAndCancelAuctionEarlyOneUserTenBids +--- PASS: TestStartAndEndAuctionTenUsersTenBidsEach (168.01s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(28653) : No REST API Token found. Generated token: 5848a5c5a29c04aa478a9df0294e82299b5d21195f5e575a49b4dd0ac9b7bda6 +algod(28653) : No Admin REST API Token found. Generated token: 1c1fac49d071271fdbbe51a79b0d191ef6ad526c1018b70fe447cb5fbb940c1c +algod(28653) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Primary/node.log +algod(28653) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(28653) : Initializing the Algorand node... +algod(28653) : Success! +algod(28653) : ⇨ http server started on 127.0.0.1:8080 +algod(28653) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(28660) : No REST API Token found. Generated token: 6bf7fc768944d2aa31a3bab04c5fdd9f98724afc07e25ea1c9f48ce3cad79ea0 +algod(28660) : No Admin REST API Token found. 
Generated token: 6dfb49cdd467ce5d191f8055e5a8fe7096919d294f20d3bb41e3bd9701b47cc1 +algod(28660) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionEarlyOneUserTenBids/Node/node.log +algod(28660) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(28660) : Initializing the Algorand node... +algod(28660) : Success! +algod(28660) : ⇨ http server started on 127.0.0.1:33663 +algod(28660) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33663. Press Ctrl-C to exit +=== CONT TestDecayingPrice + auctionFixture.go:994: found a nonzero auctionID {AuctionKey:ZWCTP2VWOXCNIVI6WGL2GKRPNNOQBGW27RE2ELYW4KH2Y76DV4AMWKHQ34 AuctionID:1} +=== CONT TestStartAndCancelAuctionEarlyOneUserTenBids + auctionFixture.go:994: found a nonzero auctionID {AuctionKey:KABBWCKKTSN7GVUKXWZLQRJ5UK4KK73K73XQK4B3UBHTRHO2Z5QOQZJYFY AuctionID:1} + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 12 +=== CONT TestDecayingPrice + auctionFixture.go:388: warning, at.LastAuctionID() resulted in error: no auction has been seen yet +algod(28497) : Exiting on terminated +algod(28489) : Exiting on terminated +=== CONT 
TestStartAndEndAuctionTenUsersOneBidEach +--- PASS: TestDecayingPrice (108.82s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(29377) : No REST API Token found. Generated token: 2d1fca81c5fa6da76be3047986e88f3bfbacd70105e690590955717a694690d1 +algod(29377) : No Admin REST API Token found. Generated token: d07983f9682031626c657ad6b927863f27286b217d83dff1c0997b3cea122a91 +algod(29377) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Primary/node.log +algod(29377) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(29377) : Initializing the Algorand node... +algod(29377) : Success! +algod(29377) : ⇨ http server started on 127.0.0.1:42947 +algod(29377) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42947. Press Ctrl-C to exit +algod(29384) : No REST API Token found. Generated token: 3765fce59e5025f00f67051bf921ae19516c1b33c94c0e66fb90f1c26a4dfbe8 +algod(29384) : No Admin REST API Token found. 
Generated token: b41220cb5d6624098050072c54f687b15849f6af343d4cb88b90dac4bc78ad68 +algod(29384) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndEndAuctionTenUsersOneBidEach/Node/node.log +algod(29384) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(29384) : Initializing the Algorand node... +algod(29384) : Success! +algod(29384) : ⇨ http server started on 127.0.0.1:37789 +algod(29384) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37789. Press Ctrl-C to exit +algod(28653) : Exiting on terminated +algod(28660) : Exiting on terminated +=== CONT TestStartAndCancelAuctionOneUserTenBids +--- PASS: TestStartAndCancelAuctionEarlyOneUserTenBids (91.91s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(29452) : No REST API Token found. Generated token: d1ef602ac47b3e6c5ada5a2154f1adaa27dd54df8fc0a365ae933951819d4043 +algod(29452) : No Admin REST API Token found. 
Generated token: c1d845641585dffee2bec94dd1825aaf5e456797859f870e3e907b05c6503b56 +algod(29452) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Primary/node.log +algod(29452) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(29452) : Initializing the Algorand node... +algod(29452) : Success! +algod(29452) : ⇨ http server started on 127.0.0.1:8080 +algod(29452) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(29483) : No REST API Token found. Generated token: 1da6a05ae355f2c698aed17e2caf8f0a56da9bf9274f517e5674ad50a56e0b2c +algod(29483) : No Admin REST API Token found. Generated token: 12dac1b982712da265a272a5d6e5c1d0356af055ecbbde7a6aec0d671e0353f6 +algod(29483) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestStartAndCancelAuctionOneUserTenBids/Node/node.log +algod(29483) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(29483) : Initializing the Algorand node... +algod(29483) : Success! +algod(29483) : ⇨ http server started on 127.0.0.1:33379 +algod(29483) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33379. 
Press Ctrl-C to exit +=== CONT TestStartAndEndAuctionTenUsersOneBidEach + auctionFixture.go:994: found a nonzero auctionID {AuctionKey:KLZIKVEOD55R3YN2XECKECQHFF5VLQASW5WHHJQYQGEWPWICFMHWUQXMKM AuctionID:1} + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 14 +=== CONT TestStartAndCancelAuctionOneUserTenBids + auctionFixture.go:994: found a nonzero auctionID {AuctionKey:PWHI3UIPO5VNU7YBCYTOV7JA37RDAQH7SXMSMEG6CJ43FG5BDOKCXQ7RSE AuctionID:1} + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a deposit with bid when lastRound was 16 + auctionFixture.go:924: Made and posted a 
deposit with bid when lastRound was 16 +algod(29452) : Exiting on terminated +algod(29483) : Exiting on terminated +--- PASS: TestStartAndCancelAuctionOneUserTenBids (135.28s) +algod(29377) : Exiting on terminated +algod(29384) : Exiting on terminated +--- PASS: TestStartAndEndAuctionTenUsersOneBidEach (155.63s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/auction 406.589s +=== RUN TestBasicCatchup + basicCatchup_test.go:35: +--- SKIP: TestBasicCatchup (0.00s) +=== RUN TestCatchupOverGossip +=== PAUSE TestCatchupOverGossip +=== RUN TestStoppedCatchupOnUnsupported + basicCatchup_test.go:198: +--- SKIP: TestStoppedCatchupOnUnsupported (0.00s) +=== RUN TestBasicCatchpointCatchup + catchpointCatchup_test.go:83: +--- SKIP: TestBasicCatchpointCatchup (0.00s) +=== CONT TestCatchupOverGossip + basicCatchup_test.go:100: +--- SKIP: TestCatchupOverGossip (0.00s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/catchup 0.064s +=== RUN TestCompactCerts +=== PAUSE TestCompactCerts +=== CONT TestCompactCerts +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet0.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet3.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet1.0.3000000.partkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet4.rootkey +Created new partkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet3.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet0.0.3000000.partkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet5.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet6.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet2.0.3000000.partkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet7.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet4.0.3000000.partkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet8.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet5.0.3000000.partkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet9.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet6.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet7.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet8.0.3000000.partkey +Created new partkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Wallet9.0.3000000.partkey +test-fast-compactcert 100000 +algod(30825) : No REST API Token found. Generated token: ba77c334538ad9c8efcabcc4219c3eb8aa72eaf8a01dd3017d3da24e5b2c57c3 +algod(30825) : No Admin REST API Token found. Generated token: 76c72b69eae44bc7b2e837355b010323a7c3a558a3499e1f4cb61b23b6098431 +algod(30825) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Relay0/node.log +algod(30825) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30825) : Initializing the Algorand node... +algod(30825) : Success! +algod(30825) : ⇨ http server started on 127.0.0.1:8080 +algod(30825) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(30833) : No REST API Token found. Generated token: 9d19b37ef7dc396ecef42ffc95a745f2028c41edcdae2179dd1c861281e7a4ee +algod(30833) : No Admin REST API Token found. Generated token: 79113ae59d82f616e00348a996ee32032eaa6b3fcfb3cdbfcc506b648ea9e10c +algod(30833) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Relay1/node.log +algod(30833) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30833) : Initializing the Algorand node... +algod(30833) : Success! +algod(30833) : ⇨ http server started on 127.0.0.1:42427 +algod(30833) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42427. Press Ctrl-C to exit +algod(30840) : No REST API Token found. Generated token: 1343191004a68d29865f648c9e96a7094f43b7634befd54d8e18c0072958bf77 +algod(30840) : No Admin REST API Token found. 
Generated token: ce874573e75eb4a1ee711babac98821b5c9fd8bfd1c8e60ea64fa112b071d3bb +algod(30840) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node6/node.log +algod(30840) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30840) : Initializing the Algorand node... +algod(30840) : Success! +algod(30840) : ⇨ http server started on 127.0.0.1:43771 +algod(30840) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43771. Press Ctrl-C to exit +algod(30849) : No REST API Token found. Generated token: 0626b8160608e3e40642e7757889b4301c64c16b4eb789373cb356f414380579 +algod(30849) : No Admin REST API Token found. Generated token: 4f8c251dc96c4dd7d1fc9c09dca67034f1111bab420f852665b8d934074a892c +algod(30849) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node7/node.log +algod(30849) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30849) : Initializing the Algorand node... +algod(30849) : Success! +algod(30849) : ⇨ http server started on 127.0.0.1:35167 +algod(30849) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35167. Press Ctrl-C to exit +algod(30856) : No REST API Token found. Generated token: f065a2be901f2a9bde7918a92826ce00d58a75a88cdf57bb23ec719f72ab587d +algod(30856) : No Admin REST API Token found. Generated token: 75f4f0712e34eb97373dd3106b8e1073133ece2b8c64d91032c8e487bfad17c8 +algod(30856) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node9/node.log +algod(30856) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30856) : Initializing the Algorand node... +algod(30856) : Success! +algod(30856) : ⇨ http server started on 127.0.0.1:35443 +algod(30856) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35443. 
Press Ctrl-C to exit +algod(30867) : No REST API Token found. Generated token: b9d387405daa5eadd532cff740de842d251578c95e1bcb3f92410cafb10c463c +algod(30867) : No Admin REST API Token found. Generated token: 70eb1d6892b0ca5bae1f8415be9f8f83eb2b93998c363a73dc44c8a45f9db0b7 +algod(30867) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node0/node.log +algod(30867) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30867) : Initializing the Algorand node... +algod(30867) : Success! +algod(30867) : ⇨ http server started on 127.0.0.1:35497 +algod(30867) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35497. Press Ctrl-C to exit +algod(30879) : No REST API Token found. Generated token: 7adfa0dca3dc5a1aec7b78086f426ce943aa0d333a4413b11affd47c17ad1638 +algod(30879) : No Admin REST API Token found. Generated token: ae06ec29916724301b9ef86be6c33506450b3d6281a5475b51c17dc37122178a +algod(30879) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node1/node.log +algod(30879) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30879) : Initializing the Algorand node... +algod(30879) : Success! +algod(30879) : ⇨ http server started on 127.0.0.1:46197 +algod(30879) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46197. Press Ctrl-C to exit +algod(30903) : No REST API Token found. Generated token: ee4042840c681ea8a264e69eb86318fdbaa4588a91b9245f7d08e12e08259b6d +algod(30903) : No Admin REST API Token found. 
Generated token: c8753ee4ec25611c36b409dec13b7bbabe0ef758fe9d43301fc18c69d3bac00d +algod(30903) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node2/node.log +algod(30903) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30903) : Initializing the Algorand node... +algod(30903) : Success! +algod(30903) : ⇨ http server started on 127.0.0.1:45239 +algod(30903) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45239. Press Ctrl-C to exit +algod(30913) : No REST API Token found. Generated token: 8d5dfa5f3d9bdfc4e45c2cb45ab402c93d58316342c86ca9125ede633ed73783 +algod(30913) : No Admin REST API Token found. Generated token: 502f369399d6e2e3ccfb5eb890341c55956f8454a50309b7c8fcb13e2ab1264b +algod(30913) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node3/node.log +algod(30913) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30913) : Initializing the Algorand node... +algod(30913) : Success! +algod(30913) : ⇨ http server started on 127.0.0.1:44425 +algod(30913) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44425. Press Ctrl-C to exit +algod(30921) : No REST API Token found. Generated token: c4f6c0ed538352858dfcf336663ec2a62f0ee72ffbf30014f1692436039f8932 +algod(30921) : No Admin REST API Token found. Generated token: 61a67e0777aec9e91d4df76169a4545866810bc80600e904ce5ae70895471dcd +algod(30921) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node4/node.log +algod(30921) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30921) : Initializing the Algorand node... +algod(30921) : Success! +algod(30921) : ⇨ http server started on 127.0.0.1:34597 +algod(30921) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34597. 
Press Ctrl-C to exit +algod(30930) : No REST API Token found. Generated token: 957fd73808a15128bf7f43ccb8c67502c732ebe0cd9a4d726f8eaa50eb2066e3 +algod(30930) : No Admin REST API Token found. Generated token: 14a1f3e64476669e90f64162554965dc5cc6b44c5a5b16d6e1fa43af81d96216 +algod(30930) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node5/node.log +algod(30930) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30930) : Initializing the Algorand node... +algod(30930) : Success! +algod(30930) : ⇨ http server started on 127.0.0.1:38789 +algod(30930) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38789. Press Ctrl-C to exit +algod(30941) : No REST API Token found. Generated token: 0377f157f49c15136a97bbd439e499b36c6cfca945dfdc706d41c0d06ed74155 +algod(30941) : No Admin REST API Token found. Generated token: 022ac82c54333541141ea25526e319c03220939483305aa08e61bb58c00c1d98 +algod(30941) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestCompactCerts/Node8/node.log +algod(30941) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(30941) : Initializing the Algorand node... +algod(30941) : Success! +algod(30941) : ⇨ http server started on 127.0.0.1:35783 +algod(30941) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35783. 
Press Ctrl-C to exit + compactcert_test.go:89: Round 1, block {MXLPJWL5W5AEGA5NSZTEYOGLRPKZNOTTNBZ4XGAVVLTO2OQVLYEA ZVHVCLH63QH2JF3WMYD7UJLIIKH42TTMY55WF7IN36LBPDHM5MSA ZAQHQIY63IW2SEYXW3AA3Q6FEX5ZFP6TYATKGLGONRDFLU5NH6TA VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 1 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 249999999 {[]} 1617230231 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 2, block {WNFU3PX6AVYJNT2LF3QQ62FYIGX6D42S5N2FPROWELTKROFTK5FQ MXLPJWL5W5AEGA5NSZTEYOGLRPKZNOTTNBZ4XGAVVLTO2OQVLYEA ISRR6S6HALLUCL4GLBJL2N7A57JRJJ6DBFTEA56W2XOQIYZ4BLYQ VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 2 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 499999998 {[]} 1617230256 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 3, block {WJU7K23P6IIPL4HIKHP35CVOVUUX2C3BNJAGT5ORMSUYHX5I2HTA WNFU3PX6AVYJNT2LF3QQ62FYIGX6D42S5N2FPROWELTKROFTK5FQ OK4UTYM4C2ZWI7RS27G3YXXSERZWG3ZRXQ6VUKVQEWIIVV625TMQ VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 3 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 749999997 {[]} 1617230281 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 4, block {RYG5TI7X5UEUDSFNFEXY5PMWK7RUZJLOYLF3QG5WAOCJRQEZVIAQ WJU7K23P6IIPL4HIKHP35CVOVUUX2C3BNJAGT5ORMSUYHX5I2HTA IWD3FBOHBTKZVDHWVB75U25C3NURDDVNYYYT3BLQPZXTOIW7EXKQ VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 4 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 999999996 {[]} 1617230290 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 5, block {4A32O4MR3CF2NLTRIJTS2HBQPM3WL4BCM3NSI7RFUPWV6DOLN3OA RYG5TI7X5UEUDSFNFEXY5PMWK7RUZJLOYLF3QG5WAOCJRQEZVIAQ NTH26GJB3YLVHU3S2UJWMDCMELBFMTAYKJBCUIZMEUQVTIOUKM5A VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 5 0 AYM5ZM6BDR22ARY5XCOOIOSH45OE2RDHU4BUBO56C2MAOACZHMGQ 0 249999999 1249999995 {[{pay 
JES7FZRVNLJJ4MEQXFDWWAAD77STXUO7TQLKL6W3NJRSFOS5NT2A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 0xc00049ae80 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay CHOBLMAPA7R6NLPHSIIYQFNBYGW2FT44VVOPBRB2WKTBWVKSP56A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 0xc00049aec0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay 5M2WGIRDJLCDEKPZO3QRBZCRUOKBZDGOXSIYA65LCVXGGL4GH75Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 0xc00049af00 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay SIME6DZXLPLAGEPHNEMUSLLTTWSDKJR5YXHN4HYJCPALT4H4UD2Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 4 1004 [] [] 5 0xc00049af40 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230296 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 6, block {AAKLNUV2PV5RUBHTSYGZ2ZDEKSNPHV52YEM3E5WGVF7L7J4SJFIQ 4A32O4MR3CF2NLTRIJTS2HBQPM3WL4BCM3NSI7RFUPWV6DOLN3OA QK6LYQ22JQYETNNJDO6TOL3JBID4XBQWHH3MXK2EPTO3OFJZIZ2Q VXFGMS6M2Q23CACTDBXDT6KIO23EYB2DYJX5EHE34OIB2WDP567RQA24NA 6 0 SEI6KQQT3J2I7GZMSH356OJ2MMWP32FPTUPNWJSSHXNZELAV4MHA 0 249999999 1499999994 {[{pay V7QLLCZF7IC2R2VBL6OLDR3DTZT47OECW6NULGAHTCERU3DYRFRQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 5 1005 [] [] 6 0xc00049b980 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230300 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 7, block {UFQ4Z7N3WRW7RFKSQA2H3NMN3OGZT523JBSJDQIGNXKWMON6OLDA 
AAKLNUV2PV5RUBHTSYGZ2ZDEKSNPHV52YEM3E5WGVF7L7J4SJFIQ FCNZQYRZY3WNWZB3P4PIZGHPXZLDQBXGHMS3AEBX63FZNAUK47PA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 7 0 HEW5FKO6HXF6467OXUVHKAHAEVOSLSBR4O422MCE5F4SU2KQCZZA 0 249999999 1749999993 {[{pay FCXI56BQ7YMAQHPKW63OO5CO5XRC6ZHYSNP3V5LIGWFZSJ45J2UQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 6 1006 [] [] 7 0xc00049a280 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230306 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 8, block {7DUHCPHES25PPKLZPZRNQOLZN2NGLIP7XLWU4G3JPHB6JCJDCYBA UFQ4Z7N3WRW7RFKSQA2H3NMN3OGZT523JBSJDQIGNXKWMON6OLDA LSF7256QX47D36LIZE7KU2YQOJWZIPBNEB4JVCTRVPHPGYHY2F4A 3FIFNWTDOA5N623OIIXWXC4BIVJYKBVHFKHXW5TATVUZX3GZORF7RQOFU4 8 0 4XO3PFWPBCR236PLO3I4XA77L3MCT73VLMCZI37KNDBCS7XKNA7Q 0 249999999 1999999992 {[{pay OBFAC2TKRXOGBS7DSJZ2QRLLIKQSIWXR73KG2JXC7SIY2BOGZLWA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 7 1007 [] [] 8 0xc00007d9c0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230311 {test-fast-compactcert 0 0 0} { false} [99 84 170 186 131 3 195 113 174 221 228 139 49 25 237 38 96 176 139 154 221 6 176 161 11 132 51 203 87 127 67 206] 9999999999995000 16} + compactcert_test.go:89: Round 9, block {5XOBIAP23VRPLDOJKUGSWVNURZACD6YQUTL5H2MJG5JOHZNH3NTQ 7DUHCPHES25PPKLZPZRNQOLZN2NGLIP7XLWU4G3JPHB6JCJDCYBA VLMHCL6I2AD4ORUXENMHQ7MIPG7GDPV75PPJKZZ6TRFJSBWTLUHQ ICN7RP7DZRYOCDOJGVOT3TX6OPUH5SIOQGAOIJLW2C7E3QAUSTJ5IMNXLQ 9 0 27AFM5JRYOF5HJZLCWFVLG3HV5QBESW2BWFXXXFCWQOTXS3DEMFQ 0 249999999 2249999991 {[{pay PMFXZQDSNUKP35FIEUNDPSBKRATJX3A2GBIHWK7RCGNFLMZR7LOA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 8 1008 [] [] 9 0xc0002c3680 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 
192 66 101 239 219] []}]} 1617230317 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 10, block {5FJVHTEGC4EBUUCZ7VHFEITB3TD4CKEKYCW4FZSNPQOPD3E5TR2Q 5XOBIAP23VRPLDOJKUGSWVNURZACD6YQUTL5H2MJG5JOHZNH3NTQ G3NY65OEN5ON5AKGK6YYC7CF5BD57F4P2ZRTON4B5YTMFQNRDGSA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 10 0 6MUHLAXEVY273M6NMHCPXICHN4O62OXGH5Z4BROVIDCMO7YPPV4Q 0 249999999 2499999990 {[{pay 5LCRQAULQEZW5ZMVR7FAGXTEJBTT5WOWAZMCMAH5RJZTSXKWW2OA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 9 1009 [] [] 10 0xc00064cd40 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230322 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 11, block {FDHAUUXMKTBT3CEM6CFTE7OQYT6U4XNJCEF343A5FJIXGEE76N7Q 5FJVHTEGC4EBUUCZ7VHFEITB3TD4CKEKYCW4FZSNPQOPD3E5TR2Q J7NY6DTVA27BB2XPXAQ34M7IDAVVNUYVSEWZXS6AM2U2C5XQEKHQ VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 11 0 BARLVCIDPI4RJKWXDJVZTE3UR35UILN4W6AA4QMUDPHEH747J6OA 0 249999999 2749999989 {[{pay TKLLOE6SQHBZMAKODFEMOLLRUD6QCOTISUVJI7M7VNEJYCVW4WYA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 10 1010 [] [] 11 0xc0006d6740 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230328 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 12, block {CYSTAL4LEEJNFIM7QPDIYNYFAPZHEVM4HAUDPQMEMASJMBCGVCUQ FDHAUUXMKTBT3CEM6CFTE7OQYT6U4XNJCEF343A5FJIXGEE76N7Q Z2G6TQ2NIJULJ7ABGI4V4QBPHPHQEKDXP65MXBOY54UQPN4QFNNA XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 12 0 LZSY3CTEWWXE34MNLLW2VZM5IRR4UESP4VKICRDLNIHFVHRRB2KQ 0 249999999 2999999988 {[{pay LM4PZOZGJYSSFOL3RQA75YVVJWE75PJ4LB73ZX222PN7SBLH6LVQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 11 1011 [] [] 12 0xc0006d72c0 0 test-v1 [172 81 17 110 171 248 176 101 
120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230333 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 13, block {GLOTODLLX53NLOCFNBIOCCSK36SHC3Z7SEHR4RBYUUAGK6EVLOZQ CYSTAL4LEEJNFIM7QPDIYNYFAPZHEVM4HAUDPQMEMASJMBCGVCUQ WYEC5YEP5MPSIOA5IXPAVC7IYZHBTMQTJ3X25O3CBFIZW4VEDRTA ICN7RP7DZRYOCDOJGVOT3TX6OPUH5SIOQGAOIJLW2C7E3QAUSTJ5IMNXLQ 13 0 GOBZNZXR4VFPSLV4LES2GP3IUBTM442D2YNXO6UKLLJUZTPSYPBQ 0 249999999 3249999987 {[{pay A72RL6XPRXHHTSQ6KY32Q3DJRPPQP6XXI7ATDQXVVVP6SY4X3ISQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 12 1012 [] [] 13 0xc0006730c0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230338 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 14, block {XFT47WFAQM7AA4FLMLOI3DBUIFXXVNPGELKPUKYAZ7CTBJD4URDQ GLOTODLLX53NLOCFNBIOCCSK36SHC3Z7SEHR4RBYUUAGK6EVLOZQ FANFJ364DNE2WNI3XTK2AZCDRXW3VMFXJKEE6S6HUAFD2X2KGJ5Q 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 14 0 5K6DJPBPWDS2EE2AEHWW3PLRS2JYLCXY2U5U7X4IRQBY4F4U7UHA 0 249999999 3499999986 {[{pay S7G3NIUF5YHW5ULBDCCJXN455JX3Y2NIZCCZU2B23HSUWKHGFBHA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 12 1012 [] [] 14 0xc0001e2d00 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230344 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 15, block {GYJIWG3ADCW2TFSCGP5WEWPWZASUBKLAOPXI6ORQOSJB5T36DWQQ XFT47WFAQM7AA4FLMLOI3DBUIFXXVNPGELKPUKYAZ7CTBJD4URDQ P2AKQX6DJRF54UATEX472XEVPVVOEE6IKIEUTJK37QSYGUQO4XCA QE2T4IZYSGIR4O4W4NGACYFGZCODIPQFYIIK4YYFEI6C4ANM2CWPC4XL44 15 0 ZK4BYLTALCT2ZSWE4KU6464DZBHCNQ3DC5EV26VG72HYKLW2X62A 0 249999999 3749999985 {[{pay OBDLLDIULZMY62KHMMB2FNQ3UMH652MYGTLVXBVH2B6U6BVOYT6A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 14 
1014 [] [] 15 0xc0002c3880 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230351 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 16, block {MCXJ4P7V2E7E565VMW6YAWOS5AGQHON37FPITKJWSAMYMAJCN7EA GYJIWG3ADCW2TFSCGP5WEWPWZASUBKLAOPXI6ORQOSJB5T36DWQQ R6VKNMF6UQLJGQN336I3YZE5FD6R3G4ZWD7VOXHDWWTHWTONSCDA XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 16 0 PD7ZDZECJ2EQSOF6IQ6ROMA32XIFIF2QBJXYNJVURZLEYZ7RZUTA 0 249999999 3999999984 {[{pay 2X4CW6EUOQHOPSUWM4EUSTQFO3WSY2ZETG6XP6Y2JCLSHFGSPJ6Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 15 1015 [] [] 16 0xc00007dbc0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230355 {test-fast-compactcert 0 0 0} { false} [187 97 223 219 164 82 107 123 150 229 176 156 112 139 64 234 138 1 57 145 216 105 218 99 156 83 120 159 42 238 87 122] 9999999999987000 16} + compactcert_test.go:89: Round 17, block {5PUZBMGQZPNJ5RVQ3MACDWSCKOE7XNDLMC7MIUSXZGWBYYCDDLJA MCXJ4P7V2E7E565VMW6YAWOS5AGQHON37FPITKJWSAMYMAJCN7EA 6GIHPG3FYNPAMFTOPJEC6O6LBDWNFSEHTGQAR2JLAKNZ2YNJKVIQ VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 17 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 4249999983 {[]} 1617230361 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 18, block {7RXA4EDJHKNU22MPAZV5OGUAEFKKKAUFZSY2BB2WG5PDCAOHFA5Q 5PUZBMGQZPNJ5RVQ3MACDWSCKOE7XNDLMC7MIUSXZGWBYYCDDLJA G7SLHCTDR2HLPELSISEKFYVPD4ONUWNM4ZRQ35HKCYHBTAWVJ25A FBCT3CKJXTBBBOGCNPCONOOXJUFWSMK5SLSE3DJ7LQC2T3BTYBEGZDNE34 18 0 5IRNKVYTFSSSHQ62HPABRMNZTRCYSBFKY7LOJZ626BXRMZPD3T3Q 0 249999999 4499999982 {[{pay OW4FFKP6L5ZUP444RJUCBU3S26H46TI6DIF7Z4QYABDOZL4ZQ6VA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 16 1016 [] [] 18 0xc00043a000 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 
6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay EZFQQF2XHNLWIQOVX72EGK2CQXJ5TYGRQKS2V6546SIFNOPWFJWQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 17 1017 [] [] 18 0xc00043a040 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230366 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 19, block {A6AKZ74CYSKI6M6UP64XF6I6I2HDGPTCBG5ZOQYRRNSHJYRUQJDQ 7RXA4EDJHKNU22MPAZV5OGUAEFKKKAUFZSY2BB2WG5PDCAOHFA5Q MWKB6ULUA2NIJ3FJAAOQOB6DP7LWYFS5SZMV3ZC3KQ2DPL52YOAQ XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 19 0 AT3DUAATCMKVZMACHXKH5CZ4CGPMYRQFWVPXRJFGJ6LGGCO7P6OA 0 249999999 4749999981 {[{pay QBEC3DC2SXUPG2S4N3Z2G3S3BO6EYQGOJ7F2MI3OOMCU6I24XEMA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 18 1018 [] [] 19 0xc00043abc0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230372 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 20, block {GW3D4YG62WNSL2OP7XFZDYRUZAGHJMZALO2TE4Q7FSLUIDKRS6IA A6AKZ74CYSKI6M6UP64XF6I6I2HDGPTCBG5ZOQYRRNSHJYRUQJDQ PVQFIKHXTR46F4JPCQYO4VIKIKYAOIUGZHGWYGP7Q2I54OIDHBBA XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 20 0 N24A7YZLIU36DEMVNDTSXSZMJPHZCM664OY2HRQU4UIVUU4KURHA 0 249999999 4999999980 {[{pay XLGJBEBYMYOFBG3VVEJORLNWLKLTEDBW5WNGCFLXKCTGE66LZL6Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 19 1019 [] [] 20 0xc0004b0ac0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay W7W6CBRUC55LX2M6U2VCKQ3VQSFNXWRZTQ4LC2TINLXO5DUOME6A 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 19 1019 [] [] 20 0xc0004b0b00 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 
101 239 219] []}]} 1617230378 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 21, block {D4S5MR343YI6Z25GCSHDA64ZX4WGEFVQNGMM3QC3VQERJ3H4F5NA GW3D4YG62WNSL2OP7XFZDYRUZAGHJMZALO2TE4Q7FSLUIDKRS6IA 4B6QNIMPSIMMHSZ3MKWDQUENGJV4VHHGO3UZ4ZCNX6X7V4S3H5WA QE2T4IZYSGIR4O4W4NGACYFGZCODIPQFYIIK4YYFEI6C4ANM2CWPC4XL44 21 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 5249999979 {[]} 1617230383 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 22, block {JOP24AFMKGFB3UQHTFXVGDH7HFOQUEYFCPI23EDPU4LOZHX52YVQ D4S5MR343YI6Z25GCSHDA64ZX4WGEFVQNGMM3QC3VQERJ3H4F5NA JW2NUTCFO4VDL3OZCFLUT64WPJBEF22V42ECAQTBC2U75QAYHMSQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 22 0 RMRCTE3I7VAWFN22V6QAXOYRTKGBXDPGF2KRLUZKE6KXNURSNIYA 0 249999999 5499999978 {[{pay X7VCWHLJ3DHRW7VZNRD2HOYNVO6TJ3GL6CSG7BJC36NC2OKZFUVA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 20 1020 [] [] 22 0xc00049a840 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230388 {test-fast-compactcert 0 0 0} { false} [] 0 16} + compactcert_test.go:89: Round 23, block {T3LFHX7XW2NSWGTWSL45YGRSSU52WRU5GM2CZYDNAS4J6M5SDCZQ JOP24AFMKGFB3UQHTFXVGDH7HFOQUEYFCPI23EDPU4LOZHX52YVQ ACKOSC4HML4CXRBWMQIHMINTKCEJ6IE2TNG7J2GL3W5WHBWSJ2JA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 23 0 S2ILORSTAXSFPET2ARE5UU345EG7FMTFAOXDHQKZPN6MQYEWSZ7A 0 249999999 5749999977 {[{cert 3NRLMV7E5NRR5FJ4NSFW4HCKQHEYO6UBJZ4RDBKNRMT3HZK6M3JQ 4BBME3TBESPNF4WZROAM3EBMMGSYLTOH7USWH2C37UEGNWHYFKXLLLMHOA 0 22 1022 [] [] 23 0xc0002c0220 0 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay UJVFOKR67GO2INJZTORYUQ6UCVX4JRY7YE7SDKBYCE2ZZNQG26ZA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 22 1022 [] [] 23 0xc0002c3f80 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 
6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230394 {test-fast-compactcert 0 0 0} { false} [] 0 24} + compactcert_test.go:89: Round 24, block {VHIGVJ4T5VMRHN754PVFJXWI4XMQEEUXT25Y4AOYIGQANVR7FA6Q T3LFHX7XW2NSWGTWSL45YGRSSU52WRU5GM2CZYDNAS4J6M5SDCZQ 4ENS6HLVX5WGE33OZAABVZZXEWTCNV4WKS7PFA6YCKGL4JLNV7YQ ICN7RP7DZRYOCDOJGVOT3TX6OPUH5SIOQGAOIJLW2C7E3QAUSTJ5IMNXLQ 24 0 6LIKNDUDBQNC7PAI5CLWA3V26PVIQ35SH5GS5IRET2DMF4FDJGSQ 0 249999999 5999999976 {[{pay JK7QOOI22BKJNVA6IFZEE4A7OWBG4CLXCWSKXSAWQAEXREM7ZICA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 23 1023 [] [] 24 0xc0002cc080 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230399 {test-fast-compactcert 0 0 0} { false} [183 79 22 14 195 141 139 6 182 177 143 252 117 84 5 199 36 245 68 163 251 94 231 235 196 194 158 244 158 168 121 193] 9999999999979000 24} + compactcert_test.go:89: Round 25, block {KDLXFDZKRIGHF4QTCOJHY3UUXKRQK3MX3BHFAVWGPCRLU7FZNQXA VHIGVJ4T5VMRHN754PVFJXWI4XMQEEUXT25Y4AOYIGQANVR7FA6Q IF3ORFRHF2PBTZIGYVLZC24NUHTWB772ZIQKVSK6KVZKIBIETUDQ 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 25 0 CQYRR37NYTUHDYPEAOV3PCCLDLOQORQKLJB754Y2D4WSEWW4ZBBQ 0 249999999 6249999975 {[{pay WWKT3FG7OY5WT6ROWRETL254T2Z5NSGNGW7FABTA5JRSE4BT3BQQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 24 1024 [] [] 25 0xc00021e680 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230404 {test-fast-compactcert 0 0 0} { false} [] 0 24} + compactcert_test.go:89: Round 26, block {64DGKJURDYAYHXLEX5MUJTKR7MKRI7RARQWWWDIIPHK6GBCGBJVA KDLXFDZKRIGHF4QTCOJHY3UUXKRQK3MX3BHFAVWGPCRLU7FZNQXA WG2GIJTOIH52L3XCBIBIHEBLEKYYUZZPERH72FPUZJIDTITY5IFQ XPVRVZH3VHFZUAVBKASRMDLXPHHAS4TE6TEL3JTZPYR6BX72EKDZA7MIDQ 26 0 M4TTKGJKW6DNC6JE5ZWHBRLMCTQ5KLMC7YW2TKZNBMMA7YIYJXJA 0 249999999 6499999974 {[{pay 
XMAUZD3TIVKIFWIBR7KMAEQQYMOPFQFX3IKZZXE5N2AJGMCAQ2TQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 25 1025 [] [] 26 0xc00021ec40 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230410 {test-fast-compactcert 0 0 0} { false} [] 0 24} + compactcert_test.go:89: Round 27, block {IL7KHIO7FTS4ZOVZ2M2WS6N3F2VP72W2CAFQJCHAT7OCKT2C4SSA 64DGKJURDYAYHXLEX5MUJTKR7MKRI7RARQWWWDIIPHK6GBCGBJVA WRQSWRX7DYZIIBJSOGSIPTLED46R3PJV2UHFXUPBNREXJ3AKZNKQ B4D4WAMUWDSKPLZT5M245BX3UGLOTMU5BPND2Q4FRXF4BHVPE4HHQPH6XA 27 0 JA7HOJX7IUJGOS5FGXMG6LWZP66N7AXOZ6VHGKWHZWYBT35UOMPQ 0 249999999 6749999973 {[{pay GZNFPJIP6J4XRZIEFWYUETGNZLRT3WEWBFTKDOMPSLBQSK357HYA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 25 1025 [] [] 27 0xc0002c2940 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230417 {test-fast-compactcert 0 0 0} { false} [] 0 24} + compactcert_test.go:89: Round 28, block {R75G3ZJVBSJEMPIDZN4OHUKHO22PWMTEJVUJONZMEBYMTZLLYZQQ IL7KHIO7FTS4ZOVZ2M2WS6N3F2VP72W2CAFQJCHAT7OCKT2C4SSA GJ35VV4UVT4YYMOXLZ4YAYO2JXB445K2X3TO5YX3F5SJ4DDXPFIA VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 28 0 AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA 0 249999999 6999999972 {[]} 1617230422 {test-fast-compactcert 0 0 0} { false} [] 0 24} + compactcert_test.go:89: Round 29, block {ATPD63JSWSOVD7FZ54FFM6LH4ZDYSPFESSI47Q3WNB5Z3N6GERZA R75G3ZJVBSJEMPIDZN4OHUKHO22PWMTEJVUJONZMEBYMTZLLYZQQ U3PT63BMP23DGHMDGE6CN2NZENJK3YWKJMER43ZH4IXXPOYGX3LA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 29 0 KTXF52QZTF4NYE5PZPENWWRI76DLD6MLVOA4VFCEVK3JES66IIHQ 0 249999999 7249999971 {[{pay O2OJJCNT6TNUQI3OQ6WZGQDXRY5ENLXDQCRZQ6Q5KC3P62IYZ2PQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 27 1027 [] [] 29 0xc00007dd80 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 
11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay FD2RZWZWHZEWMYWQYNL54NT572JLORRPQ2TR7QNUZZNPIT2QN3YQ 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 28 1028 [] [] 29 0xc00007ddc0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []} {pay KQIPGV3ENMJNXUTBDK2QFTDCJ52PEE3ISBC2FMHZSVUSZ6GKWCNA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 28 1028 [] [] 29 0xc00007de00 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230427 {test-fast-compactcert 0 0 0} { false} [] 0 24} + compactcert_test.go:89: Round 30, block {7U5H4FEBP4GMAKUEW2564G2LI23FH43UWVMTYJ6B75JTQGQ65WQA ATPD63JSWSOVD7FZ54FFM6LH4ZDYSPFESSI47Q3WNB5Z3N6GERZA ZCI2IFUVJBXMOUDAH2CEWRRIJCO5IOH5LWQUDQ64TPKFNBJBXS3A QE2T4IZYSGIR4O4W4NGACYFGZCODIPQFYIIK4YYFEI6C4ANM2CWPC4XL44 30 0 JM3TIGR5EROVCTQ2EPTRA5NROSMU35XWVHPIWSSCQJ75YXURSJXQ 0 249999999 7499999970 {[{cert GMLXKXZINOP7QELU6RH22HDCFOAD5UP4IOLQ4AI5COWVJRTQR3MQ 4BBME3TBESPNF4WZROAM3EBMMGSYLTOH7USWH2C37UEGNWHYFKXLLLMHOA 0 29 1029 [] [] 30 0xc000596520 0 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230432 {test-fast-compactcert 0 0 0} { false} [] 0 32} + compactcert_test.go:89: Round 31, block {4HG2C4XXT6VBZ7DLDZ3KOMAMVMAP5QO52ELKGTCD2B7QPPKOJW5Q 7U5H4FEBP4GMAKUEW2564G2LI23FH43UWVMTYJ6B75JTQGQ65WQA WLPPPBRW7ULPCSIUQ6AXNZCHHZC7WNYMHIXV3NDSDL6HNQZD55WA VWNC7Y3JJM7UL2SMZOM62M3AAK2NSO2G4RB45JIII7DU5VJE3PQKZ62K4E 31 0 FRPB4ZO7BBWJVIYZEDLU3BIWTROSYK37RXGIEDYDNB446VI2SY5A 0 249999999 7749999969 {[{pay 6T7LX42PE65DY47HWSVRS57RCARJUCK2G67H7DCMILICROUWGS5Q 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 29 1029 [] [] 31 0xc00021f7c0 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 
1617230438 {test-fast-compactcert 0 0 0} { false} [] 0 32} + compactcert_test.go:89: Round 32, block {63Y2URATG3XTICZJCT4MFXNJALV27YYMUAB35WISVEPWIQSB4Q3Q 4HG2C4XXT6VBZ7DLDZ3KOMAMVMAP5QO52ELKGTCD2B7QPPKOJW5Q 6KNQXYGN6VNDONCHH2F7DDNFPJEODUKKJE3UJA5C26S7UOH7H6JA 5IJMRPGGPKTTJJ42PYNELMYJMRBIEM2TGXEVC4W7IR7IYCLK6UT3EYMJYQ 32 0 PMYCX3IZFVE3F4NQDFPUB6N6RHOKD5DSFTL4P5FC3R6EMZKPPGEQ 0 249999999 7999999968 {[{pay 3ZMHN7MM3HYVTRCOSQIGWOLDDXLU7I5PL7OW74YSQUZCL2TEK6LA 3TFPAT63WD2WQUTVJC6VEOTZO6BQUJW2DYIBXWURZMTBRKTW63TH62MS6Q 1000 31 1031 [] [] 32 0xc00059e640 0 test-v1 [172 81 17 110 171 248 176 101 120 12 59 241 173 6 175 112 11 92 38 251 41 210 84 60 151 143 137 192 66 101 239 219] []}]} 1617230444 {test-fast-compactcert 0 0 0} { false} [233 145 169 123 87 83 112 145 172 208 106 68 213 247 28 237 130 73 209 98 28 184 77 97 145 124 130 151 111 242 51 135] 9999999999971000 32} +algod(30833) : Exiting on terminated +algod(30879) : Exiting on terminated +algod(30867) : Exiting on terminated +algod(30921) : Exiting on terminated +algod(30941) : Exiting on terminated +algod(30840) : Exiting on terminated +algod(30856) : Exiting on terminated +algod(30903) : Exiting on terminated +algod(30913) : Exiting on terminated +algod(30930) : Exiting on terminated +algod(30849) : Exiting on terminated +algod(30825) : Exiting on terminated +--- PASS: TestCompactCerts (224.23s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/compactcert 224.312s +=== RUN TestBasicMultisig +=== PAUSE TestBasicMultisig +=== RUN TestZeroThreshold +=== PAUSE TestZeroThreshold +=== RUN TestZeroSigners +=== PAUSE TestZeroSigners +=== RUN TestDuplicateKeys +=== PAUSE TestDuplicateKeys +=== CONT TestZeroSigners +=== CONT TestDuplicateKeys +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet2.rootkey +Created new rootkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(31530) : No REST API Token found. Generated token: f80b041bcfb19db234f704bf8d9c5fe1f57da15dcc119617ea726c0268eb59f4 +algod(31530) : No Admin REST API Token found. Generated token: 964a11e61aecea5ad297a94911fcc7070516ef558dd025681a18d86b71a4e155 +algod(31530) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Primary/node.log +algod(31530) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31530) : Initializing the Algorand node... +algod(31537) : No REST API Token found. Generated token: 0456667b9c472fa4c6b5a85c8829303d43b527ef39dbca4341df21dffc2c6c6d +algod(31537) : No Admin REST API Token found. 
Generated token: f31b840d17d9815c8c4d0ad163ae5404448b14088266894dc0f597a671379a4e +algod(31537) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Primary/node.log +algod(31530) : Success! +algod(31530) : ⇨ http server started on 127.0.0.1:8080 +algod(31530) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(31537) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31537) : Initializing the Algorand node... +algod(31537) : Success! +algod(31537) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40777. Press Ctrl-C to exit +algod(31537) : ⇨ http server started on 127.0.0.1:40777 +algod(31543) : No REST API Token found. Generated token: bb56ba29b190894bbd93711dbc9bece0851ae6428e03c33f146c52de4bdea38d +algod(31543) : No Admin REST API Token found. Generated token: 3c10300c731fb4295320d35aa5f8c46a08512f3efdf055e15c7f4a512f821f13 +algod(31543) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroSigners/Node/node.log +algod(31551) : No REST API Token found. Generated token: 6a64df15d7fca1498381e32068bbdb6cde110cbfdf94cadff8c44160409a9e9e +algod(31551) : No Admin REST API Token found. Generated token: 92bfaff60d1072ba253089e0b9b66f93715e392010772121aaafc11d56025f21 +algod(31551) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestDuplicateKeys/Node/node.log +algod(31543) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31543) : Initializing the Algorand node... +algod(31543) : Success! +algod(31543) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43247. 
Press Ctrl-C to exit +algod(31543) : ⇨ http server started on 127.0.0.1:43247 +algod(31551) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31551) : Initializing the Algorand node... +algod(31551) : Success! +algod(31551) : ⇨ http server started on 127.0.0.1:41349 +algod(31551) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41349. Press Ctrl-C to exit +algod(31530) : Exiting on terminated +algod(31543) : Exiting on terminated +--- PASS: TestZeroSigners (15.21s) +=== CONT TestBasicMultisig +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(31636) : No REST API Token found. Generated token: cb0ce1e70821180e42593e6b7b81b8010cdc6aef341138de812c18e64ded1aea +algod(31636) : No Admin REST API Token found. Generated token: 00cc450958ef513b1493cdfbedfebcaa08277a45e754dc32e445b2178d3efc66 +algod(31636) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Primary/node.log +algod(31636) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31636) : Initializing the Algorand node... +algod(31636) : Success! +algod(31636) : ⇨ http server started on 127.0.0.1:8080 +algod(31636) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. 
Press Ctrl-C to exit +algod(31643) : No REST API Token found. Generated token: f0b78103e8424c75bcc0e0158e6a6708fdf0d8145b1356408471f1ec1b72c7cb +algod(31643) : No Admin REST API Token found. Generated token: f2f6603e4969ffffd6875dd2f5097783ed67d33a11cdcdc194435f3b215323c9 +algod(31643) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestBasicMultisig/Node/node.log +algod(31643) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31643) : Initializing the Algorand node... +algod(31643) : Success! +algod(31643) : ⇨ http server started on 127.0.0.1:37557 +algod(31643) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37557. Press Ctrl-C to exit +algod(31551) : Exiting on terminated +algod(31537) : Exiting on terminated +--- PASS: TestDuplicateKeys (31.65s) +=== CONT TestZeroThreshold +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(31722) : No REST API Token found. Generated token: e7ffddccbe27858147d28d72035691139fa3fd690c9763ed55a96fe2ad5d05d9 +algod(31722) : No Admin REST API Token found. 
Generated token: d504a0276590b42d8e6f4a5730373fe7a5d7241695a4b475299cfee1e8beca94 +algod(31722) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Primary/node.log +algod(31722) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31722) : Initializing the Algorand node... +algod(31722) : Success! +algod(31722) : ⇨ http server started on 127.0.0.1:41599 +algod(31722) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41599. Press Ctrl-C to exit +algod(31729) : No REST API Token found. Generated token: 2605ba53edb3b8748b8dd23452fa63da60556264b865e03d802d372d09b44c60 +algod(31729) : No Admin REST API Token found. Generated token: 0f75ec66fd7d7025de193962c6f22219cb8c126961f026481424979e4fdbdcfc +algod(31729) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestZeroThreshold/Node/node.log +algod(31729) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(31729) : Initializing the Algorand node... +algod(31729) : Success! +algod(31729) : ⇨ http server started on 127.0.0.1:36485 +algod(31729) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36485. 
Press Ctrl-C to exit +algod(31722) : Exiting on terminated +algod(31729) : Exiting on terminated +--- PASS: TestZeroThreshold (14.61s) +algod(31643) : Exiting on terminated +algod(31636) : Exiting on terminated +--- PASS: TestBasicMultisig (52.16s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/multisig 67.439s +=== RUN TestParticipationKeyOnlyAccountParticipatesCorrectly +=== PAUSE TestParticipationKeyOnlyAccountParticipatesCorrectly +=== RUN TestNewAccountCanGoOnlineAndParticipate + onlineOfflineParticipation_test.go:105: +--- SKIP: TestNewAccountCanGoOnlineAndParticipate (0.00s) +=== RUN TestOnlineOfflineRewards +=== PAUSE TestOnlineOfflineRewards +=== RUN TestPartkeyOnlyRewards + participationRewards_test.go:137: +--- SKIP: TestPartkeyOnlyRewards (0.00s) +=== RUN TestRewardUnitThreshold +=== PAUSE TestRewardUnitThreshold +=== RUN TestRewardRateRecalculation +=== PAUSE TestRewardRateRecalculation +=== CONT TestParticipationKeyOnlyAccountParticipatesCorrectly +=== CONT TestRewardUnitThreshold +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Online.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Offline.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Online.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Partkey.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Rich.rootkey +Created new rootkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Partkey.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/SmallWallet.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Offline.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Partkey.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Online.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Rich.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Partkey.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Online.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/SmallWallet.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(32118) : No REST API Token found. Generated token: b2941383a9de10a3657bccdff84a90f8cf5adac07b5dbf8ee5e741ce2b70ba5f +algod(32118) : No Admin REST API Token found. 
Generated token: f490b0d3ec37b1e31ba0fb5fe923202d4bfe0b6326f7b85fb07fbaa9643c0d90 +algod(32118) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Primary/node.log +algod(32118) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32118) : Initializing the Algorand node... +algod(32124) : No REST API Token found. Generated token: 501c2596368437b03b1c0bde5abd3b3c0d2850e16b4c7bace8a75cfb47edd81f +algod(32124) : No Admin REST API Token found. Generated token: b84d3268f62fb30ef1e88ebe444b028c1c79c11b52f3ee81809eee3578137eed +algod(32124) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Online/node.log +algod(32118) : Success! +algod(32118) : ⇨ http server started on 127.0.0.1:8080 +algod(32118) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(32124) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32124) : Initializing the Algorand node... +algod(32124) : Success! +algod(32124) : ⇨ http server started on 127.0.0.1:45635 +algod(32124) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45635. Press Ctrl-C to exit +algod(32131) : No REST API Token found. Generated token: d1e45dd1b1bc6a3980083ed65353a9e8c0fb8dd201a0d800289a626a34ad2e5a +algod(32131) : No Admin REST API Token found. Generated token: b524097f7267e2b00d1ea2913fecd4b879e585d39f3890917e51869095371501 +algod(32131) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestParticipationKeyOnlyAccountParticipatesCorrectly/Node/node.log +algod(32131) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32131) : Initializing the Algorand node... +algod(32145) : No REST API Token found. 
Generated token: 2af7fcf94aaec0bb7e762d7591902a5ed5aabb29aadfcaee4b3139e4b987cbeb +algod(32145) : No Admin REST API Token found. Generated token: b793bed62f695ab0790c3edf501d3007f9789d83edae62987a3560cc298c2a0f +algod(32145) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Offline/node.log +algod(32131) : Success! +algod(32131) : ⇨ http server started on 127.0.0.1:41251 +algod(32131) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41251. Press Ctrl-C to exit +algod(32145) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32145) : Initializing the Algorand node... +algod(32145) : Success! +algod(32145) : ⇨ http server started on 127.0.0.1:40343 +algod(32145) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40343. Press Ctrl-C to exit +algod(32169) : No REST API Token found. Generated token: 76ddef86fb25c12f2674036230fb16a014d21b51f0f88a598a76cbb1e3560349 +algod(32169) : No Admin REST API Token found. Generated token: db3d468ddc8df18d04b273488839d65a5ef2352ad0146c2250e51fb274e1bf7e +algod(32169) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/Partkey/node.log +algod(32169) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32169) : Initializing the Algorand node... +algod(32169) : Success! +algod(32169) : ⇨ http server started on 127.0.0.1:39177 +algod(32169) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39177. Press Ctrl-C to exit +algod(32189) : No REST API Token found. Generated token: a1eb278a09fd5575eb1e4f2de669e5f5d92d0eed41af13aa530c5b168142574c +algod(32189) : No Admin REST API Token found. 
Generated token: 4e69519005b624d18a5dc40b2bf92fbf21bdcdeea8a3571ede6a27f39fd9661e +algod(32189) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardUnitThreshold/SmallNode/node.log +algod(32189) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32189) : Initializing the Algorand node... +algod(32189) : Success! +algod(32189) : ⇨ http server started on 127.0.0.1:44589 +algod(32189) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44589. Press Ctrl-C to exit +algod(32131) : Exiting on terminated +algod(32118) : Exiting on terminated +--- PASS: TestParticipationKeyOnlyAccountParticipatesCorrectly (32.97s) +=== CONT TestOnlineOfflineRewards +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Offline.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Online.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/SmallWallet.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Partkey.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Online.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/SmallWallet.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Partkey.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(32347) : No 
REST API Token found. Generated token: 0fbfe2b13ae71d16dac85ad687a11c23f6d7f0d0ac4389bb19f7da5d4e4bed8c +algod(32347) : No Admin REST API Token found. Generated token: 1173696532ee5a8eb784c45c9a4401efd2a85f01e2d49a1efdf260ff8010bf98 +algod(32347) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Online/node.log +algod(32347) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32347) : Initializing the Algorand node... +algod(32347) : Success! +algod(32347) : ⇨ http server started on 127.0.0.1:8080 +algod(32347) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(32354) : No REST API Token found. Generated token: bd2d0f855c93caba74886e5e88b35e4353a66d16eca8f3c263c4f04a4e4a0978 +algod(32354) : No Admin REST API Token found. Generated token: 35f20a7d2d9164ae1b9f76888c6ea8500863800bf0ff3273e8039de04a4258d4 +algod(32354) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/SmallNode/node.log +algod(32354) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32354) : Initializing the Algorand node... +algod(32354) : Success! +algod(32354) : ⇨ http server started on 127.0.0.1:36723 +algod(32354) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36723. Press Ctrl-C to exit +algod(32361) : No REST API Token found. Generated token: 5e7cad1a41f02a0ea22861bf5cb6fcbe89bb6c1766551563679a3be14de9a17a +algod(32361) : No Admin REST API Token found. 
Generated token: 0e420dafaddb9ea7334fa809b3ae1d22d0b6598b3e2f7f6ea547271570b8cd43 +algod(32361) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Offline/node.log +algod(32361) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32361) : Initializing the Algorand node... +algod(32361) : Success! +algod(32361) : ⇨ http server started on 127.0.0.1:40721 +algod(32361) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40721. Press Ctrl-C to exit +algod(32369) : No REST API Token found. Generated token: ebea49b10fc3c244dec7fca982469554dfd2a8684b963134d205cbf80dd14b0d +algod(32369) : No Admin REST API Token found. Generated token: 99503eb4275df182b1a94445e8c1211f7e1d734c4d757bc692e6282a19a91a39 +algod(32369) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOnlineOfflineRewards/Partkey/node.log +algod(32369) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(32369) : Initializing the Algorand node... +algod(32369) : Success! +algod(32369) : ⇨ http server started on 127.0.0.1:32959 +algod(32369) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:32959. 
Press Ctrl-C to exit +algod(32189) : Exiting on terminated +algod(32169) : Exiting on terminated +algod(32145) : Exiting on terminated +algod(32124) : Exiting on terminated +=== CONT TestRewardRateRecalculation +--- PASS: TestRewardUnitThreshold (79.11s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Wallet2.0.3000000.partkey +test-fast-reward-recalculation 100000 +algod(307) : No REST API Token found. Generated token: 2902709fd1b51789752d7b399a5a405aefb0971d06fe27559b124e946fe9c321 +algod(307) : No Admin REST API Token found. Generated token: 95d9b364f4c826c36ef4723893533ea4668db2ce10b15b4175eb4635484d8e2a +algod(307) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Primary/node.log +algod(307) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(307) : Initializing the Algorand node... +algod(307) : Success! +algod(307) : ⇨ http server started on 127.0.0.1:39351 +algod(307) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39351. Press Ctrl-C to exit +algod(314) : No REST API Token found. Generated token: f7c0d43b1905580c5657c197e1276ca9382e2fe2854a526d35bbeb728d2e7d2d +algod(314) : No Admin REST API Token found. 
Generated token: 82a13bab7b191392149b7bcdead54616b2678c9173951b70409e814801ae67d6 +algod(314) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRewardRateRecalculation/Node/node.log +algod(314) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(314) : Initializing the Algorand node... +algod(314) : Success! +algod(314) : ⇨ http server started on 127.0.0.1:44593 +algod(314) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44593. Press Ctrl-C to exit +algod(32354) : Exiting on terminated +algod(32369) : Exiting on terminated +algod(32361) : Exiting on terminated +algod(32347) : Exiting on terminated +--- PASS: TestOnlineOfflineRewards (97.68s) +algod(307) : Exiting on terminated +algod(314) : Exiting on terminated +--- PASS: TestRewardRateRecalculation (105.57s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/participation 184.782s +=== RUN TestBasicPartitionRecovery + partitionRecovery_test.go:34: +--- SKIP: TestBasicPartitionRecovery (0.00s) +=== RUN TestPartitionRecoverySwapStartup + partitionRecovery_test.go:78: +--- SKIP: TestPartitionRecoverySwapStartup (0.00s) +=== RUN TestPartitionRecoveryStaggerRestart + partitionRecovery_test.go:98: +--- SKIP: TestPartitionRecoveryStaggerRestart (0.00s) +=== RUN TestBasicPartitionRecoveryPartOffline + partitionRecovery_test.go:159: +--- SKIP: TestBasicPartitionRecoveryPartOffline (0.00s) +=== RUN TestPartitionHalfOffline + partitionRecovery_test.go:210: +--- SKIP: TestPartitionHalfOffline (0.00s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/partitionRecovery 0.107s +=== RUN TestTealCompile + compile_test.go:32: +--- SKIP: TestTealCompile (0.00s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/teal 0.084s +=== RUN TestAccountInformationV2 +=== PAUSE TestAccountInformationV2 +=== RUN TestAssetValidRounds +=== PAUSE TestAssetValidRounds +=== RUN TestAssetConfig + 
asset_test.go:188: +--- SKIP: TestAssetConfig (0.00s) +=== RUN TestAssetInformation +=== PAUSE TestAssetInformation +=== RUN TestAssetGroupCreateSendDestroy +=== PAUSE TestAssetGroupCreateSendDestroy +=== RUN TestAssetSend +=== PAUSE TestAssetSend +=== RUN TestAssetCreateWaitRestartDelete +=== PAUSE TestAssetCreateWaitRestartDelete +=== RUN TestAssetCreateWaitBalLookbackDelete + asset_test.go:964: +--- SKIP: TestAssetCreateWaitBalLookbackDelete (0.00s) +=== RUN TestAccountsCanClose +=== PAUSE TestAccountsCanClose +=== RUN TestGroupTransactions +=== PAUSE TestGroupTransactions +=== RUN TestGroupTransactionsDifferentSizes +=== PAUSE TestGroupTransactionsDifferentSizes +=== RUN TestGroupTransactionsSubmission +=== PAUSE TestGroupTransactionsSubmission +=== RUN TestLeaseTransactionsSameSender +=== PAUSE TestLeaseTransactionsSameSender +=== RUN TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 +=== PAUSE TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 +=== RUN TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 +=== PAUSE TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 +=== RUN TestLeaseTransactionsSameSenderDifferentLease +=== PAUSE TestLeaseTransactionsSameSenderDifferentLease +=== RUN TestLeaseTransactionsDifferentSender +=== PAUSE TestLeaseTransactionsDifferentSender +=== RUN TestOverlappingLeases +=== PAUSE TestOverlappingLeases +=== RUN TestAccountsCanChangeOnlineState +=== PAUSE TestAccountsCanChangeOnlineState +=== RUN TestAccountsCanChangeOnlineStateInTheFuture +=== PAUSE TestAccountsCanChangeOnlineStateInTheFuture +=== RUN TestTxnMerkleProof +=== PAUSE TestTxnMerkleProof +=== RUN TestAccountsCanSendMoney +=== PAUSE TestAccountsCanSendMoney +=== RUN TestTransactionPoolOrderingAndClearing + transactionPool_test.go:30: test is flaky as of 2019-06-18 +--- SKIP: TestTransactionPoolOrderingAndClearing (0.00s) +=== RUN TestTransactionPoolExponentialFees + transactionPool_test.go:115: new FIFO pool does not have exponential fee txn replacement +--- SKIP: 
TestTransactionPoolExponentialFees (0.00s) +=== CONT TestAssetInformation +=== CONT TestLeaseTransactionsSameSender +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Wallet2.0.3000000.partkey +future 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Wallet1.0.3000000.partkey +algod(1888) : No REST API Token found. Generated token: a9bccfd5e553a163d804322c73067f2323648df7d0e17490c19b27bb4b75e70a +algod(1888) : No Admin REST API Token found. Generated token: 18f3104cbd3a84072b7eb430ed92a75ecb289484d9bf3ec30d47be720890e12c +algod(1888) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Primary/node.log +algod(1893) : No REST API Token found. 
Generated token: c2fb5dd0976d4ebdf869d3ac535646f78e7c19b70db7f2585acaafe7dfc290b8 +algod(1893) : No Admin REST API Token found. Generated token: a39db8eb80a56f025be2cb4fbc4fd5b3622135a54f7df645dff310ebfbf1309f +algod(1893) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Primary/node.log +algod(1888) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(1888) : Initializing the Algorand node... +algod(1888) : Success! +algod(1888) : ⇨ http server started on 127.0.0.1:8080 +algod(1888) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(1893) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(1893) : Initializing the Algorand node... +algod(1893) : Success! +algod(1893) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43599. Press Ctrl-C to exit +algod(1893) : ⇨ http server started on 127.0.0.1:43599 +algod(1901) : No REST API Token found. Generated token: 1d72e0228cf45054ecc40c9675b5a96f6432386bb5cf6ac54179c8add84373b8 +algod(1901) : No Admin REST API Token found. Generated token: d13d2737c812745791ef45ad9a141eddb99f1a5c24b89f07bbcd413d17b4b6be +algod(1901) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetInformation/Node/node.log +algod(1901) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(1901) : Initializing the Algorand node... +algod(1911) : No REST API Token found. Generated token: 21101d739e3564521574add43044801616cd2478d80b42f3fac89e4f39b64524 +algod(1911) : No Admin REST API Token found. Generated token: 4ef81653435d972cbb472572e380214eb6987ad23198abed43ca846594ec1939 +algod(1911) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSender/Node/node.log +algod(1901) : Success! 
+algod(1901) : ⇨ http server started on 127.0.0.1:35003 +algod(1901) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35003. Press Ctrl-C to exit +algod(1911) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(1911) : Initializing the Algorand node... +algod(1911) : Success! +algod(1911) : ⇨ http server started on 127.0.0.1:40385 +algod(1911) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40385. Press Ctrl-C to exit +algod(1893) : Exiting on terminated +algod(1911) : Exiting on terminated +--- PASS: TestLeaseTransactionsSameSender (21.84s) +=== CONT TestTxnMerkleProof +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTxnMerkleProof/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTxnMerkleProof/Wallet1.0.3000000.partkey +future 100000 +algod(2137) : No REST API Token found. Generated token: ed17eff8b94d0feec94e0464f02c99ed73ff6dc4382b0c78e9f0c6e2fbc899fe +algod(2137) : No Admin REST API Token found. Generated token: 61da9362b421dd18418da4841e69845f3602af166d0acf88e15cc25b8e2276ea +algod(2137) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTxnMerkleProof/Primary/node.log +algod(2137) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2137) : Initializing the Algorand node... +algod(2137) : Success! +algod(2137) : ⇨ http server started on 127.0.0.1:41093 +algod(2137) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41093. 
Press Ctrl-C to exit +algod(1901) : Exiting on terminated +algod(1888) : Exiting on terminated +=== CONT TestAccountsCanSendMoney +--- PASS: TestAssetInformation (29.59s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(2326) : No REST API Token found. Generated token: a445763d6d79f7dca5a3933cb10d0c39dbcaa25daa4613f7848935ebcb8ba7c4 +algod(2326) : No Admin REST API Token found. Generated token: 66e3b53755b799d221f9a0b75a980a1f9700ee827bb9dca4dc1b08b992ac0b56 +algod(2326) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Primary/node.log +algod(2326) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2326) : Initializing the Algorand node... +algod(2326) : Success! +algod(2326) : ⇨ http server started on 127.0.0.1:8080 +algod(2326) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(2333) : No REST API Token found. Generated token: ad700aa33104b665090e0fa77c228cf72e9f4043b7a6c4183edf2a810be9bdca +algod(2333) : No Admin REST API Token found. 
Generated token: 3f27a303940f8667501466be62c7454957707cc09b3700147f6fac86b233b657 +algod(2333) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoney/Node/node.log +algod(2333) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2333) : Initializing the Algorand node... +algod(2333) : Success! +algod(2333) : ⇨ http server started on 127.0.0.1:42243 +algod(2333) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42243. Press Ctrl-C to exit +algod(2137) : Exiting on terminated +=== CONT TestAccountsCanChangeOnlineStateInTheFuture +--- PASS: TestTxnMerkleProof (9.01s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Offline2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Offline1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Online2.0.3000000.partkey +future 100000 +algod(2355) : No REST API Token found. Generated token: cc465040843d821ac35b8476ff03c4e5c69be1d901dd91e20049b0d82b9dd0e6 +algod(2355) : No Admin REST API Token found. 
Generated token: f8b203d41be7500857937ce8c09b67bd4dff4eb58bdfcfd7249cbeaae0b40b32 +algod(2355) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Primary/node.log +algod(2355) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2355) : Initializing the Algorand node... +algod(2355) : Success! +algod(2355) : ⇨ http server started on 127.0.0.1:45241 +algod(2355) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:45241. Press Ctrl-C to exit +algod(2362) : No REST API Token found. Generated token: 4a64c0065fd49e91a64b0885127a97d6c99209236ec515bd4fdce74cdd2c8a1b +algod(2362) : No Admin REST API Token found. Generated token: a5addca1ab95ca5d5df86db7ec8028253454890459032ac7b62c384b991b2e07 +algod(2362) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineStateInTheFuture/Node/node.log +algod(2362) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2362) : Initializing the Algorand node... +algod(2362) : Success! +algod(2362) : ⇨ http server started on 127.0.0.1:35755 +algod(2362) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35755. 
Press Ctrl-C to exit +algod(2355) : Exiting on terminated +algod(2362) : Exiting on terminated +=== CONT TestAccountsCanChangeOnlineState +--- PASS: TestAccountsCanChangeOnlineStateInTheFuture (23.47s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Offline1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Offline2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Online1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(2596) : No REST API Token found. Generated token: 208ce4ef05348f54e54ca06a7eecf6de3712c060063b47f50b01c3bc7a96e7cc +algod(2596) : No Admin REST API Token found. Generated token: e78f853ccba6d88093b53cb94df633f2d576054d792b640df74a76f9ad674ae8 +algod(2596) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Primary/node.log +algod(2596) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2596) : Initializing the Algorand node... +algod(2596) : Success! 
+algod(2596) : ⇨ http server started on 127.0.0.1:35941 +algod(2596) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35941. Press Ctrl-C to exit +algod(2613) : No REST API Token found. Generated token: 92fc6265977b9eefeeeb338f84d4181b04d1d84bfe8e5631d65dc32e58d821a5 +algod(2613) : No Admin REST API Token found. Generated token: c08f7c987ea66597c312a210dde8ec474e6256ce150247db466da5f742652a30 +algod(2613) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanChangeOnlineState/Node/node.log +algod(2613) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2613) : Initializing the Algorand node... +algod(2613) : Success! +algod(2613) : ⇨ http server started on 127.0.0.1:40417 +algod(2613) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40417. Press Ctrl-C to exit +algod(2333) : Exiting on terminated +algod(2326) : Exiting on terminated +=== CONT TestOverlappingLeases +--- PASS: TestAccountsCanSendMoney (31.42s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(2651) : No REST API Token found. Generated token: 5044e47d7d24130f465cfede46eb29baa6865c3d364fcad0bf18615d69a41bc0 +algod(2651) : No Admin REST API Token found. 
Generated token: a334f4d7bfda83c87be07b247e1165a6d01f5b33ff5ce0f5bcb5d3821e769ed2 +algod(2651) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Primary/node.log +algod(2651) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2651) : Initializing the Algorand node... +algod(2651) : Success! +algod(2651) : ⇨ http server started on 127.0.0.1:8080 +algod(2651) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(2659) : No REST API Token found. Generated token: 49a41d209110733ad464d423d6d6a39a5e9dd7ab7f471a6db898b64f7fba4b45 +algod(2659) : No Admin REST API Token found. Generated token: 1f510a91143ca0f51b37266bd7510faf3bdd2e09331687023f122a199d94008f +algod(2659) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestOverlappingLeases/Node/node.log +algod(2659) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2659) : Initializing the Algorand node... +algod(2659) : Success! +algod(2659) : ⇨ http server started on 127.0.0.1:38481 +algod(2659) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38481. 
Press Ctrl-C to exit +algod(2596) : Exiting on terminated +algod(2613) : Exiting on terminated +--- PASS: TestAccountsCanChangeOnlineState (13.95s) +=== CONT TestLeaseTransactionsDifferentSender +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(2747) : No REST API Token found. Generated token: d05dc47dfdcd5867b668bdbbd3329e6d9e8d353749026f2bd7d3b4f97a995577 +algod(2747) : No Admin REST API Token found. Generated token: 51671fafe290424540790330d3bcc7715ba2242eaeb77619a3f0defefe5f521c +algod(2747) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Primary/node.log +algod(2747) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2747) : Initializing the Algorand node... +algod(2747) : Success! +algod(2747) : ⇨ http server started on 127.0.0.1:38775 +algod(2747) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38775. Press Ctrl-C to exit +algod(2768) : No REST API Token found. Generated token: b578cf7ea7bc909dd68652b4269d96b351acc265a66b613ffd977f2d0d22e35a +algod(2768) : No Admin REST API Token found. 
Generated token: b56ef1e06674d4135077259ce907c29792a0f40281935310ce06f77fc3bc6609 +algod(2768) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsDifferentSender/Node/node.log +algod(2768) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2768) : Initializing the Algorand node... +algod(2768) : Success! +algod(2768) : ⇨ http server started on 127.0.0.1:36057 +algod(2768) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36057. Press Ctrl-C to exit +algod(2747) : Exiting on terminated +algod(2768) : Exiting on terminated +--- PASS: TestLeaseTransactionsDifferentSender (18.40s) +=== CONT TestLeaseTransactionsSameSenderDifferentLease +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(2923) : No REST API Token found. Generated token: 3256a9cbe2fff4efa5a9ad018058d232ffd49bf838157c4b6318801c4fcb86bc +algod(2923) : No Admin REST API Token found. 
Generated token: 8a81d6ad362ba976efbe15056f3f6753a0723e0110b9527fd7426f9f47360185 +algod(2923) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Primary/node.log +algod(2923) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2923) : Initializing the Algorand node... +algod(2923) : Success! +algod(2923) : ⇨ http server started on 127.0.0.1:46719 +algod(2923) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46719. Press Ctrl-C to exit +algod(2931) : No REST API Token found. Generated token: a315d15fc5cd13d6bd86ac40c56f7090e7260bbb3dc466cba8919fec9edddaee +algod(2931) : No Admin REST API Token found. Generated token: b2662b8003c1daf70a4a677762bfa062b1b1c9d86d5f28b71c6552f2b6be5d26 +algod(2931) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseTransactionsSameSenderDifferentLease/Node/node.log +algod(2931) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2931) : Initializing the Algorand node... +algod(2931) : Success! +algod(2931) : ⇨ http server started on 127.0.0.1:37117 +algod(2931) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37117. 
Press Ctrl-C to exit +algod(2923) : Exiting on terminated +algod(2931) : Exiting on terminated +=== CONT TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 +--- PASS: TestLeaseTransactionsSameSenderDifferentLease (9.77s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(2972) : No REST API Token found. Generated token: e575c48e7b32a005d6a43ba2940cbb3afb8747b2450baf25dbae1386d5c867ec +algod(2972) : No Admin REST API Token found. Generated token: a3c31fe893487d421cc3dec4ab6d97955491e674c4727a92a3c25f58b455982e +algod(2972) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Primary/node.log +algod(2972) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2972) : Initializing the Algorand node... +algod(2972) : Success! +algod(2972) : ⇨ http server started on 127.0.0.1:35385 +algod(2972) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35385. Press Ctrl-C to exit +algod(2979) : No REST API Token found. 
Generated token: d1479d652dba2d951aa16dba8c439f68210c4e469dfca2e68f957dfa0de63f65 +algod(2979) : No Admin REST API Token found. Generated token: 08445f9a889abf331066bd9806d105f0c5fce41c27892ec2c8e5fcfc9b3eef59 +algod(2979) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7/Node/node.log +algod(2979) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(2979) : Initializing the Algorand node... +algod(2979) : Success! +algod(2979) : ⇨ http server started on 127.0.0.1:39773 +algod(2979) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39773. Press Ctrl-C to exit +algod(2979) : Exiting on terminated +algod(2972) : Exiting on terminated +=== CONT TestAssetCreateWaitRestartDelete +--- PASS: TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 (14.01s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(3031) : No REST API Token found. Generated token: ec3d3e2bf69cbee0507d52dce6f66721d4ef85b164d5b4738ff3f881b1b8ceb8 +algod(3031) : No Admin REST API Token found. 
Generated token: d7041f7d88c318fba21fcdc2e5e1039c1cfaf872976502003ded1985d0ba8252 +algod(3031) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Primary/node.log +algod(3031) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3031) : Initializing the Algorand node... +algod(3031) : Success! +algod(3031) : ⇨ http server started on 127.0.0.1:34485 +algod(3031) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34485. Press Ctrl-C to exit +algod(3038) : No REST API Token found. Generated token: f1035e193ec2e2d9cc9e1460861010fd7479bb8238565d72d75aa13527df0025 +algod(3038) : No Admin REST API Token found. Generated token: 8f348a2fc82ba703ea32485fa6074b6beae3bf72c87d2fc862d2ef04b50f039f +algod(3038) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Node/node.log +algod(3038) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3038) : Initializing the Algorand node... +algod(3038) : Success! +algod(3038) : ⇨ http server started on 127.0.0.1:35537 +algod(3038) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35537. Press Ctrl-C to exit +algod(3038) : Exiting on terminated +algod(3031) : Exiting on terminated +algod(3119) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Primary/node.log +algod(3119) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3119) : Initializing the Algorand node... +algod(3119) : Success! +algod(3119) : ⇨ http server started on 127.0.0.1:44433 +algod(3119) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44433. 
Press Ctrl-C to exit +algod(3135) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetCreateWaitRestartDelete/Node/node.log +algod(3135) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3135) : Initializing the Algorand node... +algod(3135) : Success! +algod(3135) : ⇨ http server started on 127.0.0.1:41977 +algod(3135) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41977. Press Ctrl-C to exit +algod(3119) : Exiting on terminated +algod(3135) : Exiting on terminated +--- PASS: TestAssetCreateWaitRestartDelete (35.88s) +=== CONT TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/57016b942f6d97e6d4c0688b373bb0a2fc85a1a2 100000 +algod(3344) : No REST API Token found. Generated token: 2cacb019662ea692fc499f1c97d97edd64d24a1e5a5ce6481d1ed33967c5a3bc +algod(3344) : No Admin REST API Token found. 
Generated token: cfffb61c27a394a01065d8d9475bf0e514e5ba479a7c0c0a30eb25300fdac652 +algod(3344) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Primary/node.log +algod(3344) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3344) : Initializing the Algorand node... +algod(3344) : Success! +algod(3344) : ⇨ http server started on 127.0.0.1:46793 +algod(3344) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46793. Press Ctrl-C to exit +algod(3352) : No REST API Token found. Generated token: 44f375208159d29ff0a55edff5ccdbbddafd703aae93798ccc305f78994232e4 +algod(3352) : No Admin REST API Token found. Generated token: 3830c76d22d506edc39fc82aa57144e61609594d501f98d5e65e1b4b2b81389b +algod(3352) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7/Node/node.log +algod(3352) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3352) : Initializing the Algorand node... +algod(3352) : Success! +algod(3352) : ⇨ http server started on 127.0.0.1:46863 +algod(3352) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46863. 
Press Ctrl-C to exit +algod(2659) : Exiting on terminated +algod(2651) : Exiting on terminated +=== CONT TestGroupTransactionsSubmission +--- PASS: TestOverlappingLeases (92.91s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(3387) : No REST API Token found. Generated token: 705cc1c02ec3439405f458d850aeff8905f493cf165f3fb6bc7d987edec8b44e +algod(3387) : No Admin REST API Token found. Generated token: 2268a1cc7f749ecec27494a0891a9a04ab11b9682544d89f451468834203a0dd +algod(3387) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Primary/node.log +algod(3387) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3387) : Initializing the Algorand node... +algod(3387) : Success! +algod(3387) : ⇨ http server started on 127.0.0.1:8080 +algod(3387) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(3394) : No REST API Token found. Generated token: 7549713d9c9cb41f7208451e94b461de7f4b921adb2c5ddaa6ab9b9de4f857cd +algod(3394) : No Admin REST API Token found. 
Generated token: cb1dd6acc299f981f1f62e8be47c8406efa7ef93a5ddc04c81bedc0778d0bc5b +algod(3394) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsSubmission/Node/node.log +algod(3394) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3394) : Initializing the Algorand node... +algod(3394) : Success! +algod(3394) : ⇨ http server started on 127.0.0.1:37777 +algod(3394) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37777. Press Ctrl-C to exit +algod(3394) : Exiting on terminated +algod(3387) : Exiting on terminated +--- PASS: TestGroupTransactionsSubmission (7.79s) +=== CONT TestGroupTransactionsDifferentSizes +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Wallet1.0.3000000.partkey +future 100000 +algod(3431) : No REST API Token found. Generated token: 51ed572ae8e715a9fd93e4396c77bac9ff68ac070f999ae9e143e6fc631f2fd1 +algod(3431) : No Admin REST API Token found. Generated token: 90a79eac61e3f4fa1286fbaea51b364b6bacb63a5d1eaf08855f324eff7de67b +algod(3431) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Primary/node.log +algod(3431) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3431) : Initializing the Algorand node... 
+algod(3431) : Success! +algod(3431) : ⇨ http server started on 127.0.0.1:8080 +algod(3431) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(3438) : No REST API Token found. Generated token: 0e48cda927a45a6f928fd54a9bd1ca8ffa3eff504f32f389445d3577b175a480 +algod(3438) : No Admin REST API Token found. Generated token: a0583194e478b515638491d0c3b973c817237e8a90ea8f028270680c43146610 +algod(3438) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactionsDifferentSizes/Node/node.log +algod(3438) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3438) : Initializing the Algorand node... +algod(3438) : Success! +algod(3438) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46151. Press Ctrl-C to exit +algod(3438) : ⇨ http server started on 127.0.0.1:46151 +algod(3352) : Exiting on terminated +algod(3344) : Exiting on terminated +--- PASS: TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 (17.76s) +=== CONT TestAccountsCanClose +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Wallet1.0.3000000.partkey +https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622 100000 +algod(3462) : No REST API Token found. Generated token: bed0457f34050eee13746a4d1c6ac74b9b71c5179e4fb0529d7adee760ae882a +algod(3462) : No Admin REST API Token found. 
Generated token: 4ac2e2c9c0f69bd6ca3867bfaf2eed6e6fd09b014ab53253c16d6612b75d228a +algod(3462) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Primary/node.log +algod(3462) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3462) : Initializing the Algorand node... +algod(3462) : Success! +algod(3462) : ⇨ http server started on 127.0.0.1:35331 +algod(3462) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35331. Press Ctrl-C to exit +algod(3469) : No REST API Token found. Generated token: 82975d866965c6e2df5c26b97b379f81f7aa83330a4be4c6dc283979aa506ec6 +algod(3469) : No Admin REST API Token found. Generated token: 84d4ba6080b608544939119a7ca223b44fa7e5124ff05d002aca68cd7857473f +algod(3469) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanClose/Node/node.log +algod(3469) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3469) : Initializing the Algorand node... +algod(3469) : Success! +algod(3469) : ⇨ http server started on 127.0.0.1:33011 +algod(3469) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33011. 
Press Ctrl-C to exit +algod(3431) : Exiting on terminated +algod(3438) : Exiting on terminated +--- PASS: TestGroupTransactionsDifferentSizes (34.62s) +=== CONT TestAssetSend +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Wallet1.0.3000000.partkey +future 100000 +algod(3568) : No REST API Token found. Generated token: 55158e9028c78273ec427ffd00d2eb41006c8589068b30239afce8281d2b653a +algod(3568) : No Admin REST API Token found. Generated token: e0a7fe5e318af5f257cdff7cfc62b2b2e8903e0f352fdc42f73938cb2295df99 +algod(3568) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Primary/node.log +algod(3568) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3568) : Initializing the Algorand node... +algod(3568) : Success! +algod(3568) : ⇨ http server started on 127.0.0.1:8080 +algod(3568) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(3575) : No REST API Token found. Generated token: a18a0a80459d66929f3b0d5e9099d77042e20541336125eff33dbbabfe3e5afd +algod(3575) : No Admin REST API Token found. 
Generated token: 4e1b7c43e673ba832167611db8cb1095eff35e9c60362e2529a745424ee08786 +algod(3575) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetSend/Node/node.log +algod(3575) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3575) : Initializing the Algorand node... +algod(3575) : Success! +algod(3575) : ⇨ http server started on 127.0.0.1:39309 +algod(3575) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39309. Press Ctrl-C to exit +algod(3462) : Exiting on terminated +algod(3469) : Exiting on terminated +=== CONT TestGroupTransactions +--- PASS: TestAccountsCanClose (34.81s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Wallet2.0.3000000.partkey +future 100000 +algod(3599) : No REST API Token found. Generated token: b7ce42035e67094b77e66ba60fef420e7a65b09b357f16cfb413db066d59f675 +algod(3599) : No Admin REST API Token found. Generated token: 319bc581de0b5c951338d18d0386cf66b443cb0c23b8d454cd6ba3c0c90f9b67 +algod(3599) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Primary/node.log +algod(3599) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3599) : Initializing the Algorand node... +algod(3599) : Success! 
+algod(3599) : ⇨ http server started on 127.0.0.1:36173 +algod(3599) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36173. Press Ctrl-C to exit +algod(3607) : No REST API Token found. Generated token: b9180f9fcaba5b8511b8404c4b56d17319047f5c028a8b7c52fd96e5ab3917b2 +algod(3607) : No Admin REST API Token found. Generated token: 2219dee18ae2f15f83d68236cf8c1f5be711558dd2871036ff846d299ee00071 +algod(3607) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestGroupTransactions/Node/node.log +algod(3607) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3607) : Initializing the Algorand node... +algod(3607) : Success! +algod(3607) : ⇨ http server started on 127.0.0.1:41333 +algod(3607) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41333. Press Ctrl-C to exit +algod(3599) : Exiting on terminated +algod(3607) : Exiting on terminated +=== CONT TestAssetGroupCreateSendDestroy +--- PASS: TestGroupTransactions (30.62s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(3694) : No REST API Token found. Generated token: 4e8d9a408c8586b9707cac412189a912e52e0767d2a4de37a9d0721473e39bd5 +algod(3694) : No Admin REST API Token found. 
Generated token: 972bbd1343961f18ccf3c43e81479387537c9cd647d28f990a85657f8543e01c +algod(3694) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Primary/node.log +algod(3694) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3694) : Initializing the Algorand node... +algod(3694) : Success! +algod(3694) : ⇨ http server started on 127.0.0.1:42547 +algod(3694) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42547. Press Ctrl-C to exit +algod(3701) : No REST API Token found. Generated token: 09392c60ad772ac0f1fc4244a8d2fbe674e1d364916c8609b12f3a4086e722b3 +algod(3701) : No Admin REST API Token found. Generated token: d56b394192c1761209d5f4e867c2083abbf367b755fa5b150c69a2b0012a6ab8 +algod(3701) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetGroupCreateSendDestroy/Node/node.log +algod(3701) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3701) : Initializing the Algorand node... +algod(3701) : Success! +algod(3701) : ⇨ http server started on 127.0.0.1:46421 +algod(3701) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46421. 
Press Ctrl-C to exit +algod(3575) : Exiting on terminated +algod(3568) : Exiting on terminated +=== CONT TestAssetValidRounds +--- PASS: TestAssetSend (34.51s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Wallet2.0.3000000.partkey +future 100000 +algod(3723) : No REST API Token found. Generated token: 7ab8df151581af28f266c667a833d26841b08e511d73b57057ca3da12e412a9a +algod(3723) : No Admin REST API Token found. Generated token: 2385a69c299f062884bd472f01a6e68231ee4d8feaf3d6d5d5a0288694239bc9 +algod(3723) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Primary/node.log +algod(3723) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3723) : Initializing the Algorand node... +algod(3723) : Success! +algod(3723) : ⇨ http server started on 127.0.0.1:8080 +algod(3723) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(3730) : No REST API Token found. Generated token: 911ef1829965d0c1877d02d1e9ae1e216e13a8a4285194885bf0da84ebfe2b74 +algod(3730) : No Admin REST API Token found. 
Generated token: 8e0d38847c4c0f19aab77e4c5059c2871b8d4013022e2fa81132594c390123a1 +algod(3730) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAssetValidRounds/Node/node.log +algod(3730) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3730) : Initializing the Algorand node... +algod(3730) : Success! +algod(3730) : ⇨ http server started on 127.0.0.1:42821 +algod(3730) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42821. Press Ctrl-C to exit +algod(3730) : Exiting on terminated +algod(3723) : Exiting on terminated +--- PASS: TestAssetValidRounds (10.39s) +=== CONT TestAccountInformationV2 +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Wallet1.0.3000000.partkey +future 100000 +algod(3805) : No REST API Token found. Generated token: 48c890a0228705e1d0e8d379fa012a47a274369ff5ce97adcfbc05c124a382be +algod(3805) : No Admin REST API Token found. Generated token: d0abe6f7ef9bd29558b634a9c3060f87e8775cace24b7da135f61a644e2d35aa +algod(3805) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Primary/node.log +algod(3805) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3805) : Initializing the Algorand node... +algod(3805) : Success! 
+algod(3805) : ⇨ http server started on 127.0.0.1:37935 +algod(3805) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37935. Press Ctrl-C to exit +algod(3817) : No REST API Token found. Generated token: 3fc86480598a43f2aaaf5eb8b6e67ea80df2bf22ec914ba4b510f397d966a271 +algod(3817) : No Admin REST API Token found. Generated token: 25892b8667414fe9cf4b9edb5325abae3f2bdc72749fa8aa9815a5b6b2a14387 +algod(3817) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountInformationV2/Node/node.log +algod(3817) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(3817) : Initializing the Algorand node... +algod(3817) : Success! +algod(3817) : ⇨ http server started on 127.0.0.1:35409 +algod(3817) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35409. Press Ctrl-C to exit +algod(3701) : Exiting on terminated +algod(3694) : Exiting on terminated +--- PASS: TestAssetGroupCreateSendDestroy (31.74s) +algod(3805) : Exiting on terminated +algod(3817) : Exiting on terminated +--- PASS: TestAccountInformationV2 (28.95s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 270.274s +? 
github.com/algorand/go-algorand/test/e2e-go/globals [no test files] +=== RUN TestServerStartsStopsSuccessfully +=== PAUSE TestServerStartsStopsSuccessfully +=== RUN TestBadAuthFails +=== PAUSE TestBadAuthFails +=== RUN TestGoodAuthSucceeds +=== PAUSE TestGoodAuthSucceeds +=== RUN TestNonAbsSQLiteWalletConfigFails +=== PAUSE TestNonAbsSQLiteWalletConfigFails +=== RUN TestAbsSQLiteWalletConfigSucceeds +=== PAUSE TestAbsSQLiteWalletConfigSucceeds +=== RUN TestGenerateAndListKeys +=== PAUSE TestGenerateAndListKeys +=== RUN TestImportKey +=== PAUSE TestImportKey +=== RUN TestExportKey +=== PAUSE TestExportKey +=== RUN TestDeleteKey +=== PAUSE TestDeleteKey +=== RUN TestSignTransaction +=== PAUSE TestSignTransaction +=== RUN TestSignProgram +=== PAUSE TestSignProgram +=== RUN TestMasterKeyImportExport +=== PAUSE TestMasterKeyImportExport +=== RUN TestMasterKeyGeneratePastImportedKeys +=== PAUSE TestMasterKeyGeneratePastImportedKeys +=== RUN TestMultisigImportList +=== PAUSE TestMultisigImportList +=== RUN TestMultisigExportDelete +=== PAUSE TestMultisigExportDelete +=== RUN TestMultisigSign +=== PAUSE TestMultisigSign +=== RUN TestMultisigSignWithSigner +=== PAUSE TestMultisigSignWithSigner +=== RUN TestMultisigSignWithWrongSigner +=== PAUSE TestMultisigSignWithWrongSigner +=== RUN TestMultisigSignProgram +=== PAUSE TestMultisigSignProgram +=== RUN TestWalletCreation +=== PAUSE TestWalletCreation +=== RUN TestBlankWalletCreation +=== PAUSE TestBlankWalletCreation +=== RUN TestWalletRename +=== PAUSE TestWalletRename +=== RUN TestWalletSessionRelease +=== PAUSE TestWalletSessionRelease +=== RUN TestWalletSessionRenew +=== PAUSE TestWalletSessionRenew +=== RUN TestWalletSessionExpiry +=== PAUSE TestWalletSessionExpiry +=== CONT TestGoodAuthSucceeds +=== CONT TestMasterKeyGeneratePastImportedKeys +--- PASS: TestGoodAuthSucceeds (0.21s) +=== CONT TestMultisigImportList +--- PASS: TestMasterKeyGeneratePastImportedKeys (0.30s) +=== CONT TestSignProgram +=== CONT 
TestMasterKeyImportExport +--- PASS: TestMultisigImportList (0.26s) +=== CONT TestDeleteKey +--- PASS: TestSignProgram (0.25s) +--- PASS: TestMasterKeyImportExport (0.32s) +=== CONT TestSignTransaction +--- PASS: TestDeleteKey (0.25s) +=== CONT TestImportKey +--- PASS: TestSignTransaction (0.26s) +=== CONT TestGenerateAndListKeys +--- PASS: TestImportKey (0.26s) +=== CONT TestAbsSQLiteWalletConfigSucceeds +--- PASS: TestAbsSQLiteWalletConfigSucceeds (0.00s) +=== CONT TestExportKey +--- PASS: TestExportKey (0.26s) +=== CONT TestNonAbsSQLiteWalletConfigFails +--- PASS: TestNonAbsSQLiteWalletConfigFails (0.00s) +=== CONT TestBadAuthFails +--- PASS: TestGenerateAndListKeys (0.27s) +=== CONT TestServerStartsStopsSuccessfully +--- PASS: TestBadAuthFails (0.21s) +=== CONT TestWalletCreation +--- PASS: TestServerStartsStopsSuccessfully (0.21s) +=== CONT TestWalletSessionExpiry +=== CONT TestWalletSessionRenew +--- PASS: TestWalletCreation (0.23s) +--- PASS: TestWalletSessionExpiry (2.23s) +=== CONT TestWalletRename +=== CONT TestBlankWalletCreation +--- PASS: TestWalletSessionRenew (2.23s) +--- PASS: TestWalletRename (0.24s) +=== CONT TestMultisigSignWithSigner +=== CONT TestWalletSessionRelease +--- PASS: TestBlankWalletCreation (0.23s) +=== CONT TestMultisigSignWithWrongSigner +--- PASS: TestMultisigSignWithSigner (0.25s) +--- PASS: TestWalletSessionRelease (0.23s) +=== CONT TestMultisigSignProgram +--- PASS: TestMultisigSignWithWrongSigner (0.26s) +=== CONT TestMultisigSign +--- PASS: TestMultisigSignProgram (0.26s) +=== CONT TestMultisigExportDelete +--- PASS: TestMultisigSign (0.25s) +--- PASS: TestMultisigExportDelete (0.25s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/kmd 5.048s +testing: warning: no tests to run +PASS +ok github.com/algorand/go-algorand/test/e2e-go/perf 0.069s [no tests to run] +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet1.rootkey +Created 
new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(4283) : No REST API Token found. Generated token: c76798a34a35657df663a6c0b6a81cb39ff4a67359c32b63943204829b79d87c +algod(4283) : No Admin REST API Token found. Generated token: 69e7af0bc9e5973a855e6d28859b7345a63236b05a091b84d74ce74cda33085b +algod(4283) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Primary/node.log +algod(4283) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4283) : Initializing the Algorand node... +algod(4283) : Success! +algod(4283) : ⇨ http server started on 127.0.0.1:38947 +algod(4283) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38947. Press Ctrl-C to exit +algod(4291) : No REST API Token found. Generated token: 0e846834bba67419a34ace81b938e33f0c9dc012d23929aee2ff583e40be9277 +algod(4291) : No Admin REST API Token found. Generated token: a64db0006bbabb7cbfbba67e8d980ed9669998aee89f02fe99d4b7ac55773063 +algod(4291) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/RestClientTests/Node/node.log +algod(4291) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4291) : Initializing the Algorand node... +algod(4291) : Success! +algod(4291) : ⇨ http server started on 127.0.0.1:34379 +algod(4291) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34379. 
Press Ctrl-C to exit +=== RUN TestClientCanGetStatus +--- PASS: TestClientCanGetStatus (0.01s) +=== RUN TestClientCanGetStatusAfterBlock +--- PASS: TestClientCanGetStatusAfterBlock (13.80s) +=== RUN TestTransactionsByAddr +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Wallet2.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(4336) : No REST API Token found. Generated token: 475c456823a93eec658a1cce2534bd90e3019eeaca13a6aded647d92a3475168 +algod(4336) : No Admin REST API Token found. Generated token: 36c927d2e19fc8c79c8adac754948e6f24b8f1c57f8039fe801578c35db0c365 +algod(4336) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Primary/node.log +algod(4336) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4336) : Initializing the Algorand node... +algod(4336) : Success! +algod(4336) : ⇨ http server started on 127.0.0.1:40873 +algod(4336) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40873. Press Ctrl-C to exit +algod(4343) : No REST API Token found. Generated token: f7c522f3ef7668ffde8df7fae80edf803bc5e621db32674c6f752d209a252161 +algod(4343) : No Admin REST API Token found. 
Generated token: 70335dc94d7f3adcfd32e57ece56514b3637015e9d6e9ba02a3db6157b9a2527 +algod(4343) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestTransactionsByAddr/Node/node.log +algod(4343) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4343) : Initializing the Algorand node... +algod(4343) : Success! +algod(4343) : ⇨ http server started on 127.0.0.1:33153 +algod(4343) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:33153. Press Ctrl-C to exit + restClient_test.go:230: rnd[2] created txn TJFY6J2BGXMNQPFBAZEZWCGZPSA2HZFTK3JI3RQ32Y4ZR354KERA + restClient_test.go:237: rnd 4 +algod(4336) : Exiting on terminated +algod(4343) : Exiting on terminated +--- PASS: TestTransactionsByAddr (17.97s) +=== RUN TestClientCanGetVersion +--- PASS: TestClientCanGetVersion (0.00s) +=== RUN TestClientCanGetSuggestedFee +--- PASS: TestClientCanGetSuggestedFee (0.02s) +=== RUN TestClientCanGetMinTxnFee +--- PASS: TestClientCanGetMinTxnFee (0.01s) +=== RUN TestClientCanGetBlockInfo +--- PASS: TestClientCanGetBlockInfo (0.03s) +=== RUN TestClientRejectsBadFromAddressWhenSending +--- PASS: TestClientRejectsBadFromAddressWhenSending (0.01s) +=== RUN TestClientRejectsBadToAddressWhenSending +--- PASS: TestClientRejectsBadToAddressWhenSending (0.02s) +=== RUN TestClientRejectsMutatedFromAddressWhenSending +--- PASS: TestClientRejectsMutatedFromAddressWhenSending (0.02s) +=== RUN TestClientRejectsMutatedToAddressWhenSending +--- PASS: TestClientRejectsMutatedToAddressWhenSending (0.01s) +=== RUN TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey +--- PASS: TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey (0.01s) +=== RUN TestClientOversizedNote +--- PASS: TestClientOversizedNote (0.02s) +=== RUN TestClientCanSendAndGetNote +--- PASS: TestClientCanSendAndGetNote (7.32s) +=== RUN TestClientCanGetTransactionStatus + restClient_test.go:424: { + "amt": 100000, + "fee": 
10000, + "fv": 11, + "gen": "test-v1", + "gh": "TFra7RvB7ra3ON9ZblZuqoCHu6gWiwpVHfGr0eN1C9s=", + "lv": 1011, + "rcv": "YSCN4ZDAXRU2WO2W2WNDCWBVRQKZETY2QF2YTI2LGBLPVUBNZZLBXOEKQI", + "snd": "3BWJVN4HGXGNLN2OUBWGJINILE575VG4F6BTHE4NWOIUEXK3YOCHALOKPA", + "type": "pay" + } + restClient_test.go:426: 6NQGEVFKIYCCVKQKGICVKLHCH24ZYTDVWHBE553W7DF53BPYE4OQ +--- PASS: TestClientCanGetTransactionStatus (8.23s) +=== RUN TestAccountBalance +--- PASS: TestAccountBalance (8.33s) +=== RUN TestAccountParticipationInfo +--- PASS: TestAccountParticipationInfo (8.33s) +=== RUN TestSupply +--- PASS: TestSupply (0.00s) +=== RUN TestClientCanGetGoRoutines +--- PASS: TestClientCanGetGoRoutines (0.01s) +=== RUN TestSendingTooMuchFails + restClient_test.go:557: HTTP 400 Bad Request: TransactionPool.Remember: transaction HV3NYJPDGYHZH6LH4IBB54BAACA2F45B552S4JQA4CD56SD5EPKA: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:90000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {100100}) + restClient_test.go:562: HTTP 400 Bad Request: TransactionPool.Remember: transaction IQEXYSWOWK6O6NNE75AEV6VLPBZFKPKES4EYBTSPCG6MCOVDASQQ: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:90000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] 
Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {18446744073709551615}) + restClient_test.go:567: HTTP 400 Bad Request: TransactionPool.Remember: transaction EARCABJLCKMRRBEZW2DGRVPBRKVS6D6TILGS3C6ZZEZWAE4E2AQQ: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:100000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {100100}) + restClient_test.go:572: HTTP 400 Bad Request: TransactionPool.Remember: transaction W64ZQNQU4QF4KGG562ZHVM4KY2C5IOMJOU6ZGOKL2MC7USAAQWIA: overspend (account P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA, data {_struct:{} Status:Offline MicroAlgos:{Raw:100000} RewardsBase:0 RewardedMicroAlgos:{Raw:0} VoteID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionID:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirstValid:0 VoteLastValid:0 VoteKeyDilution:0 AssetParams:map[] Assets:map[] AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AppLocalStates:map[] AppParams:map[] TotalAppSchema:{_struct:{} NumUint:0 NumByteSlice:0}}, tried to spend {18446744073709551615}) +--- PASS: TestSendingTooMuchFails (0.07s) +=== RUN TestSendingFromEmptyAccountFails +--- PASS: TestSendingFromEmptyAccountFails (0.04s) +=== RUN TestSendingTooLittleToEmptyAccountFails +--- PASS: TestSendingTooLittleToEmptyAccountFails (0.06s) +=== RUN TestSendingLowFeeFails + restClient_test.go:663: 
HTTP 400 Bad Request: transaction {_struct:{} Sig:[216 70 241 73 25 40 190 87 19 12 178 201 6 82 78 8 43 150 231 41 157 153 251 241 70 143 175 59 174 26 68 48 198 95 50 97 16 28 204 12 154 168 200 167 230 134 48 209 216 242 56 31 96 75 79 204 81 249 232 82 170 48 118 13] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Lsig:{_struct:{} Logic:[] Sig:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Args:[]} Txn:{_struct:{} Type:pay Header:{_struct:{} Sender:3BWJVN4HGXGNLN2OUBWGJINILE575VG4F6BTHE4NWOIUEXK3YOCHALOKPA Fee:{Raw:1} FirstValid:17 LastValid:1017 Note:[] GenesisID:test-v1 GenesisHash:JRNNV3I3YHXLNNZY35MW4VTOVKAIPO5IC2FQUVI56GV5DY3VBPNQ Group:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA Lease:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] RekeyTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} KeyregTxnFields:{_struct:{} VotePK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionPK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirst:0 VoteLast:0 VoteKeyDilution:0 Nonparticipation:false} PaymentTxnFields:{_struct:{} Receiver:P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA Amount:{Raw:100000} CloseRemainderTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetConfigTxnFields:{_struct:{} ConfigAsset:0 AssetParams:{_struct:{} Total:0 Decimals:0 DefaultFrozen:false UnitName: AssetName: URL: MetadataHash:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Manager:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Reserve:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Freeze:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Clawback:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ}} AssetTransferTxnFields:{_struct:{} XferAsset:0 AssetAmount:0 
AssetSender:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetReceiver:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetCloseTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetFreezeTxnFields:{_struct:{} FreezeAccount:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ FreezeAsset:0 AssetFrozen:false} ApplicationCallTxnFields:{_struct:{} ApplicationID:0 OnCompletion:NoOpOC ApplicationArgs:[] Accounts:[] ForeignApps:[] ForeignAssets:[] LocalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} GlobalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} ApprovalProgram:[] ClearStateProgram:[]} CompactCertTxnFields:{_struct:{} CertRound:0 CertType:0 Cert:{_struct:{} SigCommit:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA SignedWeight:0 SigProofs:[] PartProofs:[] Reveals:map[]}}} AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} invalid : transaction had fee 1, which is less than the minimum 1000 + restClient_test.go:669: HTTP 400 Bad Request: transaction {_struct:{} Sig:[65 160 77 104 227 30 37 105 31 244 54 175 138 167 38 34 129 192 253 108 205 101 142 23 42 251 126 196 80 218 31 185 178 104 206 9 2 4 120 11 111 70 174 129 124 145 164 237 80 203 76 44 155 138 146 212 44 66 121 157 21 188 47 0] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Lsig:{_struct:{} Logic:[] Sig:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Msig:{_struct:{} Version:0 Threshold:0 Subsigs:[]} Args:[]} Txn:{_struct:{} Type:pay Header:{_struct:{} Sender:3BWJVN4HGXGNLN2OUBWGJINILE575VG4F6BTHE4NWOIUEXK3YOCHALOKPA Fee:{Raw:0} FirstValid:17 LastValid:1017 Note:[] GenesisID:test-v1 GenesisHash:JRNNV3I3YHXLNNZY35MW4VTOVKAIPO5IC2FQUVI56GV5DY3VBPNQ Group:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA Lease:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] RekeyTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} 
KeyregTxnFields:{_struct:{} VotePK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] SelectionPK:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] VoteFirst:0 VoteLast:0 VoteKeyDilution:0 Nonparticipation:false} PaymentTxnFields:{_struct:{} Receiver:P7LZTPX5YJZ7YG5QEQWMQH54CBPIKATUQMIWS6BK6BJ7HYDNJJLFZC75XA Amount:{Raw:100000} CloseRemainderTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetConfigTxnFields:{_struct:{} ConfigAsset:0 AssetParams:{_struct:{} Total:0 Decimals:0 DefaultFrozen:false UnitName: AssetName: URL: MetadataHash:[0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0] Manager:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Reserve:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Freeze:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ Clawback:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ}} AssetTransferTxnFields:{_struct:{} XferAsset:0 AssetAmount:0 AssetSender:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetReceiver:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ AssetCloseTo:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} AssetFreezeTxnFields:{_struct:{} FreezeAccount:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ FreezeAsset:0 AssetFrozen:false} ApplicationCallTxnFields:{_struct:{} ApplicationID:0 OnCompletion:NoOpOC ApplicationArgs:[] Accounts:[] ForeignApps:[] ForeignAssets:[] LocalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} GlobalStateSchema:{_struct:{} NumUint:0 NumByteSlice:0} ApprovalProgram:[] ClearStateProgram:[]} CompactCertTxnFields:{_struct:{} CertRound:0 CertType:0 Cert:{_struct:{} SigCommit:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA SignedWeight:0 SigProofs:[] PartProofs:[] Reveals:map[]}}} AuthAddr:AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ} invalid : transaction had fee 0, which is less than the minimum 1000 +--- PASS: TestSendingLowFeeFails 
(0.07s) +=== RUN TestSendingNotClosingAccountFails +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Wallet2.0.3000000.partkey +algod(4606) : No REST API Token found. Generated token: b3f22a106eb6122ba6ebb7ad5f5cad4ab76e4ff00be9cea089fa552c85d5cbd3 +algod(4606) : No Admin REST API Token found. Generated token: 7b59e843d468b91e7501755971d61821a89b7a1c3a38f9082d32aef207ec1ee1 +algod(4606) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Primary/node.log +algod(4606) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4606) : Initializing the Algorand node... +algod(4606) : Success! +algod(4606) : ⇨ http server started on 127.0.0.1:37903 +algod(4606) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:37903. Press Ctrl-C to exit +algod(4613) : No REST API Token found. Generated token: 3629a039e9d20f5fa4a7c402082cad5a52528c011fcd4d6a398790b6d7034682 +algod(4613) : No Admin REST API Token found. 
Generated token: 588c2499a87d2439f094806ee562af55ab8b7e6020704cd6e7962c601c9233e9 +algod(4613) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestSendingNotClosingAccountFails/Node/node.log +algod(4613) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4613) : Initializing the Algorand node... +algod(4613) : Success! +algod(4613) : ⇨ http server started on 127.0.0.1:42947 +algod(4613) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42947. Press Ctrl-C to exit +algod(4613) : Exiting on terminated +algod(4606) : Exiting on terminated +--- PASS: TestSendingNotClosingAccountFails (3.89s) +=== RUN TestClientCanGetPendingTransactions +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(4637) : No REST API Token found. Generated token: 5debbf720dd845682f9b78137216b0adf9fce8379e1d127f945b20af3c559d4a +algod(4637) : No Admin REST API Token found. 
Generated token: 0be670daf19d8bd88830c8817b35d44eb823ca048359f57873b24a06b8eea831 +algod(4637) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Primary/node.log +algod(4637) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4637) : Initializing the Algorand node... +algod(4637) : Success! +algod(4637) : ⇨ http server started on 127.0.0.1:42439 +algod(4637) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42439. Press Ctrl-C to exit +algod(4645) : No REST API Token found. Generated token: 13b1a26df08074ad74dfdb621d686293190ee02664685de45c42b743cf1d50c2 +algod(4645) : No Admin REST API Token found. Generated token: df4f852af3e5dab9e157593c2a6e3854470213262853dc1926a89d11db12a3ed +algod(4645) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientCanGetPendingTransactions/Node/node.log +algod(4645) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4645) : Initializing the Algorand node... +algod(4645) : Success! +algod(4645) : ⇨ http server started on 127.0.0.1:39457 +algod(4645) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39457. 
Press Ctrl-C to exit +algod(4645) : Exiting on terminated +algod(4637) : Exiting on terminated +--- PASS: TestClientCanGetPendingTransactions (8.20s) +=== RUN TestClientTruncatesPendingTransactions +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(4680) : No REST API Token found. Generated token: 0a26701d5d1dcbc52f2a51af23e7b73c94b27fbaf878929d169b5b6d0520564e +algod(4680) : No Admin REST API Token found. Generated token: 9a11b167638a5629daf54bbccb6aa5c948ea18010296339b153e461553c4bb38 +algod(4680) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Primary/node.log +algod(4680) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4680) : Initializing the Algorand node... +algod(4680) : Success! +algod(4680) : ⇨ http server started on 127.0.0.1:34217 +algod(4680) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:34217. Press Ctrl-C to exit +algod(4687) : No REST API Token found. Generated token: 3fd8152e288635ae00d5ee39ac4337c2cd74fd6b807ba69ac83d65f070e5be08 +algod(4687) : No Admin REST API Token found. 
Generated token: bddf2d08ad914b1e5d7453a6d396aeb53a6092b32d26d482338f369f59c1f61f +algod(4687) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestClientTruncatesPendingTransactions/Node/node.log +algod(4687) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4687) : Initializing the Algorand node... +algod(4687) : Success! +algod(4687) : ⇨ http server started on 127.0.0.1:39073 +algod(4687) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39073. Press Ctrl-C to exit +algod(4687) : Exiting on terminated +algod(4680) : Exiting on terminated +--- PASS: TestClientTruncatesPendingTransactions (9.60s) +=== RUN TestClientPrioritizesPendingTransactions + restClient_test.go:785: new FIFO pool does not have prioritization +--- SKIP: TestClientPrioritizesPendingTransactions (0.00s) +PASS +algod(4291) : Exiting on terminated +algod(4283) : Exiting on terminated +ok github.com/algorand/go-algorand/test/e2e-go/restAPI 89.426s +=== RUN TestManyAccountsCanGoOnline +=== PAUSE TestManyAccountsCanGoOnline +=== CONT TestManyAccountsCanGoOnline +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Wallet1.0.3000000.partkey +https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff 100000 +algod(4769) : No REST API Token found. 
Generated token: 88327cd4affa9699a330e54d6579146ad1d6768ce66e64d6ed6b437d1df97981 +algod(4769) : No Admin REST API Token found. Generated token: 9dd29f02c9ea2430d1fb553c8a36118036ba6818f66b20cff7858159a68f26ba +algod(4769) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Primary/node.log +algod(4769) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4769) : Initializing the Algorand node... +algod(4769) : Success! +algod(4769) : ⇨ http server started on 127.0.0.1:40167 +algod(4769) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:40167. Press Ctrl-C to exit +algod(4776) : No REST API Token found. Generated token: f2073e4dc776f7d347590988f8e49a70d7f6b46f6b5a56984e55e1c6e266dd3f +algod(4776) : No Admin REST API Token found. Generated token: 99a4d7d7138b18d603444543c99f786709239a5621114daf28ec0a7f47bab856 +algod(4776) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestManyAccountsCanGoOnline/Node/node.log +algod(4776) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4776) : Initializing the Algorand node... +algod(4776) : Success! +algod(4776) : ⇨ http server started on 127.0.0.1:36175 +algod(4776) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:36175. 
Press Ctrl-C to exit +algod(4769) : Exiting on terminated +algod(4776) : Exiting on terminated +--- PASS: TestManyAccountsCanGoOnline (21.76s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/stress/transactions 21.821s +=== RUN TestApplicationsUpgradeOverREST +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Wallet2.0.3000000.partkey +test-unupgraded-protocol 100000 +algod(4864) : No REST API Token found. Generated token: 6261b8e000f0b0071cb84915d8c055a327148502db2ebf064797b5d3db704c87 +algod(4864) : No Admin REST API Token found. Generated token: 0d60fa1560cb641e7586ac81dce034942c8361901b4ab7f287866ec21c3fe612 +algod(4864) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Primary/node.log +algod(4864) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4864) : Initializing the Algorand node... +algod(4864) : Success! +algod(4864) : ⇨ http server started on 127.0.0.1:39413 +algod(4864) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39413. Press Ctrl-C to exit +algod(4872) : No REST API Token found. Generated token: e2f05bcbff87d14ce8accf989be6326f4b80f9c22cc610b2e96c166b44b18530 +algod(4872) : No Admin REST API Token found. 
Generated token: f1119a50b7179a73a2dd8597d9effe86ee1fbd48dba87279921597b732d95626 +algod(4872) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverREST/Node/node.log +algod(4872) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4872) : Initializing the Algorand node... +algod(4872) : Success! +algod(4872) : ⇨ http server started on 127.0.0.1:38505 +algod(4872) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:38505. Press Ctrl-C to exit +algod(4864) : Exiting on terminated +algod(4872) : Exiting on terminated +--- PASS: TestApplicationsUpgradeOverREST (69.75s) +=== RUN TestApplicationsUpgradeOverGossip +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Wallet2.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Wallet2.0.3000000.partkey +test-unupgraded-protocol 100000 +algod(4973) : No REST API Token found. Generated token: fa0d13dd1788cb933faa4caeb2da99d0d0508797949771a15fe0ed22b13741d7 +algod(4973) : No Admin REST API Token found. Generated token: 06dab83008eb02d2b3294a34c4e9ac5692c8c175f017f6f4e01295a51083bccf +algod(4973) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Primary/node.log +algod(4973) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4973) : Initializing the Algorand node... +algod(4973) : Success! +algod(4973) : ⇨ http server started on 127.0.0.1:8080 +algod(4973) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. 
Press Ctrl-C to exit +algod(4980) : No REST API Token found. Generated token: a3dbdae529fa78b662bafc0936cabdedfe13f026df26abd5f6ef5f5cf588d5e8 +algod(4980) : No Admin REST API Token found. Generated token: f8e4ea07a936339a5302f06ab62d56c69f95119a56ec6dca436cc633e3a0bf4b +algod(4980) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestApplicationsUpgradeOverGossip/Node/node.log +algod(4980) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(4980) : Initializing the Algorand node... +algod(4980) : Success! +algod(4980) : ⇨ http server started on 127.0.0.1:46847 +algod(4980) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46847. Press Ctrl-C to exit +algod(4973) : Exiting on terminated +algod(4980) : Exiting on terminated +--- PASS: TestApplicationsUpgradeOverGossip (72.80s) +=== RUN TestRekeyUpgrade +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Wallet2.0.3000000.partkey +test-unupgraded-protocol 100000 +algod(5011) : No REST API Token found. Generated token: de8e10c1f94daa7f8e8cd7693717d0f765cf78ed5dff60e7ec7766ecbbdabb16 +algod(5011) : No Admin REST API Token found. Generated token: 11b074b9d81ca6210f3111e3be922949114f81955aaf94ec04aa58ed7f8cec25 +algod(5011) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Primary/node.log +algod(5011) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5011) : Initializing the Algorand node... +algod(5011) : Success! 
+algod(5011) : ⇨ http server started on 127.0.0.1:8080 +algod(5011) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(5018) : No REST API Token found. Generated token: 24e16935b6aa9de4f99f5993c7898ed46ecc48c3c3e591ef9e0adbdd0f0a4b36 +algod(5018) : No Admin REST API Token found. Generated token: aa0092539af074a6f53b2f51e49e1223ddc486f57f05525c25cf3ff323f10338 +algod(5018) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestRekeyUpgrade/Node/node.log +algod(5018) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5018) : Initializing the Algorand node... +algod(5018) : Success! +algod(5018) : ⇨ http server started on 127.0.0.1:46697 +algod(5018) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:46697. Press Ctrl-C to exit +algod(5011) : Exiting on terminated +algod(5018) : Exiting on terminated +--- PASS: TestRekeyUpgrade (53.44s) +=== RUN TestAccountsCanSendMoneyAcrossUpgradeV15toV16 +=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV15toV16 +=== RUN TestAccountsCanSendMoneyAcrossUpgradeV21toV22 +=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV21toV22 +=== RUN TestAccountsCanSendMoneyAcrossUpgradeV22toV23 +=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV22toV23 +=== RUN TestAccountsCanSendMoneyAcrossUpgradeV23toV24 +=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV23toV24 +=== RUN TestAccountsCanSendMoneyAcrossUpgradeV24toV25 +=== PAUSE TestAccountsCanSendMoneyAcrossUpgradeV24toV25 +=== CONT TestAccountsCanSendMoneyAcrossUpgradeV15toV16 +=== CONT TestAccountsCanSendMoneyAcrossUpgradeV22toV23 +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet2.rootkey +Created new rootkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet1.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Wallet1.0.3000000.partkey +test-fast-upgrade-https://github.com/algorand/spec/tree/a26ed78ed8f834e2b9ccb6eb7d3ee9f629a6e622 100000 +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Wallet1.0.3000000.partkey +test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/57016b942f6d97e6d4c0688b373bb0a2fc85a1a2 100000 +algod(5073) : No REST API Token found. Generated token: dbd55a4c1f6b1b47a374d34ae807450e71609219eb752a148c75b9b2d559e69d +algod(5073) : No Admin REST API Token found. Generated token: e7437d51b75133f1f9e1704e9bd5df61c5d0ad442b33adf086308c6248cb5610 +algod(5073) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Primary/node.log +algod(5079) : No REST API Token found. 
Generated token: 9972f22898ef28ba5154338bce28992cfe9114122591220138cfc4e2d731e5c5 +algod(5079) : No Admin REST API Token found. Generated token: df461d6e095fec2dbbbace10bcb4c6ceefd9d923f2a26b16abc1480939a139a1 +algod(5079) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Primary/node.log +algod(5073) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5073) : Initializing the Algorand node... +algod(5079) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5079) : Initializing the Algorand node... +algod(5079) : Success! +algod(5073) : Success! +algod(5073) : ⇨ http server started on 127.0.0.1:8080 +algod(5073) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(5079) : ⇨ http server started on 127.0.0.1:41335 +algod(5079) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:41335. Press Ctrl-C to exit +algod(5087) : No REST API Token found. Generated token: 410317c6b612b3934f9319c2d10e01663b28ce707f5800b41b59dd0f707f66d6 +algod(5087) : No Admin REST API Token found. Generated token: 584a7a6eec2558f9f004c865d3ca315cd79caa39af1712cb0d771c8fe1120103 +algod(5087) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV22toV23/Node/node.log +algod(5092) : No REST API Token found. Generated token: eae2fee1b135b3ac21cbcb3d706aa0aae337e1f6fbecc8ad41513ce96a958e78 +algod(5092) : No Admin REST API Token found. 
Generated token: 1291bcfa197105c5b063eb0238d7d0091d1de4aac8fe967167bafc4c6dfc60d3 +algod(5092) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV15toV16/Node/node.log +algod(5092) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5092) : Initializing the Algorand node... +algod(5087) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5087) : Initializing the Algorand node... +algod(5092) : Success! +algod(5092) : ⇨ http server started on 127.0.0.1:42041 +algod(5092) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42041. Press Ctrl-C to exit +algod(5087) : Success! +algod(5087) : ⇨ http server started on 127.0.0.1:44361 +algod(5087) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:44361. Press Ctrl-C to exit +algod(5079) : Exiting on terminated +algod(5092) : Exiting on terminated +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV15toV16 (63.93s) +=== CONT TestAccountsCanSendMoneyAcrossUpgradeV21toV22 +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Wallet1.0.3000000.partkey +test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/8096e2df2da75c3339986317f9abe69d4fa86b4b 100000 +algod(5160) : No REST API Token found. 
Generated token: cce645e6dde952b23ed9d4a9cea0ea30df4dc63be8f11c1ce62e2a5eb58a9c19 +algod(5160) : No Admin REST API Token found. Generated token: be5557090ce2d219d8213874f99d2a819e5f5b0b05f52aa4b6d08ded58db5968 +algod(5160) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Primary/node.log +algod(5160) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5160) : Initializing the Algorand node... +algod(5160) : Success! +algod(5160) : ⇨ http server started on 127.0.0.1:43497 +algod(5160) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43497. Press Ctrl-C to exit +algod(5168) : No REST API Token found. Generated token: 01a3a9cf7e402d3d8aa269f10d50dfc9bf489ab40ddfd9ae310d54e66df22aad +algod(5168) : No Admin REST API Token found. Generated token: 0a9403999ac6c305759f7a5cefaaac7bb638864b290f31d70faab927dd5723d3 +algod(5168) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV21toV22/Node/node.log +algod(5168) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5168) : Initializing the Algorand node... +algod(5168) : Success! +algod(5168) : ⇨ http server started on 127.0.0.1:43463 +algod(5168) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:43463. 
Press Ctrl-C to exit +algod(5073) : Exiting on terminated +algod(5087) : Exiting on terminated +=== CONT TestAccountsCanSendMoneyAcrossUpgradeV24toV25 +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV22toV23 (80.36s) +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet2.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Wallet1.0.3000000.partkey +test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/3a83c4c743f8b17adfd73944b4319c25722a6782 100000 +algod(5229) : No REST API Token found. Generated token: 49bf9511e15ec96766017c74abcde66e975ad057ddcf114d8ca92ad871bccf04 +algod(5229) : No Admin REST API Token found. Generated token: c4cd35cf2024fdc64c2111be09b1e840730cfdcea2564e3f375af5106848c7f9 +algod(5229) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Primary/node.log +algod(5229) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5229) : Initializing the Algorand node... +algod(5229) : Success! +algod(5229) : ⇨ http server started on 127.0.0.1:8080 +algod(5229) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:8080. Press Ctrl-C to exit +algod(5236) : No REST API Token found. Generated token: 4f02a6b1c63caafcfd982b1a71ba17e9b66ab6fa78a4afc8115740467769a90d +algod(5236) : No Admin REST API Token found. 
Generated token: 5b52529e886690ea5999af26697d0d6c38ed53e97c315c56da5e4c4cd3165465 +algod(5236) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV24toV25/Node/node.log +algod(5236) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5236) : Initializing the Algorand node... +algod(5236) : Success! +algod(5236) : ⇨ http server started on 127.0.0.1:35143 +algod(5236) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:35143. Press Ctrl-C to exit +algod(5160) : Exiting on terminated +algod(5168) : Exiting on terminated +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV21toV22 (62.73s) +=== CONT TestAccountsCanSendMoneyAcrossUpgradeV23toV24 +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet2.rootkey +Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet1.rootkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet1.0.3000000.partkey +Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Wallet2.0.3000000.partkey +test-fast-upgrade-https://github.com/algorandfoundation/specs/tree/e5f565421d720c6f75cdd186f7098495caf9101f 100000 +algod(5272) : No REST API Token found. Generated token: 8638c98dbae7c4121d822b1031ac8545eb1751fd2846222d1dffc0253895de29 +algod(5272) : No Admin REST API Token found. 
Generated token: cd4d700c443ab16a438276edbc1bfdb8b9cbf5302ac9a7c8000a0749cb4123f3 +algod(5272) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Primary/node.log +algod(5272) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5272) : Initializing the Algorand node... +algod(5272) : Success! +algod(5272) : ⇨ http server started on 127.0.0.1:39637 +algod(5272) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:39637. Press Ctrl-C to exit +algod(5279) : No REST API Token found. Generated token: 53a88690387e7aaeea3de1dc1ed525337114f994a34ba14f02cdac1b64596fb6 +algod(5279) : No Admin REST API Token found. Generated token: 8cd9a4bdb35fbeedbb59d904fb486dd4374efe4d5eb5cebadea3b369e369c44d +algod(5279) : Logging to: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/TestAccountsCanSendMoneyAcrossUpgradeV23toV24/Node/node.log +algod(5279) : Deadlock detection is set to: enabled (Default state is 'enable') +algod(5279) : Initializing the Algorand node... +algod(5279) : Success! +algod(5279) : ⇨ http server started on 127.0.0.1:42017 +algod(5279) : Node running and accepting RPC requests over HTTP on port 127.0.0.1:42017. 
Press Ctrl-C to exit +algod(5236) : Exiting on terminated +algod(5229) : Exiting on terminated +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV24toV25 (80.65s) +algod(5272) : Exiting on terminated +algod(5279) : Exiting on terminated +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV23toV24 (63.01s) +PASS +ok github.com/algorand/go-algorand/test/e2e-go/upgrades 385.737s +FAIL + diff --git a/debug/logfilter/example6.out.expected b/debug/logfilter/example6.out.expected new file mode 100644 index 0000000000..b3d517124a --- /dev/null +++ b/debug/logfilter/example6.out.expected @@ -0,0 +1,385 @@ +--- PASS: TestAlgodLogsToFile (1.25s) +--- PASS: TestNodeControllerCleanup (5.44s) +ok github.com/algorand/go-algorand/test/e2e-go/cli/algod 5.507s +--- PASS: TestAlgodWithExpect (0.07s) + --- PASS: TestAlgodWithExpect/algodTelemetryLocationTest.exp (0.07s) +ok github.com/algorand/go-algorand/test/e2e-go/cli/algod/expect 0.150s +--- PASS: TestAlgohWithExpect (213.98s) + --- PASS: TestAlgohWithExpect/algohTimeoutTest.exp (213.98s) +ok github.com/algorand/go-algorand/test/e2e-go/cli/algoh/expect 214.091s +--- PASS: TestAccountNew (1.75s) +--- PASS: TestAccountNewDuplicateFails (0.30s) +--- PASS: TestAccountRename (0.85s) +--- PASS: TestAccountMultipleImportRootKey (0.49s) +--- PASS: TestClerkSendNoteEncoding (18.65s) +--- PASS: TestGoalNodeCleanup (0.44s) +ok github.com/algorand/go-algorand/test/e2e-go/cli/goal 26.354s + +--- FAIL: TestGoalWithExpect (1538.34s) + --- PASS: TestGoalWithExpect/basicGoalTest.exp (37.79s) + --- PASS: TestGoalWithExpect/goalAppAccountAddressTest.exp (75.84s) + --- PASS: TestGoalWithExpect/goalNodeConnectionTest.exp (4.91s) + --- PASS: TestGoalWithExpect/goalNodeSystemdTest.exp (0.19s) + --- PASS: TestGoalWithExpect/multisigCreationDeletionTest.exp (8.93s) + --- PASS: TestGoalWithExpect/reportTest.exp (6.20s) + --- PASS: TestGoalWithExpect/statefulTealAppInfoTest.exp (21.86s) + --- PASS: TestGoalWithExpect/basicExpectTest.exp (0.01s) + 
--- PASS: TestGoalWithExpect/listExpiredParticipationKeyTest.exp (67.63s) + --- PASS: TestGoalWithExpect/goalNodeTest.exp (15.93s) + --- PASS: TestGoalWithExpect/doubleSpendingTest.exp (59.13s) + --- PASS: TestGoalWithExpect/goalAccountTest.exp (24.57s) + --- PASS: TestGoalWithExpect/goalCmdFlagsTest.exp (0.98s) + --- PASS: TestGoalWithExpect/goalDryrunRestTest.exp (37.95s) + --- PASS: TestGoalWithExpect/goalFormattingTest.exp (0.16s) + --- PASS: TestGoalWithExpect/limitOrderTest.exp (107.89s) + --- PASS: TestGoalWithExpect/tealConsensusTest.exp (12.31s) + --- PASS: TestGoalWithExpect/goalAssetTest.exp (41.96s) + --- PASS: TestGoalWithExpect/ledgerTest.exp (9.53s) + + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + expectFixture.go:157: err running 'tealAndStatefulTealTest.exp': exit status 1 + stdout: TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod + TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + network create test_net_expect_1617230151 + spawn goal network create --network test_net_expect_1617230151 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.rootkey + Created new rootkey: 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.rootkey + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet1.0.3000000.partkey + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Wallet2.0.3000000.partkey + future 100000 + Network test_net_expect_1617230151 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + network start test_net_expect_1617230151 + spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + network status test_net_expect_1617230151 + spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + [Primary] + Last committed block: 0 + Time since last block: 0.0s + Sync Time: 0.0s + Last consensus protocol: future + Next consensus protocol: future + Round for next consensus protocol: 1 + Next consensus protocol supported: true + + [Node] + Last committed block: 0 + Time since last block: 0.0s + Sync Time: 0.6s + Last consensus protocol: future + Next consensus protocol: future + Round for next consensus protocol: 1 + Next consensus protocol supported: true + + StartNetwork complete + Primary node address is: 127.0.0.1:43613 + Primary Node Address: 127.0.0.1:43613 + spawn goal account list -w unencrypted-default-wallet -d 
/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + [online] W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE 5000000000000000 microAlgos + Account Address: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE Balance: 5000000000000000 + spawn goal account balance -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + 5000000000000000 microAlgos + Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Balance: 5000000000000000 + Primary Account Balance: 5000000000000000 + spawn goal account rewards -w unencrypted-default-wallet -a W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + 0 microAlgos + Wallet: unencrypted-default-wallet, Account: W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, Rewards: 0 + Primary Account Rewards: 0 + spawn goal wallet new Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Please choose a password for wallet 'Wallet_1_1617230151': + Please confirm the password: + Creating wallet... + Created wallet 'Wallet_1_1617230151' + Your new wallet has a backup phrase that can be used for recovery. + Keeping this backup phrase safe is extremely important. + Would you like to see it now? (Y/n): y + Your backup phrase is printed below. + Keep this information safe -- never share it with anyone! 
+ + One or more non-printable characters were ommited from the subsequent line: + [32mattract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret[0mWALLET_1_PASSPHRASE: attract shy usage prison umbrella december sail finish struggle spring walk wisdom bread globe eyebrow admit typical tag december poet labor cable radar absent secret + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + ################################################## + Wallet: Wallet_1_1617230151 + ID: 12dd4a15929ae17827788883ca77479d + ################################################## + Wallet: unencrypted-default-wallet + ID: ec9a33b376e4635705e1339deb6e799b + spawn goal account new -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Please enter the password for wallet 'Wallet_1_1617230151': + Created new account with address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ + Account Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ + spawn goal account list -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + [offline] Unnamed-0 GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ 0 microAlgosAccount Address: GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ + spawn goal wallet new Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Please choose a password for wallet 'Wallet_2_1617230151': + Please confirm the password: + Creating wallet... 
+ Created wallet 'Wallet_2_1617230151' + Your new wallet has a backup phrase that can be used for recovery. + Keeping this backup phrase safe is extremely important. + Would you like to see it now? (Y/n): y + Your backup phrase is printed below. + Keep this information safe -- never share it with anyone! + + One or more non-printable characters were ommited from the subsequent line: + [32mcasual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump[0m + WALLET_2_PASSPHRASE: casual double chuckle method salmon talent cabbage maze parade luggage under elite pass best session sign december cliff master proud since crunch joy above jump + spawn goal wallet list -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + ################################################## + Wallet: Wallet_1_1617230151 + ID: 12dd4a15929ae17827788883ca77479d + ################################################## + Wallet: Wallet_2_1617230151 + ID: 2edbca9e4d78d43556f46cc991415da5 + spawn goal account new -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Please enter the password for wallet 'Wallet_2_1617230151': 12345678 + + Created new account with address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UAAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA + spawn goal account list -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + [offline] Unnamed-1 F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA 0 microAlgosAccount Address: F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA + spawn goal clerk send --fee 1000 --wallet 
unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ, transaction ID: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA. Fee set to 1000 + Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA still pending as of round 8 + Transaction ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA committed in round 10 + TRANSACTION_ID 1: ZUOOECVQQBAMVGTSWHYJG5RGHEUEQJZHL2BUJTSL5YKYBYX5HJJA + spawn goal account balance -a GVKIH6LJCB5SSA5EYVU3K2KLQJLHX6HDXUOMPUO6RXTI6D7JZBKU5QODBQ -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + 1000000000 microAlgos + Account Balance: 1000000000 + Account balance OK: 1000000000 + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA, transaction ID: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ. 
Fee set to 1000 + Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ still pending as of round 11 + Transaction NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ committed in round 13 + TRANSACTION_ID 2: NAFNAJF5Q2OTTRVGFRV63GTIHPPDECXND3I6HES3FKJNALLMEDPQ + spawn goal account balance -a F4FUBJ5HEKVFHZAJCESFD7GU2RSQEFJP7AYSKQLYLZINRMBHENBVCPV2UA -w Wallet_2_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + 1000000000 microAlgos + Account Balance: 1000000000 + Account balance OK: 1000000000 + setting up working dir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work + + writing teal script to file '/home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal' + reading from file /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal + #pragma version 2 + int 1 + + spawn goal clerk compile /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal + /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + spawn goal clerk send --fee 1000 --wallet unencrypted-default-wallet --amount 1000000000 --from W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --to YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Sent 1000000000 MicroAlgos from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE to address 
YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA, transaction ID: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA. Fee set to 1000 + Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA still pending as of round 14 + Transaction IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA committed in round 16 + TRANSACTION_ID_APP: IAS7I66XI6GH7S5RJVJ5ENTMSES62SQKODCF56N6QV7LYV3CCHTA, APP_ACCOUNT_ADDRESS: YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA + spawn goal account balance -a YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA -w Wallet_1_1617230151 -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + 1000000000 microAlgos + Account Balance: 1000000000 + Account balance OK: 1000000000 + calling app create + calling goal app create + spawn goal app create --creator W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE --approval-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal --global-byteslices 1 --global-ints 0 --local-byteslices 0 --local-ints 0 --clear-prog /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/work/simple.teal -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ + Attempting to create app (approval size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A; clear size 5, hash YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646A) + Issued transaction from account W7FNWH6L7DEUMNXU2AIQOYINPYUSU65FE6A6D2A4VI7BQEDTWZJ2FPKJQE, txid JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA (fee 1000) + Transaction JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA still pending as of round 19 + Transaction 
JHHBKLA2OMDYMOPX4MTKQJ2G5HRTORQASBRS5MGPABS4RYE5R7KA committed in round 21 + Created app with app index 4App ID 4 + spawn goal clerk sign -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root/Primary/ -w Wallet_1_1617230151 -i unsginedtransaction1.tx -o sginedtransaction1.tx + Aborting with Error: Timed out signing transaction + GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + GLOBAL_NETWORK_NAME test_net_expect_1617230151 + Stopping network: test_net_expect_1617230151 + spawn goal network stop -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + Network Stopped under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/tealAndStatefulTealTest/algod/root + + + stderr: + --- FAIL: TestGoalWithExpect/tealAndStatefulTealTest.exp (115.77s) + --- PASS: TestGoalWithExpect/testInfraTest.exp (3.41s) + --- PASS: TestGoalWithExpect/createWalletTest.exp (243.76s) + --- PASS: TestGoalWithExpect/goalTxValidityTest.exp (6.89s) + + expectFixture.go:120: algoDir: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod + testDataDir:/home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + expectFixture.go:157: err running 'pingpongTest.exp': exit status 1 + stdout: starting pinpongTest + TEST_ALGO_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod + TEST_DATA_DIR: /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata + network create test_net_expect_1617230521 + spawn goal network create 
--network test_net_expect_1617230521 --template /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/TwoNodes50EachFuture.json --datadir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod --rootdir /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.rootkey + Created new rootkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.rootkey + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet1.0.3000000.partkey + Created new partkey: /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Wallet2.0.3000000.partkey + future 100000 + Network test_net_expect_1617230521 created under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + network start test_net_expect_1617230521 + spawn goal network start -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + Network Started under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + network status test_net_expect_1617230521 + spawn goal network status -r /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + [Primary] + Last committed block: 0 + Time since last block: 0.0s + Sync Time: 0.0s + Last consensus protocol: future + Next consensus protocol: 
future + Round for next consensus protocol: 1 + Next consensus protocol supported: true + + [Node] + Last committed block: 0 + Time since last block: 0.0s + Sync Time: 0.7s + Last consensus protocol: future + Next consensus protocol: future + Round for next consensus protocol: 1 + Next consensus protocol supported: true + + StartNetwork complete + Primary node address is: 127.0.0.1:37299 + Primary Node Address: 127.0.0.1:37299 + spawn goal account list -w unencrypted-default-wallet -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/ + [online] 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U 5000000000000000 microAlgos + Account Address: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U Balance: 5000000000000000 + spawn goal account balance -w unencrypted-default-wallet -a 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U -d /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521/Primary/ + 5000000000000000 microAlgos + Wallet: unencrypted-default-wallet, Account: 3LRXUJIBC6H3JMAGTHMR5Z2DMXAPNS2B3WBAPCZWUVSQB76R243UEM672U, Balance: 5000000000000000 + Primary Account Balance: 5000000000000000 + node status waiting for Round 1 + spawn node status + node status check complete, current round is 0 + Current Round: '0' is less than wait for round: '1' + sleep time 0 + spawn node status + node status check complete, current round is 0 + Current Round: '0' is less than wait for round: '1' + sleep time 1 + spawn node status + Aborting with Error: goal node status timed out + GLOBAL_TEST_ROOT_DIR /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + GLOBAL_NETWORK_NAME test_net_expect_1617230521 + Stopping network: test_net_expect_1617230521 + Network Stopped 
under /home/travis/gopath/src/github.com/algorand/go-algorand/tmp/out/e2e/104122-1617229030279/pingpongTest/algod/root_1617230521 + + stderr: + --- FAIL: TestGoalWithExpect/pingpongTest.exp (26.30s) + --- PASS: TestGoalWithExpect/catchpointCatchupTest.exp (207.24s) + --- PASS: TestGoalWithExpect/goalClerkGroupTest.exp (21.29s) + --- PASS: TestGoalWithExpect/goalNodeStatusTest.exp (18.58s) + --- PASS: TestGoalWithExpect/statefulTealAppReadTest.exp (47.93s) + --- PASS: TestGoalWithExpect/corsTest.exp (9.63s) + --- PASS: TestGoalWithExpect/goalAccountInfoTest.exp (102.64s) + --- PASS: TestGoalWithExpect/statefulTealCreateAppTest.exp (201.07s) +FAIL github.com/algorand/go-algorand/test/e2e-go/cli/goal/expect 1538.381s +ok github.com/algorand/go-algorand/test/e2e-go/cli/perf 0.131s [no tests to run] +--- PASS: TestTealdbgWithExpect (3.29s) + --- PASS: TestTealdbgWithExpect/tealdbgSpinoffTest.exp (1.17s) + --- PASS: TestTealdbgWithExpect/tealdbgTest.exp (2.12s) +ok github.com/algorand/go-algorand/test/e2e-go/cli/tealdbg/expect 3.352s +--- PASS: TestDeadbeatBid (142.06s) +--- PASS: TestStartAndEndAuctionTenUsersTenBidsEach (168.01s) +--- PASS: TestDecayingPrice (108.82s) +--- PASS: TestStartAndCancelAuctionEarlyOneUserTenBids (91.91s) +--- PASS: TestStartAndCancelAuctionOneUserTenBids (135.28s) +--- PASS: TestStartAndEndAuctionTenUsersOneBidEach (155.63s) +ok github.com/algorand/go-algorand/test/e2e-go/features/auction 406.589s +--- PASS: TestCompactCerts (224.23s) +ok github.com/algorand/go-algorand/test/e2e-go/features/compactcert 224.312s +--- PASS: TestZeroSigners (15.21s) +--- PASS: TestDuplicateKeys (31.65s) +--- PASS: TestZeroThreshold (14.61s) +--- PASS: TestBasicMultisig (52.16s) +ok github.com/algorand/go-algorand/test/e2e-go/features/multisig 67.439s +--- PASS: TestParticipationKeyOnlyAccountParticipatesCorrectly (32.97s) +--- PASS: TestRewardUnitThreshold (79.11s) +--- PASS: TestOnlineOfflineRewards (97.68s) +--- PASS: 
TestRewardRateRecalculation (105.57s) +ok github.com/algorand/go-algorand/test/e2e-go/features/participation 184.782s +--- PASS: TestLeaseTransactionsSameSender (21.84s) +--- PASS: TestAssetInformation (29.59s) +--- PASS: TestTxnMerkleProof (9.01s) +--- PASS: TestAccountsCanChangeOnlineStateInTheFuture (23.47s) +--- PASS: TestAccountsCanSendMoney (31.42s) +--- PASS: TestAccountsCanChangeOnlineState (13.95s) +--- PASS: TestLeaseTransactionsDifferentSender (18.40s) +--- PASS: TestLeaseTransactionsSameSenderDifferentLease (9.77s) +--- PASS: TestLeaseRegressionFaultyFirstValidCheckNew_2f3880f7 (14.01s) +--- PASS: TestAssetCreateWaitRestartDelete (35.88s) +--- PASS: TestOverlappingLeases (92.91s) +--- PASS: TestGroupTransactionsSubmission (7.79s) +--- PASS: TestLeaseRegressionFaultyFirstValidCheckOld_2f3880f7 (17.76s) +--- PASS: TestGroupTransactionsDifferentSizes (34.62s) +--- PASS: TestAccountsCanClose (34.81s) +--- PASS: TestGroupTransactions (30.62s) +--- PASS: TestAssetSend (34.51s) +--- PASS: TestAssetValidRounds (10.39s) +--- PASS: TestAssetGroupCreateSendDestroy (31.74s) +--- PASS: TestAccountInformationV2 (28.95s) +ok github.com/algorand/go-algorand/test/e2e-go/features/transactions 270.274s +--- PASS: TestGoodAuthSucceeds (0.21s) +--- PASS: TestMasterKeyGeneratePastImportedKeys (0.30s) +--- PASS: TestMultisigImportList (0.26s) +--- PASS: TestSignProgram (0.25s) +--- PASS: TestMasterKeyImportExport (0.32s) +--- PASS: TestDeleteKey (0.25s) +--- PASS: TestSignTransaction (0.26s) +--- PASS: TestImportKey (0.26s) +--- PASS: TestAbsSQLiteWalletConfigSucceeds (0.00s) +--- PASS: TestExportKey (0.26s) +--- PASS: TestNonAbsSQLiteWalletConfigFails (0.00s) +--- PASS: TestGenerateAndListKeys (0.27s) +--- PASS: TestBadAuthFails (0.21s) +--- PASS: TestServerStartsStopsSuccessfully (0.21s) +--- PASS: TestWalletCreation (0.23s) +--- PASS: TestWalletSessionExpiry (2.23s) +--- PASS: TestWalletSessionRenew (2.23s) +--- PASS: TestWalletRename (0.24s) +--- PASS: 
TestBlankWalletCreation (0.23s) +--- PASS: TestMultisigSignWithSigner (0.25s) +--- PASS: TestWalletSessionRelease (0.23s) +--- PASS: TestMultisigSignWithWrongSigner (0.26s) +--- PASS: TestMultisigSignProgram (0.26s) +--- PASS: TestMultisigSign (0.25s) +--- PASS: TestMultisigExportDelete (0.25s) +ok github.com/algorand/go-algorand/test/e2e-go/kmd 5.048s +ok github.com/algorand/go-algorand/test/e2e-go/perf 0.069s [no tests to run] +--- PASS: TestClientCanGetStatus (0.01s) +--- PASS: TestClientCanGetStatusAfterBlock (13.80s) +--- PASS: TestTransactionsByAddr (17.97s) +--- PASS: TestClientCanGetVersion (0.00s) +--- PASS: TestClientCanGetSuggestedFee (0.02s) +--- PASS: TestClientCanGetMinTxnFee (0.01s) +--- PASS: TestClientCanGetBlockInfo (0.03s) +--- PASS: TestClientRejectsBadFromAddressWhenSending (0.01s) +--- PASS: TestClientRejectsBadToAddressWhenSending (0.02s) +--- PASS: TestClientRejectsMutatedFromAddressWhenSending (0.02s) +--- PASS: TestClientRejectsMutatedToAddressWhenSending (0.01s) +--- PASS: TestClientRejectsSendingMoneyFromAccountForWhichItHasNoKey (0.01s) +--- PASS: TestClientOversizedNote (0.02s) +--- PASS: TestClientCanSendAndGetNote (7.32s) +--- PASS: TestClientCanGetTransactionStatus (8.23s) +--- PASS: TestAccountBalance (8.33s) +--- PASS: TestAccountParticipationInfo (8.33s) +--- PASS: TestSupply (0.00s) +--- PASS: TestClientCanGetGoRoutines (0.01s) +--- PASS: TestSendingTooMuchFails (0.07s) +--- PASS: TestSendingFromEmptyAccountFails (0.04s) +--- PASS: TestSendingTooLittleToEmptyAccountFails (0.06s) +--- PASS: TestSendingLowFeeFails (0.07s) +--- PASS: TestSendingNotClosingAccountFails (3.89s) +--- PASS: TestClientCanGetPendingTransactions (8.20s) +--- PASS: TestClientTruncatesPendingTransactions (9.60s) +--- PASS: TestManyAccountsCanGoOnline (21.76s) +ok github.com/algorand/go-algorand/test/e2e-go/stress/transactions 21.821s +--- PASS: TestApplicationsUpgradeOverREST (69.75s) +--- PASS: TestApplicationsUpgradeOverGossip (72.80s) +--- PASS: 
TestRekeyUpgrade (53.44s) +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV15toV16 (63.93s) +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV22toV23 (80.36s) +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV21toV22 (62.73s) +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV24toV25 (80.65s) +--- PASS: TestAccountsCanSendMoneyAcrossUpgradeV23toV24 (63.01s) +ok github.com/algorand/go-algorand/test/e2e-go/upgrades 385.737s diff --git a/debug/logfilter/main.go b/debug/logfilter/main.go new file mode 100644 index 0000000000..d8efd92352 --- /dev/null +++ b/debug/logfilter/main.go @@ -0,0 +1,137 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// logfilter buffer go test output and make sure to limit the output to only the error-related stuff. +package main + +import ( + "bufio" + "fmt" + "io" + "os" + "strings" +) + +type test struct { + name string + outputBuffer string +} + +func logFilter(inFile io.Reader, outFile io.Writer) int { + scanner := bufio.NewScanner(inFile) + + tests := make(map[string]test) + currentTestName := "" + // packageOutputBuffer is used to buffer messages that are package-oriented. i.e. TestMain() generated messages, + // which are called before any test starts to run. 
+ packageOutputBuffer := "" + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 { + continue + } + if strings.HasPrefix(line, "=== RUN") { + var testName string + fmt.Sscanf(line, "=== RUN %s", &testName) + currentTestName = testName + if _, have := tests[currentTestName]; !have { + tests[currentTestName] = test{name: currentTestName} + } + continue + } + if strings.HasPrefix(line, "=== CONT") { + var testName string + fmt.Sscanf(line, "=== CONT %s", &testName) + currentTestName = testName + if _, have := tests[currentTestName]; !have { + panic(fmt.Errorf("test %s is missing", currentTestName)) + } + continue + } + if strings.HasPrefix(line, "=== PAUSE") { + var testName string + fmt.Sscanf(line, "=== PAUSE %s", &testName) + currentTestName = "" + if _, have := tests[testName]; !have { + panic(fmt.Errorf("test %s is missing", testName)) + } + continue + } + if idx := strings.Index(line, "--- PASS:"); idx >= 0 { + var testName string + fmt.Sscanf(line[idx:], "--- PASS: %s", &testName) + if _, have := tests[testName]; !have { + panic(fmt.Errorf("test '%s' is missing, when parsing '%s'", testName, line)) + } + fmt.Fprintf(outFile, line+"\r\n") + delete(tests, testName) + continue + } + if idx := strings.Index(line, "--- FAIL:"); idx >= 0 { + var testName string + fmt.Sscanf(line[idx:], "--- FAIL: %s", &testName) + test, have := tests[testName] + if !have { + panic(fmt.Errorf("test %s is missing", testName)) + } + fmt.Fprintf(outFile, test.outputBuffer+"\r\n") + fmt.Fprintf(outFile, line+"\r\n") + test.outputBuffer = "" + tests[testName] = test + continue + } + // otherwise, add the line to the current test ( if there is such ) + currentTest, have := tests[currentTestName] + if have { + currentTest.outputBuffer += "\r\n" + line + tests[currentTestName] = currentTest + continue + } + // no current test is only legit if we're PASS, FAIL or package test line summary. 
+ if line == "PASS" || line == "FAIL" { + continue + } + if strings.HasPrefix(line, "ok ") { + fmt.Fprintf(outFile, line+"\r\n") + packageOutputBuffer = "" + continue + } + if strings.HasPrefix(line, "FAIL ") { + if len(packageOutputBuffer) > 0 { + fmt.Fprintf(outFile, line+"...\r\n%s\r\n", packageOutputBuffer) + } + packageOutputBuffer = "" + fmt.Fprintf(outFile, line+"\r\n") + continue + } + // this is package-oriented output + packageOutputBuffer += line + "\r\n" + } + scannerErr := scanner.Err() + if scannerErr != nil { + if currentTestName != "" && tests[currentTestName].outputBuffer != "" { + fmt.Fprint(outFile, tests[currentTestName].outputBuffer) + } + fmt.Fprintf(outFile, "logfilter: the following error received on the input stream : %v\r\n", scannerErr) + return 0 + } + return 0 +} + +func main() { + retCode := logFilter(os.Stdin, os.Stdout) + os.Exit(retCode) +} diff --git a/debug/logfilter/main_test.go b/debug/logfilter/main_test.go new file mode 100644 index 0000000000..be173271cb --- /dev/null +++ b/debug/logfilter/main_test.go @@ -0,0 +1,59 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +// logfilter buffer go test output and make sure to limit the output to only the error-related stuff. 
+package main + +import ( + "bytes" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLogFilterExamples(t *testing.T) { + // iterate on all the example files in the local directroy. + exampleFiles := []string{} + filepath.Walk(".", func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + return nil + } + if strings.Contains(info.Name(), "example") && strings.HasSuffix(info.Name(), ".in") { + exampleFiles = append(exampleFiles, path) + } + return nil + }) + for _, exampleFileName := range exampleFiles { + // load the expected result file. + expectedOutFile := strings.Replace(exampleFileName, ".in", ".out.expected", 1) + expectedOutBytes, err := ioutil.ReadFile(expectedOutFile) + require.NoError(t, err) + + inFile, err := os.Open(exampleFileName) + require.NoError(t, err) + writingBuffer := bytes.NewBuffer(nil) + errCode := logFilter(inFile, writingBuffer) + require.Zero(t, errCode) + require.Equal(t, string(expectedOutBytes), writingBuffer.String()) + } +} diff --git a/test/scripts/e2e_go_tests.sh b/test/scripts/e2e_go_tests.sh index 076f206018..0802b1f4ee 100755 --- a/test/scripts/e2e_go_tests.sh +++ b/test/scripts/e2e_go_tests.sh @@ -3,6 +3,7 @@ echo "######################################################################" echo " e2e_go_tests" echo "######################################################################" set -e +set -o pipefail export GOPATH=$(go env GOPATH) export GO111MODULE=on @@ -95,14 +96,14 @@ if [ "${#TESTPATTERNS[@]}" -eq 0 ]; then for TEST_DIR in ${TESTS_DIRECTORIES[@]}; do TESTS=$(go test -list ".*" ${TEST_DIR} -vet=off | grep -v "github.com" || true) for TEST_NAME in ${TESTS[@]}; do - go test ${RACE_OPTION} -timeout 1h -vet=off -v ${SHORTTEST} -run ${TEST_NAME} ${TEST_DIR} - KMD_INSTANCES_COUNT=$(ps -Af | grep kmd | grep -v "grep" | wc -l | tr -d ' ') + go test ${RACE_OPTION} -timeout 1h -vet=off -v 
${SHORTTEST} -run ${TEST_NAME} ${TEST_DIR} | logfilter + KMD_INSTANCES_COUNT=$(set +o pipefail; ps -Af | grep kmd | grep -v "grep" | wc -l | tr -d ' ') if [ "${KMD_INSTANCES_COUNT}" != "0" ]; then echo "One or more than one KMD instances remains running:" ps -Af | grep kmd | grep -v "grep" exit 1 fi - ALGOD_INSTANCES_COUNT=$(ps -Af | grep algod | grep -v "grep" | wc -l | tr -d ' ') + ALGOD_INSTANCES_COUNT=$(set +o pipefail; ps -Af | grep algod | grep -v "grep" | wc -l | tr -d ' ') if [ "${ALGOD_INSTANCES_COUNT}" != "0" ]; then echo "One or more than one algod instances remains running:" ps -Af | grep algod | grep -v "grep" @@ -111,11 +112,11 @@ if [ "${#TESTPATTERNS[@]}" -eq 0 ]; then done done else - go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} ./... + go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} ./... | logfilter fi else for TEST in ${TESTPATTERNS[@]}; do - go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} -run ${TEST} ./... + go test ${RACE_OPTION} -timeout 1h -v ${SHORTTEST} -run ${TEST} ./... | logfilter done fi From ab78f5f6f03b8ee00ab541e0d180c2cdc4b2b069 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 8 Apr 2021 15:54:46 -0400 Subject: [PATCH 164/215] Extend catchpointdump utility to verify merkle trie (#2050) This PR extends the `catchpointdump` utility and add the ability to scan a tracker database and retrieve the merkle trie statistics. The statistics themselves aren't very useful on their own, but the scan process verify the structure of the merkle trie, allowing us to verify the consistency of a given database. 
--- cmd/catchpointdump/commands.go | 2 - cmd/catchpointdump/database.go | 68 +++++++++++++++++++++++++++++ crypto/merkletrie/cache_test.go | 2 +- crypto/merkletrie/committer_test.go | 68 ++++++++++++++++++++++++++--- crypto/merkletrie/node.go | 12 ++--- crypto/merkletrie/trie.go | 8 ++-- crypto/merkletrie/trie_test.go | 18 ++++---- ledger/accountdb.go | 18 +++++--- ledger/acctupdates.go | 16 ++++--- ledger/catchupaccessor.go | 16 +++---- 10 files changed, 177 insertions(+), 51 deletions(-) diff --git a/cmd/catchpointdump/commands.go b/cmd/catchpointdump/commands.go index 96cb34a390..0ee9b433d3 100644 --- a/cmd/catchpointdump/commands.go +++ b/cmd/catchpointdump/commands.go @@ -41,11 +41,9 @@ var kmdDataDirFlag string var versionCheck bool func init() { - // file.go rootCmd.AddCommand(fileCmd) rootCmd.AddCommand(netCmd) rootCmd.AddCommand(databaseCmd) - } var rootCmd = &cobra.Command{ diff --git a/cmd/catchpointdump/database.go b/cmd/catchpointdump/database.go index 95beb16ee7..1330af7a27 100644 --- a/cmd/catchpointdump/database.go +++ b/cmd/catchpointdump/database.go @@ -17,11 +17,16 @@ package main import ( + "context" + "database/sql" + "fmt" "os" "github.com/spf13/cobra" + "github.com/algorand/go-algorand/crypto/merkletrie" "github.com/algorand/go-algorand/ledger" + "github.com/algorand/go-algorand/util/db" ) var ledgerTrackerFilename string @@ -29,6 +34,9 @@ var ledgerTrackerFilename string func init() { databaseCmd.Flags().StringVarP(&ledgerTrackerFilename, "tracker", "t", "", "Specify the ledger tracker file name ( i.e. ./ledger.tracker.sqlite )") databaseCmd.Flags().StringVarP(&outFileName, "output", "o", "", "Specify an outfile for the dump ( i.e. ledger.dump.txt )") + databaseCmd.AddCommand(checkCmd) + + checkCmd.Flags().StringVarP(&ledgerTrackerFilename, "tracker", "t", "", "Specify the ledger tracker file name ( i.e. 
./ledger.tracker.sqlite )") } var databaseCmd = &cobra.Command{ @@ -56,3 +64,63 @@ var databaseCmd = &cobra.Command{ } }, } + +var checkCmd = &cobra.Command{ + Use: "check", + Short: "Performs a consistency checking on the accounts merkle trie", + Long: "Performs a consistency checking on the accounts merkle trie", + Args: validateNoPosArgsFn, + Run: func(cmd *cobra.Command, args []string) { + if ledgerTrackerFilename == "" { + cmd.HelpFunc()(cmd, args) + return + } + + outFile := os.Stdout + fmt.Fprintf(outFile, "Checking tracker database at %s.\n", ledgerTrackerFilename) + err := checkDatabase(ledgerTrackerFilename, outFile) + if err != nil { + reportErrorf("Error checking database : %v", err) + } + }, +} + +func checkDatabase(databaseName string, outFile *os.File) error { + dbAccessor, err := db.MakeAccessor(databaseName, true, false) + if err != nil || dbAccessor.Handle == nil { + return err + } + if dbAccessor.Handle == nil { + return fmt.Errorf("database handle is nil when opening database %s", databaseName) + } + defer func() { + dbAccessor.Close() + }() + + var stats merkletrie.Stats + err = dbAccessor.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { + committer, err := ledger.MakeMerkleCommitter(tx, false) + if err != nil { + return err + } + trie, err := merkletrie.MakeTrie(committer, ledger.TrieMemoryConfig) + if err != nil { + return err + } + stats, err = trie.GetStats() + if err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + + fmt.Fprintf(outFile, "Merkle trie statistics:\n") + fmt.Fprintf(outFile, " Nodes count: %d\n", stats.NodesCount) + fmt.Fprintf(outFile, " Leaf count: %d\n", stats.LeafCount) + fmt.Fprintf(outFile, " Depth: %d\n", stats.Depth) + fmt.Fprintf(outFile, " Size: %d\n", stats.Size) + return nil +} diff --git a/crypto/merkletrie/cache_test.go b/crypto/merkletrie/cache_test.go index 8b50d8ea75..9d35f92aa4 100644 --- a/crypto/merkletrie/cache_test.go +++ b/crypto/merkletrie/cache_test.go @@ 
-474,7 +474,7 @@ func TestCacheLoadingDeferedPage(t *testing.T) { require.NoError(t, err) // verify that the cache doesn't reset the mtc.deferedPageLoad on loading a non-defered page. - dupMem := memoryCommitter1.Duplicate() + dupMem := memoryCommitter1.Duplicate(false) mt2, _ := MakeTrie(dupMem, defaultTestMemoryConfig) lastPage := int64(mt2.nextNodeID) / defaultTestMemoryConfig.NodesCountPerPage require.Equal(t, uint64(lastPage), mt2.cache.deferedPageLoad) diff --git a/crypto/merkletrie/committer_test.go b/crypto/merkletrie/committer_test.go index c9eb54091d..c82a6f9156 100644 --- a/crypto/merkletrie/committer_test.go +++ b/crypto/merkletrie/committer_test.go @@ -26,10 +26,16 @@ import ( ) // Duplicate duplicates the current memory committer. -func (mc *InMemoryCommitter) Duplicate() (out *InMemoryCommitter) { +func (mc *InMemoryCommitter) Duplicate(flat bool) (out *InMemoryCommitter) { out = &InMemoryCommitter{memStore: make(map[uint64][]byte)} for k, v := range mc.memStore { - out.memStore[k] = v + if flat { + out.memStore[k] = v + } else { + bytes := make([]byte, len(v)) + copy(bytes[:], v[:]) + out.memStore[k] = bytes + } } return } @@ -53,7 +59,7 @@ func TestInMemoryCommitter(t *testing.T) { } releasedNodes, err := mt1.Evict(true) require.NoError(t, err) - savedMemoryCommitter := memoryCommitter.Duplicate() + savedMemoryCommitter := memoryCommitter.Duplicate(false) require.Equal(t, 19282, releasedNodes) for i := len(hashes) / 2; i < len(hashes); i++ { mt1.Add(hashes[i][:]) @@ -78,8 +84,8 @@ func TestInMemoryCommitter(t *testing.T) { } require.Equal(t, 2425675, storageSize) // 2,425,575 / 50,000 ~= 48 bytes/leaf. 
stats, _ := mt1.GetStats() - require.Equal(t, leafsCount, int(stats.leafCount)) - require.Equal(t, 61926, int(stats.nodesCount)) + require.Equal(t, leafsCount, int(stats.LeafCount)) + require.Equal(t, 61926, int(stats.NodesCount)) } @@ -123,8 +129,8 @@ func TestNoRedundentPages(t *testing.T) { } } stats, _ := mt1.GetStats() - require.Equal(t, testSize, int(stats.leafCount)) - nodesCount := int(stats.nodesCount) + require.Equal(t, testSize, int(stats.LeafCount)) + nodesCount := int(stats.NodesCount) require.Equal(t, nodesCount, len(trieNodes)) require.Equal(t, nodesCount, mt1.cache.cachedNodeCount) } @@ -187,3 +193,51 @@ func TestMultipleCommits(t *testing.T) { } require.Equal(t, storageSize1, storageSize2) } + +func TestIterativeCommits(t *testing.T) { + testSize := 1000 + + memConfig := MemoryConfig{ + NodesCountPerPage: 116, + CachedNodesCount: 9000, + PageFillFactor: 0.95, + MaxChildrenPagesThreshold: 64, + } + + hashes := make([]crypto.Digest, testSize) + for i := 0; i < len(hashes); i++ { + hashes[i] = crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(0), byte(0)}) + } + + // initialize memory container. 
+ mc := &InMemoryCommitter{} + mt, _ := MakeTrie(mc, memConfig) + for i := 0; i < len(hashes); i++ { + added, err := mt.Add(hashes[i][:]) + require.True(t, added) + require.NoError(t, err) + } + _, err := mt.Commit() + require.NoError(t, err) + + for r := 0; r < 100; r++ { + newMC := mc.Duplicate(true) + mt, _ = MakeTrie(newMC, memConfig) + mc = newMC + + for k := r * 5; k < r*7+len(hashes); k++ { + i := k % len(hashes) + deleted, err := mt.Delete(hashes[i][:]) + require.True(t, deleted) + require.NoError(t, err) + hashes[i] = crypto.Hash([]byte{byte(i), byte(i >> 8), byte(i >> 16), byte(i >> 24), byte(r + 1), byte((r + 1) >> 8)}) + added, err := mt.Add(hashes[i][:]) + require.True(t, added) + require.NoError(t, err) + } + _, err := mt.Commit() + require.NoError(t, err) + mt = nil + + } +} diff --git a/crypto/merkletrie/node.go b/crypto/merkletrie/node.go index a934426b48..ac9605e666 100644 --- a/crypto/merkletrie/node.go +++ b/crypto/merkletrie/node.go @@ -47,16 +47,16 @@ var childEntrySize int = int(unsafe.Sizeof(childEntry{})) // stats recursively update the provided Stats structure with the current node information func (n *node) stats(cache *merkleTrieCache, stats *Stats, depth int) (err error) { - stats.nodesCount++ + stats.NodesCount++ if n.leaf() { - stats.leafCount++ - if depth > stats.depth { - stats.depth = depth + stats.LeafCount++ + if depth > stats.Depth { + stats.Depth = depth } - stats.size += sliceSize + len(n.hash) + bitsetSize + stats.Size += sliceSize + len(n.hash) + bitsetSize return nil } - stats.size += sliceSize + len(n.hash) + sliceSize + len(n.children)*childEntrySize + bitsetSize + stats.Size += sliceSize + len(n.hash) + sliceSize + len(n.children)*childEntrySize + bitsetSize for _, child := range n.children { childNode, err := cache.getNode(child.id) if err != nil { diff --git a/crypto/merkletrie/trie.go b/crypto/merkletrie/trie.go index 2efd76d895..9cb855ccc7 100644 --- a/crypto/merkletrie/trie.go +++ b/crypto/merkletrie/trie.go @@ 
-69,10 +69,10 @@ type Trie struct { // Stats structure is a helper for finding underlaying statistics about the trie type Stats struct { - nodesCount uint - leafCount uint - depth int - size int + NodesCount uint + LeafCount uint + Depth int + Size int } // MakeTrie creates a merkle trie diff --git a/crypto/merkletrie/trie_test.go b/crypto/merkletrie/trie_test.go index 46e23b1fcd..120ac9ed33 100644 --- a/crypto/merkletrie/trie_test.go +++ b/crypto/merkletrie/trie_test.go @@ -43,16 +43,16 @@ func TestAddingAndRemoving(t *testing.T) { require.Equal(t, true, addResult) rootsWhileAdding[i], _ = mt.RootHash() stats, _ := mt.GetStats() - require.Equal(t, i+1, int(stats.leafCount)) + require.Equal(t, i+1, int(stats.LeafCount)) } stats, _ := mt.GetStats() - require.Equal(t, len(hashes), int(stats.leafCount)) - require.Equal(t, 4, int(stats.depth)) - require.Equal(t, 10915, int(stats.nodesCount)) - require.Equal(t, 1135745, int(stats.size)) - require.True(t, int(stats.nodesCount) > len(hashes)) - require.True(t, int(stats.nodesCount) < 2*len(hashes)) + require.Equal(t, len(hashes), int(stats.LeafCount)) + require.Equal(t, 4, int(stats.Depth)) + require.Equal(t, 10915, int(stats.NodesCount)) + require.Equal(t, 1135745, int(stats.Size)) + require.True(t, int(stats.NodesCount) > len(hashes)) + require.True(t, int(stats.NodesCount) < 2*len(hashes)) allHashesAddedRoot, _ := mt.RootHash() @@ -67,8 +67,8 @@ func TestAddingAndRemoving(t *testing.T) { roothash, _ := mt.RootHash() require.Equal(t, crypto.Digest{}, roothash) stats, _ = mt.GetStats() - require.Equal(t, 0, int(stats.leafCount)) - require.Equal(t, 0, int(stats.depth)) + require.Equal(t, 0, int(stats.LeafCount)) + require.Equal(t, 0, int(stats.Depth)) // add the items in a different order. 
hashesOrder := rand.New(rand.NewSource(1234567)).Perm(len(hashes)) diff --git a/ledger/accountdb.go b/ledger/accountdb.go index e0aa74025b..fd33308853 100644 --- a/ledger/accountdb.go +++ b/ledger/accountdb.go @@ -1313,15 +1313,19 @@ func reencodeAccounts(ctx context.Context, tx *sql.Tx) (modifiedAccounts uint, e return } -type merkleCommitter struct { +// MerkleCommitter todo +//msgp:ignore MerkleCommitter +type MerkleCommitter struct { tx *sql.Tx deleteStmt *sql.Stmt insertStmt *sql.Stmt selectStmt *sql.Stmt } -func makeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err error) { - mc = &merkleCommitter{tx: tx} +// MakeMerkleCommitter creates a MerkleCommitter object that implements the merkletrie.Committer interface allowing storing and loading +// merkletrie pages from a sqlite database. +func MakeMerkleCommitter(tx *sql.Tx, staging bool) (mc *MerkleCommitter, err error) { + mc = &MerkleCommitter{tx: tx} accountHashesTable := "accounthashes" if staging { accountHashesTable = "catchpointaccounthashes" @@ -1341,8 +1345,8 @@ func makeMerkleCommitter(tx *sql.Tx, staging bool) (mc *merkleCommitter, err err return mc, nil } -// StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqllite database table. -func (mc *merkleCommitter) StorePage(page uint64, content []byte) error { +// StorePage is the merkletrie.Committer interface implementation, stores a single page in a sqlite database table. +func (mc *MerkleCommitter) StorePage(page uint64, content []byte) error { if len(content) == 0 { _, err := mc.deleteStmt.Exec(page) return err @@ -1351,8 +1355,8 @@ func (mc *merkleCommitter) StorePage(page uint64, content []byte) error { return err } -// LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqllite database table. 
-func (mc *merkleCommitter) LoadPage(page uint64) (content []byte, err error) { +// LoadPage is the merkletrie.Committer interface implementation, load a single page from a sqlite database table. +func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) { err = mc.selectStmt.QueryRow(page).Scan(&content) if err == sql.ErrNoRows { content = nil diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index a5decac61a..9c6f15e04c 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -99,7 +99,8 @@ const initializingAccountCachesMessageTimeout = 3 * time.Second // where we end up batching up to 1000 rounds in a single update. const accountsUpdatePerRoundHighWatermark = 1 * time.Second -var trieMemoryConfig = merkletrie.MemoryConfig{ +// TrieMemoryConfig is the memory configuration setup used for the merkle trie. +var TrieMemoryConfig = merkletrie.MemoryConfig{ NodesCountPerPage: merkleCommitterNodesPerPage, CachedNodesCount: trieCachedNodesCount, PageFillFactor: 0.95, @@ -1258,12 +1259,12 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b } // create the merkle trie for the balances - committer, err := makeMerkleCommitter(tx, false) + committer, err := MakeMerkleCommitter(tx, false) if err != nil { return 0, fmt.Errorf("accountsInitialize was unable to makeMerkleCommitter: %v", err) } - trie, err := merkletrie.MakeTrie(committer, trieMemoryConfig) + trie, err := merkletrie.MakeTrie(committer, TrieMemoryConfig) if err != nil { return 0, fmt.Errorf("accountsInitialize was unable to MakeTrie: %v", err) } @@ -1533,7 +1534,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDe deleteHash := accountHashBuilder(addr, delta.old.accountData, protocol.Encode(&delta.old.accountData)) deleted, err = au.balancesTrie.Delete(deleteHash) if err != nil { - return err + return fmt.Errorf("failed to delete hash '%s' from merkle trie for account %v: %w", hex.EncodeToString(deleteHash), addr, 
err) } if !deleted { au.log.Warnf("failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(deleteHash), addr) @@ -1546,7 +1547,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDe addHash := accountHashBuilder(addr, delta.new, protocol.Encode(&delta.new)) added, err = au.balancesTrie.Add(addHash) if err != nil { - return err + return fmt.Errorf("attempted to add duplicate hash '%s' to merkle trie for account %v: %w", hex.EncodeToString(addHash), addr, err) } if !added { au.log.Warnf("attempted to add duplicate hash '%s' to merkle trie for account %v", hex.EncodeToString(addHash), addr) @@ -1567,6 +1568,7 @@ func (au *accountUpdates) accountsUpdateBalances(accountsDeltas compactAccountDe if accumulatedChanges > 0 { _, err = au.balancesTrie.Commit() } + return } @@ -2039,12 +2041,12 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb err := au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { treeTargetRound := basics.Round(0) if au.catchpointInterval > 0 { - mc, err0 := makeMerkleCommitter(tx, false) + mc, err0 := MakeMerkleCommitter(tx, false) if err0 != nil { return err0 } if au.balancesTrie == nil { - trie, err := merkletrie.MakeTrie(mc, trieMemoryConfig) + trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig) if err != nil { au.log.Warnf("unable to create merkle trie during committedUpTo: %v", err) return err diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go index 7491297e08..dc07128578 100644 --- a/ledger/catchupaccessor.go +++ b/ledger/catchupaccessor.go @@ -466,19 +466,19 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro uncommitedHashesCount := 0 keepWriting := true hashesWritten := uint64(0) - var mc *merkleCommitter + var mc *MerkleCommitter if progressUpdates != nil { progressUpdates(hashesWritten) } err := wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // create the merkle 
trie for the balances - mc, err = makeMerkleCommitter(tx, true) + mc, err = MakeMerkleCommitter(tx, true) if err != nil { return } - trie, err = merkletrie.MakeTrie(mc, trieMemoryConfig) + trie, err = merkletrie.MakeTrie(mc, TrieMemoryConfig) return err }) if err != nil { @@ -501,7 +501,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro } err = rdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { - mc, err = makeMerkleCommitter(tx, true) + mc, err = MakeMerkleCommitter(tx, true) if err != nil { return } @@ -528,7 +528,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro err = wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // set a long 30-second window for the evict before warning is generated. db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second)) - mc, err = makeMerkleCommitter(tx, true) + mc, err = MakeMerkleCommitter(tx, true) if err != nil { return } @@ -557,7 +557,7 @@ func (c *CatchpointCatchupAccessorImpl) BuildMerkleTrie(ctx context.Context, pro err = wdb.Atomic(func(transactionCtx context.Context, tx *sql.Tx) (err error) { // set a long 30-second window for the evict before warning is generated. 
db.ResetTransactionWarnDeadline(transactionCtx, tx, time.Now().Add(30*time.Second)) - mc, err = makeMerkleCommitter(tx, true) + mc, err = MakeMerkleCommitter(tx, true) if err != nil { return } @@ -618,12 +618,12 @@ func (c *CatchpointCatchupAccessorImpl) VerifyCatchpoint(ctx context.Context, bl ledgerVerifycatchpointCount.Inc(nil) err = rdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { // create the merkle trie for the balances - mc, err0 := makeMerkleCommitter(tx, true) + mc, err0 := MakeMerkleCommitter(tx, true) if err0 != nil { return fmt.Errorf("unable to make MerkleCommitter: %v", err0) } var trie *merkletrie.Trie - trie, err = merkletrie.MakeTrie(mc, trieMemoryConfig) + trie, err = merkletrie.MakeTrie(mc, TrieMemoryConfig) if err != nil { return fmt.Errorf("unable to make trie: %v", err) } From feb7d093f50a8a3d3c4dca8a8afaeb31f208fb5d Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 9 Apr 2021 09:23:48 -0400 Subject: [PATCH 165/215] make the msgp less verbosed on the success case. (#2054) The message pack generator is very noisy. It tends to emit lot of messages that aren't being used. This PR cache the output of the message pack so that it would only present these in case of an error. --- Makefile | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 0bca14bf43..b10d47ca91 100644 --- a/Makefile +++ b/Makefile @@ -107,7 +107,18 @@ generate: deps msgp: $(patsubst %,%/msgp_gen.go,$(MSGP_GENERATE)) %/msgp_gen.go: deps ALWAYS - $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand + @set +e; \ + printf "msgp: $(@D)..."; \ + $(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand > ./$@.out 2>&1; \ + if [ "$$?" 
!= "0" ]; then \ + printf "failed:\n$(GOPATH1)/bin/msgp -file ./$(@D) -o $@ -warnmask github.com/algorand/go-algorand\n"; \ + cat ./$@.out; \ + rm ./$@.out; \ + exit 1; \ + else \ + echo " done."; \ + fi; \ + rm -f ./$@.out ALWAYS: # build our fork of libsodium, placing artifacts into crypto/lib/ and crypto/include/ From db1c0659d8f33d6f4451578886d17cfd0ba4ae02 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 9 Apr 2021 15:46:01 -0400 Subject: [PATCH 166/215] ensure the participation key database is being correctly closed after being installed. (#2056) Ensure the participation key database is being correctly closed after being installed. --- cmd/goal/account.go | 3 ++- libgoal/participation.go | 14 ++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/cmd/goal/account.go b/cmd/goal/account.go index c5505ec805..dfb46938f5 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -859,10 +859,11 @@ No --delete-input flag specified, exiting without installing key.`) dataDir := ensureSingleDataDir() client := ensureAlgodClient(dataDir) - _, _, err := client.InstallParticipationKeys(partKeyFile) + partKey, _, err := client.InstallParticipationKeys(partKeyFile) if err != nil { reportErrorf(errorRequestFail, err) } + partKey.Close() fmt.Println("Participation key installed successfully") }, } diff --git a/libgoal/participation.go b/libgoal/participation.go index 319229575f..bb754f0f54 100644 --- a/libgoal/participation.go +++ b/libgoal/participation.go @@ -171,6 +171,12 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k // based on an existing database from inputfile. On successful install, it // deletes the input file. 
func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) { + proto, ok := c.consensus[protocol.ConsensusCurrentVersion] + if !ok { + err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion) + return + } + // Get the GenesisID for use in the participation key path var genID string genID, err = c.GenesisID() @@ -210,12 +216,7 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic newpartkey.Store = newdb err = newpartkey.Persist() if err != nil { - return - } - - proto, ok := c.consensus[protocol.ConsensusCurrentVersion] - if !ok { - err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion) + newpartkey.Close() return } @@ -229,6 +230,7 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic errCh := partkey.DeleteOldKeys(basics.Round(math.MaxUint64), proto) err = <-errCh if err != nil { + newpartkey.Close() return } os.Remove(inputfile) From de792c3ca61d2e8a0fab92fcfe136b053fa09958 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Tue, 13 Apr 2021 23:51:51 -0400 Subject: [PATCH 167/215] debug tooling: logfilter bug when running test without -v (#2070) logfilter had a bug, not handling cases where the go test was run without -v, which eliminated the -- RUN message. 
--- debug/logfilter/example7.in | 63 ++++++++++++++++++++++++++ debug/logfilter/example7.out.expected | 64 +++++++++++++++++++++++++++ debug/logfilter/main.go | 20 +++++---- 3 files changed, 139 insertions(+), 8 deletions(-) create mode 100644 debug/logfilter/example7.in create mode 100644 debug/logfilter/example7.out.expected diff --git a/debug/logfilter/example7.in b/debug/logfilter/example7.in new file mode 100644 index 0000000000..0953dd5391 --- /dev/null +++ b/debug/logfilter/example7.in @@ -0,0 +1,63 @@ +2021/04/13 21:46:13 Desc object: [{"name":"globals","value":{"type":"object","className":"Object","description":"Object","objectId":"globalsObjID","preview":{"type":"object","description":"Object","overflow":true,"properties":[{"name":"error","type":"undefined","value":"globals: invalid length 0 != 10"}]}},"writable":false,"configurable":false,"enumerable":true,"isOwn":true}] +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test2 +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test2 +2021/04/13 21:46:13 
------------------------------------------------ +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT session test closed +--- FAIL: TestBalanceAdapterStateChanges (0.00s) +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: 
https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 starting server on 127.0.0.1:63081 +2021/04/13 21:46:14 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:14 Run mode: logicsig +2021/04/13 21:46:14 Open http://127.0.0.1:12345 in a web browser +2021/04/13 21:46:14 subscribeHandler error: websocket: response does not implement http.Hijacker +FAIL +FAIL 
github.com/algorand/go-algorand/cmd/tealdbg 0.903s +FAIL diff --git a/debug/logfilter/example7.out.expected b/debug/logfilter/example7.out.expected new file mode 100644 index 0000000000..c7df1d1a62 --- /dev/null +++ b/debug/logfilter/example7.out.expected @@ -0,0 +1,64 @@ +--- FAIL: TestBalanceAdapterStateChanges (0.00s) +2021/04/13 21:46:13 Desc object: [{"name":"globals","value":{"type":"object","className":"Object","description":"Object","objectId":"globalsObjID","preview":{"type":"object","description":"Object","overflow":true,"properties":[{"name":"error","type":"undefined","value":"globals: invalid length 0 != 10"}]}},"writable":false,"configurable":false,"enumerable":true,"isOwn":true}] +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test2 +2021/04/13 21:46:13 Or open in Chrome: +2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test2 +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT debugger listening on: ws://127.0.0.1:12345/test +2021/04/13 21:46:13 Or open in Chrome: 
+2021/04/13 21:46:13 devtools://devtools/bundled/js_app.html?experiments=true&v8only=false&ws=127.0.0.1:12345/test +2021/04/13 21:46:13 ------------------------------------------------ +2021/04/13 21:46:13 CDT session test closed + +FAIL github.com/algorand/go-algorand/cmd/tealdbg 0.903s... +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: 
https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: logicsig +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:13 Run mode: stateful +2021/04/13 21:46:13 starting server on 127.0.0.1:63081 +2021/04/13 21:46:14 Using proto: https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff +2021/04/13 21:46:14 Run mode: logicsig +2021/04/13 21:46:14 Open http://127.0.0.1:12345 in a web browser +2021/04/13 21:46:14 subscribeHandler error: websocket: response does not implement http.Hijacker + +FAIL github.com/algorand/go-algorand/cmd/tealdbg 0.903s diff --git a/debug/logfilter/main.go b/debug/logfilter/main.go index d8efd92352..78ac1222e4 100644 --- a/debug/logfilter/main.go +++ 
b/debug/logfilter/main.go @@ -74,10 +74,12 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { var testName string fmt.Sscanf(line[idx:], "--- PASS: %s", &testName) if _, have := tests[testName]; !have { - panic(fmt.Errorf("test '%s' is missing, when parsing '%s'", testName, line)) + fmt.Fprintf(outFile, "%s\r\n%s\r\n", line, packageOutputBuffer) + packageOutputBuffer = "" + } else { + fmt.Fprintf(outFile, line+"\r\n") + delete(tests, testName) } - fmt.Fprintf(outFile, line+"\r\n") - delete(tests, testName) continue } if idx := strings.Index(line, "--- FAIL:"); idx >= 0 { @@ -85,12 +87,14 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { fmt.Sscanf(line[idx:], "--- FAIL: %s", &testName) test, have := tests[testName] if !have { - panic(fmt.Errorf("test %s is missing", testName)) + fmt.Fprintf(outFile, "%s\r\n%s\r\n", line, packageOutputBuffer) + packageOutputBuffer = "" + } else { + fmt.Fprintf(outFile, test.outputBuffer+"\r\n") + fmt.Fprintf(outFile, line+"\r\n") + test.outputBuffer = "" + tests[testName] = test } - fmt.Fprintf(outFile, test.outputBuffer+"\r\n") - fmt.Fprintf(outFile, line+"\r\n") - test.outputBuffer = "" - tests[testName] = test continue } // otherwise, add the line to the current test ( if there is such ) From b0d9bb0d1c3c60480bb17bcd410952314620fc91 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Wed, 14 Apr 2021 10:07:24 -0400 Subject: [PATCH 168/215] netgoal: fixup small amounts of float64 roundoff (#2063) Genesis blocks (now usually for test networks) initial stake must add up to exactly the total money, but the network description is in float64 and there can be small roundoff to fixup. 
--- cmd/genesis/newgenesis.go | 8 ++- gen/generate.go | 103 ++++++++++++++++++++-------- gen/generate_test.go | 18 +++++ netdeploy/networkTemplate.go | 2 +- netdeploy/remote/deployedNetwork.go | 2 +- 5 files changed, 102 insertions(+), 31 deletions(-) diff --git a/cmd/genesis/newgenesis.go b/cmd/genesis/newgenesis.go index 27a6592110..5a7ad83269 100644 --- a/cmd/genesis/newgenesis.go +++ b/cmd/genesis/newgenesis.go @@ -19,7 +19,9 @@ package main import ( "flag" "fmt" + "io" "log" + "os" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/gen" @@ -54,7 +56,11 @@ func main() { genesisData.NetworkName = *netName } - err = gen.GenerateGenesisFiles(genesisData, config.Consensus, *outDir, !*quiet) + var verboseOut io.Writer = nil + if !*quiet { + verboseOut = os.Stdout + } + err = gen.GenerateGenesisFiles(genesisData, config.Consensus, *outDir, verboseOut) if err != nil { reportErrorf("Cannot write genesis files: %s", err) } diff --git a/gen/generate.go b/gen/generate.go index fd06634d0f..b8eefebcba 100644 --- a/gen/generate.go +++ b/gen/generate.go @@ -18,7 +18,9 @@ package gen import ( "fmt" + "io" "io/ioutil" + "math" "os" "path/filepath" "runtime" @@ -55,15 +57,44 @@ type genesisAllocation struct { Online basics.Status } -// GenerateGenesisFiles generates the genesis.json file and wallet files for a give genesis configuration. 
-func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusProtocols, outDir string, verbose bool) error { - err := os.Mkdir(outDir, os.ModeDir|os.FileMode(0777)) - if err != nil && os.IsNotExist(err) { - return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error()) +func u64absDiff(a, b uint64) uint64 { + if a > b { + return a - b + } + if b > a { + return b - a + } + return 0 +} + +// testable inner function that doesn't touch filesystem +func setupGenerateGenesisFiles(genesisData *GenesisData, consensus config.ConsensusProtocols, verboseOut io.Writer) (proto protocol.ConsensusVersion, consensusParams config.ConsensusParams, allocation []genesisAllocation, err error) { + err = nil + // Backwards compatibility with older genesis files: if the consensus + // protocol version is not specified, default to V0. + proto = genesisData.ConsensusProtocol + if proto == protocol.ConsensusVersion("") { + proto = protocol.ConsensusCurrentVersion + } + + // Backwards compatibility with older genesis files: if the fee sink + // or the rewards pool is not specified, set their defaults. 
+ if (genesisData.FeeSink == basics.Address{}) { + genesisData.FeeSink = defaultSinkAddr + } + if (genesisData.RewardsPool == basics.Address{}) { + genesisData.RewardsPool = defaultPoolAddr + } + + var ok bool + consensusParams, ok = consensus[proto] + if !ok { + err = fmt.Errorf("protocol %s not supported", proto) + return } var sum uint64 - allocation := make([]genesisAllocation, len(genesisData.Wallets)) + allocation = make([]genesisAllocation, len(genesisData.Wallets)) for i, wallet := range genesisData.Wallets { acct := genesisAllocation{ @@ -79,35 +110,50 @@ func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusPro } if sum != TotalMoney { - panic(fmt.Sprintf("Amounts don't add up to TotalMoney - off by %v", int64(TotalMoney)-int64(sum))) - } - - // Backwards compatibility with older genesis files: if the consensus - // protocol version is not specified, default to V0. - proto := genesisData.ConsensusProtocol - if proto == protocol.ConsensusVersion("") { - proto = protocol.ConsensusCurrentVersion + fsum := float64(sum) + ftot := float64(TotalMoney) + if (math.Abs((fsum-ftot)/ftot) < 0.01) && (u64absDiff(sum, TotalMoney) < 10000) { + if verboseOut != nil { + fmt.Fprintf(verboseOut, "doing roundoff fixup expected total money %d actual sum %d\n", TotalMoney, sum) + } + // wallet stake is a float and roundoff might happen but we might be close enough to do fixup + i := 0 + for sum != TotalMoney { + if sum < TotalMoney { + allocation[i].Stake++ + sum++ + } else { + if allocation[i].Stake > consensusParams.MinBalance { + allocation[i].Stake-- + sum-- + } + } + i = (i + 1) % len(allocation) + } + } else { + panic(fmt.Sprintf("Amounts don't add up to TotalMoney - off by %v", int64(TotalMoney)-int64(sum))) + } } + return +} - // Backwards compatibility with older genesis files: if the fee sink - // or the rewards pool is not specified, set their defaults. 
- if (genesisData.FeeSink == basics.Address{}) { - genesisData.FeeSink = defaultSinkAddr - } - if (genesisData.RewardsPool == basics.Address{}) { - genesisData.RewardsPool = defaultPoolAddr +// GenerateGenesisFiles generates the genesis.json file and wallet files for a give genesis configuration. +func GenerateGenesisFiles(genesisData GenesisData, consensus config.ConsensusProtocols, outDir string, verboseOut io.Writer) error { + proto, consensusParams, allocation, err := setupGenerateGenesisFiles(&genesisData, consensus, verboseOut) + if err != nil { + return err } - consensusParams, ok := consensus[proto] - if !ok { - return fmt.Errorf("protocol %s not supported", proto) + err = os.Mkdir(outDir, os.ModeDir|os.FileMode(0777)) + if err != nil && os.IsNotExist(err) { + return fmt.Errorf("couldn't make output directory '%s': %v", outDir, err.Error()) } - return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, verbose) + return generateGenesisFiles(outDir, proto, consensusParams, genesisData.NetworkName, genesisData.VersionModifier, allocation, genesisData.FirstPartKeyRound, genesisData.LastPartKeyRound, genesisData.PartKeyDilution, genesisData.FeeSink, genesisData.RewardsPool, genesisData.Comment, verboseOut) } func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, protoParams config.ConsensusParams, netName string, schemaVersionModifier string, - allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, verbose bool) (err error) { + allocation []genesisAllocation, firstWalletValid uint64, lastWalletValid uint64, partKeyDilution uint64, feeSink, rewardsPool basics.Address, comment string, verboseOut io.Writer) (err 
error) { genesisAddrs := make(map[string]basics.Address) records := make(map[string]basics.AccountData) @@ -127,6 +173,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, concurrentWalletGenerators := runtime.NumCPU() * 2 errorsChannel := make(chan error, concurrentWalletGenerators) + verbose := verboseOut != nil verbosedOutput := make(chan string) var creatingWalletsWaitGroup sync.WaitGroup var writeMu deadlock.Mutex @@ -243,7 +290,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, // create a listener for the verbosedOutput go func() { for textOut := range verbosedOutput { - fmt.Printf("%s\n", textOut) + fmt.Fprintf(verboseOut, "%s\n", textOut) } }() } @@ -269,7 +316,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, genesisAddrs["RewardsPool"] = rewardsPool if verbose { - fmt.Println(protoVersion, protoParams.MinBalance) + fmt.Fprintln(verboseOut, protoVersion, protoParams.MinBalance) } records["FeeSink"] = basics.AccountData{ diff --git a/gen/generate_test.go b/gen/generate_test.go index b09e9aa5d7..c8a6656d80 100644 --- a/gen/generate_test.go +++ b/gen/generate_test.go @@ -21,10 +21,13 @@ import ( "io/ioutil" "os" "path/filepath" + "strings" "sync" "testing" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" "github.com/stretchr/testify/require" @@ -100,3 +103,18 @@ func TestLoadSingleRootKeyConcurrent(t *testing.T) { } wg.Wait() } + +func TestGenesisRoundoff(t *testing.T) { + verbosity := strings.Builder{} + genesisData := DefaultGenesis + genesisData.NetworkName = "wat" + genesisData.ConsensusProtocol = protocol.ConsensusCurrentVersion // TODO: also check ConsensusFuture ? 
+ genesisData.Wallets = make([]WalletData, 15) + for i := range genesisData.Wallets { + genesisData.Wallets[i].Name = fmt.Sprintf("w%d", i) + genesisData.Wallets[i].Stake = 100.0 / float64(len(genesisData.Wallets)) + } + _, _, _, err := setupGenerateGenesisFiles(&genesisData, config.Consensus, &verbosity) + require.NoError(t, err) + require.True(t, strings.Contains(verbosity.String(), "roundoff")) +} diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index 9a4ae113d9..c9fe871a06 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -50,7 +50,7 @@ func (t NetworkTemplate) generateGenesisAndWallets(targetFolder, networkName, bi genesisData := t.Genesis genesisData.NetworkName = networkName mergedConsensus := config.Consensus.Merge(t.Consensus) - return gen.GenerateGenesisFiles(genesisData, mergedConsensus, targetFolder, true) + return gen.GenerateGenesisFiles(genesisData, mergedConsensus, targetFolder, os.Stdout) } // Create data folders for all NodeConfigs, configuring relays appropriately and diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index cb78c2e7dd..cda4b37003 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -268,7 +268,7 @@ func (cfg DeployedNetwork) BuildNetworkFromTemplate(buildCfg BuildConfig, rootDi if cfg.useExistingGenesis { fmt.Println(" *** using existing genesis files ***") } else { - if err = gen.GenerateGenesisFiles(cfg.GenesisData, config.Consensus, genesisFolder, true); err != nil { + if err = gen.GenerateGenesisFiles(cfg.GenesisData, config.Consensus, genesisFolder, os.Stdout); err != nil { return } } From f566e82fc77e0f4af3ea2d71757217bcba77b130 Mon Sep 17 00:00:00 2001 From: Jason Paulos Date: Wed, 14 Apr 2021 10:21:45 -0400 Subject: [PATCH 169/215] Tealdbg don't assume program version (#2042) Currently the TEAL debugger always compiles programs with the latest TEAL version. 
This means that programs using prior versions are unable to be properly debugged. This PR fixes this issue. --- cmd/tealdbg/local.go | 5 ++++- cmd/tealdbg/local_test.go | 8 ++++--- cmd/tealdbg/main.go | 2 +- .../cli/tealdbg/expect/tealdbgSpinoffTest.exp | 2 +- .../e2e-go/cli/tealdbg/expect/tealdbgTest.exp | 22 ++++++++++++++++--- 5 files changed, 30 insertions(+), 9 deletions(-) diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go index 338a25cf5f..b47031d871 100644 --- a/cmd/tealdbg/local.go +++ b/cmd/tealdbg/local.go @@ -335,7 +335,10 @@ func (r *LocalRunner) Setup(dp *DebugParams) (err error) { r.runs[i].program = data if IsTextFile(data) { source := string(data) - ops, err := logic.AssembleStringWithVersion(source, r.proto.LogicSigVersion) + ops, err := logic.AssembleString(source) + if ops.Version > r.proto.LogicSigVersion { + return fmt.Errorf("Program version (%d) is beyond the maximum supported protocol version (%d)", ops.Version, r.proto.LogicSigVersion) + } if err != nil { errorLines := "" for _, lineError := range ops.Errors { diff --git a/cmd/tealdbg/local_test.go b/cmd/tealdbg/local_test.go index 4a93825c54..6a844963e5 100644 --- a/cmd/tealdbg/local_test.go +++ b/cmd/tealdbg/local_test.go @@ -337,7 +337,8 @@ func TestDebugEnvironment(t *testing.T) { // create sample programs that checks all the environment: // transaction fields, global properties, - source := `global Round + source := `#pragma version 2 +global Round int 222 == global LatestTimestamp @@ -476,7 +477,8 @@ int 100 a.True(pass) // check relaxed - opted in for both - source = `int 1 + source = `#pragma version 2 +int 1 int 100 app_opted_in int 1 @@ -499,7 +501,7 @@ int 1 ds.Painless = false // check ForeignApp - source = ` + source = `#pragma version 2 int 300 byte 0x676b657962797465 // gkeybyte app_global_get_ex diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go index 60a090cccc..6a1004c278 100644 --- a/cmd/tealdbg/main.go +++ b/cmd/tealdbg/main.go @@ -155,7 +155,7 @@ func init() 
{ rootCmd.PersistentFlags().BoolVar(&noSourceMap, "no-source-map", false, "Do not generate source maps") rootCmd.PersistentFlags().BoolVarP(&verbose, "verbose", "v", false, "Verbose output") - debugCmd.Flags().StringVarP(&proto, "proto", "p", "", "Consensus protocol version for TEAL") + debugCmd.Flags().StringVarP(&proto, "proto", "p", "", "Consensus protocol version for TEAL evaluation") debugCmd.Flags().StringVarP(&txnFile, "txn", "t", "", "Transaction(s) to evaluate TEAL on in form of json or msgpack file") debugCmd.Flags().IntVarP(&groupIndex, "group-index", "g", 0, "Transaction index in a txn group") debugCmd.Flags().StringVarP(&balanceFile, "balance", "b", "", "Balance records to evaluate stateful TEAL on in form of json or msgpack file") diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp index d18315cdc8..9be2c95ba2 100644 --- a/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp +++ b/test/e2e-go/cli/tealdbg/expect/tealdbgSpinoffTest.exp @@ -56,6 +56,6 @@ if { [catch { } } EXCEPTION ] } { - puts "ERROR in teadbgTest: $EXCEPTION" + puts "ERROR in tealdbgTest: $EXCEPTION" exit 1 } diff --git a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp index 49367fec93..a4fc773eb1 100644 --- a/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp +++ b/test/e2e-go/cli/tealdbg/expect/tealdbgTest.exp @@ -65,17 +65,33 @@ if { [catch { # this is ConsensusV26 set PROTOCOL_VERSION_3 "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff" - # run the test using version 2: + # run the test using version 2 on protocol version 2: exec printf "#pragma version 2\nint 1\ndup\n+\n" > $TEAL_PROG_FILE TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_2 "" - # run the test using version 3: + # run the test using version 2 on protocol version 3: + TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_3 "--remote-debugging-port 9392 --listen 
127.0.0.1" + + # run the test using version 3 on protocol version 3: exec printf "#pragma version 3\nint 1\ndup\n+\n" > $TEAL_PROG_FILE TestTealdbg $TEAL_PROG_FILE $PROTOCOL_VERSION_3 "--remote-debugging-port 9392 --listen 127.0.0.1" + # run the test using version 3 on protocol version 2 (this should fail) + set FAILED 0 + spawn tealdbg debug -v $TEAL_PROG_FILE -p $PROTOCOL_VERSION_2 --remote-debugging-port 9392 --listen 127.0.0.1 + expect { + timeout { puts "tealdbg debug timed out"; exit 1 } + -re {Debug error: Program version \([0-9]+\) is beyond the maximum supported protocol version \([0-9]+\)} { set FAILED 1; close } + } + if { $FAILED == 0 } { + puts "ERROR: the command should have failed" + exit 1 + } + puts "The command failed as expected" + exec rm $TEAL_PROG_FILE } EXCEPTION ] } { - puts "ERROR in teadbgTest: $EXCEPTION" + puts "ERROR in tealdbgTest: $EXCEPTION" exit 1 } From f2ee25ad50e653aff104f005a78b6d0f2461499c Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 14 Apr 2021 12:35:10 -0400 Subject: [PATCH 170/215] goal: improve waitForCommit logic (#2067) The existing implementation of `waitForCommit` had two deficiencies: - It was waiting for two rounds at the time, instead of waiting for a single round before re-testing the pending transactions. - It wasn't limiting it's execution time to the transaction's last valid ( on the client side ). - Note that there is a server side handling for that, which returns an error "kicked out of local node pool". - This would ensure we have a proper working client in case the server-side restarts. 
--- cmd/goal/account.go | 4 +-- cmd/goal/application.go | 14 +++++----- cmd/goal/asset.go | 10 +++---- cmd/goal/clerk.go | 14 +++++++--- cmd/goal/interact.go | 2 +- cmd/goal/messages.go | 1 + .../cli/goal/expect/goalAccountTest.exp | 27 +++++++++++++++++++ .../cli/goal/expect/goalExpectCommon.exp | 22 +++++++++++++++ 8 files changed, 76 insertions(+), 18 deletions(-) diff --git a/cmd/goal/account.go b/cmd/goal/account.go index dfb46938f5..3a43f5f244 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -810,7 +810,7 @@ func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnl return nil } - return waitForCommit(client, txid) + return waitForCommit(client, txid, lastTxRound) } var addParticipationKeyCmd = &cobra.Command{ @@ -1325,7 +1325,7 @@ var markNonparticipatingCmd = &cobra.Command{ return } - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lastTxRound) if err != nil { reportErrorf("error waiting for transaction to be committed: %v", err) } diff --git a/cmd/goal/application.go b/cmd/goal/application.go index 43e13fa723..f32d554f11 100644 --- a/cmd/goal/application.go +++ b/cmd/goal/application.go @@ -416,7 +416,7 @@ var createAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -499,7 +499,7 @@ var updateAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -577,7 +577,7 @@ var optInAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, 
txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -655,7 +655,7 @@ var closeOutAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -733,7 +733,7 @@ var clearAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -811,7 +811,7 @@ var callAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -889,7 +889,7 @@ var deleteAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go index b0c2cd2344..11aaecccd9 100644 --- a/cmd/goal/asset.go +++ b/cmd/goal/asset.go @@ -231,7 +231,7 @@ var createAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } @@ -311,7 +311,7 @@ var destroyAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } @@ -400,7 +400,7 @@ var 
configAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } @@ -481,7 +481,7 @@ var sendAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } @@ -546,7 +546,7 @@ var freezeAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index 228318fbae..ef9f1e4795 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -151,7 +151,7 @@ var clerkCmd = &cobra.Command{ }, } -func waitForCommit(client libgoal.Client, txid string) error { +func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) error { // Get current round information stat, err := client.Status() if err != nil { @@ -174,8 +174,16 @@ func waitForCommit(client libgoal.Client, txid string) error { return fmt.Errorf(txPoolError, txid, txn.PoolError) } + // check if we've already committed to the block number equals to the transaction's last valid round. + // if this is the case, the transaction would not be included in the blockchain, and we can exit right + // here. 
+ if stat.LastRound >= transactionLastValidRound { + return fmt.Errorf(errorTransactionExpired, txid) + } + reportInfof(infoTxPending, txid, stat.LastRound) - stat, err = client.WaitForRound(stat.LastRound + 1) + // WaitForRound waits until round "stat.LastRound+1" is committed + stat, err = client.WaitForRound(stat.LastRound) if err != nil { return fmt.Errorf(errorRequestFail, err) } @@ -460,7 +468,7 @@ var sendCmd = &cobra.Command{ reportInfof(infoTxIssued, amount, fromAddressResolved, toAddressResolved, txid, fee) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go index 4b6eba2003..9ee1ae96a4 100644 --- a/cmd/goal/interact.go +++ b/cmd/goal/interact.go @@ -622,7 +622,7 @@ var appExecuteCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid) + err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go index 9ba686219f..77b8a1cfc8 100644 --- a/cmd/goal/messages.go +++ b/cmd/goal/messages.go @@ -124,6 +124,7 @@ const ( rekeySenderTargetSameError = "The sender and the resulted multisig address are the same" noOutputFileError = "--msig-params must be specified with an output file name (-o)" infoAutoFeeSet = "Automatically set fee to %d MicroAlgos" + errorTransactionExpired = "Transaction %s expired before it could be included in a block" loggingNotConfigured = "Remote logging is not currently configured and won't be enabled" loggingNotEnabled = "Remote logging is current disabled" diff --git a/test/e2e-go/cli/goal/expect/goalAccountTest.exp b/test/e2e-go/cli/goal/expect/goalAccountTest.exp index 0b5999cc4f..b502272843 100644 --- a/test/e2e-go/cli/goal/expect/goalAccountTest.exp +++ 
b/test/e2e-go/cli/goal/expect/goalAccountTest.exp @@ -32,6 +32,33 @@ if { [catch { # Determine primary account set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] + # try to generate an expired transaction for 5 times before giving up. + set TEST_TRANSACTION_EXPIRATION 5 + while {$TEST_TRANSACTION_EXPIRATION > 0} { + # Get the lastest block + set LAST_COMMITTED_BLOCK [::AlgorandGoal::GetNodeLastCommittedBlock $TEST_PRIMARY_NODE_DIR] + + # test that sending a transaction where the last round is equal to the current round end up resulting in "Transaction %s expired before it could be included in a block" error. + spawn goal clerk send -a 10 --fee 1000 --firstvalid [expr {$LAST_COMMITTED_BLOCK + 1}] --lastvalid [expr {$LAST_COMMITTED_BLOCK + 1}] -f $PRIMARY_ACCOUNT_ADDRESS -t $PRIMARY_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR + expect { + timeout { close; ::AlgorandGoal::Abort "goal clerk send timeout" } + -re {Transaction ([A-Z0-9]+) expired before it could be included in a block} { + break; + close; + } + -re {Transaction ([A-Z0-9]+) kicked out of local node pool} { + # this is a legit possible case, so just keep iterating if we hit this one. + close; + } + -re {Couldn't broadcast tx with algod: HTTP 400 Bad Request: TransactionPool.Remember: txn dead: round ([0-9]+) outside of ([0-9]+)--([0-9]+)} { + # this is a legit possible case, so just keep iterating if we hit this one. 
+ close; + } + eof { ::AlgorandGoal::CheckEOF "Failed to send a dummy transaction" } + } + set TEST_TRANSACTION_EXPIRATION [expr {$TEST_TRANSACTION_EXPIRATION - 1}] + } + set MN "advice pudding treat near rule blouse same whisper inner electric quit surface sunny dismiss leader blood seat clown cost exist hospital century reform able sponsor" spawn goal account import -m $MN --datadir $TEST_PRIMARY_NODE_DIR expect { diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index e7b1df3304..3d77d9065d 100644 --- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -830,6 +830,28 @@ proc ::AlgorandGoal::GetNodeLastCatchpoint { NODE_DATA_DIR } { return $CATCHPOINT } + +# Get node's last reached round +proc ::AlgorandGoal::GetNodeLastCommittedBlock { NODE_DATA_DIR } { + set COMMITTEDROUND "" + if { [catch { + # Check node status + puts "spawn node status" + spawn goal node status -d $NODE_DATA_DIR + expect { + timeout { ::AlgorandGoal::Abort "goal node status timed out" } + -re {Last committed block: ([0-9]+)} {regexp -- {[0-9]+} $expect_out(0,string) COMMITTEDROUND; exp_continue } + eof { catch wait result; if { [lindex $result 3] != 0 } { ::AlgorandGoal::Abort "failed to perform goal node status : error code [lindex $result 3]"} } + } + if { $COMMITTEDROUND == "" } { + ::AlgorandGoal::Abort "Last committed block entry was missing from goal node status" + } + } EXCEPTION ] } { + ::AlgorandGoal::Abort "ERROR in GetNodeLastCommittedBlock: $EXCEPTION" + } + return $COMMITTEDROUND +} + # Start catching up to a specific catchpoint proc ::AlgorandGoal::StartCatchup { NODE_DATA_DIR CATCHPOINT } { if { [catch { From 77c327884af4de29ed597d6b91a68867fcb35f08 Mon Sep 17 00:00:00 2001 From: John Lee Date: Wed, 14 Apr 2021 12:53:33 -0400 Subject: [PATCH 171/215] Remove buildnumber and genesistimestamp. 
--- buildnumber.dat | 1 - genesistimestamp.dat | 1 - 2 files changed, 2 deletions(-) delete mode 100644 buildnumber.dat delete mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat deleted file mode 100644 index b8626c4cff..0000000000 --- a/buildnumber.dat +++ /dev/null @@ -1 +0,0 @@ -4 diff --git a/genesistimestamp.dat b/genesistimestamp.dat deleted file mode 100644 index c72c6a7795..0000000000 --- a/genesistimestamp.dat +++ /dev/null @@ -1 +0,0 @@ -1558657885 From 2de6e551dcd2f50ab2ca844fda04624b86425acf Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 14 Apr 2021 15:41:58 -0400 Subject: [PATCH 172/215] Refactor participation keys management (#2061) This PR breaks apart the `account.Participation`, which represented both the account participation data as well as the account participation persistence model, into two: - `account.Participation` would be used for the account participation data. - `account.PersistedParticipation` extends the `account.Participation` by providing persistence methods. The goal in making these changes is to ensure that we don't leak any open database connections. A holder of `account.Participation` doesn't need to be concerned about closing the database handle. The usage of `account.PersistedParticipation` is by far more encapsulated and allows us to ensure no unneeded database connections remains floating around. 
--- agreement/agreementtest/simulate_test.go | 3 +- agreement/fuzzer/fuzzer_test.go | 15 --- agreement/service_test.go | 9 -- catchup/pref_test.go | 9 +- cmd/algokey/part.go | 8 +- cmd/goal/account.go | 6 +- compactcert/worker_test.go | 5 +- daemon/algod/api/server/v2/test/helpers.go | 3 +- data/account/account.go | 8 +- data/account/participation.go | 103 +++++++++++---------- data/account/participation_test.go | 31 +++++++ data/accountManager.go | 8 +- data/common_test.go | 4 +- gen/generate.go | 4 +- libgoal/participation.go | 21 +++-- test/framework/fixtures/libgoalFixture.go | 6 +- util/db/perf_test.go | 1 + 17 files changed, 136 insertions(+), 108 deletions(-) diff --git a/agreement/agreementtest/simulate_test.go b/agreement/agreementtest/simulate_test.go index 3017a117a3..c4ccb69b15 100644 --- a/agreement/agreementtest/simulate_test.go +++ b/agreement/agreementtest/simulate_test.go @@ -356,7 +356,8 @@ func generateNAccounts(t *testing.T, N int, firstRound, lastRound basics.Round, if err != nil { panic(err) } - accounts = append(accounts, part) + accounts = append(accounts, part.Participation) + part.Close() } return } diff --git a/agreement/fuzzer/fuzzer_test.go b/agreement/fuzzer/fuzzer_test.go index 31702ca328..b2e1960d8c 100644 --- a/agreement/fuzzer/fuzzer_test.go +++ b/agreement/fuzzer/fuzzer_test.go @@ -205,27 +205,12 @@ func (n *Fuzzer) initAccountsAndBalances(rootSeed []byte, onlineNodes []bool) er } rootAddress := root.Address() - partAccess, err := db.MakeAccessor(n.networkName+"part"+strconv.Itoa(i+off), false, true) - - if err != nil { - return err - } - - n.accountAccessors[i*2+1] = partAccess - n.accounts[i] = account.Participation{ Parent: rootAddress, VRF: generatePseudoRandomVRF(i), Voting: readOnlyParticipationVotes[i], FirstValid: firstValid, LastValid: lastValid, - Store: partAccess, - } - - err = n.accounts[i].Persist() - - if err != nil { - panic(err) } acctData := basics.AccountData{ diff --git a/agreement/service_test.go 
b/agreement/service_test.go index 464478b2c6..b8ff67f5a9 100644 --- a/agreement/service_test.go +++ b/agreement/service_test.go @@ -678,21 +678,12 @@ func createTestAccountsAndBalances(t *testing.T, numNodes int, rootSeed []byte) // save partkeys to db { - partAccess, err := db.MakeAccessor(t.Name()+"part"+strconv.Itoa(i+off), false, true) - if err != nil { - panic(err) - } accounts[i] = account.Participation{ Parent: rootAddress, VRF: generatePseudoRandomVRF(i), Voting: v, FirstValid: firstValid, LastValid: lastValid, - Store: partAccess, - } - err = accounts[i].Persist() - if err != nil { - panic(err) } } diff --git a/catchup/pref_test.go b/catchup/pref_test.go index a3222683c3..34b78352dd 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -32,8 +32,8 @@ import ( "github.com/algorand/go-algorand/data/datatest" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/util/db" "github.com/algorand/go-algorand/rpcs" + "github.com/algorand/go-algorand/util/db" ) func BenchmarkServiceFetchBlocks(b *testing.B) { @@ -44,7 +44,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { require.NotNil(b, remote) require.NotNil(b, local) - + // Create a network and block service net := &httpTestPeerSource{} ls := rpcs.MakeBlockService(logging.TestingLog(b), config.GetDefaultLocal(), remote, net, "test genesisID") @@ -54,7 +54,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { defer nodeA.stop() rootURL := nodeA.rootURL() net.addPeer(rootURL) - + cfg := config.GetDefaultLocal() cfg.Archival = true @@ -128,8 +128,9 @@ func benchenv(t testing.TB, numAccounts, numBlocks int) (ledger, emptyLedger *da } short := root.Address() - parts[i] = part + parts[i] = part.Participation genesis[short] = startamt + part.Close() } genesis[basics.Address(sinkAddr)] = basics.AccountData{ diff --git a/cmd/algokey/part.go b/cmd/algokey/part.go index 49cc9c0f5a..48ba060a39 100644 --- a/cmd/algokey/part.go 
+++ b/cmd/algokey/part.go @@ -81,7 +81,7 @@ var partGenerateCmd = &cobra.Command{ os.Exit(1) } - printPartkey(partkey) + printPartkey(partkey.Participation) }, } @@ -101,8 +101,9 @@ var partInfoCmd = &cobra.Command{ fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err) os.Exit(1) } + partkey.Close() - printPartkey(partkey) + printPartkey(partkey.Participation) }, } @@ -128,6 +129,7 @@ var partReparentCmd = &cobra.Command{ fmt.Fprintf(os.Stderr, "Cannot load partkey database %s: %v\n", partKeyfile, err) os.Exit(1) } + defer partkey.Close() partkey.Parent = parent err = partkey.PersistNewParent() @@ -136,7 +138,7 @@ var partReparentCmd = &cobra.Command{ os.Exit(1) } - printPartkey(partkey) + printPartkey(partkey.Participation) }, } diff --git a/cmd/goal/account.go b/cmd/goal/account.go index 3a43f5f244..a8bf74a2d5 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -760,7 +760,7 @@ var changeOnlineCmd = &cobra.Command{ os.Exit(1) } - part = &partkey + part = &partkey.Participation if accountAddress == "" { accountAddress = part.Parent.String() } @@ -859,11 +859,10 @@ No --delete-input flag specified, exiting without installing key.`) dataDir := ensureSingleDataDir() client := ensureAlgodClient(dataDir) - partKey, _, err := client.InstallParticipationKeys(partKeyFile) + _, _, err := client.InstallParticipationKeys(partKeyFile) if err != nil { reportErrorf(errorRequestFail, err) } - partKey.Close() fmt.Println("Participation key installed successfully") }, } @@ -926,7 +925,6 @@ func generateAndRegisterPartKey(address string, currentRound, lastValidRound uin txFile := "" err = changeAccountOnlineStatus(address, &part, goOnline, txFile, wallet, currentRound, lastValidRound, fee, leaseBytes, dataDir, client) if err != nil { - part.Close() os.Remove(keyPath) fmt.Fprintf(os.Stderr, " Error registering keys - deleting newly-generated key file: %s\n", keyPath) } diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go index 
a2849120cc..a82a6cd282 100644 --- a/compactcert/worker_test.go +++ b/compactcert/worker_test.go @@ -96,7 +96,7 @@ func (s *testWorkerStubs) addBlock(ccNextRound basics.Round) { } } -func (s *testWorkerStubs) Keys() []account.Participation { +func (s *testWorkerStubs) Keys() (out []account.Participation) { return s.keys } @@ -203,7 +203,8 @@ func newPartKey(t testing.TB, parent basics.Address) account.Participation { part, err := account.FillDBWithParticipationKeys(partDB, parent, 0, 1024*1024, config.Consensus[protocol.ConsensusFuture].DefaultKeyDilution) require.NoError(t, err) - return part + part.Close() + return part.Participation } func TestWorkerAllSigs(t *testing.T) { diff --git a/daemon/algod/api/server/v2/test/helpers.go b/daemon/algod/api/server/v2/test/helpers.go index 206921f2c8..b5962cf4c4 100644 --- a/daemon/algod/api/server/v2/test/helpers.go +++ b/daemon/algod/api/server/v2/test/helpers.go @@ -238,7 +238,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*d } roots[i] = root - parts[i] = part + parts[i] = part.Participation startamt := basics.MicroAlgos{Raw: uint64(minMoneyAtStart + (gen.Int() % (maxMoneyAtStart - minMoneyAtStart)))} short := root.Address() @@ -251,6 +251,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*d data.VoteID = parts[i].VotingSecrets().OneTimeSignatureVerifier genesis[short] = data } + part.Close() } genesis[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 100000 * uint64(proto.RewardsRateRefreshInterval)}) diff --git a/data/account/account.go b/data/account/account.go index 8be24cfb6a..6d33331963 100644 --- a/data/account/account.go +++ b/data/account/account.go @@ -134,7 +134,7 @@ func (root Root) Address() basics.Address { // RestoreParticipation restores a Participation from a database // handle. 
-func RestoreParticipation(store db.Accessor) (acc Participation, err error) { +func RestoreParticipation(store db.Accessor) (acc PersistedParticipation, err error) { var rawParent, rawVRF, rawVoting []byte err = Migrate(store) @@ -163,19 +163,19 @@ func RestoreParticipation(store db.Accessor) (acc Participation, err error) { return nil }) if err != nil { - return Participation{}, err + return PersistedParticipation{}, err } acc.VRF = &crypto.VRFSecrets{} err = protocol.Decode(rawVRF, acc.VRF) if err != nil { - return Participation{}, err + return PersistedParticipation{}, err } acc.Voting = &crypto.OneTimeSignatureSecrets{} err = protocol.Decode(rawVoting, acc.Voting) if err != nil { - return Participation{}, err + return PersistedParticipation{}, err } acc.Store = store diff --git a/data/account/participation.go b/data/account/participation.go index 8d130e9158..2883366f99 100644 --- a/data/account/participation.go +++ b/data/account/participation.go @@ -20,12 +20,12 @@ import ( "context" "database/sql" "fmt" - "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) @@ -41,8 +41,6 @@ import ( // For correctness, all Roots should have no more than one Participation // globally active at any time. If this condition is violated, the Root may // equivocate. (Algorand tolerates a limited fraction of misbehaving accounts.) -// -// Participations handle persistence and deletion of secrets. 
type Participation struct { Parent basics.Address @@ -56,6 +54,13 @@ type Participation struct { LastValid basics.Round KeyDilution uint64 +} + +// PersistedParticipation encapsulates the static state of the participation +// for a single address at any given moment, while providing the ability +// to handle persistence and deletion of secrets. +type PersistedParticipation struct { + Participation Store db.Accessor } @@ -81,40 +86,6 @@ func (part Participation) OverlapsInterval(first, last basics.Round) bool { return true } -// DeleteOldKeys securely deletes ephemeral keys for rounds strictly older than the given round. -func (part Participation) DeleteOldKeys(current basics.Round, proto config.ConsensusParams) <-chan error { - keyDilution := part.KeyDilution - if keyDilution == 0 { - keyDilution = proto.DefaultKeyDilution - } - - part.Voting.DeleteBeforeFineGrained(basics.OneTimeIDForRound(current, keyDilution), keyDilution) - - errorCh := make(chan error, 1) - deleteOldKeys := func(encodedVotingSecrets []byte) { - errorCh <- part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error { - _, err := tx.Exec("UPDATE ParticipationAccount SET voting=?", encodedVotingSecrets) - if err != nil { - return fmt.Errorf("Participation.DeleteOldKeys: failed to update account: %v", err) - } - return nil - }) - close(errorCh) - } - voting := part.Voting.Snapshot() - encodedVotingSecrets := protocol.Encode(&voting) - go deleteOldKeys(encodedVotingSecrets) - return errorCh -} - -// PersistNewParent writes a new parent address to the partkey database. -func (part Participation) PersistNewParent() error { - return part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error { - _, err := tx.Exec("UPDATE ParticipationAccount SET parent=?", part.Parent[:]) - return err - }) -} - // VRFSecrets returns the VRF secrets associated with this Participation account. 
func (part Participation) VRFSecrets() *crypto.VRFSecrets { return part.VRF @@ -156,8 +127,42 @@ func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos, return t } +// DeleteOldKeys securely deletes ephemeral keys for rounds strictly older than the given round. +func (part PersistedParticipation) DeleteOldKeys(current basics.Round, proto config.ConsensusParams) <-chan error { + keyDilution := part.KeyDilution + if keyDilution == 0 { + keyDilution = proto.DefaultKeyDilution + } + + part.Voting.DeleteBeforeFineGrained(basics.OneTimeIDForRound(current, keyDilution), keyDilution) + + errorCh := make(chan error, 1) + deleteOldKeys := func(encodedVotingSecrets []byte) { + errorCh <- part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error { + _, err := tx.Exec("UPDATE ParticipationAccount SET voting=?", encodedVotingSecrets) + if err != nil { + return fmt.Errorf("Participation.DeleteOldKeys: failed to update account: %v", err) + } + return nil + }) + close(errorCh) + } + voting := part.Voting.Snapshot() + encodedVotingSecrets := protocol.Encode(&voting) + go deleteOldKeys(encodedVotingSecrets) + return errorCh +} + +// PersistNewParent writes a new parent address to the partkey database. 
+func (part PersistedParticipation) PersistNewParent() error { + return part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error { + _, err := tx.Exec("UPDATE ParticipationAccount SET parent=?", part.Parent[:]) + return err + }) +} + // FillDBWithParticipationKeys initializes the passed database with participation keys -func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part Participation, err error) { +func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firstValid, lastValid basics.Round, keyDilution uint64) (part PersistedParticipation, err error) { if lastValid < firstValid { err = fmt.Errorf("FillDBWithParticipationKeys: lastValid %d is after firstValid %d", lastValid, firstValid) return @@ -175,14 +180,16 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs vrf := crypto.GenerateVRFSecrets() // Construct the Participation containing these keys to be persisted - part = Participation{ - Parent: address, - VRF: vrf, - Voting: v, - FirstValid: firstValid, - LastValid: lastValid, - KeyDilution: keyDilution, - Store: store, + part = PersistedParticipation{ + Participation: Participation{ + Parent: address, + VRF: vrf, + Voting: v, + FirstValid: firstValid, + LastValid: lastValid, + KeyDilution: keyDilution, + }, + Store: store, } // Persist the Participation into the database @@ -191,7 +198,7 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs } // Persist writes a Participation out to a database on the disk -func (part Participation) Persist() error { +func (part PersistedParticipation) Persist() error { rawVRF := protocol.Encode(part.VRF) voting := part.Voting.Snapshot() rawVoting := protocol.Encode(&voting) @@ -220,6 +227,6 @@ func Migrate(partDB db.Accessor) error { } // Close closes the underlying database handle. 
-func (part Participation) Close() { +func (part PersistedParticipation) Close() { part.Store.Close() } diff --git a/data/account/participation_test.go b/data/account/participation_test.go index 134d3bc08e..d80155d848 100644 --- a/data/account/participation_test.go +++ b/data/account/participation_test.go @@ -19,11 +19,13 @@ package account import ( "context" "database/sql" + "os" "testing" "github.com/stretchr/testify/require" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" @@ -50,6 +52,9 @@ func TestParticipation_NewDB(t *testing.T) { versions, err := getSchemaVersions(partDB) a.NoError(err) a.Equal(versions[PartTableSchemaName], PartTableSchemaVersion) + + partDB.Close() + rootDB.Close() } func getSchemaVersions(db db.Accessor) (versions map[string]int, err error) { @@ -113,3 +118,29 @@ func TestOverlapsInterval(t *testing.T) { a.True(interval.OverlapsInterval(end, end)) a.True(interval.OverlapsInterval(end, after)) } + +func BenchmarkOldKeysDeletion(b *testing.B) { + a := require.New(b) + + var rootAddr basics.Address + crypto.RandBytes(rootAddr[:]) + + partDB, err := db.MakeErasableAccessor(b.Name() + "_part") + a.NoError(err) + a.NotNil(partDB) + defer func() { + os.Remove(b.Name() + "_part") + }() + + part, err := FillDBWithParticipationKeys(partDB, rootAddr, 0, 3000000, config.Consensus[protocol.ConsensusCurrentVersion].DefaultKeyDilution) + a.NoError(err) + a.NotNil(part) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + errCh := part.DeleteOldKeys(basics.Round(i), config.Consensus[protocol.ConsensusCurrentVersion]) + err := <-errCh + a.NoError(err) + } + part.Close() +} diff --git a/data/accountManager.go b/data/accountManager.go index ba39094cbf..bdde332467 100644 --- a/data/accountManager.go +++ b/data/accountManager.go @@ -34,7 +34,7 @@ import ( type AccountManager 
struct { mu deadlock.Mutex - partIntervals map[account.ParticipationInterval]account.Participation + partIntervals map[account.ParticipationInterval]account.PersistedParticipation // Map to keep track of accounts for which we've sent // AccountRegistered telemetry events @@ -47,7 +47,7 @@ type AccountManager struct { func MakeAccountManager(log logging.Logger) *AccountManager { manager := &AccountManager{} manager.log = log - manager.partIntervals = make(map[account.ParticipationInterval]account.Participation) + manager.partIntervals = make(map[account.ParticipationInterval]account.PersistedParticipation) manager.registeredAccounts = make(map[string]bool) return manager @@ -59,7 +59,7 @@ func (manager *AccountManager) Keys() (out []account.Participation) { defer manager.mu.Unlock() for _, part := range manager.partIntervals { - out = append(out, part) + out = append(out, part.Participation) } return out } @@ -81,7 +81,7 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool { // AddParticipation adds a new account.Participation to be managed. // The return value indicates if the key has been added (true) or // if this is a duplicate key (false). 
-func (manager *AccountManager) AddParticipation(participation account.Participation) bool { +func (manager *AccountManager) AddParticipation(participation account.PersistedParticipation) bool { manager.mu.Lock() defer manager.mu.Unlock() diff --git a/data/common_test.go b/data/common_test.go index 9df9ac914d..5390c1b5af 100644 --- a/data/common_test.go +++ b/data/common_test.go @@ -49,7 +49,7 @@ func keypair() *crypto.SignatureSecrets { return s } -func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*Ledger, []account.Root, []account.Participation, []transactions.SignedTxn, func()) { +func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*Ledger, []account.Root, []account.PersistedParticipation, []transactions.SignedTxn, func()) { P := numAccounts // n accounts TXs := numTxs // n txns maxMoneyAtStart := 1000000 // max money start @@ -71,7 +71,7 @@ func testingenv(t testing.TB, numAccounts, numTxs int, offlineAccounts bool) (*L genesis := make(map[basics.Address]basics.AccountData) gen := rand.New(rand.NewSource(2)) roots := make([]account.Root, P) - parts := make([]account.Participation, P) + parts := make([]account.PersistedParticipation, P) for i := 0; i < P; i++ { access, err := db.MakeAccessor(t.Name()+"_root_testingenv"+strconv.Itoa(i), false, true) if err != nil { diff --git a/gen/generate.go b/gen/generate.go index b8eefebcba..4c14f61f9a 100644 --- a/gen/generate.go +++ b/gen/generate.go @@ -189,7 +189,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion, return } var root account.Root - var part account.Participation + var part account.PersistedParticipation wfilename := filepath.Join(outDir, config.RootKeyFilename(wallet.Name)) pfilename := filepath.Join(outDir, config.PartKeyFilename(wallet.Name, firstWalletValid, lastWalletValid)) @@ -397,7 +397,7 @@ func loadRootKey(filename string) (root account.Root, rootDB db.Accessor, err er } // If err != nil, partDB needs to be 
closed. -func loadPartKeys(filename string) (part account.Participation, partDB db.Accessor, err error) { +func loadPartKeys(filename string) (part account.PersistedParticipation, partDB db.Accessor, err error) { if !util.FileExists(filename) { err = os.ErrNotExist return diff --git a/libgoal/participation.go b/libgoal/participation.go index bb754f0f54..c95d4c3c6a 100644 --- a/libgoal/participation.go +++ b/libgoal/participation.go @@ -47,7 +47,7 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round) // This lambda will be used for finding the desired file. checkIfFileIsDesiredKey := func(file os.FileInfo, expiresAfter basics.Round) (part account.Participation, err error) { var handle db.Accessor - var partCandidate account.Participation + var partCandidate account.PersistedParticipation // If it can't be a participation key database, skip it if !config.IsPartKeyFilename(file.Name()) { @@ -62,14 +62,15 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round) // Couldn't open it, skip it return } - defer handle.Close() // Fetch an account.Participation from the database partCandidate, err = account.RestoreParticipation(handle) if err != nil { // Couldn't read it, skip it + handle.Close() return } + defer partCandidate.Close() // Return the Participation valid for this round that relates to the passed address // that expires farthest in the future. @@ -77,7 +78,7 @@ func (c *Client) chooseParticipation(address basics.Address, round basics.Round) // in the short-term. // In the future we should allow the user to specify exactly which partkeys to register. 
if partCandidate.FirstValid <= round && round <= partCandidate.LastValid && partCandidate.Parent == address && partCandidate.LastValid > expiresAfter { - part = partCandidate + part = partCandidate.Participation } return } @@ -164,7 +165,9 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k // Fill the database with new participation keys newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution) - return newPart, partKeyPath, err + part = newPart.Participation + newPart.Close() + return part, partKeyPath, err } // InstallParticipationKeys creates a .partkey database for a given address, @@ -234,8 +237,9 @@ func (c *Client) InstallParticipationKeys(inputfile string) (part account.Partic return } os.Remove(inputfile) - - return newpartkey, newdbpath, nil + part = newpartkey.Participation + newpartkey.Close() + return part, newdbpath, nil } // ListParticipationKeys returns the available participation keys, @@ -271,13 +275,14 @@ func (c *Client) ListParticipationKeys() (partKeyFiles map[string]account.Partic // Fetch an account.Participation from the database part, err := account.RestoreParticipation(handle) - handle.Close() if err != nil { // Couldn't read it, skip it + handle.Close() continue } - partKeyFiles[filename] = part + partKeyFiles[filename] = part.Participation + part.Close() } return diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go index 7adb8cd26e..3f390e61cf 100644 --- a/test/framework/fixtures/libgoalFixture.go +++ b/test/framework/fixtures/libgoalFixture.go @@ -186,13 +186,17 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) { if err != nil { // Couldn't read it, skip it err = nil + handle.Close() continue } // Early reject partkeys if we already have a rootkey for the account if !accountsWithRootKeys[participation.Address().String()] { - allPartKeys = append(allPartKeys, participation) + 
allPartKeys = append(allPartKeys, participation.Participation) } + + // close the database handle. + participation.Close() } } diff --git a/util/db/perf_test.go b/util/db/perf_test.go index 06deecf012..eb25bbcbf7 100644 --- a/util/db/perf_test.go +++ b/util/db/perf_test.go @@ -65,6 +65,7 @@ func BenchmarkSQLErasableWrites(b *testing.B) { wdb, err := MakeErasableAccessor(fn) require.NoError(b, err) + defer wdb.Close() logging.Base().SetLevel(logging.Error) From ad3deea0de6d4296aca310d7590b65b81b65cfa5 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Wed, 14 Apr 2021 17:17:06 -0400 Subject: [PATCH 173/215] Bandwidth watch (#2039) Expand test/heapwatch/heapWatch.py to also gather /metrics and block headers. Add a reporting script to measure differences across metrics and blocks. --- test/heapwatch/bwstart.sh | 44 ++++++++++ test/heapwatch/heapWatch.py | 77 +++++++++++++++--- test/heapwatch/metrics_delta.py | 140 ++++++++++++++++++++++++++++++++ test/heapwatch/start.sh | 3 +- test/heapwatch/stop.sh | 6 +- 5 files changed, 257 insertions(+), 13 deletions(-) create mode 100644 test/heapwatch/bwstart.sh create mode 100644 test/heapwatch/metrics_delta.py diff --git a/test/heapwatch/bwstart.sh b/test/heapwatch/bwstart.sh new file mode 100644 index 0000000000..3770136f70 --- /dev/null +++ b/test/heapwatch/bwstart.sh @@ -0,0 +1,44 @@ +#!/bin/bash +# +# Run a local 3-relay 8-leaf-node test. +# Run 40 TPS of payment txns through it. +# Record metrics for bandwidth analysis. + +set -e +set -o pipefail +set -x +export SHELLOPTS + +TESTROOT=$1 +if [ -z "${TESTROOT}" ]; then + TESTROOT=/tmp/heap_testnetwork +fi + +mkdir -p "${TESTROOT}" + +netgoal generate --nodes 8 --relays 3 -r "${TESTROOT}" -o "${TESTROOT}"/netgoal.json --template goalnet -w 15 + +TESTDIR="${TESTROOT}"/net + +REPO_ROOT="$( cd "$(dirname "$0")" ; pwd -P )"/../.. + +goal network create -r "${TESTDIR}" -t "${TESTROOT}"/netgoal.json -n r3n8 + +goal network start -r "${TESTDIR}" + +# give all the algod a moment... 
+sleep 2 + +mkdir -p "${TESTDIR}/heaps" +python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --no-heap --metrics --blockinfo --period 90 "${TESTDIR}"/{node,relay}* > "${TESTDIR}/heaps/watch.log" 2>&1 & + +echo "$!" > .heapWatch.pid + +# TODO: other pingpong modes +pingpong run -d "${TESTDIR}/node1" --tps 20 --rest 0 --run 0 & + +echo "$!" > .pingpong1.pid + +pingpong run -d "${TESTDIR}/node2" --tps 20 --rest 0 --run 0 & + +echo "$!" > .pingpong2.pid diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py index 24bbf2ff5c..d41bf85289 100644 --- a/test/heapwatch/heapWatch.py +++ b/test/heapwatch/heapWatch.py @@ -7,6 +7,7 @@ # python3 test/scripts/heapWatch.py -o /tmp/heaps --period 60s private_network_root/* import argparse +import json import logging import os import signal @@ -15,6 +16,11 @@ import time import urllib.request +# pip install py-algorand-sdk +import algosdk +import algosdk.v2client +import algosdk.v2client.algod + logger = logging.getLogger(__name__) @@ -69,6 +75,7 @@ def __init__(self, path): self.admin_token = admin_token self.headers = {} self._pid = None + self._algod = None def pid(self): if self._pid is None: @@ -76,6 +83,14 @@ def pid(self): self._pid = int(fin.read()) return self._pid + def algod(self): + if self._algod is None: + net = self.net + if not net.startswith('http'): + net = 'http://' + net + self._algod = algosdk.v2client.algod.AlgodClient(self.token, net, self.headers) + return self._algod + def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None): url = 'http://' + self.net + '/urlAuth/' + self.admin_token + '/debug/pprof/' + name response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers)) @@ -97,6 +112,31 @@ def get_heap_snapshot(self, snapshot_name=None, outdir=None): def get_goroutine_snapshot(self, snapshot_name=None, outdir=None): return self.get_pprof_snapshot('goroutine', snapshot_name, outdir) + def get_metrics(self, snapshot_name=None, outdir=None): 
+ url = 'http://' + self.net + '/metrics' + response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers)) + if response.code != 200: + logger.error('could not fetch %s from %s via %r', name, self.path. url) + return + blob = response.read() + outpath = os.path.join(outdir or '.', self.nick + '.' + snapshot_name + '.metrics') + with open(outpath, 'wb') as fout: + fout.write(blob) + logger.debug('%s -> %s', self.nick, outpath) + + def get_blockinfo(self, snapshot_name=None, outdir=None): + algod = self.algod() + status = algod.status() + bi = algod.block_info(status['last-round']) + if snapshot_name is None: + snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime()) + outpath = os.path.join(outdir or '.', self.nick + '.' + snapshot_name + '.blockinfo.json') + bi['block'].pop('txns', None) + with open(outpath, 'wt') as fout: + json.dump(bi, fout) + return bi + #txncount = bi['block']['tc'] + def psHeap(self): # return rss, vsz # ps -o rss,vsz $(cat ${ALGORAND_DATA}/algod.pid) @@ -118,13 +158,17 @@ def __init__(self, args): self.prevsnapshots = {} self.they = [] for path in args.data_dirs: + if not os.path.isdir(path): + continue if os.path.exists(os.path.join(path, 'algod.net')): try: ad = algodDir(path) - logger.debug('found "%s" at %r', ad.nick, ad.path) self.they.append(ad) except: logger.error('bad algod: %r', path, exc_info=True) + else: + logger.debug('not a datadir: %r', path) + logger.debug('data dirs: %r', self.they) def do_snap(self, now): snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime(now)) @@ -132,19 +176,26 @@ def do_snap(self, now): logger.debug('begin snapshot %s', snapshot_name) psheaps = {} newsnapshots = {} - for ad in self.they: - snappath = ad.get_heap_snapshot(snapshot_name, outdir=self.args.out) - newsnapshots[ad.path] = snappath - rss, vsz = ad.psHeap() - if rss and vsz: - psheaps[ad.nick] = (rss, vsz) - for nick, rssvsz in psheaps.items(): - rss, vsz = rssvsz - with open(os.path.join(self.args.out, nick + 
'.heap.csv'), 'at') as fout: - fout.write('{},{},{},{}\n'.format(snapshot_name,snapshot_isotime,rss, vsz)) + if self.args.heaps: + for ad in self.they: + snappath = ad.get_heap_snapshot(snapshot_name, outdir=self.args.out) + newsnapshots[ad.path] = snappath + rss, vsz = ad.psHeap() + if rss and vsz: + psheaps[ad.nick] = (rss, vsz) + for nick, rssvsz in psheaps.items(): + rss, vsz = rssvsz + with open(os.path.join(self.args.out, nick + '.heap.csv'), 'at') as fout: + fout.write('{},{},{},{}\n'.format(snapshot_name,snapshot_isotime,rss, vsz)) if self.args.goroutine: for ad in self.they: ad.get_goroutine_snapshot(snapshot_name, outdir=self.args.out) + if self.args.metrics: + for ad in self.they: + ad.get_metrics(snapshot_name, outdir=self.args.out) + if self.args.blockinfo: + for ad in self.they: + ad.get_blockinfo(snapshot_name, outdir=self.args.out) logger.debug('snapped, processing...') # make absolute and differential plots for path, snappath in newsnapshots.items(): @@ -159,7 +210,10 @@ def do_snap(self, now): def main(): ap = argparse.ArgumentParser() ap.add_argument('data_dirs', nargs='*', help='list paths to algorand datadirs to grab heap profile from') + ap.add_argument('--no-heap', dest='heaps', default=True, action='store_false', help='disable heap snapshot capture') ap.add_argument('--goroutine', default=False, action='store_true', help='also capture goroutine profile') + ap.add_argument('--metrics', default=False, action='store_true', help='also capture /metrics counts') + ap.add_argument('--blockinfo', default=False, action='store_true', help='also capture block header info') ap.add_argument('--period', default=None, help='seconds between automatically capturing') ap.add_argument('-o', '--out', default=None, help='directory to write to') ap.add_argument('--verbose', default=False, action='store_true') @@ -203,6 +257,7 @@ def main(): periodi += 1 nextt += periodSecs app.do_snap(now) + return 0 if __name__ == '__main__': sys.exit(main()) diff --git 
a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py new file mode 100644 index 0000000000..7b17234390 --- /dev/null +++ b/test/heapwatch/metrics_delta.py @@ -0,0 +1,140 @@ +#!/usr/bin/env python3 + +import argparse +import contextlib +import csv +import gzip +import logging +import json +import os +import sys +import time + +logger = logging.getLogger(__name__) + +def num(x): + if '.' in x: + return float(x) + return int(x) + +def parse_metrics(fin): + out = dict() + for line in fin: + if not line: + continue + line = line.strip() + if not line: + continue + if line[0] == '#': + continue + ab = line.split() + out[ab[0]] = num(ab[1]) + return out + +# return b-a +def metrics_delta(a,b): + old_unseen = set(a.keys()) + d = dict() + for k,bv in b.items(): + if k in a: + av = a.get(k, 0) + d[k] = bv-av + old_unseen.remove(k) + else: + d[k] = bv + for k in old_unseen: + d[k] = 0-a[k] + return d + +# slightly smarter open, stdout for '-', auto .gz +def sopen(path, mode): + if path == '-': + return contextlib.nullcontext(sys.stdout) + if path.endswith('.gz'): + return contextlib.closing(gzip.open(path, mode)) + return open(path, mode) + +def main(): + ap = argparse.ArgumentParser() + ap.add_argument('metrics_files', nargs='*') + ap.add_argument('--deltas', default=None, help='path to write csv deltas') + ap.add_argument('--report', default=None, help='path to write csv report') + ap.add_argument('--verbose', default=False, action='store_true') + args = ap.parse_args() + + if args.verbose: + logging.basicConfig(level=logging.DEBUG) + else: + logging.basicConfig(level=logging.INFO) + + reportf = None + writer = None + if args.report: + if args.report == '-': + writer = csv.writer(sys.stdout) + else: + reportf = open(args.report, 'wt') + writer = csv.writer(reportf) + writer.writerow(('when', 'tx bytes/s', 'rx bytes/s','TPS', 's/block')) + prev = None + prevtime = None + prevPath = None + prevbi = None + + deltas = [] + for path in sorted(args.metrics_files): 
+ with open(path, 'rt') as fin: + cur = parse_metrics(fin) + bijsonpath = path.replace('.metrics', '.blockinfo.json') + bi = None + if os.path.exists(bijsonpath): + with open(bijsonpath, 'rt') as fin: + bi = json.load(fin) + curtime = os.path.getmtime(path) + logger.debug('%s: %r', path, cur) + if prev is not None: + d = metrics_delta(prev, cur) + dt = curtime - prevtime + #print("{} ->\n{}".format(prevPath, path)) + #print(json.dumps(d, indent=2, sort_keys=True)) + deltas.append((curtime, d)) + tps = None + blocktime = None + if bi and prevbi: + tps = (bi.get('block',{}).get('tc', 0) - prevbi.get('block',{}).get('tc', 0)) / dt + rounds = (bi.get('block',{}).get('rnd', 0) - prevbi.get('block',{}).get('rnd', 0)) + if rounds != 0: + blocktime = dt/rounds + if writer: + txBytesPerSec = d.get('algod_network_sent_bytes_total{}',0) / dt + rxBytesPerSec = d.get('algod_network_received_bytes_total{}',0) /dt + writer.writerow(( + time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(curtime)), + txBytesPerSec, + rxBytesPerSec, + tps, + blocktime, + )) + prev = cur + prevPath = path + prevtime = curtime + prevbi = bi + if reportf: + reportf.close() + if deltas and args.deltas: + keys = set() + for ct, d in deltas: + keys.update(set(d.keys())) + keys = sorted(keys) + with sopen(args.deltas, 'wt') as fout: + writer = csv.writer(fout) + writer.writerow(['when'] + keys) + for ct, d in deltas: + row = [time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(ct))] + for k in keys: + row.append(d.get(k, None)) + writer.writerow(row) + return 0 + +if __name__ == '__main__': + sys.exit(main()) diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh index ee04ed853a..cb4b37eca5 100755 --- a/test/heapwatch/start.sh +++ b/test/heapwatch/start.sh @@ -20,10 +20,11 @@ goal network start -r "${TESTDIR}" sleep 2 mkdir -p "${TESTDIR}/heaps" -python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --period 10m "${TESTDIR}/"* & +python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o 
"${TESTDIR}/heaps" --period 10m --metrics --blockinfo "${TESTDIR}/"* & echo "$!" > .heapWatch.pid +# TODO: other pingpong modes pingpong run -d "${TESTDIR}/Node1" --tps 10 --rest 0 --run 0 --nftasapersecond 200 & echo "$!" > .pingpong1.pid diff --git a/test/heapwatch/stop.sh b/test/heapwatch/stop.sh index 146d8adcca..b5066a89a4 100755 --- a/test/heapwatch/stop.sh +++ b/test/heapwatch/stop.sh @@ -1,6 +1,6 @@ #!/bin/bash # -# clean up what was started by heapstart.sh +# clean up what was started by start.sh or bwstart.sh set -e set -o pipefail @@ -20,6 +20,10 @@ TESTDIR=$1 if [ -z "${TESTDIR}" ]; then TESTDIR=/tmp/heap_testnetwork fi +if [ -d "${TESTDIR}/net" ]; then + # started with bwstart.sh + TESTDIR="${TESTDIR}/net" +fi goal network stop -r "${TESTDIR}" From ef321d83e2741e48b153d911fe0af9a38f44af5f Mon Sep 17 00:00:00 2001 From: algonautshant <55754073+algonautshant@users.noreply.github.com> Date: Wed, 14 Apr 2021 23:02:56 -0400 Subject: [PATCH 174/215] Fine tune catchup peer selection logic (#2060) The peer selector has the following goals: 1. Block malicious hosts that return invalid blocks 2. Pick hosts that perform better than others 3. Adapt to changes to the performance of hosts 4. Avoid hosts that download fails The current implementation of the peer selector meets all these goals, but it is not flexible to random failures. - It will rank the peers by the first download time. When a good performing peer returns a bad value in the first download, it will have a low rank and may never be selected. - When the download fails from a good performing peer, that peer may never be considered again (until it runs out of all other peers) - The top performing peer is selected all the time, until it's performance degrades. It will never consider other peers as long as the top peer's rank is better than others obtained in the beginning. These situations make a big difference when using archival buckets. 
When switching from a peer in the same region to a peer in a different region, the performance difference is huge. If a local peer fails occasionally, the peer selector needs to be smart enough to evaluate it based on it's overall performance, and not just the most recent performance. This change enhances the peer selector so that it implements the above 4 points, and is resilient to occasional performance changes: - It established the initial ranks with multiple download value points - It penalizes a peer for frequently being selected, to also consider other peers - A single download failure will be forgiven if the peer is otherwise performing well These flexibility is implemented with: - Historical ranks. A moving window of the last 100 ranks is stored, and the new ranks is calculates by taking the average of these values. - Frequency penalty. When a peer is frequently selected, it will be penalized for that. When a peer is not selected, then the penalty will be reduced. This is done by storing a window of its selection frequency, and the penalty is higher (exponential) if the peer is continuously selected. The penalty will quickly get high enough to favor the next best ranking peer. This way, the selection will be diversified. --- catchup/peerSelector.go | 254 ++++++++++++++++++++++++++++++++--- catchup/peerSelector_test.go | 220 +++++++++++++++++++++++++++++- catchup/service.go | 3 +- catchup/universalFetcher.go | 2 +- ledger/ledger.go | 1 + 5 files changed, 456 insertions(+), 24 deletions(-) diff --git a/catchup/peerSelector.go b/catchup/peerSelector.go index ef5004f637..a3b09e00e5 100644 --- a/catchup/peerSelector.go +++ b/catchup/peerSelector.go @@ -18,6 +18,7 @@ package catchup import ( "errors" + "math" "sort" "time" @@ -57,6 +58,9 @@ const ( // then mapped into the a ranking range. 
lowBlockDownloadThreshold = 50 * time.Millisecond highBlockDownloadThreshold = 8 * time.Second + + // Is the lookback window size of peer usage statistics + peerHistoryWindowSize = 100 ) var errPeerSelectorNoPeerPoolsAvailable = errors.New("no peer pools available") @@ -79,8 +83,9 @@ type peersRetriever interface { // peerPoolEntry represents a single peer entry in the pool. It contains // the underlying network peer as well as the peer class. type peerPoolEntry struct { - peer network.Peer - class peerClass + peer network.Peer + class peerClass + history *historicStats } // peerPool is a single pool of peers that shares the same rank. @@ -98,6 +103,151 @@ type peerSelector struct { net peersRetriever peerClasses []peerClass pools []peerPool + counter uint64 +} + +// historicStats stores the past windowSize ranks for the peer passed +// into RankPeer (i.e. no averaging or penalty). The purpose of this +// structure is to compute the rank based on the performance of the +// peer in the past, and to be forgiving of occasional performance +// variations which may not be representative of the peer's overall +// performance. It also stores the penalty history in the form or peer +// selection gaps. +type historicStats struct { + windowSize int + rankSamples []int + rankSum uint64 + requestGaps []uint64 + gapSum float64 + counter uint64 +} + +func makeHistoricStatus(windowSize int) *historicStats { + // Initialize the window (rankSamples) with zeros. + // This way, every peer will slowly build up its profile. + // Otherwise, if the best peer gets a bad download the first time, + // that will determine the rank of the peer. + hs := historicStats{ + windowSize: windowSize, + rankSamples: make([]int, windowSize, windowSize), + requestGaps: make([]uint64, 0, windowSize), + rankSum: 0, + gapSum: 0.0} + return &hs +} + +// computerPenalty is the formula (exponential) used to calculate the +// penalty from the sum of gaps. 
+func (hs *historicStats) computerPenalty() float64 { + return 1 + (math.Exp(hs.gapSum/10.0) / 1000) +} + +// updateRequestPenalty is given a counter, which is the most recent +// counter for ranking a peer. It calculates newGap, which is the +// number of counter ticks since it last was updated (i.e. last ranked +// after being selected). The number of gaps stored is bounded by the +// windowSize. Calculages and returns the new penalty. +func (hs *historicStats) updateRequestPenalty(counter uint64) float64 { + newGap := counter - hs.counter + hs.counter = counter + + if len(hs.requestGaps) == hs.windowSize { + hs.gapSum -= 1.0 / float64(hs.requestGaps[0]) + hs.requestGaps = hs.requestGaps[1:] + } + + hs.requestGaps = append(hs.requestGaps, newGap) + hs.gapSum += 1.0 / float64(newGap) + + return hs.computerPenalty() +} + +// resetRequestPenalty removes steps least recent gaps and recomputes the new penalty. +// Returns the new rank calculated with the new penalty. +// If steps it 0, it is a full reset i.e. drops or gap values. +func (hs *historicStats) resetRequestPenalty(steps int, initialRank int, class peerClass) (rank int) { + if len(hs.requestGaps) == 0 { + return initialRank + } + // resetRequestPenalty cannot move the peer to a better class if the peer was moved + // to a lower class (e.g. 
failed downloads or invalid downloads) + if upperBound(class) < initialRank { + return initialRank + } + // if setps is 0, it is a full reset + if steps == 0 { + hs.requestGaps = make([]uint64, 0, hs.windowSize) + hs.gapSum = 0.0 + return int(float64(hs.rankSum) / float64(len(hs.rankSamples))) + } + + if steps > len(hs.requestGaps) { + steps = len(hs.requestGaps) + } + for s := 0; s < steps; s++ { + hs.gapSum -= 1.0 / float64(hs.requestGaps[s]) + } + hs.requestGaps = hs.requestGaps[steps:] + return int(hs.computerPenalty() * (float64(hs.rankSum) / float64(len(hs.rankSamples)))) +} + +// push pushes a new rank to the historicStats, and returns the new +// rank based on the average of ranks in the windowSize window and the +// penlaty. +func (hs *historicStats) push(value int, counter uint64, class peerClass) (averagedRank int) { + + // This is the lowest ranking class, and is not subject to giving another chance. + // Do not modify this value with historical data. + if value == peerRankInvalidDownload { + return value + } + + // This is a moving window. Remore the least recent value once the window is full + if len(hs.rankSamples) == hs.windowSize { + hs.rankSum -= uint64(hs.rankSamples[0]) + hs.rankSamples = hs.rankSamples[1:] + } + + initialRank := value + + // Download may fail for various reasons. Give it additional tries + // and see if it recovers/improves. + if value == peerRankDownloadFailed { + // Set the rank to 10 + the class upper bound, to evict + // the peer from the class if it is repeatedly + // failing. This is to make sure to switch to the next + // class when all peers in this class are failing. + // Here, +10 is added. This is of little consequence, and the + // purpose is to avoid rounding errors. 
+ value = upperBound(class) + 10 + } + + hs.rankSamples = append(hs.rankSamples, value) + hs.rankSum += uint64(value) + + // The average performance of the peer + average := float64(hs.rankSum) / float64(len(hs.rankSamples)) + + if int(average) > upperBound(class) && initialRank == peerRankDownloadFailed { + // peerRankDownloadFailed will be delayed, to give the peer + // additional time to improve. If does not improve over time, + // the average will exceed the class limit. At this point, + // it will be pushed down to download failed class. + return peerRankDownloadFailed + } + + // A penalty is added relative to how freequently the peer is used + penalty := hs.updateRequestPenalty(counter) + + // The rank based on the performance and the freequency + avgWithPenalty := int(penalty * average) + + // Keep the peer in the same class. The value passed will be + // within bounds (unless it is downloadFailed or + // invalidDownload), but the penalty may push it over. Prevent + // the penalty pushing it off the class bounds. + bounded := boundRankByClass(avgWithPenalty, class) + return bounded } // makePeerSelector creates a peerSelector, given a peersRetriever and peerClass array. @@ -132,23 +282,29 @@ func (ps *peerSelector) GetNextPeer() (peer network.Peer, err error) { } // RankPeer ranks a given peer. -// return true if the value was updated or false otherwise. -func (ps *peerSelector) RankPeer(peer network.Peer, rank int) bool { +// return the old value and the new updated value. +// updated value could be different from the input rank. +func (ps *peerSelector) RankPeer(peer network.Peer, rank int) (int, int) { if peer == nil { - return false + return -1, -1 } ps.mu.Lock() defer ps.mu.Unlock() poolIdx, peerIdx := ps.findPeer(peer) if poolIdx < 0 || peerIdx < 0 { - return false + return -1, -1 } + sortNeeded := false // we need to remove the peer from the pool so we can place it in a different location. 
pool := ps.pools[poolIdx] + ps.counter++ + initialRank := pool.rank + rank = pool.peers[peerIdx].history.push(rank, ps.counter, pool.peers[peerIdx].class) if pool.rank != rank { class := pool.peers[peerIdx].class + peerHistory := pool.peers[peerIdx].history if len(pool.peers) > 1 { pool.peers = append(pool.peers[:peerIdx], pool.peers[peerIdx+1:]...) ps.pools[poolIdx] = pool @@ -157,13 +313,41 @@ func (ps *peerSelector) RankPeer(peer network.Peer, rank int) bool { ps.pools = append(ps.pools[:poolIdx], ps.pools[poolIdx+1:]...) } - sortNeeded := ps.addToPool(peer, rank, class) - if sortNeeded { - ps.sort() - } + sortNeeded = ps.addToPool(peer, rank, class, peerHistory) } - return true + // Update the ranks of the peers by reducing the penalty for not beeing selected + for pl := len(ps.pools) - 1; pl >= 0; pl-- { + pool := ps.pools[pl] + for pr := len(pool.peers) - 1; pr >= 0; pr-- { + localPeer := pool.peers[pr] + if pool.peers[pr].peer == peer { + continue + } + // make the removal of penalty at a faster rate than adding it, so that the + // performance of the peer dominates in the evaluation over the freequency. + // Otherwise, the peer selection will oscillate between the good performing and + // a bad performing peers when sufficient penalty is accumulated to the good peer. + newRank := localPeer.history.resetRequestPenalty(5, pool.rank, pool.peers[pr].class) + if newRank != pool.rank { + upeer := pool.peers[pr].peer + class := pool.peers[pr].class + peerHistory := pool.peers[pr].history + if len(pool.peers) > 1 { + pool.peers = append(pool.peers[:pr], pool.peers[pr+1:]...) + ps.pools[pl] = pool + } else { + // the last peer was removed from the pool; delete this pool. + ps.pools = append(ps.pools[:pl], ps.pools[pl+1:]...) 
+ } + sortNeeded = ps.addToPool(upeer, newRank, class, peerHistory) || sortNeeded + } + } + } + if sortNeeded { + ps.sort() + } + return initialRank, rank } // PeerDownloadDurationToRank calculates the rank for a peer given a peer and the block download time. @@ -184,24 +368,22 @@ func (ps *peerSelector) PeerDownloadDurationToRank(peer network.Peer, blockDownl return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank2LowBlockTime, peerRank2HighBlockTime) default: // i.e. peerRankInitialFourthPriority return downloadDurationToRank(blockDownloadDuration, lowBlockDownloadThreshold, highBlockDownloadThreshold, peerRank3LowBlockTime, peerRank3HighBlockTime) - - } } // addToPool adds a given peer to the correct group. If no group exists for that peer's rank, // a new group is created. // The method return true if a new group was created ( suggesting that the pools list would need to be re-ordered ), or false otherwise. -func (ps *peerSelector) addToPool(peer network.Peer, rank int, class peerClass) bool { +func (ps *peerSelector) addToPool(peer network.Peer, rank int, class peerClass, peerHistory *historicStats) bool { // see if we already have a list with that rank: for i, pool := range ps.pools { if pool.rank == rank { // we found an existing group, add this peer to the list. - ps.pools[i].peers = append(pool.peers, peerPoolEntry{peer: peer, class: class}) + ps.pools[i].peers = append(pool.peers, peerPoolEntry{peer: peer, class: class, history: peerHistory}) return false } } - ps.pools = append(ps.pools, peerPool{rank: rank, peers: []peerPoolEntry{{peer: peer, class: class}}}) + ps.pools = append(ps.pools, peerPool{rank: rank, peers: []peerPoolEntry{{peer: peer, class: class, history: peerHistory}}}) return true } @@ -247,8 +429,8 @@ func (ps *peerSelector) refreshAvailablePeers() { delete(existingPeers, peerAddress) continue } - // it's an entry which we did not had before. 
- sortNeeded = ps.addToPool(peer, initClass.initialRank, initClass) || sortNeeded + // it's an entry which we did not have before. + sortNeeded = ps.addToPool(peer, initClass.initialRank, initClass, makeHistoricStatus(peerHistoryWindowSize)) || sortNeeded } } @@ -305,3 +487,39 @@ func downloadDurationToRank(downloadDuration, minDownloadDuration, maxDownloadDu rank = minRank + int((downloadDuration-minDownloadDuration).Nanoseconds()*int64(maxRank-minRank)/(maxDownloadDuration-minDownloadDuration).Nanoseconds()) return } + +func lowerBound(class peerClass) int { + switch class.initialRank { + case peerRankInitialFirstPriority: + return peerRank0LowBlockTime + case peerRankInitialSecondPriority: + return peerRank1LowBlockTime + case peerRankInitialThirdPriority: + return peerRank2LowBlockTime + default: // i.e. peerRankInitialFourthPriority + return peerRank3LowBlockTime + } +} + +func upperBound(class peerClass) int { + switch class.initialRank { + case peerRankInitialFirstPriority: + return peerRank0HighBlockTime + case peerRankInitialSecondPriority: + return peerRank1HighBlockTime + case peerRankInitialThirdPriority: + return peerRank2HighBlockTime + default: // i.e. 
peerRankInitialFourthPriority + return peerRank3HighBlockTime + } +} + +func boundRankByClass(rank int, class peerClass) int { + if rank < lowerBound(class) { + return lowerBound(class) + } + if rank > upperBound(class) { + return upperBound(class) + } + return rank +} diff --git a/catchup/peerSelector_test.go b/catchup/peerSelector_test.go index 5432234198..b8f1e57c3f 100644 --- a/catchup/peerSelector_test.go +++ b/catchup/peerSelector_test.go @@ -18,12 +18,14 @@ package catchup import ( "context" + "fmt" "net/http" "testing" "time" "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" ) @@ -129,13 +131,15 @@ func TestPeerSelector(t *testing.T) { // add another peer peers = []network.Peer{&mockHTTPPeer{address: "54321"}, &mockHTTPPeer{address: "abcde"}} - require.True(t, peerSelector.RankPeer(peer, 5)) + r1, r2 := peerSelector.RankPeer(peer, 5) + require.True(t, r1 != r2) peer, err = peerSelector.GetNextPeer() require.NoError(t, err) require.Equal(t, "abcde", peerAddress(peer)) - require.True(t, peerSelector.RankPeer(peer, 10)) + r1, r2 = peerSelector.RankPeer(peer, 10) + require.True(t, r1 != r2) peer, err = peerSelector.GetNextPeer() require.NoError(t, err) @@ -152,8 +156,10 @@ func TestPeerSelector(t *testing.T) { require.Equal(t, errPeerSelectorNoPeerPoolsAvailable, err) require.Nil(t, peer) - require.False(t, peerSelector.RankPeer(nil, 10)) - require.False(t, peerSelector.RankPeer(&mockHTTPPeer{address: "abc123"}, 10)) + r1, r2 = peerSelector.RankPeer(nil, 10) + require.False(t, r1 != r2 ) + r2, r2 = peerSelector.RankPeer(&mockHTTPPeer{address: "abc123"}, 10) + require.False(t, r1 != r2) return } @@ -218,3 +224,209 @@ func TestFindMissingPeer(t *testing.T) { require.Equal(t, -1, poolIdx) require.Equal(t, -1, peerIdx) } + +func TestHistoricData(t *testing.T) { + + peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, 
&mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}} + peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}} + + peerSelector := makePeerSelector( + makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) { + for _, opt := range options { + if opt == network.PeersPhonebookArchivers { + peers = append(peers, peers1...) + } else { + peers = append(peers, peers2...) + } + } + return + }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}}, + ) + + var counters [5]int + for i := 0; i < 1000; i++ { + peer, getPeerErr := peerSelector.GetNextPeer() + + switch peer.(*mockHTTPPeer).address { + case "a1": + counters[0]++ + case "a2": + counters[1]++ + case "a3": + counters[2]++ + case "b1": + counters[3]++ + case "b2": + counters[4]++ + } + + require.NoError(t, getPeerErr) + randVal := float64(crypto.RandUint64()%uint64(100)) / 100 + randVal = randVal + 1 + if randVal < 1.98 { + var duration time.Duration + switch peer.(*mockHTTPPeer).address { + case "a1": + duration = time.Duration(1500 * float64(time.Millisecond) * randVal) + case "a2": + duration = time.Duration(500 * float64(time.Millisecond) * randVal) + case "a3": + duration = time.Duration(100 * float64(time.Millisecond) * randVal) + } + peerRank := peerSelector.PeerDownloadDurationToRank(peer, duration) + peerSelector.RankPeer(peer, peerRank) + } else { + peerSelector.RankPeer(peer, peerRankDownloadFailed) + } + } + + fmt.Printf("a1: %d\n", counters[0]) + fmt.Printf("a2: %d\n", counters[1]) + fmt.Printf("a3: %d\n", counters[2]) + fmt.Printf("b1: %d\n", counters[3]) + fmt.Printf("b2: %d\n", counters[4]) + require.GreaterOrEqual(t, counters[2], counters[1]) + require.GreaterOrEqual(t, counters[2], counters[0]) + require.Equal(t, counters[3], 0) + require.Equal(t, counters[4], 0) +} + +func TestPeersDownloadFailed(t 
*testing.T) { + + peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}} + peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}} + + peerSelector := makePeerSelector( + makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) { + for _, opt := range options { + if opt == network.PeersPhonebookArchivers { + peers = append(peers, peers1...) + } else { + peers = append(peers, peers2...) + } + } + return + }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}}, + ) + + var counters [5]int + for i := 0; i < 1000; i++ { + peer, getPeerErr := peerSelector.GetNextPeer() + + switch peer.(*mockHTTPPeer).address { + case "a1": + counters[0]++ + case "a2": + counters[1]++ + case "a3": + counters[2]++ + case "b1": + counters[3]++ + case "b2": + counters[4]++ + } + + require.NoError(t, getPeerErr) + + if i < 500 || peerAddress(peer) == "b1" || peerAddress(peer) == "b2" { + randVal := float64(crypto.RandUint64()%uint64(100)) / 100 + randVal = randVal + 1 + if randVal < 1.98 { + duration := time.Duration(100 * float64(time.Millisecond) * randVal) + peerRank := peerSelector.PeerDownloadDurationToRank(peer, duration) + peerSelector.RankPeer(peer, peerRank) + } else { + peerSelector.RankPeer(peer, peerRankDownloadFailed) + } + } else { + peerSelector.RankPeer(peer, peerRankDownloadFailed) + } + } + + fmt.Printf("a1: %d\n", counters[0]) + fmt.Printf("a2: %d\n", counters[1]) + fmt.Printf("a3: %d\n", counters[2]) + fmt.Printf("b1: %d\n", counters[3]) + fmt.Printf("b2: %d\n", counters[4]) + require.GreaterOrEqual(t, counters[3], 20) + require.GreaterOrEqual(t, counters[4], 20) + + b1orb2 := peerAddress(peerSelector.pools[0].peers[0].peer) == "b1" || peerAddress(peerSelector.pools[0].peers[0].peer) == "b2" + require.True(t, b1orb2) + if 
len(peerSelector.pools) == 2 { + b1orb2 := peerAddress(peerSelector.pools[0].peers[1].peer) == "b1" || peerAddress(peerSelector.pools[0].peers[1].peer) == "b2" + require.True(t, b1orb2) + require.Equal(t, peerSelector.pools[1].rank, 900) + require.Equal(t, len(peerSelector.pools[1].peers), 3) + } else { + b1orb2 := peerAddress(peerSelector.pools[1].peers[0].peer) == "b1" || peerAddress(peerSelector.pools[1].peers[0].peer) == "b2" + require.True(t, b1orb2) + require.Equal(t, peerSelector.pools[2].rank, 900) + require.Equal(t, len(peerSelector.pools[2].peers), 3) + } + +} + +// TestPenalty tests that the penalty is calculated correctly and one peer +// is not dominating all the selection. +func TestPenalty(t *testing.T) { + + peers1 := []network.Peer{&mockHTTPPeer{address: "a1"}, &mockHTTPPeer{address: "a2"}, &mockHTTPPeer{address: "a3"}} + peers2 := []network.Peer{&mockHTTPPeer{address: "b1"}, &mockHTTPPeer{address: "b2"}} + + peerSelector := makePeerSelector( + makePeersRetrieverStub(func(options ...network.PeerOption) (peers []network.Peer) { + for _, opt := range options { + if opt == network.PeersPhonebookArchivers { + peers = append(peers, peers1...) + } else { + peers = append(peers, peers2...) 
+ } + } + return + }), []peerClass{{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersPhonebookArchivers}, + {initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays}}, + ) + + var counters [5]int + for i := 0; i < 1000; i++ { + peer, getPeerErr := peerSelector.GetNextPeer() + switch peer.(*mockHTTPPeer).address { + case "a1": + counters[0]++ + case "a2": + counters[1]++ + case "a3": + counters[2]++ + case "b1": + counters[3]++ + case "b2": + counters[4]++ + } + + require.NoError(t, getPeerErr) + var duration time.Duration + switch peer.(*mockHTTPPeer).address { + case "a1": + duration = time.Duration(1500 * float64(time.Millisecond)) + case "a2": + duration = time.Duration(500 * float64(time.Millisecond)) + case "a3": + duration = time.Duration(100 * float64(time.Millisecond)) + } + peerRank := peerSelector.PeerDownloadDurationToRank(peer, duration) + peerSelector.RankPeer(peer, peerRank) + } + + fmt.Printf("a1: %d\n", counters[0]) + fmt.Printf("a2: %d\n", counters[1]) + fmt.Printf("a3: %d\n", counters[2]) + fmt.Printf("b1: %d\n", counters[3]) + fmt.Printf("b2: %d\n", counters[4]) + require.GreaterOrEqual(t, counters[1], 50) + require.GreaterOrEqual(t, counters[2], 2*counters[1]) + require.Equal(t, counters[3], 0) + require.Equal(t, counters[4], 0) +} diff --git a/catchup/service.go b/catchup/service.go index 0674e8440a..5e364dd17d 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -256,7 +256,8 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, } peerRank := peerSelector.PeerDownloadDurationToRank(peer, blockDownloadDuration) - peerSelector.RankPeer(peer, peerRank) + r1, r2 := peerSelector.RankPeer(peer, peerRank) + s.log.Debugf("fetchAndWrite(%d): ranked peer with %d from %d to %d", r, peerRank, r1, r2) // Write to ledger, noting that ledger writes must be in order select { diff --git a/catchup/universalFetcher.go b/catchup/universalFetcher.go index 9d33d1f20f..6ed00790ab 
100644 --- a/catchup/universalFetcher.go +++ b/catchup/universalFetcher.go @@ -86,7 +86,7 @@ func (uf *universalBlockFetcher) fetchBlock(ctx context.Context, round basics.Ro if err != nil { return nil, nil, time.Duration(0), err } - uf.log.Debugf("fetchBlock: downloaded block %d from %s", uint64(round), address) + uf.log.Debugf("fetchBlock: downloaded block %d in %d from %s", uint64(round), downloadDuration, address) return block, cert, downloadDuration, err } diff --git a/ledger/ledger.go b/ledger/ledger.go index 9fc57cbf82..2b0dbf6933 100644 --- a/ledger/ledger.go +++ b/ledger/ledger.go @@ -576,6 +576,7 @@ func (l *Ledger) AddValidatedBlock(vb ValidatedBlock, cert agreement.Certificate } l.headerCache.Put(vb.blk.Round(), vb.blk.BlockHeader) l.trackers.newBlock(vb.blk, vb.delta) + l.log.Debugf("added blk %d", vb.blk.Round()) return nil } From 041e1f92d9c190bdc6d6c78b1dd04ef19b8ec03b Mon Sep 17 00:00:00 2001 From: shiqizng <80276844+shiqizng@users.noreply.github.com> Date: Wed, 14 Apr 2021 23:18:36 -0400 Subject: [PATCH 175/215] netgoal: add pre-generated network support (#2046) This PR creates the database files for a given network. It meant to emulate a mature running network. 
--- cmd/netgoal/generate.go | 43 + cmd/netgoal/network.go | 11 + cmd/netgoal/recipe.go | 1 + netdeploy/remote/bootstrappedNetwork.go | 45 + netdeploy/remote/bootstrappedNetwork_test.go | 49 + netdeploy/remote/deployedNetwork.go | 483 +++- netdeploy/remote/deployedNetwork_test.go | 121 + .../networks/bootstrapped/badSpec.json | 8 + .../networks/bootstrapped/okSpec.json | 8 + .../recipes/bootstrappedScenario/Makefile | 16 + .../bootstrappedScenario/boostrappedFile.json | 8 + .../bootstrappedScenario/gen_topology.py | 27 + .../recipes/bootstrappedScenario/genesis.json | 1013 +++++++ .../recipes/bootstrappedScenario/net.json | 2564 +++++++++++++++++ .../recipes/bootstrappedScenario/node.json | 22 + .../bootstrappedScenario/nonPartNode.json | 5 + .../recipes/bootstrappedScenario/recipe.json | 8 + .../recipes/bootstrappedScenario/relay.json | 11 + .../bootstrappedScenario/topology.json | 156 + 19 files changed, 4595 insertions(+), 4 deletions(-) create mode 100644 netdeploy/remote/bootstrappedNetwork.go create mode 100644 netdeploy/remote/bootstrappedNetwork_test.go create mode 100644 netdeploy/remote/deployedNetwork_test.go create mode 100644 test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json create mode 100644 test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json create 
mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json create mode 100644 test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json diff --git a/cmd/netgoal/generate.go b/cmd/netgoal/generate.go index 349fbfdf09..25379778c8 100644 --- a/cmd/netgoal/generate.go +++ b/cmd/netgoal/generate.go @@ -44,6 +44,12 @@ var walletsToGenerate int var nodeTemplatePath string var nonParticipatingNodeTemplatePath string var relayTemplatePath string +var sourceWallet string +var rounds uint64 +var roundTxnCount uint64 +var accountsCount uint64 +var assetsCount uint64 +var applicationCount uint64 func init() { rootCmd.AddCommand(generateCmd) @@ -61,6 +67,12 @@ func init() { generateCmd.Flags().StringVarP(&nodeTemplatePath, "node-template", "", "", "json for one node") generateCmd.Flags().StringVarP(&nonParticipatingNodeTemplatePath, "non-participating-node-template", "", "", "json for non participating node") generateCmd.Flags().StringVarP(&relayTemplatePath, "relay-template", "", "", "json for a relay node") + generateCmd.Flags().StringVarP(&sourceWallet, "wallet-name", "", "", "Source wallet name") + generateCmd.Flags().Uint64VarP(&rounds, "rounds", "", 13, "Number of rounds") + generateCmd.Flags().Uint64VarP(&roundTxnCount, "ntxns", "", 17, "Transaction count") + generateCmd.Flags().Uint64VarP(&accountsCount, "naccounts", "", 31, "Account count") + generateCmd.Flags().Uint64VarP(&assetsCount, "nassets", "", 5, "Asset count") + generateCmd.Flags().Uint64VarP(&applicationCount, "napps", "", 7, "Application Count") longParts := make([]string, len(generateTemplateLines)+1) longParts[0] = generateCmd.Long @@ -75,6 +87,7 @@ var generateTemplateLines = []string{ "otwt => OneThousandWallets network template", "otwg => OneThousandWallets genesis data", "ohwg => OneHundredWallets genesis data", + "loadingFile => create accounts database file 
according to -wallet-name -rounds -ntxns -naccts -nassets -napps options", } var generateCmd = &cobra.Command{ @@ -157,6 +170,12 @@ template modes for -t:`, err = generateWalletGenesis(outputFilename, 1000, 0) case "ohwg": err = generateWalletGenesis(outputFilename, 100, 0) + case "loadingfile": + if sourceWallet == "" { + reportErrorf("must specify source wallet name with -wname.") + } + + err = generateAccountsLoadingFileTemplate(outputFilename, sourceWallet, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount) default: reportInfoln("Please specify a valid template name.\nSupported templates are:") for _, line := range generateTemplateLines { @@ -472,3 +491,27 @@ func saveGenesisDataToDisk(genesisData gen.GenesisData, filename string) error { } return err } + +func generateAccountsLoadingFileTemplate(templateFilename, sourceWallet string, rounds, roundTxnCount, accountsCount, assetsCount, applicationCount uint64) error { + + var data = remote.BootstrappedNetwork{ + NumRounds: rounds, + RoundTransactionsCount: roundTxnCount, + GeneratedAccountsCount: accountsCount, + GeneratedAssetsCount: assetsCount, + GeneratedApplicationCount: applicationCount, + SourceWalletName: sourceWallet, + } + return saveLoadingFileDataToDisk(data, templateFilename) +} + +func saveLoadingFileDataToDisk(data remote.BootstrappedNetwork, filename string) error { + f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) + if err != nil { + return err + } + defer f.Close() + + enc := codecs.NewFormattedJSONEncoder(f) + return enc.Encode(data) +} diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go index 5529e75ec8..199460c249 100644 --- a/cmd/netgoal/network.go +++ b/cmd/netgoal/network.go @@ -37,6 +37,7 @@ var miscStringStringTokens []string var networkUseGenesisFiles bool var networkIgnoreExistingDir bool +var bootstrapLoadingFile bool func init() { rootCmd.AddCommand(networkBuildCmd) @@ -50,6 +51,7 @@ func init() { 
networkBuildCmd.MarkFlagRequired("recipe") networkBuildCmd.Flags().BoolVarP(&networkUseGenesisFiles, "use-existing-files", "e", false, "Use existing genesis files.") + networkBuildCmd.Flags().BoolVarP(&bootstrapLoadingFile, "gen-db-files", "b", false, "Generate database files.") networkBuildCmd.Flags().BoolVarP(&networkIgnoreExistingDir, "force", "f", false, "Force generation into existing directory.") networkBuildCmd.Flags().StringSliceVarP(&miscStringStringTokens, "val", "v", nil, "name=value, may be reapeated") @@ -140,7 +142,16 @@ func runBuildNetwork() (err error) { net.GenesisData.VersionModifier = networkGenesisVersionModifier } + if bootstrapLoadingFile { + fileTemplate, err := remote.LoadBootstrappedData(resolveFile(r.BootstrappedFile, templateBaseDir)) + if err != nil { + return fmt.Errorf("error resolving bootstrap file: %v", err) + } + net.BootstrappedNet = fileTemplate + } + net.SetUseExistingGenesisFiles(networkUseGenesisFiles) + net.SetUseBoostrappedFiles(bootstrapLoadingFile) err = net.Validate(buildConfig, networkRootDir) if err != nil { return fmt.Errorf("error validating Network Config file: %v", err) diff --git a/cmd/netgoal/recipe.go b/cmd/netgoal/recipe.go index b2210d3529..ffd31254fe 100644 --- a/cmd/netgoal/recipe.go +++ b/cmd/netgoal/recipe.go @@ -22,4 +22,5 @@ type recipe struct { HostTemplatesFile string NetworkFile string TopologyFile string + BootstrappedFile string } diff --git a/netdeploy/remote/bootstrappedNetwork.go b/netdeploy/remote/bootstrappedNetwork.go new file mode 100644 index 0000000000..374d857801 --- /dev/null +++ b/netdeploy/remote/bootstrappedNetwork.go @@ -0,0 +1,45 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. 
+// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package remote + +import ( + "encoding/json" + "os" +) + +//BootstrappedNetwork contains the specs for generating db files +type BootstrappedNetwork struct { + NumRounds uint64 `json:"numRounds"` + RoundTransactionsCount uint64 `json:"roundTransactionsCount"` + GeneratedAccountsCount uint64 `json:"generatedAccountsCount"` + GeneratedAssetsCount uint64 `json:"generatedAssetsCount"` + GeneratedApplicationCount uint64 `json:"generatedApplicationCount"` + SourceWalletName string `json:"sourceWalletName"` +} + +// LoadBootstrappedData loads a bootstrappedFile structure from a json file +func LoadBootstrappedData(file string) (data BootstrappedNetwork, err error) { + f, err := os.Open(file) + if err != nil { + return + } + defer f.Close() + + dec := json.NewDecoder(f) + err = dec.Decode(&data) + return data, err +} diff --git a/netdeploy/remote/bootstrappedNetwork_test.go b/netdeploy/remote/bootstrappedNetwork_test.go new file mode 100644 index 0000000000..e863912db5 --- /dev/null +++ b/netdeploy/remote/bootstrappedNetwork_test.go @@ -0,0 +1,49 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package remote + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadBootstrappedData(t *testing.T) { + badSpecPath := filepath.Join("./../../test", "testdata/deployednettemplates/networks/bootstrapped/badSpec.json") + _, err := LoadBootstrappedData(badSpecPath) + require.NotEqual(t, nil, err) + + okSpecPath := filepath.Join("./../../test", "testdata/deployednettemplates/networks/bootstrapped/okSpec.json") + var data BootstrappedNetwork + data, err = LoadBootstrappedData(okSpecPath) + expected := BootstrappedNetwork{ + NumRounds: 65000, + RoundTransactionsCount: 1000, + GeneratedAccountsCount: 7000000, + GeneratedAssetsCount: 200000, + GeneratedApplicationCount: 1000000, + SourceWalletName: "wallet1", + } + require.Equal(t, nil, err) + require.Equal(t, data.NumRounds, expected.NumRounds) + require.Equal(t, data.RoundTransactionsCount, expected.RoundTransactionsCount) + require.Equal(t, data.GeneratedAccountsCount, expected.GeneratedAccountsCount) + require.Equal(t, data.GeneratedAssetsCount, expected.GeneratedAssetsCount) + require.Equal(t, data.GeneratedApplicationCount, expected.GeneratedApplicationCount) + require.Equal(t, data.SourceWalletName, expected.SourceWalletName) +} diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index cda4b37003..48ff12c157 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -20,13 +20,24 @@ import ( "encoding/json" "fmt" "io/ioutil" + "math/rand" "os" "path/filepath" "strconv" "strings" + "time" + "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/basics" + 
"github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/gen" + "github.com/algorand/go-algorand/ledger" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/codecs" ) @@ -62,13 +73,48 @@ type DeployedNetworkConfig struct { // DeployedNetwork represents the complete configuration specification for a deployed network type DeployedNetwork struct { - useExistingGenesis bool + useExistingGenesis bool + createBoostrappedNetwork bool + GenesisData gen.GenesisData + Topology topology + Hosts []HostConfig + BootstrappedNet BootstrappedNetwork +} + +type netState struct { + nAccounts uint64 + nAssets uint64 + nApplications uint64 + roundTxnCnt uint64 + + assetPerAcct int + appsPerAcct int + + genesisID string + genesisHash crypto.Digest + poolAddr basics.Address + sinkAddr basics.Address + + accountsCreated bool + txnState protocol.TxType - GenesisData gen.GenesisData - Topology topology - Hosts []HostConfig + round basics.Round + accounts []basics.Address + txnCount uint64 + fundPerAccount basics.MicroAlgos } +const program = `#pragma version 2 +txn ApplicationID +bz ok +int 0 +byte "key" +byte "value" +app_local_put +ok: +int 1 +` + // InitDeployedNetworkConfig loads the DeployedNetworkConfig from a file func InitDeployedNetworkConfig(file string, buildConfig BuildConfig) (cfg DeployedNetworkConfig, err error) { processedFile, err := loadAndProcessConfig(file, buildConfig) @@ -199,6 +245,15 @@ func (cfg *DeployedNetwork) SetUseExistingGenesisFiles(useExisting bool) bool { return old } +// SetUseBoostrappedFiles sets the override flag indicating we should use existing genesis +// files instead of generating new ones. This is useful for permanent networks like devnet and testnet. 
+// Returns the previous value. +func (cfg *DeployedNetwork) SetUseBoostrappedFiles(boostrappedFile bool) bool { + old := cfg.createBoostrappedNetwork + cfg.createBoostrappedNetwork = boostrappedFile + return old +} + // Validate uses the specified template to deploy a new private network // under the specified root directory. func (cfg DeployedNetwork) Validate(buildCfg BuildConfig, rootDir string) (err error) { @@ -283,9 +338,429 @@ func (cfg DeployedNetwork) BuildNetworkFromTemplate(buildCfg BuildConfig, rootDi return } + if cfg.createBoostrappedNetwork { + fmt.Println("Generating db files ") + + cfg.GenerateDatabaseFiles(cfg.BootstrappedNet, genesisFolder) + } + return } +//GenerateDatabaseFiles generates database files according to the configurations +func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, genesisFolder string) error { + + accounts := make(map[basics.Address]basics.AccountData) + + genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(genesisFolder, "genesis.json")) + if err != nil { + return err + } + + srcWallet := getGenesisAlloc(fileCfgs.SourceWalletName, genesis.Allocation) + if srcWallet.Address == "" { + return fmt.Errorf("error finding source wallet address") + } + + rewardsPool := getGenesisAlloc("RewardsPool", genesis.Allocation) + if rewardsPool.Address == "" { + return fmt.Errorf("error finding source rewards ppol address") + } + + feeSink := getGenesisAlloc("FeeSink", genesis.Allocation) + if feeSink.Address == "" { + return fmt.Errorf("error finding fee sink address") + } + src, err := basics.UnmarshalChecksumAddress(srcWallet.Address) + if err != nil { + return fmt.Errorf("failed to unmarshal src address : %w", err) + } + poolAddr, err := basics.UnmarshalChecksumAddress(rewardsPool.Address) + if err != nil { + return fmt.Errorf("failed to unmarshal rewards pool address %w", err) + } + sinkAddr, err := basics.UnmarshalChecksumAddress(feeSink.Address) + if err != nil { + return fmt.Errorf("failed to 
unmarshal fee sink address %w", err) + } + + //initial state + bootstrappedNet := netState{ + nAssets: fileCfgs.GeneratedAssetsCount, + nApplications: fileCfgs.GeneratedApplicationCount, + txnState: protocol.PaymentTx, + roundTxnCnt: fileCfgs.RoundTransactionsCount, + round: basics.Round(0), + genesisID: genesis.ID(), + genesisHash: crypto.HashObj(genesis), + } + + var params config.ConsensusParams + if len(genesis.Proto) == 0 { + params = config.Consensus[protocol.ConsensusCurrentVersion] + } else { + params = config.Consensus[genesis.Proto] + } + + minAccounts := accountsNeeded(fileCfgs.GeneratedApplicationCount, fileCfgs.GeneratedAssetsCount, params) + nAccounts := fileCfgs.GeneratedAccountsCount + if minAccounts > nAccounts { + bootstrappedNet.nAccounts = minAccounts + } else { + bootstrappedNet.nAccounts = nAccounts + } + accounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, rewardsPool.State.MicroAlgos) + accounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, feeSink.State.MicroAlgos) + //fund src account with enough funding + bootstrappedNet.fundPerAccount = basics.MicroAlgos{Raw: uint64(bootstrappedNet.nAssets) * params.MinBalance * 2} + totalFunds := srcWallet.State.MicroAlgos.Raw + bootstrappedNet.fundPerAccount.Raw*bootstrappedNet.nAccounts + bootstrappedNet.roundTxnCnt*fileCfgs.NumRounds + accounts[src] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: totalFunds}) + + bootstrappedNet.poolAddr = poolAddr + bootstrappedNet.sinkAddr = sinkAddr + + //init block + initState, err := generateInitState(accounts, &bootstrappedNet) + if err != nil { + return err + } + localCfg := config.GetDefaultLocal() + localCfg.Archival = true + localCfg.CatchpointTracking = -1 + localCfg.LedgerSynchronousMode = 0 + log := logging.NewLogger() + l, err := ledger.OpenLedger(log, filepath.Join(genesisFolder, "bootstrapped"), false, initState, localCfg) + if err != nil { + return err + } + + //create accounts, apps and assets + prev, _ 
:= l.Block(l.Latest()) + err = generateAccounts(src, fileCfgs.RoundTransactionsCount, prev, l, &bootstrappedNet, params) + if err != nil { + return err + } + + //create more transactions + prev, _ = l.Block(l.Latest()) + for i := uint64(bootstrappedNet.round); i < fileCfgs.NumRounds; i++ { + bootstrappedNet.round++ + blk, _ := createBlock(src, prev, fileCfgs.RoundTransactionsCount, &bootstrappedNet, params) + err = l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round}) + if err != nil { + fmt.Printf("Error %v\n", err) + return err + } + prev, _ = l.Block(l.Latest()) + } + + l.WaitForCommit(bootstrappedNet.round) + l.Close() + + localCfg.CatchpointTracking = 0 + l, err = ledger.OpenLedger(log, genesisFolder+"/bootstrapped", false, initState, localCfg) + if err != nil { + return err + } + l.Close() + + return nil +} + +func getGenesisAlloc(name string, allocation []bookkeeping.GenesisAllocation) bookkeeping.GenesisAllocation { + name = strings.ToLower(name) + for _, alloc := range allocation { + if strings.ToLower(alloc.Comment) == name { + return alloc + } + } + return bookkeeping.GenesisAllocation{} +} + +func keypair() *crypto.SignatureSecrets { + var seed crypto.Seed + crypto.RandBytes(seed[:]) + s := crypto.GenerateSignatureSecrets(seed) + return s +} + +func generateInitState(accounts map[basics.Address]basics.AccountData, bootstrappedNet *netState) (ledger.InitState, error) { + + var initState ledger.InitState + + block := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + TimeStamp: time.Now().Unix(), + GenesisID: bootstrappedNet.genesisID, + GenesisHash: bootstrappedNet.genesisHash, + Round: bootstrappedNet.round, + RewardsState: bookkeeping.RewardsState{ + RewardsRate: 1, + RewardsPool: bootstrappedNet.poolAddr, + FeeSink: bootstrappedNet.sinkAddr, + }, + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: protocol.ConsensusCurrentVersion, + }, + TxnCounter: 0, + }, + } + + initState.Block = block + initState.Accounts = 
accounts + initState.GenesisHash = bootstrappedNet.genesisHash + return initState, nil +} + +func createBlock(src basics.Address, prev bookkeeping.Block, roundTxnCnt uint64, bootstrappedNet *netState, csParams config.ConsensusParams) (bookkeeping.Block, error) { + payset := make([]transactions.SignedTxnInBlock, 0, roundTxnCnt) + txibs := make([]transactions.SignedTxnInBlock, 0, roundTxnCnt) + + block := bookkeeping.Block{ + BlockHeader: bookkeeping.BlockHeader{ + TimeStamp: prev.TimeStamp + int64(crypto.RandUint64()%100*1000), + GenesisID: bootstrappedNet.genesisID, + GenesisHash: bootstrappedNet.genesisHash, + Round: bootstrappedNet.round, + RewardsState: bookkeeping.RewardsState{ + RewardsRate: 1, + RewardsPool: prev.RewardsPool, + RewardsLevel: prev.RewardsLevel, + FeeSink: prev.FeeSink, + }, + UpgradeState: bookkeeping.UpgradeState{ + CurrentProtocol: prev.CurrentProtocol, + }, + TxnCounter: bootstrappedNet.txnCount, + }, + } + + stxns, err := createSignedTx(src, bootstrappedNet.round, csParams, bootstrappedNet) + if err != nil { + return bookkeeping.Block{}, err + } + + for _, stxn := range stxns { + txib, err := block.EncodeSignedTxn(stxn, transactions.ApplyData{}) + if err != nil { + return bookkeeping.Block{}, err + } + txibs = append(txibs, txib) + } + + payset = append(payset, txibs...) 
+ bootstrappedNet.txnCount += uint64(len(payset)) + block.Payset = payset + block.TxnRoot, err = block.PaysetCommit() + if err != nil { + return bookkeeping.Block{}, err + } + + return block, nil +} + +func generateAccounts(src basics.Address, roundTxnCnt uint64, prev bookkeeping.Block, l *ledger.Ledger, bootstrappedNet *netState, csParams config.ConsensusParams) error { + + for !bootstrappedNet.accountsCreated { + //create accounts + bootstrappedNet.round++ + blk, _ := createBlock(src, prev, roundTxnCnt, bootstrappedNet, csParams) + err := l.AddBlock(blk, agreement.Certificate{Round: bootstrappedNet.round}) + if err != nil { + fmt.Printf("Error %v\n", err) + return err + } + + prev, _ = l.Block(l.Latest()) + + } + + return nil +} + +func accountsNeeded(appsCount uint64, assetCount uint64, params config.ConsensusParams) uint64 { + var maxApps uint64 + var nAppAcct uint64 + + maxApps = uint64(params.MaxAppsCreated) + + if maxApps > 0 { + nAppAcct = appsCount / maxApps + if appsCount%maxApps != 0 { + nAppAcct++ + } + } + + var maxAssets uint64 + var nAssetAcct uint64 + maxAssets = uint64(params.MaxAssetsPerAccount) + + if maxAssets > 0 { + nAssetAcct = assetCount / maxAssets + if assetCount%maxAssets != 0 { + nAssetAcct++ + } + } + + if nAppAcct > nAssetAcct { + return nAppAcct + } + return nAssetAcct +} + +func createSignedTx(src basics.Address, round basics.Round, params config.ConsensusParams, bootstrappedNet *netState) ([]transactions.SignedTxn, error) { + + if bootstrappedNet.nApplications == 0 && bootstrappedNet.nAccounts == 0 && bootstrappedNet.nAssets == 0 { + bootstrappedNet.accountsCreated = true + } + var sgtxns []transactions.SignedTxn + + header := transactions.Header{ + Fee: basics.MicroAlgos{Raw: 1}, + FirstValid: round, + LastValid: round, + GenesisID: bootstrappedNet.genesisID, + GenesisHash: bootstrappedNet.genesisHash, + } + + if bootstrappedNet.txnState == protocol.PaymentTx { + var accounts []basics.Address + bootstrappedNet.appsPerAcct = 0 + 
bootstrappedNet.assetPerAcct = 0 + n := bootstrappedNet.nAccounts + if n == 0 || n >= bootstrappedNet.roundTxnCnt { + n = bootstrappedNet.roundTxnCnt + } + + if !bootstrappedNet.accountsCreated { + for i := uint64(0); i < n; i++ { + secretDst := keypair() + dst := basics.Address(secretDst.SignatureVerifier) + accounts = append(accounts, dst) + + header.Sender = src + + tx := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: header, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: dst, + Amount: bootstrappedNet.fundPerAccount, + }, + } + t := transactions.SignedTxn{Txn: tx} + sgtxns = append(sgtxns, t) + } + bootstrappedNet.nAccounts -= uint64(len(sgtxns)) + bootstrappedNet.accounts = accounts + if bootstrappedNet.nAssets > 0 { + bootstrappedNet.txnState = protocol.AssetConfigTx + } else if bootstrappedNet.nApplications > 0 { + bootstrappedNet.txnState = protocol.ApplicationCallTx + } + } else { + //send payments to created accounts randomly + accti := rand.Intn(len(bootstrappedNet.accounts)) + for i := uint64(0); i < n; i++ { + header.Sender = src + tx := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: header, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: bootstrappedNet.accounts[accti], + Amount: basics.MicroAlgos{Raw: 0}, + }, + } + t := transactions.SignedTxn{Txn: tx} + sgtxns = append(sgtxns, t) + } + + } + + } else if bootstrappedNet.txnState == protocol.AssetConfigTx { + i := uint64(0) + for _, acct := range bootstrappedNet.accounts { + if i == bootstrappedNet.nAssets { + break + } + header.Sender = acct + assetParam := basics.AssetParams{ + Total: 100, + UnitName: "unit", + Manager: acct, + } + + assetConfigFields := transactions.AssetConfigTxnFields{ + AssetParams: assetParam, + } + + tx := transactions.Transaction{ + Type: protocol.AssetConfigTx, + Header: header, + AssetConfigTxnFields: assetConfigFields, + } + t := transactions.SignedTxn{Txn: tx} + sgtxns = append(sgtxns, t) + i++ + } + 
bootstrappedNet.assetPerAcct++ + bootstrappedNet.nAssets -= uint64(len(sgtxns)) + + if bootstrappedNet.nAssets == 0 || bootstrappedNet.assetPerAcct == params.MaxAssetsPerAccount { + if bootstrappedNet.nApplications > 0 { + bootstrappedNet.txnState = protocol.ApplicationCallTx + } else { + bootstrappedNet.txnState = protocol.PaymentTx + } + + } + } else if bootstrappedNet.txnState == protocol.ApplicationCallTx { + ops, err := logic.AssembleString(program) + if err != nil { + return []transactions.SignedTxn{}, err + } + approval := ops.Program + ops, err = logic.AssembleString("#pragma version 2 int 1") + if err != nil { + panic(err) + } + i := uint64(0) + for _, acct := range bootstrappedNet.accounts { + if i == bootstrappedNet.nApplications { + break + } + header.Sender = acct + appCallFields := transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.NoOpOC, + ApplicationID: 0, + ClearStateProgram: ops.Program, + ApprovalProgram: approval, + ApplicationArgs: [][]byte{ + []byte("check"), + []byte("bar"), + }, + } + tx := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: header, + + ApplicationCallTxnFields: appCallFields, + } + + t := transactions.SignedTxn{Txn: tx} + sgtxns = append(sgtxns, t) + i++ + } + + bootstrappedNet.nApplications -= uint64(len(sgtxns)) + bootstrappedNet.appsPerAcct++ + if bootstrappedNet.nApplications == 0 || bootstrappedNet.appsPerAcct == params.MaxAppsCreated { + bootstrappedNet.txnState = protocol.PaymentTx + } + } + return sgtxns, nil +} + type walletTargetData struct { path string partOnly bool diff --git a/netdeploy/remote/deployedNetwork_test.go b/netdeploy/remote/deployedNetwork_test.go new file mode 100644 index 0000000000..9237262198 --- /dev/null +++ b/netdeploy/remote/deployedNetwork_test.go @@ -0,0 +1,121 @@ +// Copyright (C) 2019-2021 Algorand, Inc. 
+// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . + +package remote + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/protocol" +) + +func TestCreateSignedTx(t *testing.T) { + var networkState netState + networkState.nApplications = 2 + networkState.nAssets = 2 + networkState.nAccounts = 10 + networkState.roundTxnCnt = 4 + networkState.txnState = protocol.PaymentTx + + params := config.Consensus[protocol.ConsensusCurrentVersion] + + secretDst := keypair() + src := basics.Address(secretDst.SignatureVerifier) + + // create accounts + sgtxns, _ := createSignedTx(src, basics.Round(1), params, &networkState) + require.Equal(t, 4, len(sgtxns)) + require.Equal(t, protocol.AssetConfigTx, networkState.txnState) + for _, sntx := range sgtxns { + require.Equal(t, protocol.PaymentTx, sntx.Txn.Type) + } + + initialAccounts := networkState.accounts + + // should be creating assets next + sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState) + accounts := networkState.accounts + require.Equal(t, 2, len(sgtxns)) + require.Equal(t, protocol.ApplicationCallTx, networkState.txnState) + require.Equal(t, uint64(0), networkState.nAssets) + // same accounts should be used + require.Equal(t, 
initialAccounts[0], accounts[0]) + for _, sntx := range sgtxns { + require.Equal(t, protocol.AssetConfigTx, sntx.Txn.Type) + } + + // should be creating applications next + sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState) + require.Equal(t, 2, len(sgtxns)) + require.Equal(t, protocol.PaymentTx, networkState.txnState) + require.Equal(t, uint64(0), networkState.nApplications) + require.Equal(t, initialAccounts[0], accounts[0]) + for _, sntx := range sgtxns { + require.Equal(t, protocol.ApplicationCallTx, sntx.Txn.Type) + } + + // create payment transactions for the remainder rounds + sgtxns, _ = createSignedTx(src, basics.Round(1), params, &networkState) + require.Equal(t, 4, len(sgtxns)) + require.Equal(t, protocol.PaymentTx, networkState.txnState) + //new accounts should be created + accounts = networkState.accounts + require.NotEqual(t, initialAccounts[0], accounts[0]) + for _, sntx := range sgtxns { + require.Equal(t, protocol.PaymentTx, sntx.Txn.Type) + } + + // assets per account should not exceed limit + networkState.txnState = protocol.PaymentTx + networkState.nAssets = 10 + networkState.nApplications = 10 + networkState.nAccounts = 1 + networkState.assetPerAcct = 0 + networkState.appsPerAcct = 0 + + params.MaxAssetsPerAccount = 5 + // create 1 account and try to create 6 assets for the account + createSignedTx(src, basics.Round(1), params, &networkState) + for i := 0; i < params.MaxAssetsPerAccount; i++ { + createSignedTx(src, basics.Round(1), params, &networkState) + } + require.Equal(t, params.MaxAssetsPerAccount, networkState.assetPerAcct) + // txn state has changed to the next one + require.Equal(t, protocol.ApplicationCallTx, networkState.txnState) + + params.MaxAppsCreated = 5 + networkState.appsPerAcct = 0 + // try to create 6 apps for the account + for i := 0; i < params.MaxAppsCreated; i++ { + createSignedTx(src, basics.Round(1), params, &networkState) + } + require.Equal(t, params.MaxAppsCreated, networkState.appsPerAcct) + 
// txn state has changed to the next one + require.Equal(t, protocol.PaymentTx, networkState.txnState) +} + +func TestAccountsNeeded(t *testing.T) { + params := config.Consensus[protocol.ConsensusCurrentVersion] + params.MaxAppsCreated = 10 + params.MaxAssetsPerAccount = 20 + nAccounts := accountsNeeded(uint64(100), uint64(400), params) + + require.Equal(t, uint64(20), nAccounts) +} diff --git a/test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json b/test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json new file mode 100644 index 0000000000..2070f9c93c --- /dev/null +++ b/test/testdata/deployednettemplates/networks/bootstrapped/badSpec.json @@ -0,0 +1,8 @@ +{ + "numRounds":65000, + "roundTransactionsCount": 1000, + "generatedAccountsCount": 7000000, + "generatedAssetsCount": 200000, + "generatedApplicationCount": 1000000, + "sourceWalletName": "wallet1", +} diff --git a/test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json b/test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json new file mode 100644 index 0000000000..4a2f0f3dc8 --- /dev/null +++ b/test/testdata/deployednettemplates/networks/bootstrapped/okSpec.json @@ -0,0 +1,8 @@ +{ + "numRounds":65000, + "roundTransactionsCount": 1000, + "generatedAccountsCount": 7000000, + "generatedAssetsCount": 200000, + "generatedApplicationCount": 1000000, + "sourceWalletName": "wallet1" +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile new file mode 100644 index 0000000000..ee48014b60 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/Makefile @@ -0,0 +1,16 @@ +PARAMS=-w 100 -R 8 -N 20 -n 100 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json +FILEPARAMS=--rounds 5000 --ntrx 1000 --naccounts 3000000 --nassets 20000 --napps 20000 --wallet-name "wallet1" + +all: 
net.json genesis.json boostrappedFile.json + +net.json: node.json nonPartNode.json ${GOPATH}/bin/netgoal + netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS} + +genesis.json: ${GOPATH}/bin/netgoal + netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS} + +boostrappedFile.json: ${GOPATH}/bin/netgoal + netgoal generate -t loadingFile -r /tmp/wat -o boostrappedFile.json ${FILEPARAMS} + +clean: + rm -f net.json genesis.json diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json new file mode 100644 index 0000000000..7e63535ffc --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/boostrappedFile.json @@ -0,0 +1,8 @@ +{ + "numRounds":65000, + "roundTransactionsCount": 1000, + "generatedAccountsCount": 7000000, + "generatedAssetsCount": 200000, + "generatedApplicationCount": 1000000, + "sourceWalletName": "wallet1" +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py new file mode 100644 index 0000000000..69e156293a --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/gen_topology.py @@ -0,0 +1,27 @@ +node_types = {"R":8, "N":20, "NPN":10} +node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"} +regions = [ + "AWS-US-EAST-2" +] + +f = open("topology.json", "w") +f.write("{ 'Hosts':\n [") + +region_count = len(regions) +first = True +for x in node_types: + node_type = x + node_count = node_types[x] + region_size = node_size[x] + for i in range(node_count): + node_name = node_type + str(i+1) + region = regions[i%region_count] + if (first ): + first = False + else: + f.write(",") + f.write ("\n {\n 'Name': '" + node_name + "',\n 'Template': '" + region + region_size + "'\n }" ) + +f.write("\n ]\n}\n") +f.close() + diff --git 
a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json new file mode 100644 index 0000000000..77a347e277 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/genesis.json @@ -0,0 +1,1013 @@ +{ + "NetworkName": "", + "VersionModifier": "", + "ConsensusProtocol": "future", + "FirstPartKeyRound": 0, + "LastPartKeyRound": 3000000, + "PartKeyDilution": 0, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet5", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet6", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet7", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet8", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet9", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet10", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet11", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet12", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet13", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet14", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet15", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet16", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet17", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet18", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet19", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet20", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet21", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet22", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet23", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet24", + 
"Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet25", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet26", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet27", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet28", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet29", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet30", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet31", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet32", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet33", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet34", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet35", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet36", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet37", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet38", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet39", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet40", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet41", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet42", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet43", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet44", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet45", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet46", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet47", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet48", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet49", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet50", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet51", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet52", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet53", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet54", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet55", + "Stake": 0.5, + 
"Online": true + }, + { + "Name": "Wallet56", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet57", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet58", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet59", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet60", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet61", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet62", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet63", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet64", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet65", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet66", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet67", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet68", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet69", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet70", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet71", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet72", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet73", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet74", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet75", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet76", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet77", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet78", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet79", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet80", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet81", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet82", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet83", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet84", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet85", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet86", + "Stake": 0.5, + "Online": true 
+ }, + { + "Name": "Wallet87", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet88", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet89", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet90", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet91", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet92", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet93", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet94", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet95", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet96", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet97", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet98", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet99", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet100", + "Stake": 0.5, + "Online": true + }, + { + "Name": "Wallet101", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet102", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet103", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet104", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet105", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet106", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet107", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet108", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet109", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet110", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet111", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet112", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet113", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet114", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet115", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet116", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet117", + "Stake": 
0.5, + "Online": false + }, + { + "Name": "Wallet118", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet119", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet120", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet121", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet122", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet123", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet124", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet125", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet126", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet127", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet128", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet129", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet130", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet131", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet132", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet133", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet134", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet135", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet136", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet137", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet138", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet139", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet140", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet141", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet142", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet143", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet144", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet145", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet146", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet147", + "Stake": 0.5, + "Online": 
false + }, + { + "Name": "Wallet148", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet149", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet150", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet151", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet152", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet153", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet154", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet155", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet156", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet157", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet158", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet159", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet160", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet161", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet162", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet163", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet164", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet165", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet166", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet167", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet168", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet169", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet170", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet171", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet172", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet173", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet174", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet175", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet176", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet177", + "Stake": 0.5, + "Online": false + }, + { + 
"Name": "Wallet178", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet179", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet180", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet181", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet182", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet183", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet184", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet185", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet186", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet187", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet188", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet189", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet190", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet191", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet192", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet193", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet194", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet195", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet196", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet197", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet198", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet199", + "Stake": 0.5, + "Online": false + }, + { + "Name": "Wallet200", + "Stake": 0.5, + "Online": false + } + ], + "FeeSink": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "Comment": "" +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json new file mode 100644 index 0000000000..b6d25d3804 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json @@ 
-0,0 +1,2564 @@ +{ + "Hosts": [ + { + "Name": "R1", + "Group": "", + "Nodes": [ + { + "Name": "relay1", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R2", + "Group": "", + "Nodes": [ + { + "Name": "relay2", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R3", + "Group": "", + "Nodes": [ + { + "Name": "relay3", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + 
"EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R4", + "Group": "", + "Nodes": [ + { + "Name": "relay4", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R5", + "Group": "", + "Nodes": [ + { + "Name": "relay5", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, 
\"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R6", + "Group": "", + "Nodes": [ + { + "Name": "relay6", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R7", + "Group": "", + "Nodes": [ + { + "Name": "relay7", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "R8", + "Group": "", + "Nodes": [ + { + "Name": "relay8", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + 
"EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + } + ] + }, + { + "Name": "N1", + "Group": "", + "Nodes": [ + { + "Name": "node1", + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node21", + "Wallets": [ + { + "Name": "Wallet2", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node41", + "Wallets": [ + { + "Name": "Wallet3", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + 
"EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node61", + "Wallets": [ + { + "Name": "Wallet4", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node81", + "Wallets": [ + { + "Name": "Wallet5", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N2", + "Group": "", + "Nodes": [ + { + "Name": "node2", + "Wallets": [ + { + "Name": "Wallet6", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node22", + "Wallets": [ + { + "Name": "Wallet7", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node42", + "Wallets": [ + { + "Name": "Wallet8", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node62", + "Wallets": [ + { + "Name": "Wallet9", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node82", + "Wallets": [ + { + "Name": "Wallet10", + 
"ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N3", + "Group": "", + "Nodes": [ + { + "Name": "node3", + "Wallets": [ + { + "Name": "Wallet11", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node23", + "Wallets": [ + { + "Name": "Wallet12", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node43", + "Wallets": [ + { + "Name": "Wallet13", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + 
"EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node63", + "Wallets": [ + { + "Name": "Wallet14", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node83", + "Wallets": [ + { + "Name": "Wallet15", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N4", + "Group": "", + "Nodes": [ + { + "Name": "node4", + "Wallets": [ + { + "Name": "Wallet16", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node24", + "Wallets": [ + { + "Name": "Wallet17", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node44", + "Wallets": [ + { + "Name": "Wallet18", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node64", + "Wallets": [ + { + "Name": "Wallet19", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node84", + "Wallets": [ + { + "Name": "Wallet20", + "ParticipationOnly": false + } + ], + 
"APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N5", + "Group": "", + "Nodes": [ + { + "Name": "node5", + "Wallets": [ + { + "Name": "Wallet21", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node25", + "Wallets": [ + { + "Name": "Wallet22", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node45", + "Wallets": [ + { + "Name": "Wallet23", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node65", + "Wallets": [ + { + "Name": "Wallet24", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node85", + "Wallets": [ + { + "Name": "Wallet25", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N6", + "Group": "", + "Nodes": [ + { + "Name": "node6", + "Wallets": [ + { + "Name": "Wallet26", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", 
\"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node26", + "Wallets": [ + { + "Name": "Wallet27", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node46", + "Wallets": [ + { + "Name": "Wallet28", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node66", + "Wallets": [ + { + "Name": "Wallet29", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node86", + "Wallets": [ + { + "Name": "Wallet30", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N7", + "Group": "", + "Nodes": [ + { + "Name": "node7", + "Wallets": [ + { + "Name": "Wallet31", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node27", + "Wallets": [ + { + "Name": "Wallet32", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node47", + "Wallets": [ + { + "Name": "Wallet33", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + 
"MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node67", + "Wallets": [ + { + "Name": "Wallet34", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node87", + "Wallets": [ + { + "Name": "Wallet35", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N8", + "Group": "", + "Nodes": [ + { + "Name": "node8", + "Wallets": [ + { + "Name": "Wallet36", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node28", + "Wallets": [ + { + "Name": "Wallet37", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node48", + "Wallets": [ + { + "Name": "Wallet38", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node68", + "Wallets": [ + { + "Name": "Wallet39", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node88", + 
"Wallets": [ + { + "Name": "Wallet40", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N9", + "Group": "", + "Nodes": [ + { + "Name": "node9", + "Wallets": [ + { + "Name": "Wallet41", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node29", + "Wallets": [ + { + "Name": "Wallet42", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node49", + "Wallets": [ + { + "Name": "Wallet43", + "ParticipationOnly": false + } + ], + "APIToken": 
"{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node69", + "Wallets": [ + { + "Name": "Wallet44", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node89", + "Wallets": [ + { + "Name": "Wallet45", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N10", + "Group": "", + "Nodes": [ + { + "Name": "node10", + "Wallets": [ + { + "Name": "Wallet46", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + 
"EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node30", + "Wallets": [ + { + "Name": "Wallet47", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node50", + "Wallets": [ + { + "Name": "Wallet48", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node70", + "Wallets": [ + { + "Name": "Wallet49", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, 
\"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node90", + "Wallets": [ + { + "Name": "Wallet50", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N11", + "Group": "", + "Nodes": [ + { + "Name": "node11", + "Wallets": [ + { + "Name": "Wallet51", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node31", + "Wallets": [ + { + "Name": "Wallet52", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node51", + "Wallets": [ + { + "Name": "Wallet53", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node71", + "Wallets": [ + { + "Name": "Wallet54", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node91", + "Wallets": [ + { + "Name": "Wallet55", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N12", + "Group": "", + "Nodes": [ + { + "Name": "node12", + "Wallets": [ + { + "Name": "Wallet56", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + 
"MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node32", + "Wallets": [ + { + "Name": "Wallet57", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node52", + "Wallets": [ + { + "Name": "Wallet58", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node72", + "Wallets": [ + { + "Name": "Wallet59", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node92", + "Wallets": [ + { + "Name": "Wallet60", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N13", + "Group": "", + "Nodes": [ + { + "Name": "node13", + "Wallets": [ + { + "Name": "Wallet61", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node33", + "Wallets": [ + { + "Name": "Wallet62", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": 
true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node53", + "Wallets": [ + { + "Name": "Wallet63", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node73", + "Wallets": [ + { + "Name": "Wallet64", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node93", + "Wallets": [ + { + "Name": "Wallet65", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N14", + "Group": "", + "Nodes": [ + { + "Name": "node14", + "Wallets": [ + { + "Name": "Wallet66", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node34", + "Wallets": [ + { + "Name": "Wallet67", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node54", + "Wallets": [ + { + "Name": "Wallet68", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node74", + "Wallets": [ + { + "Name": "Wallet69", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + 
"EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node94", + "Wallets": [ + { + "Name": "Wallet70", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N15", + "Group": "", + "Nodes": [ + { + "Name": "node15", + "Wallets": [ + { + "Name": "Wallet71", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node35", + "Wallets": [ + { + "Name": "Wallet72", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node55", + "Wallets": [ + { + "Name": "Wallet73", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node75", + "Wallets": [ + { + "Name": "Wallet74", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node95", + "Wallets": [ + { + "Name": "Wallet75", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N16", + "Group": "", + "Nodes": [ + { + "Name": "node16", + "Wallets": [ 
+ { + "Name": "Wallet76", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node36", + "Wallets": [ + { + "Name": "Wallet77", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node56", + "Wallets": [ + { + "Name": "Wallet78", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node76", + "Wallets": [ + { + "Name": 
"Wallet79", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node96", + "Wallets": [ + { + "Name": "Wallet80", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N17", + "Group": "", + "Nodes": [ + { + "Name": "node17", + "Wallets": [ + { + "Name": "Wallet81", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node37", + "Wallets": [ + { + "Name": "Wallet82", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + 
"EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node57", + "Wallets": [ + { + "Name": "Wallet83", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node77", + "Wallets": [ + { + "Name": "Wallet84", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node97", + "Wallets": [ + { + "Name": "Wallet85", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + 
{ + "Name": "N18", + "Group": "", + "Nodes": [ + { + "Name": "node18", + "Wallets": [ + { + "Name": "Wallet86", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node38", + "Wallets": [ + { + "Name": "Wallet87", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node58", + "Wallets": [ + { + "Name": "Wallet88", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node78", + "Wallets": [ + { + "Name": "Wallet89", + "ParticipationOnly": false + } + ], + 
"APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node98", + "Wallets": [ + { + "Name": "Wallet90", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N19", + "Group": "", + "Nodes": [ + { + "Name": "node19", + "Wallets": [ + { + "Name": "Wallet91", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node39", + "Wallets": [ + { + "Name": "Wallet92", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + 
"EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node59", + "Wallets": [ + { + "Name": "Wallet93", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node79", + "Wallets": [ + { + "Name": "Wallet94", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node99", + "Wallets": [ + { + "Name": "Wallet95", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, 
\"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "N20", + "Group": "", + "Nodes": [ + { + "Name": "node20", + "Wallets": [ + { + "Name": "Wallet96", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node40", + "Wallets": [ + { + "Name": "Wallet97", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node60", + "Wallets": [ + { + "Name": "Wallet98", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node80", + "Wallets": [ + { + "Name": "Wallet99", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + 
"EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + }, + { + "Name": "node100", + "Wallets": [ + { + "Name": "Wallet100", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN1", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode1", + "Wallets": [ + { + "Name": "Wallet101", + "ParticipationOnly": false + }, + { + "Name": "Wallet111", + "ParticipationOnly": false + }, + { + "Name": "Wallet121", + "ParticipationOnly": false + }, + { + "Name": "Wallet131", + "ParticipationOnly": false + }, + { + "Name": "Wallet141", + "ParticipationOnly": false + }, + { + "Name": "Wallet151", + "ParticipationOnly": false + }, + { + "Name": "Wallet161", + "ParticipationOnly": false + }, + { + "Name": "Wallet171", + "ParticipationOnly": false + }, + { + "Name": "Wallet181", + "ParticipationOnly": false + }, + { + "Name": "Wallet191", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": 
\".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN2", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode2", + "Wallets": [ + { + "Name": "Wallet102", + "ParticipationOnly": false + }, + { + "Name": "Wallet112", + "ParticipationOnly": false + }, + { + "Name": "Wallet122", + "ParticipationOnly": false + }, + { + "Name": "Wallet132", + "ParticipationOnly": false + }, + { + "Name": "Wallet142", + "ParticipationOnly": false + }, + { + "Name": "Wallet152", + "ParticipationOnly": false + }, + { + "Name": "Wallet162", + "ParticipationOnly": false + }, + { + "Name": "Wallet172", + "ParticipationOnly": false + }, + { + "Name": "Wallet182", + "ParticipationOnly": false + }, + { + "Name": "Wallet192", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN3", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode3", + "Wallets": [ + { + "Name": "Wallet103", + "ParticipationOnly": false + }, + { + "Name": "Wallet113", + "ParticipationOnly": false + }, + { + "Name": "Wallet123", + "ParticipationOnly": false + }, + { + "Name": "Wallet133", + "ParticipationOnly": false + }, + { + "Name": "Wallet143", + "ParticipationOnly": false + }, + { + "Name": "Wallet153", + "ParticipationOnly": false + }, + { + "Name": "Wallet163", + "ParticipationOnly": false + }, + { + "Name": "Wallet173", + "ParticipationOnly": false + }, + { + "Name": "Wallet183", + "ParticipationOnly": false + }, + { + "Name": "Wallet193", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": 
"{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN4", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode4", + "Wallets": [ + { + "Name": "Wallet104", + "ParticipationOnly": false + }, + { + "Name": "Wallet114", + "ParticipationOnly": false + }, + { + "Name": "Wallet124", + "ParticipationOnly": false + }, + { + "Name": "Wallet134", + "ParticipationOnly": false + }, + { + "Name": "Wallet144", + "ParticipationOnly": false + }, + { + "Name": "Wallet154", + "ParticipationOnly": false + }, + { + "Name": "Wallet164", + "ParticipationOnly": false + }, + { + "Name": "Wallet174", + "ParticipationOnly": false + }, + { + "Name": "Wallet184", + "ParticipationOnly": false + }, + { + "Name": "Wallet194", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN5", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode5", + "Wallets": [ + { + "Name": "Wallet105", + "ParticipationOnly": false + }, + { + "Name": "Wallet115", + "ParticipationOnly": false + }, + { + "Name": "Wallet125", + "ParticipationOnly": false + }, + { + "Name": "Wallet135", + "ParticipationOnly": false + }, + { + "Name": "Wallet145", + "ParticipationOnly": false + }, + { + "Name": "Wallet155", + "ParticipationOnly": false + }, + { + "Name": "Wallet165", + "ParticipationOnly": false + }, + { + "Name": "Wallet175", + 
"ParticipationOnly": false + }, + { + "Name": "Wallet185", + "ParticipationOnly": false + }, + { + "Name": "Wallet195", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN6", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode6", + "Wallets": [ + { + "Name": "Wallet106", + "ParticipationOnly": false + }, + { + "Name": "Wallet116", + "ParticipationOnly": false + }, + { + "Name": "Wallet126", + "ParticipationOnly": false + }, + { + "Name": "Wallet136", + "ParticipationOnly": false + }, + { + "Name": "Wallet146", + "ParticipationOnly": false + }, + { + "Name": "Wallet156", + "ParticipationOnly": false + }, + { + "Name": "Wallet166", + "ParticipationOnly": false + }, + { + "Name": "Wallet176", + "ParticipationOnly": false + }, + { + "Name": "Wallet186", + "ParticipationOnly": false + }, + { + "Name": "Wallet196", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN7", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode7", + "Wallets": [ + { + "Name": "Wallet107", + "ParticipationOnly": false + }, + { + "Name": "Wallet117", + "ParticipationOnly": false + }, + { + "Name": "Wallet127", + "ParticipationOnly": false + }, + { + "Name": "Wallet137", + "ParticipationOnly": false + }, + { + "Name": 
"Wallet147", + "ParticipationOnly": false + }, + { + "Name": "Wallet157", + "ParticipationOnly": false + }, + { + "Name": "Wallet167", + "ParticipationOnly": false + }, + { + "Name": "Wallet177", + "ParticipationOnly": false + }, + { + "Name": "Wallet187", + "ParticipationOnly": false + }, + { + "Name": "Wallet197", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN8", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode8", + "Wallets": [ + { + "Name": "Wallet108", + "ParticipationOnly": false + }, + { + "Name": "Wallet118", + "ParticipationOnly": false + }, + { + "Name": "Wallet128", + "ParticipationOnly": false + }, + { + "Name": "Wallet138", + "ParticipationOnly": false + }, + { + "Name": "Wallet148", + "ParticipationOnly": false + }, + { + "Name": "Wallet158", + "ParticipationOnly": false + }, + { + "Name": "Wallet168", + "ParticipationOnly": false + }, + { + "Name": "Wallet178", + "ParticipationOnly": false + }, + { + "Name": "Wallet188", + "ParticipationOnly": false + }, + { + "Name": "Wallet198", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN9", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode9", + "Wallets": [ + { + "Name": "Wallet109", + "ParticipationOnly": false + 
}, + { + "Name": "Wallet119", + "ParticipationOnly": false + }, + { + "Name": "Wallet129", + "ParticipationOnly": false + }, + { + "Name": "Wallet139", + "ParticipationOnly": false + }, + { + "Name": "Wallet149", + "ParticipationOnly": false + }, + { + "Name": "Wallet159", + "ParticipationOnly": false + }, + { + "Name": "Wallet169", + "ParticipationOnly": false + }, + { + "Name": "Wallet179", + "ParticipationOnly": false + }, + { + "Name": "Wallet189", + "ParticipationOnly": false + }, + { + "Name": "Wallet199", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN10", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode10", + "Wallets": [ + { + "Name": "Wallet110", + "ParticipationOnly": false + }, + { + "Name": "Wallet120", + "ParticipationOnly": false + }, + { + "Name": "Wallet130", + "ParticipationOnly": false + }, + { + "Name": "Wallet140", + "ParticipationOnly": false + }, + { + "Name": "Wallet150", + "ParticipationOnly": false + }, + { + "Name": "Wallet160", + "ParticipationOnly": false + }, + { + "Name": "Wallet170", + "ParticipationOnly": false + }, + { + "Name": "Wallet180", + "ParticipationOnly": false + }, + { + "Name": "Wallet190", + "ParticipationOnly": false + }, + { + "Name": "Wallet200", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, 
\"CadaverSizeTarget\": 0 }" + } + ] + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json new file mode 100644 index 0000000000..0788913ab9 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json @@ -0,0 +1,22 @@ +{ + "APIToken": "{{APIToken}}", + "EnableBlockStats": false, + "EnableTelemetry": false, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": false, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "AltConfigs": [ + { + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "FractionApply": 0.2 + } + ] +} + diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json new file mode 100644 index 0000000000..8ab3b8bddf --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/nonPartNode.json @@ -0,0 +1,5 @@ +{ + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json new file mode 100644 index 0000000000..766328dbb3 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/recipe.json @@ -0,0 +1,8 @@ +{ + "GenesisFile":"genesis.json", + "NetworkFile":"net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "topology.json", + "BootstrappedFile": "boostrappedFile.json" +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json new file mode 100644 index 0000000000..25bb6b5a26 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json @@ -0,0 +1,11 @@ +{ + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" +} diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json new file mode 100644 index 0000000000..8e9c8e7cd2 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/topology.json @@ -0,0 +1,156 @@ 
+{ "Hosts": + [ + { + "Name": "R1", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R2", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R3", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R4", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R5", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R6", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R7", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "R8", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN1", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN2", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN3", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN4", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN5", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN6", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN7", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN8", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN9", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN10", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N1", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N2", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N3", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N4", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N5", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N6", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N7", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N8", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N9", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N10", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N11", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N12", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N13", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + 
"Name": "N14", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N15", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N16", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N17", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N18", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N19", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N20", + "Template": "AWS-US-EAST-2-m5d.4xl" + } + ] +} From d585712d583fffccbdbe79161c008737a9a23ae0 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 15 Apr 2021 17:16:19 -0400 Subject: [PATCH 176/215] goal: refactor waitForCommit (#2076) efactor waitForCommit to return the output of PendingTransactionInformation. This PR should eliminate a redundant call to PendingTransactionInformation. --- cmd/goal/account.go | 5 +++-- cmd/goal/application.go | 49 ++++++----------------------------------- cmd/goal/asset.go | 15 +++++-------- cmd/goal/clerk.go | 21 +++++++++--------- cmd/goal/interact.go | 7 +----- 5 files changed, 27 insertions(+), 70 deletions(-) diff --git a/cmd/goal/account.go b/cmd/goal/account.go index a8bf74a2d5..e508a71cec 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -810,7 +810,8 @@ func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnl return nil } - return waitForCommit(client, txid, lastTxRound) + _, err = waitForCommit(client, txid, lastTxRound) + return err } var addParticipationKeyCmd = &cobra.Command{ @@ -1323,7 +1324,7 @@ var markNonparticipatingCmd = &cobra.Command{ return } - err = waitForCommit(client, txid, lastTxRound) + _, err = waitForCommit(client, txid, lastTxRound) if err != nil { reportErrorf("error waiting for transaction to be committed: %v", err) } diff --git a/cmd/goal/application.go b/cmd/goal/application.go index f32d554f11..fd467a6e02 100644 --- a/cmd/goal/application.go +++ b/cmd/goal/application.go @@ -416,15 +416,10 @@ var createAppCmd = &cobra.Command{ reportInfof("Issued transaction from 
account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + txn, err := waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - txn, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } if txn.TransactionResults != nil && txn.TransactionResults.CreatedAppIndex != 0 { reportInfof("Created app with app index %d", txn.TransactionResults.CreatedAppIndex) } @@ -499,15 +494,10 @@ var updateAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + _, err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - _, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } } } else { if dumpForDryrun { @@ -577,15 +567,10 @@ var optInAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + _, err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - _, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } } } else { if dumpForDryrun { @@ -655,15 +640,10 @@ var closeOutAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + _, err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - _, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } } } else { if dumpForDryrun { @@ -733,15 
+713,10 @@ var clearAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + _, err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - _, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } } } else { if dumpForDryrun { @@ -811,15 +786,10 @@ var callAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + _, err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - _, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } } } else { if dumpForDryrun { @@ -889,15 +859,10 @@ var deleteAppCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + _, err = waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - _, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } } } else { if dumpForDryrun { diff --git a/cmd/goal/asset.go b/cmd/goal/asset.go index 11aaecccd9..047949c0be 100644 --- a/cmd/goal/asset.go +++ b/cmd/goal/asset.go @@ -231,12 +231,7 @@ var createAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) - if err != nil { - reportErrorf(err.Error()) - } - // Check if we know about the transaction yet - txn, err := client.PendingTransactionInformation(txid) + txn, err := waitForCommit(client, txid, lv) if err 
!= nil { reportErrorf(err.Error()) } @@ -311,7 +306,7 @@ var destroyAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lastValid) + _, err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } @@ -400,7 +395,7 @@ var configAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lastValid) + _, err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } @@ -481,7 +476,7 @@ var sendAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lastValid) + _, err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } @@ -546,7 +541,7 @@ var freezeAssetCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lastValid) + _, err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index ef9f1e4795..0f9a32a972 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -29,6 +29,7 @@ import ( "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" generatedV2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated" + v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" @@ -151,18 +152,18 @@ var clerkCmd = &cobra.Command{ }, } -func waitForCommit(client libgoal.Client, txid string, 
transactionLastValidRound uint64) error { +func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound uint64) (txn v1.Transaction, err error) { // Get current round information stat, err := client.Status() if err != nil { - return fmt.Errorf(errorRequestFail, err) + return v1.Transaction{}, fmt.Errorf(errorRequestFail, err) } for { // Check if we know about the transaction yet - txn, err := client.PendingTransactionInformation(txid) + txn, err = client.PendingTransactionInformation(txid) if err != nil { - return fmt.Errorf(errorRequestFail, err) + return v1.Transaction{}, fmt.Errorf(errorRequestFail, err) } if txn.ConfirmedRound > 0 { @@ -171,25 +172,25 @@ func waitForCommit(client libgoal.Client, txid string, transactionLastValidRound } if txn.PoolError != "" { - return fmt.Errorf(txPoolError, txid, txn.PoolError) + return v1.Transaction{}, fmt.Errorf(txPoolError, txid, txn.PoolError) } // check if we've already committed to the block number equals to the transaction's last valid round. // if this is the case, the transaction would not be included in the blockchain, and we can exit right // here. 
- if stat.LastRound >= transactionLastValidRound { - return fmt.Errorf(errorTransactionExpired, txid) + if transactionLastValidRound > 0 && stat.LastRound >= transactionLastValidRound { + return v1.Transaction{}, fmt.Errorf(errorTransactionExpired, txid) } reportInfof(infoTxPending, txid, stat.LastRound) // WaitForRound waits until round "stat.LastRound+1" is committed stat, err = client.WaitForRound(stat.LastRound) if err != nil { - return fmt.Errorf(errorRequestFail, err) + return v1.Transaction{}, fmt.Errorf(errorRequestFail, err) } } - return nil + return } func createSignedTransaction(client libgoal.Client, signTx bool, dataDir string, walletName string, tx transactions.Transaction) (stxn transactions.SignedTxn, err error) { @@ -468,7 +469,7 @@ var sendCmd = &cobra.Command{ reportInfof(infoTxIssued, amount, fromAddressResolved, toAddressResolved, txid, fee) if !noWaitAfterSend { - err = waitForCommit(client, txid, lastValid) + _, err = waitForCommit(client, txid, lastValid) if err != nil { reportErrorf(err.Error()) } diff --git a/cmd/goal/interact.go b/cmd/goal/interact.go index 9ee1ae96a4..7ed9be4072 100644 --- a/cmd/goal/interact.go +++ b/cmd/goal/interact.go @@ -622,15 +622,10 @@ var appExecuteCmd = &cobra.Command{ reportInfof("Issued transaction from account %s, txid %s (fee %d)", tx.Sender, txid, tx.Fee.Raw) if !noWaitAfterSend { - err = waitForCommit(client, txid, lv) + txn, err := waitForCommit(client, txid, lv) if err != nil { reportErrorf(err.Error()) } - // Check if we know about the transaction yet - txn, err := client.PendingTransactionInformation(txid) - if err != nil { - reportErrorf("%v", err) - } if txn.TransactionResults != nil && txn.TransactionResults.CreatedAppIndex != 0 { reportInfof("Created app with app index %d", txn.TransactionResults.CreatedAppIndex) } From 75c12fdffa849ee852b899b949b4009e34d974d5 Mon Sep 17 00:00:00 2001 From: algonautshant <55754073+algonautshant@users.noreply.github.com> Date: Thu, 15 Apr 2021 21:00:15 -0400 
Subject: [PATCH 177/215] goal: avoid "node successfully started" on a previously started node (#2075) When starting a node which is already started, two messages are show: already started and successfully started: $ goal node start Algorand node was already started! Algorand node successfully started! Only the already started message should be shown. Also, when node restart command is given, there is a check in the code if the node was already running. This check comes after the node was stopped, so should never return as the node is still running. But in case it does, a message is added to notify the user. --- cmd/goal/messages.go | 1 + cmd/goal/node.go | 21 ++++++++++---------- test/e2e-go/cli/goal/expect/goalNodeTest.exp | 17 ++++++++++++++++ 3 files changed, 29 insertions(+), 10 deletions(-) diff --git a/cmd/goal/messages.go b/cmd/goal/messages.go index 77b8a1cfc8..edd6ea74cb 100644 --- a/cmd/goal/messages.go +++ b/cmd/goal/messages.go @@ -58,6 +58,7 @@ const ( // Node infoNodeStart = "Algorand node successfully started!" infoNodeAlreadyStarted = "Algorand node was already started!" + infoNodeDidNotRestart = "Algorand node did not restart. The node is still running!" infoTryingToStopNode = "Trying to stop the node..." infoNodeShuttingDown = "Algorand node is shutting down..." infoNodeSuccessfullyStopped = "The node was successfully stopped." 
diff --git a/cmd/goal/node.go b/cmd/goal/node.go index 322d4824d5..dd42e23819 100644 --- a/cmd/goal/node.go +++ b/cmd/goal/node.go @@ -182,14 +182,14 @@ var startCmd = &cobra.Command{ } algodAlreadyRunning, err := nc.StartAlgod(nodeArgs) - if algodAlreadyRunning { - reportInfoln(infoNodeAlreadyStarted) - } - if err != nil { reportErrorf(errorNodeFailedToStart, err) } else { - reportInfoln(infoNodeStart) + if algodAlreadyRunning { + reportInfoln(infoNodeAlreadyStarted) + } else { + reportInfoln(infoNodeStart) + } } }) }, @@ -304,14 +304,15 @@ var restartCmd = &cobra.Command{ } algodAlreadyRunning, err := nc.StartAlgod(nodeArgs) - if algodAlreadyRunning { - reportInfoln(infoNodeAlreadyStarted) - } - if err != nil { reportErrorf(errorNodeFailedToStart, err) } else { - reportInfoln(infoNodeStart) + if algodAlreadyRunning { + // This can never happen. In case it does, report about it. + reportInfoln(infoNodeDidNotRestart) + } else { + reportInfoln(infoNodeStart) + } } }) }, diff --git a/test/e2e-go/cli/goal/expect/goalNodeTest.exp b/test/e2e-go/cli/goal/expect/goalNodeTest.exp index 6a2f944061..9bfe6b48ec 100644 --- a/test/e2e-go/cli/goal/expect/goalNodeTest.exp +++ b/test/e2e-go/cli/goal/expect/goalNodeTest.exp @@ -30,6 +30,23 @@ if { [catch { # Start node ::AlgorandGoal::StartNode $TEST_PRIMARY_NODE_DIR + # Try starting the node again, should just report the node is already running + set ALREADY_STARTED_MESSAGE_RECEIVED 0 + spawn goal node start -d $TEST_PRIMARY_NODE_DIR + expect { + timeout { close; ::AlgorandGoal::Abort "goal node start unexpectedly failed" } + "^Algorand node was already started!" 
{ + set ALREADY_STARTED_MESSAGE_RECEIVED 1 + exp_continue + } + -re "\\S+" { close; ::AlgorandGoal::Abort "Unexpected message for goal node start on a running node" } + eof { + if {$ALREADY_STARTED_MESSAGE_RECEIVED == 0} { + { close; ::AlgorandGoal::Abort "eof recieved before the expected message "} + } + } + } + # Restart node ::AlgorandGoal::RestartNode $TEST_PRIMARY_NODE_DIR From bd87c0782de7e78ab05d874420a0489276286bd4 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 16 Apr 2021 09:26:11 -0400 Subject: [PATCH 178/215] catchup: cross verify genesis.json hash (#2077) When using the fast catchup, the block authenticity is verified against the catchpoint label. That method bypass the local genesis.json authentication since it's not required to establish secure catchup. From that point and on, the caught-up node would be able to proceed as usual. However, when the node is restarted, it compares the on-disk genesis.json to the first 320 rounds being loaded into memory. Failing with that comparison, the node would refuse to start up. The solution here is to prevent the node from completing the fast catchup before testing that the genesis.json's hash matches the one on the first downloaded block. From there, it would be guaranteed to be identical due to the reverse hash check. --- catchup/catchpointService.go | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go index 6969c35f3d..c6bdf8ac0b 100644 --- a/catchup/catchpointService.go +++ b/catchup/catchpointService.go @@ -339,6 +339,8 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro if ledgerBlock, err := cs.ledger.Block(blockRound); err == nil { blk = &ledgerBlock } + var protoParams config.ConsensusParams + var ok bool for { attemptsCount++ @@ -356,7 +358,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro } // check block protocol version support. 
- if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok { + if protoParams, ok = config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok { cs.log.Warnf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol) if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts { @@ -368,6 +370,18 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro return cs.abort(fmt.Errorf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)) } + // We need to compare explicitly the genesis hash since we're not doing any block validation. This would ensure the genesis.json file matches the block that we've receieved. + if protoParams.SupportGenesisHash && blk.GenesisHash() != cs.ledger.GenesisHash() { + cs.log.Warnf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash()) + if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts { + // try again. 
+ blk = nil + cs.blocksDownloadPeerSelector.RankPeer(peer, peerRankInvalidDownload) + continue + } + return cs.abort(fmt.Errorf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash())) + } + // check to see that the block header and the block payset aligns if !blk.ContentsMatchHeader() { cs.log.Warnf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header") From 498b28b0a0fc65a4ceffe489b5526165bf13af5c Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 16 Apr 2021 12:12:39 -0400 Subject: [PATCH 179/215] rollback https://github.com/algorand/go-algorand/pull/1756 (#2078) Rollback https://github.com/algorand/go-algorand/pull/1756, as it was determined to cause proposal propagation issues. This rollback would take the original work back onto the research stage, and would allow us to provide better testing before re-applying it. 
--- agreement/player.go | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/agreement/player.go b/agreement/player.go index 445eaff78d..85c0c18058 100644 --- a/agreement/player.go +++ b/agreement/player.go @@ -555,29 +555,21 @@ func (p *player) handleMessageEvent(r routerHandle, e messageEvent) (actions []a case payloadPipelined: ep := ef.(payloadProcessedEvent) if ep.Round == p.Round { - up := e.Input.UnauthenticatedProposal - uv := ef.(payloadProcessedEvent).Vote.u() - - a := relayAction(e, protocol.ProposalPayloadTag, compoundMessage{Proposal: up, Vote: uv}) - actions = append(actions, a) return append(actions, verifyPayloadAction(e, ep.Round, ep.Period, ep.Pinned)) } } - // relay as the proposer - if e.Input.MessageHandle == nil { - var uv unauthenticatedVote - switch ef.t() { - case payloadPipelined, payloadAccepted: - uv = ef.(payloadProcessedEvent).Vote.u() - case proposalCommittable: - uv = ef.(committableEvent).Vote.u() - } - up := e.Input.UnauthenticatedProposal - - a := relayAction(e, protocol.ProposalPayloadTag, compoundMessage{Proposal: up, Vote: uv}) - actions = append(actions, a) + var uv unauthenticatedVote + switch ef.t() { + case payloadPipelined, payloadAccepted: + uv = ef.(payloadProcessedEvent).Vote.u() + case proposalCommittable: + uv = ef.(committableEvent).Vote.u() } + up := e.Input.UnauthenticatedProposal + + a := relayAction(e, protocol.ProposalPayloadTag, compoundMessage{Proposal: up, Vote: uv}) + actions = append(actions, a) // If the payload is valid, check it against any received cert threshold. // Of course, this should only trigger for payloadVerified case. 
From 75a15c737ba1973dddf0cf191591c55d48180b59 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Fri, 16 Apr 2021 22:28:00 -0400 Subject: [PATCH 180/215] Unit test demonstrating high memory consumption (#2040) * go test -v ./ledger -run TestLedgerMemoryLeak * The test creates 980 NFTs per acct during 10000 rounds, creating new accounts when needed * Every 1000 full asset blocks it saves memory profile --- ledger/ledger_test.go | 147 +++++++++++++++++++++++++++++++++++++++++- 1 file changed, 146 insertions(+), 1 deletion(-) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index 54b2f0a729..bb1a94268a 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -19,6 +19,9 @@ package ledger import ( "context" "fmt" + "math/rand" + "os" + "runtime/pprof" "testing" "github.com/stretchr/testify/require" @@ -84,7 +87,7 @@ func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (gene for i := range genaddrs { initKeys[genaddrs[i]] = gensecrets[i] // Give each account quite a bit more balance than MinFee or MinBalance - initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + 100) * 100000)}) + initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + 10000000000) * 100000)}) } initKeys[poolAddr] = poolSecret initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567}) @@ -221,6 +224,25 @@ func (l *Ledger) appendUnvalidatedSignedTx(t *testing.T, initAccounts map[basics return l.appendUnvalidated(blk) } +func (l *Ledger) addBlockTxns(t *testing.T, accounts map[basics.Address]basics.AccountData, stxns []transactions.SignedTxn, ad transactions.ApplyData) error { + blk := makeNewEmptyBlock(t, l, t.Name(), accounts) + proto := config.Consensus[blk.CurrentProtocol] + for _, stx := range stxns { + txib, err := blk.EncodeSignedTxn(stx, ad) + if err != nil { + return 
fmt.Errorf("could not sign txn: %s", err.Error()) + } + if proto.TxnCounter { + blk.TxnCounter = blk.TxnCounter + 1 + } + blk.Payset = append(blk.Payset, txib) + } + var err error + blk.TxnRoot, err = blk.PaysetCommit() + require.NoError(t, err) + return l.AddBlock(blk, agreement.Certificate{}) +} + func TestLedgerBasic(t *testing.T) { genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) const inMem = true @@ -1554,3 +1576,126 @@ func TestListAssetsAndApplications(t *testing.T) { } require.Equal(t, appCount, len(results)) } + +func TestLedgerMemoryLeak(t *testing.T) { + t.Skip() // for manual runs only + dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + const inMem = false + cfg := config.GetDefaultLocal() + cfg.Archival = true + log := logging.TestingLog(t) + l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg) + require.NoError(t, err) + defer l.Close() + + maxBlocks := 10000 + nftPerAcct := make(map[basics.Address]int) + lastBlock, err := l.Block(l.Latest()) + proto := config.Consensus[lastBlock.CurrentProtocol] + accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts)+maxBlocks) + keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys)+maxBlocks) + // regular addresses: all init accounts minus pools + addresses := make([]basics.Address, len(genesisInitState.Accounts)-2, len(genesisInitState.Accounts)+maxBlocks) + i := 0 + for addr := range genesisInitState.Accounts { + if addr != testPoolAddr && addr != testSinkAddr { + addresses[i] = addr + i++ + } + accounts[addr] = genesisInitState.Accounts[addr] + keys[addr] = initKeys[addr] + } + + curAddressIdx := 0 + // run for maxBlocks rounds + // generate 1000 txn per block + for i := 0; i < maxBlocks; i++ { + stxns := make([]transactions.SignedTxn, 1000) + for j := 0; j < 1000; j++ { + txHeader := transactions.Header{ + Sender: 
addresses[curAddressIdx], + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: t.Name(), + GenesisHash: crypto.Hash([]byte(t.Name())), + } + + assetCreateFields := transactions.AssetConfigTxnFields{ + AssetParams: basics.AssetParams{ + Total: 10000000, + UnitName: fmt.Sprintf("unit_%d_%d", i, j), + AssetName: fmt.Sprintf("asset_%d_%d", i, j), + }, + } + + tx := transactions.Transaction{ + Type: protocol.AssetConfigTx, + Header: txHeader, + AssetConfigTxnFields: assetCreateFields, + } + stxns[j] = sign(initKeys, tx) + nftPerAcct[addresses[curAddressIdx]]++ + + if nftPerAcct[addresses[curAddressIdx]] >= 990 { + // switch to another account + if curAddressIdx == len(addresses)-1 { + // create new account + var seed crypto.Seed + seed[1] = byte(curAddressIdx % 256) + seed[2] = byte((curAddressIdx >> 8) % 256) + seed[3] = byte((curAddressIdx >> 16) % 256) + seed[4] = byte((curAddressIdx >> 24) % 256) + x := crypto.GenerateSignatureSecrets(seed) + addr := basics.Address(x.SignatureVerifier) + sender := addresses[rand.Intn(len(genesisInitState.Accounts)-2)] // one of init accounts + correctTxHeader := transactions.Header{ + Sender: sender, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: t.Name(), + GenesisHash: genesisInitState.GenesisHash, + } + + correctPayFields := transactions.PaymentTxnFields{ + Receiver: addr, + Amount: basics.MicroAlgos{Raw: 1000 * 1000000}, + } + + correctPay := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: correctTxHeader, + PaymentTxnFields: correctPayFields, + } + + err = l.appendUnvalidatedTx(t, accounts, keys, correctPay, transactions.ApplyData{}) + require.NoError(t, err) + ad, err := l.Lookup(l.Latest(), addr) + require.NoError(t, err) + + addresses = append(addresses, addr) + keys[addr] = x + accounts[addr] = ad + } + curAddressIdx++ + } + } + err = l.addBlockTxns(t, 
genesisInitState.Accounts, stxns, transactions.ApplyData{}) + require.NoError(t, err) + if i%100 == 0 { + l.WaitForCommit(l.Latest()) + fmt.Printf("block: %d\n", l.Latest()) + } + if i%1000 == 0 && i > 0 { + memprofile := fmt.Sprintf("%s-memprof-%d", t.Name(), i) + f, err := os.Create(memprofile) + require.NoError(t, err) + err = pprof.WriteHeapProfile(f) + require.NoError(t, err) + f.Close() + fmt.Printf("Profile %s created\n", memprofile) + } + } +} From 18d438785c12bd43731bff6f9fd35b44c8acbc48 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 19 Apr 2021 23:52:16 -0400 Subject: [PATCH 181/215] Print out intermediate decoding errors (#2084) Better errors on txn and dryrun objects decoding. --- cmd/tealdbg/dryrunRequest.go | 10 ++++++++-- cmd/tealdbg/local.go | 34 +++++++++++++++++++++++++++------- 2 files changed, 35 insertions(+), 9 deletions(-) diff --git a/cmd/tealdbg/dryrunRequest.go b/cmd/tealdbg/dryrunRequest.go index 760572528a..2963372fb4 100644 --- a/cmd/tealdbg/dryrunRequest.go +++ b/cmd/tealdbg/dryrunRequest.go @@ -17,6 +17,8 @@ package main import ( + "log" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/protocol" @@ -31,11 +33,15 @@ func ddrFromParams(dp *DebugParams) (ddr v2.DryrunRequest, err error) { } var gdr generatedV2.DryrunRequest - err = protocol.DecodeJSON(dp.DdrBlob, &gdr) - if err == nil { + err1 := protocol.DecodeJSON(dp.DdrBlob, &gdr) + if err1 == nil { ddr, err = v2.DryrunRequestFromGenerated(&gdr) } else { err = protocol.DecodeReflect(dp.DdrBlob, &ddr) + // if failed report intermediate decoding error + if err != nil { + log.Printf("Decoding as JSON DryrunRequest object failed: %s", err1.Error()) + } } return diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go index b47031d871..7dba943332 100644 --- a/cmd/tealdbg/local.go +++ b/cmd/tealdbg/local.go @@ -60,14 +60,14 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup 
[]transactions.SignedTxn, err // 1. Attempt json - a single transaction var txn transactions.SignedTxn - err = protocol.DecodeJSON(data, &txn) + err1 := protocol.DecodeJSON(data, &txn) if err == nil { txnGroup = append(txnGroup, txn) return } // 2. Attempt json - array of transactions - err = protocol.DecodeJSON(data, &txnGroup) + err2 := protocol.DecodeJSON(data, &txnGroup) if err == nil { return } @@ -87,6 +87,16 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err txnGroup = append(txnGroup, txn) } + // if conversion failed report all intermediate decoding errors + if err != nil { + if err1 != nil { + log.Printf("Decoding as JSON txn failed: %s", err1.Error()) + } + if err2 != nil { + log.Printf("Decoding as JSON txn group failed: %s", err2.Error()) + } + } + return } @@ -101,19 +111,19 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord, // 1. Attempt json - a single record var record basics.BalanceRecord - err = protocol.DecodeJSON(data, &record) - if err == nil { + err1 := protocol.DecodeJSON(data, &record) + if err1 == nil { records = append(records, record) return } // 2. Attempt json - a array of records - err = protocol.DecodeJSON(data, &records) - if err == nil { + err2 := protocol.DecodeJSON(data, &records) + if err2 == nil { return } - // 2. Attempt msgp - a array of records + // 3. 
Attempt msgp - a array of records dec := protocol.NewDecoderBytes(data) for { var record basics.BalanceRecord @@ -128,6 +138,16 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord, records = append(records, record) } + // if conversion failed report all intermediate decoding errors + if err != nil { + if err1 != nil { + log.Printf("Decoding as JSON record failed: %s", err1.Error()) + } + if err2 != nil { + log.Printf("Decoding as JSON array of records failed: %s", err2.Error()) + } + } + return } From ce5a0a1b0e26907d39ca9c4ea049ccb14bf467cd Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 22 Apr 2021 14:12:43 -0400 Subject: [PATCH 182/215] reduce verbosity of logged messages (#2083) This PR reduces the severity of two benign logged errors/warnings: 1. On the catchup service, the following warning was extremely common: ``` fetchAndWrite: block retrieval exceeded retry limit ``` This PR moves this to be a info messages rather than a warning message ( in most cases ), and extend it to include the round number. 2. On the AlgorandFullNode struct, we used to log a message like this: ``` AlgorandFullNode.loadParticipationKeys: cannot load db XXX.part: database is locked ``` This was caused by concurrent database access to the same part key database. This is a benign error : if we already updating the file, then we don't need to re-evaluate it's content. --- catchup/service.go | 17 ++++++++++++++++- node/node.go | 7 +++++++ util/db/dbutil.go | 6 ++++++ 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/catchup/service.go b/catchup/service.go index 5e364dd17d..bf0c2d5234 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -183,7 +183,22 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, // Stop retrying after a while. 
if i > catchupRetryLimit { - s.log.Errorf("fetchAndWrite: block retrieval exceeded retry limit") + loggedMessage := fmt.Sprintf("fetchAndWrite(%d): block retrieval exceeded retry limit", r) + if _, initialSync := s.IsSynchronizing(); initialSync { + // on the initial sync, it's completly expected that we won't be able to get all the "next" blocks. + // Therefore info should suffice. + s.log.Info(loggedMessage) + } else { + // On any subsequent sync, we migth be looking for multiple rounds into the future, so it's completly + // reasonable that we would fail retrieving the future block. + // Generate a warning here only if we're failing to retrieve X+1 or below. + // All other block retrievals should not generate a warning. + if r > s.ledger.NextRound() { + s.log.Info(loggedMessage) + } else { + s.log.Warn(loggedMessage) + } + } return false } diff --git a/node/node.go b/node/node.go index 760ea6b509..a47f55c420 100644 --- a/node/node.go +++ b/node/node.go @@ -734,6 +734,13 @@ func (node *AlgorandFullNode) loadParticipationKeys() error { // Fetch a handle to this database handle, err := node.getExistingPartHandle(filename) if err != nil { + if db.IsErrBusy(err) { + // this is a special case: + // we might get "database is locked" when we attempt to access a database that is conurrently updates it's participation keys. + // that database is clearly already on the account manager, and doesn't need to be processed through this logic, and therefore + // we can safely ignore that fail case. 
+ continue + } return fmt.Errorf("AlgorandFullNode.loadParticipationKeys: cannot load db %v: %v", filename, err) } diff --git a/util/db/dbutil.go b/util/db/dbutil.go index cb7a92d7fc..76545ccedf 100644 --- a/util/db/dbutil.go +++ b/util/db/dbutil.go @@ -438,6 +438,12 @@ func dbretry(obj error) bool { return ok && (err.Code == sqlite3.ErrLocked || err.Code == sqlite3.ErrBusy) } +// IsErrBusy examine the input inerr varaible of type error and determine if it's a sqlite3 error for the ErrBusy error code. +func IsErrBusy(inerr error) bool { + err, ok := inerr.(sqlite3.Error) + return ok && (err.Code == sqlite3.ErrBusy) +} + type idemFn func(ctx context.Context, tx *sql.Tx) error const infoTxRetries = 5 From 60312fa558f36089ed0c90520eca0f49801f69ab Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 22 Apr 2021 17:00:16 -0400 Subject: [PATCH 183/215] catchup: add deep block validation support (#2088) Add a config key `CatchupBlockValidateMode` that would control which types of validation the catchup would perform prior to accept a block. The default validation remains the same as before. However, it allows the end-user to remove/add certain validation "toggles". This enable us to add a deep validation support, that perform a complete validation of a block. 
--- catchup/pref_test.go | 2 +- catchup/service.go | 79 +++++++++++++++--------- catchup/service_test.go | 26 +++++--- config/config.go | 43 ++++++++++++- config/local_defaults.go | 3 +- installer/config.json.example | 3 +- node/node.go | 2 +- test/testdata/configs/config-v17.json | 88 +++++++++++++++++++++++++++ 8 files changed, 205 insertions(+), 41 deletions(-) create mode 100644 test/testdata/configs/config-v17.json diff --git a/catchup/pref_test.go b/catchup/pref_test.go index 34b78352dd..2bbb32d109 100644 --- a/catchup/pref_test.go +++ b/catchup/pref_test.go @@ -64,7 +64,7 @@ func BenchmarkServiceFetchBlocks(b *testing.B) { require.NoError(b, err) // Make Service - syncer := MakeService(logging.TestingLog(b), defaultConfig, net, local, new(mockedAuthenticator), nil) + syncer := MakeService(logging.TestingLog(b), defaultConfig, net, local, new(mockedAuthenticator), nil, nil) b.StartTimer() syncer.Start() for w := 0; w < 1000; w++ { diff --git a/catchup/service.go b/catchup/service.go index bf0c2d5234..3de7809b91 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -28,11 +28,13 @@ import ( "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/ledger/ledgercore" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/logging/telemetryspec" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/util/execpool" ) const catchupPeersForSync = 10 @@ -56,21 +58,24 @@ type Ledger interface { LastRound() basics.Round Block(basics.Round) (bookkeeping.Block, error) IsWritingCatchpointFile() bool + Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error) + AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error } 
// Service represents the catchup service. Once started and until it is stopped, it ensures that the ledger is up to date with network. type Service struct { - syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops - cfg config.Local - ledger Ledger - ctx context.Context - cancel func() - done chan struct{} - log logging.Logger - net network.GossipNode - auth BlockAuthenticator - parallelBlocks uint64 - deadlineTimeout time.Duration + syncStartNS int64 // at top of struct to keep 64 bit aligned for atomic.* ops + cfg config.Local + ledger Ledger + ctx context.Context + cancel func() + done chan struct{} + log logging.Logger + net network.GossipNode + auth BlockAuthenticator + parallelBlocks uint64 + deadlineTimeout time.Duration + blockValidationPool execpool.BacklogPool // suspendForCatchpointWriting defines whether we've ran into a state where the ledger is currently busy writing the // catchpoint file. If so, we want to suspend the catchup process until the catchpoint file writing is complete, @@ -99,7 +104,7 @@ type BlockAuthenticator interface { } // MakeService creates a catchup service instance from its constituent components -func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate) (s *Service) { +func MakeService(log logging.Logger, config config.Local, net network.GossipNode, ledger Ledger, auth BlockAuthenticator, unmatchedPendingCertificates <-chan PendingUnmatchedCertificate, blockValidationPool execpool.BacklogPool) (s *Service) { s = &Service{} s.cfg = config @@ -110,6 +115,7 @@ func MakeService(log logging.Logger, config config.Local, net network.GossipNode s.log = log.With("Context", "sync") s.parallelBlocks = config.CatchupParallelBlocks s.deadlineTimeout = agreement.DeadlineTimeout() + s.blockValidationPool = blockValidationPool return s } @@ -237,16 +243,18 @@ func (s *Service) fetchAndWrite(r 
basics.Round, prevFetchCompleteChan chan bool, s.log.Debugf("fetchAndWrite(%v): Got block and cert contents: %v %v", r, block, cert) // Check that the block's contents match the block header (necessary with an untrusted block because b.Hash() only hashes the header) - if !block.ContentsMatchHeader() { - peerSelector.RankPeer(peer, peerRankInvalidDownload) - // Check if this mismatch is due to an unsupported protocol version - if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok { - s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol) - return false - } + if s.cfg.CatchupVerifyPaysetHash() { + if !block.ContentsMatchHeader() { + peerSelector.RankPeer(peer, peerRankInvalidDownload) + // Check if this mismatch is due to an unsupported protocol version + if _, ok := config.Consensus[block.BlockHeader.CurrentProtocol]; !ok { + s.log.Errorf("fetchAndWrite(%v): unsupported protocol version detected: '%v'", r, block.BlockHeader.CurrentProtocol) + return false + } - s.log.Warnf("fetchAndWrite(%v): block contents do not match header (attempt %d)", r, i) - continue // retry the fetch + s.log.Warnf("fetchAndWrite(%v): block contents do not match header (attempt %d)", r, i) + continue // retry the fetch + } } // make sure that we have the lookBack block that's required for authenticating this block @@ -262,12 +270,13 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, } } } - - err = s.auth.Authenticate(block, cert) - if err != nil { - s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err) - peerSelector.RankPeer(peer, peerRankInvalidDownload) - continue // retry the fetch + if s.cfg.CatchupVerifyCertificate() { + err = s.auth.Authenticate(block, cert) + if err != nil { + s.log.Warnf("fetchAndWrite(%v): cert did not authenticate block (attempt %d): %v", r, i, err) + peerSelector.RankPeer(peer, peerRankInvalidDownload) + continue 
// retry the fetch + } } peerRank := peerSelector.PeerDownloadDurationToRank(peer, blockDownloadDuration) @@ -296,7 +305,21 @@ func (s *Service) fetchAndWrite(r basics.Round, prevFetchCompleteChan chan bool, return false } - err = s.ledger.AddBlock(*block, *cert) + if s.cfg.CatchupVerifyTransactionSignatures() || s.cfg.CatchupVerifyApplyData() { + vb, err := s.ledger.Validate(s.ctx, *block, s.blockValidationPool) + if err != nil { + if s.ctx.Err() != nil { + // if the context expired, just exit. + return false + } + s.log.Warnf("fetchAndWrite(%d): failed to validate block : %v", r, err) + return false + } + err = s.ledger.AddValidatedBlock(*vb, *cert) + } else { + err = s.ledger.AddBlock(*block, *cert) + } + if err != nil { switch err.(type) { case ledgercore.BlockInLedgerError: diff --git a/catchup/service_test.go b/catchup/service_test.go index 6ed0d814fe..32e22c02c1 100644 --- a/catchup/service_test.go +++ b/catchup/service_test.go @@ -34,10 +34,12 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/committee" + "github.com/algorand/go-algorand/ledger" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/rpcs" + "github.com/algorand/go-algorand/util/execpool" ) var defaultConfig = config.GetDefaultLocal() @@ -150,7 +152,7 @@ func TestServiceFetchBlocksSameRange(t *testing.T) { net.addPeer(rootURL) // Make Service - syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) + syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil) syncer.testStart() syncer.sync() @@ -187,7 +189,7 @@ func TestPeriodicSync(t *testing.T) { require.True(t, 0 == initialLocalRound) // Make Service - s := MakeService(logging.Base(), defaultConfig, net, 
local, auth, nil) + s := MakeService(logging.Base(), defaultConfig, net, local, auth, nil, nil) s.deadlineTimeout = 2 * time.Second s.Start() @@ -236,7 +238,7 @@ func TestServiceFetchBlocksOneBlock(t *testing.T) { net.addPeer(rootURL) // Make Service - s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil) // Get last round @@ -298,7 +300,7 @@ func TestAbruptWrites(t *testing.T) { net.addPeer(rootURL) // Make Service - s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil) var wg sync.WaitGroup wg.Add(1) @@ -354,7 +356,7 @@ func TestServiceFetchBlocksMultiBlocks(t *testing.T) { net.addPeer(rootURL) // Make Service - syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil) + syncer := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil) fetcher := makeUniversalBlockFetcher(logging.Base(), net, defaultConfig) // Start the service ( dummy ) @@ -407,7 +409,7 @@ func TestServiceFetchBlocksMalformed(t *testing.T) { net.addPeer(rootURL) // Make Service - s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil, nil) // Start the service ( dummy ) s.testStart() @@ -554,7 +556,7 @@ func helperTestOnSwitchToUnSupportedProtocol( net.addPeer(rootURL) // Make Service - s := MakeService(logging.Base(), config, net, local, &mockedAuthenticator{errorRound: -1}, nil) + s := MakeService(logging.Base(), config, net, local, &mockedAuthenticator{errorRound: -1}, nil, nil) 
s.deadlineTimeout = 2 * time.Second s.Start() defer s.Stop() @@ -608,6 +610,14 @@ func (m *mockedLedger) AddBlock(blk bookkeeping.Block, cert agreement.Certificat return nil } +func (m *mockedLedger) Validate(ctx context.Context, blk bookkeeping.Block, executionPool execpool.BacklogPool) (*ledger.ValidatedBlock, error) { + return nil, nil +} + +func (m *mockedLedger) AddValidatedBlock(vb ledger.ValidatedBlock, cert agreement.Certificate) error { + return nil +} + func (m *mockedLedger) ConsensusParams(r basics.Round) (config.ConsensusParams, error) { m.mu.Lock() defer m.mu.Unlock() @@ -742,7 +752,7 @@ func TestCatchupUnmatchedCertificate(t *testing.T) { net.addPeer(rootURL) // Make Service - s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil) + s := MakeService(logging.Base(), defaultConfig, net, local, &mockedAuthenticator{errorRound: int(lastRoundAtStart + 1)}, nil, nil) s.testStart() for roundNumber := 2; roundNumber < 10; roundNumber += 3 { pc := &PendingUnmatchedCertificate{ diff --git a/config/config.go b/config/config.go index f9740aab6a..775f9ae0f7 100644 --- a/config/config.go +++ b/config/config.go @@ -63,7 +63,7 @@ type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. This field tag must be updated any time we add a new version. 
- Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16"` + Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17"` // environmental (may be overridden) // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks @@ -388,6 +388,20 @@ type Local struct { // BlockServiceCustomFallbackEndpoints is empty. // The archiver is randomly selected, if none is available, will return StatusNotFound (404). EnableBlockServiceFallbackToArchiver bool `version[16]:"true"` + + // CatchupBlockValidateMode is a development and testing configuration used by the catchup service. + // It can be used to omit certain validations to speed up the catchup process, or to apply extra validations which are redundant in normal operation. + // This field is a bit-field with: + // bit 0: (default 0) 0: verify the block certificate; 1: skip this validation + // bit 1: (default 0) 0: verify payset committed hash in block header matches payset hash; 1: skip this validation + // bit 2: (default 0) 0: don't verify the transaction signatures on the block are valid; 1: verify the transaction signatures on block + // bit 3: (default 0) 0: don't verify that the hash of the recomputed payset matches the hash of the payset committed in the block header; 1: do perform the above verification + // Note: not all permutations of the above bitset are currently functional. In particular, the ones that are functional are: + // 0 : default behavior. 
+ // 3 : speed up catchup by skipping necessary validations + // 12 : perform all validation methods (normal and additional). These extra tests helps to verify the integrity of the compiled executable against + // previously used executabled, and would not provide any additional security guarantees. + CatchupBlockValidateMode int `version[17]:"0"` } // Filenames of config files within the configdir (e.g. ~/.algorand) @@ -636,3 +650,30 @@ func (cfg Local) DNSSecurityTelemeryAddrEnforced() bool { // ProposalAssemblyTime is the max amount of time to spend on generating a proposal block. This should eventually have it's own configurable value. const ProposalAssemblyTime time.Duration = 250 * time.Millisecond + +const ( + catchupValidationModeCertificate = 1 + catchupValidationModePaysetHash = 2 + catchupValidationModeVerifyTransactionSignatures = 4 + catchupValidationModeVerifyApplyData = 8 +) + +// CatchupVerifyCertificate returns true if certificate verification is needed +func (cfg Local) CatchupVerifyCertificate() bool { + return cfg.CatchupBlockValidateMode&catchupValidationModeCertificate == 0 +} + +// CatchupVerifyPaysetHash returns true if payset hash verification is needed +func (cfg Local) CatchupVerifyPaysetHash() bool { + return cfg.CatchupBlockValidateMode&catchupValidationModePaysetHash == 0 +} + +// CatchupVerifyTransactionSignatures returns true if transactions signature verification is needed +func (cfg Local) CatchupVerifyTransactionSignatures() bool { + return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyTransactionSignatures != 0 +} + +// CatchupVerifyApplyData returns true if verifying the ApplyData of the payset needed +func (cfg Local) CatchupVerifyApplyData() bool { + return cfg.CatchupBlockValidateMode&catchupValidationModeVerifyApplyData != 0 +} diff --git a/config/local_defaults.go b/config/local_defaults.go index f8b3f9ce41..f186363b93 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -20,7 +20,7 @@ package 
config var defaultLocal = Local{ - Version: 16, + Version: 17, AccountsRebuildSynchronousMode: 1, AnnounceParticipationKey: true, Archival: false, @@ -32,6 +32,7 @@ var defaultLocal = Local{ CatchpointInterval: 10000, CatchpointTracking: 0, CatchupBlockDownloadRetryAttempts: 1000, + CatchupBlockValidateMode: 0, CatchupFailurePeerRefreshRate: 10, CatchupGossipBlockFetchTimeoutSec: 4, CatchupHTTPBlockFetchTimeoutSec: 4, diff --git a/installer/config.json.example b/installer/config.json.example index e7201905da..2397cee029 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -1,5 +1,5 @@ { - "Version": 16, + "Version": 17, "AccountsRebuildSynchronousMode": 1, "AnnounceParticipationKey": true, "Archival": false, @@ -11,6 +11,7 @@ "CatchpointInterval": 10000, "CatchpointTracking": 0, "CatchupBlockDownloadRetryAttempts": 1000, + "CatchupBlockValidateMode": 0, "CatchupFailurePeerRefreshRate": 10, "CatchupGossipBlockFetchTimeoutSec": 4, "CatchupHTTPBlockFetchTimeoutSec": 4, diff --git a/node/node.go b/node/node.go index a47f55c420..92aad1e133 100644 --- a/node/node.go +++ b/node/node.go @@ -257,7 +257,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd node.agreementService = agreement.MakeService(agreementParameters) node.catchupBlockAuth = blockAuthenticatorImpl{Ledger: node.ledger, AsyncVoteVerifier: agreement.MakeAsyncVoteVerifier(node.lowPriorityCryptoVerificationPool)} - node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates) + node.catchupService = catchup.MakeService(node.log, node.config, p2pNode, node.ledger, node.catchupBlockAuth, agreementLedger.UnmatchedPendingCertificates, node.lowPriorityCryptoVerificationPool) node.txPoolSyncerService = rpcs.MakeTxSyncer(node.transactionPool, node.net, node.txHandler.SolicitedTxHandler(), time.Duration(cfg.TxSyncIntervalSeconds)*time.Second, 
time.Duration(cfg.TxSyncTimeoutSeconds)*time.Second, cfg.TxSyncServeResponseSize) err = node.loadParticipationKeys() diff --git a/test/testdata/configs/config-v17.json b/test/testdata/configs/config-v17.json new file mode 100644 index 0000000000..2397cee029 --- /dev/null +++ b/test/testdata/configs/config-v17.json @@ -0,0 +1,88 @@ +{ + "Version": 17, + "AccountsRebuildSynchronousMode": 1, + "AnnounceParticipationKey": true, + "Archival": false, + "BaseLoggerDebugLevel": 4, + "BlockServiceCustomFallbackEndpoints": "", + "BroadcastConnectionsLimit": -1, + "CadaverSizeTarget": 1073741824, + "CatchpointFileHistoryLength": 365, + "CatchpointInterval": 10000, + "CatchpointTracking": 0, + "CatchupBlockDownloadRetryAttempts": 1000, + "CatchupBlockValidateMode": 0, + "CatchupFailurePeerRefreshRate": 10, + "CatchupGossipBlockFetchTimeoutSec": 4, + "CatchupHTTPBlockFetchTimeoutSec": 4, + "CatchupLedgerDownloadRetryAttempts": 50, + "CatchupParallelBlocks": 16, + "ConnectionsRateLimitingCount": 60, + "ConnectionsRateLimitingWindowSeconds": 1, + "DNSBootstrapID": ".algorand.network", + "DNSSecurityFlags": 1, + "DeadlockDetection": 0, + "DisableLocalhostConnectionRateLimit": true, + "DisableOutgoingConnectionThrottling": false, + "EnableAgreementReporting": false, + "EnableAgreementTimeMetrics": false, + "EnableAssembleStats": false, + "EnableBlockService": false, + "EnableBlockServiceFallbackToArchiver": true, + "EnableCatchupFromArchiveServers": false, + "EnableDeveloperAPI": false, + "EnableGossipBlockService": true, + "EnableIncomingMessageFilter": false, + "EnableLedgerService": false, + "EnableMetricReporting": false, + "EnableOutgoingNetworkMessageFiltering": true, + "EnablePingHandler": true, + "EnableProcessBlockStats": false, + "EnableProfiler": false, + "EnableRequestLogger": false, + "EnableTopAccountsReporting": false, + "EndpointAddress": "127.0.0.1:0", + "FallbackDNSResolverAddress": "", + "ForceRelayMessages": false, + "GossipFanout": 4, + 
"IncomingConnectionsLimit": 10000, + "IncomingMessageFilterBucketCount": 5, + "IncomingMessageFilterBucketSize": 512, + "IsIndexerActive": false, + "LedgerSynchronousMode": 2, + "LogArchiveMaxAge": "", + "LogArchiveName": "node.archive.log", + "LogSizeLimit": 1073741824, + "MaxCatchpointDownloadDuration": 7200000000000, + "MaxConnectionsPerIP": 30, + "MinCatchpointFileDownloadBytesPerSecond": 20480, + "NetAddress": "", + "NetworkMessageTraceServer": "", + "NetworkProtocolVersion": "", + "NodeExporterListenAddress": ":9100", + "NodeExporterPath": "./node_exporter", + "OptimizeAccountsDatabaseOnStartup": false, + "OutgoingMessageFilterBucketCount": 3, + "OutgoingMessageFilterBucketSize": 128, + "PeerConnectionsUpdateInterval": 3600, + "PeerPingPeriodSeconds": 0, + "PriorityPeers": {}, + "PublicAddress": "", + "ReconnectTime": 60000000000, + "ReservedFDs": 256, + "RestReadTimeoutSeconds": 15, + "RestWriteTimeoutSeconds": 120, + "RunHosted": false, + "SuggestedFeeBlockHistory": 3, + "SuggestedFeeSlidingWindowSize": 50, + "TLSCertFile": "", + "TLSKeyFile": "", + "TelemetryToLog": true, + "TxPoolExponentialIncreaseFactor": 2, + "TxPoolSize": 15000, + "TxSyncIntervalSeconds": 60, + "TxSyncServeResponseSize": 1000000, + "TxSyncTimeoutSeconds": 30, + "UseXForwardedForAddressField": "", + "VerifiedTranscationsCacheSize": 30000 +} From 2b1f8df6bf4be7666e075443d726281577eb1166 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 22 Apr 2021 17:01:04 -0400 Subject: [PATCH 184/215] ledger: avoid producing empty local delta (#2086) Ensure local delta does not have entries with empty key-value stores --- config/consensus.go | 22 ++- ledger/appcow.go | 6 +- ledger/appcow_test.go | 87 +++++++++++ ledger/applications_test.go | 277 ++++++++++++++++++++++++++++++++++++ protocol/consensus.go | 7 +- 5 files changed, 395 insertions(+), 4 deletions(-) diff --git a/config/consensus.go b/config/consensus.go index 8c5200e0c5..bf1431c492 
100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -348,6 +348,9 @@ type ConsensusParams struct { // update the initial rewards rate calculation to take the reward pool minimum balance into account InitialRewardsRateCalculation bool + + // NoEmptyLocalDeltas updates how ApplyDelta.EvalDelta.LocalDeltas are stored + NoEmptyLocalDeltas bool } // PaysetCommitType enumerates possible ways for the block header to commit to @@ -872,9 +875,24 @@ func initConsensusProtocols() { v25.ApprovedUpgrades[protocol.ConsensusV26] = 140000 v24.ApprovedUpgrades[protocol.ConsensusV26] = 140000 + // v27 updates ApplyDelta.EvalDelta.LocalDeltas format + v27 := v26 + v27.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + + // Enable the ApplyDelta.EvalDelta.LocalDeltas fix + v27.NoEmptyLocalDeltas = true + + Consensus[protocol.ConsensusV27] = v27 + + // v26 can be upgraded to v27, with an update delay of 3 days + // 60279 = (3 * 24 * 60 * 60 / 4.3) + // for the sake of future manual calculations, we'll round that down + // a bit : + v26.ApprovedUpgrades[protocol.ConsensusV27] = 60000 + // ConsensusFuture is used to test features that are implemented // but not yet released in a production protocol version. - vFuture := v26 + vFuture := v27 vFuture.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} // FilterTimeout for period 0 should take a new optimized, configured value, need to revisit this later @@ -895,7 +913,7 @@ func initConsensusProtocols() { Consensus[protocol.ConsensusFuture] = vFuture } -// Global defines global Algorand protocol parameters which should not be overriden. +// Global defines global Algorand protocol parameters which should not be overridden. 
type Global struct { SmallLambda time.Duration // min amount of time to wait for leader's credential (i.e., time to propagate one credential) BigLambda time.Duration // max amount of time to wait for leader's proposal (i.e., time to propagate one block) diff --git a/ledger/appcow.go b/ledger/appcow.go index 0286bb6126..1d66deb92a 100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -467,7 +467,11 @@ func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions. if err != nil { return basics.EvalDelta{}, err } - evalDelta.LocalDeltas[addrOffset] = sdelta.kvCow.serialize() + + d := sdelta.kvCow.serialize() + if !cb.proto.NoEmptyLocalDeltas || len(d) != 0 { + evalDelta.LocalDeltas[addrOffset] = d + } } } } diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go index df2affb017..936645e002 100644 --- a/ledger/appcow_test.go +++ b/ledger/appcow_test.go @@ -407,6 +407,7 @@ func TestCowBuildDelta(t *testing.T) { a.Contains(err.Error(), "could not find offset") a.Empty(ed) + // check pre v27 behavior txn.Sender = sender ed, err = cow.BuildEvalDelta(aidx, &txn) a.NoError(err) @@ -418,6 +419,18 @@ func TestCowBuildDelta(t *testing.T) { ed, ) + // check v27 behavior + cow.proto = config.Consensus[protocol.ConsensusCurrentVersion] + ed, err = cow.BuildEvalDelta(aidx, &txn) + a.NoError(err) + a.Equal( + basics.EvalDelta{ + GlobalDelta: basics.StateDelta{}, + LocalDeltas: map[uint64]basics.StateDelta{}, + }, + ed, + ) + // check actual serialization delete(cow.sdeltas[creator], storagePtr{aidx, true}) cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{ @@ -444,6 +457,80 @@ func TestCowBuildDelta(t *testing.T) { }, ed, ) + + // check empty sender delta (same key update) and non-empty others + delete(cow.sdeltas[sender], storagePtr{aidx, false}) + cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{ + action: remainAllocAction, + kvCow: stateDelta{ + "key1": valueDelta{ + old: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + new: 
basics.TealValue{Type: basics.TealUintType, Uint: 1}, + oldExists: true, + newExists: true, + }, + }, + } + txn.Accounts = append(txn.Accounts, creator) + cow.sdeltas[creator][storagePtr{aidx, false}] = &storageDelta{ + action: remainAllocAction, + kvCow: stateDelta{ + "key2": valueDelta{ + old: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + new: basics.TealValue{Type: basics.TealUintType, Uint: 2}, + oldExists: true, + newExists: true, + }, + }, + } + + ed, err = cow.BuildEvalDelta(aidx, &txn) + a.NoError(err) + a.Equal( + basics.EvalDelta{ + GlobalDelta: basics.StateDelta(nil), + LocalDeltas: map[uint64]basics.StateDelta{ + 1: { + "key2": basics.ValueDelta{Action: basics.SetUintAction, Uint: 2}, + }, + }, + }, + ed, + ) + + // check two keys: empty change and value update + delete(cow.sdeltas[sender], storagePtr{aidx, false}) + delete(cow.sdeltas[creator], storagePtr{aidx, false}) + cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{ + action: remainAllocAction, + kvCow: stateDelta{ + "key1": valueDelta{ + old: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + new: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + oldExists: true, + newExists: true, + }, + "key2": valueDelta{ + old: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + new: basics.TealValue{Type: basics.TealUintType, Uint: 2}, + oldExists: true, + newExists: true, + }, + }, + } + ed, err = cow.BuildEvalDelta(aidx, &txn) + a.NoError(err) + a.Equal( + basics.EvalDelta{ + GlobalDelta: basics.StateDelta(nil), + LocalDeltas: map[uint64]basics.StateDelta{ + 0: { + "key2": basics.ValueDelta{Action: basics.SetUintAction, Uint: 2}, + }, + }, + }, + ed, + ) } func TestCowDeltaSerialize(t *testing.T) { diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 0e827940e9..0ce59341e1 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -567,3 +567,280 @@ return` }) a.NoError(err) } + +func TestAppAccountDelta(t *testing.T) { + a := 
require.New(t) + source := `#pragma version 2 +txn ApplicationID +int 0 +== +bnz success +// if no args then write local +// otherwise check args and write local or global +txn NumAppArgs +int 0 +== +bnz writelocal +txna ApplicationArgs 0 +byte "local" +== +bnz writelocal +txna ApplicationArgs 0 +byte "local1" +== +bnz writelocal1 +txna ApplicationArgs 0 +byte "global" +== +bnz writeglobal +int 0 +return +writelocal: +int 0 +byte "lk" +byte "local" +app_local_put +b success +writelocal1: +int 0 +byte "lk1" +byte "local1" +app_local_put +b success +writeglobal: +byte "gk" +byte "global" +app_global_put +success: +int 1 +return` + + ops, err := logic.AssembleString(source) + a.NoError(err) + a.Greater(len(ops.Program), 1) + program := ops.Program + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + + creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") + a.NoError(err) + userLocal, err := basics.UnmarshalChecksumAddress("UL5C6SRVLOROSB5FGAE6TY34VXPXVR7GNIELUB3DD5KTA4VT6JGOZ6WFAY") + a.NoError(err) + + a.Contains(genesisInitState.Accounts, creator) + a.Contains(genesisInitState.Accounts, userLocal) + + cfg := config.GetDefaultLocal() + l, err := OpenLedger(logging.Base(), "TestAppAccountDelta", true, genesisInitState, cfg) + a.NoError(err) + defer l.Close() + + genesisID := t.Name() + txHeader := transactions.Header{ + Sender: creator, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: genesisID, + GenesisHash: genesisInitState.GenesisHash, + } + + // create application + approvalProgram := program + clearStateProgram := []byte("\x02") // empty + appCreateFields := transactions.ApplicationCallTxnFields{ + ApprovalProgram: approvalProgram, + ClearStateProgram: clearStateProgram, + GlobalStateSchema: basics.StateSchema{NumByteSlice: 4}, + 
LocalStateSchema: basics.StateSchema{NumByteSlice: 2}, + } + appCreate := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCreateFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{}) + a.NoError(err) + + appIdx := basics.AppIndex(1) // first tnx => idx = 1 + + // opt-in, write to local + txHeader.Sender = userLocal + appCallFields := transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.OptInOC, + ApplicationID: appIdx, + } + appCall := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, transactions.ApplyData{ + EvalDelta: basics.EvalDelta{ + LocalDeltas: map[uint64]basics.StateDelta{0: {"lk": basics.ValueDelta{ + Action: basics.SetBytesAction, + Bytes: "local", + }}}, + }, + }) + a.NoError(err) + + txHeader.Sender = userLocal + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.NoOpOC, + ApplicationID: appIdx, + } + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, transactions.ApplyData{}) + a.NoError(err) + + // save data into DB and write into local state + l.accts.accountsWriting.Add(1) + l.accts.commitRound(3, 0, 0) + l.reloadLedger() + + // check first write + blk, err := l.Block(2) + a.NoError(err) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas, uint64(0)) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[0], "lk") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[0]["lk"].Bytes, "local") + expectedAD := transactions.ApplyData{} + dec, err := hex.DecodeString("81a2647481a26c64810081a26c6b82a2617401a26273a56c6f63616c") + a.NoError(err) + 
err = protocol.Decode(dec, &expectedAD) + a.NoError(err) + a.Equal(expectedAD, blk.Payset[0].ApplyData) + + // check repeated write + blk, err = l.Block(3) + a.NoError(err) + a.Empty(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas) + expectedAD = transactions.ApplyData{} + dec, err = hex.DecodeString("80") + a.NoError(err) + err = protocol.Decode(dec, &expectedAD) + a.NoError(err) + a.Equal(expectedAD, blk.Payset[0].ApplyData) + + txHeader.Sender = creator + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.NoOpOC, + ApplicationID: appIdx, + ApplicationArgs: [][]byte{[]byte("global")}, + } + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, + transactions.ApplyData{EvalDelta: basics.EvalDelta{ + GlobalDelta: basics.StateDelta{"gk": basics.ValueDelta{Action: basics.SetBytesAction, Bytes: "global"}}}, + }) + a.NoError(err) + + // repeat writing into global state + txHeader.Lease = [32]byte{1} + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, transactions.ApplyData{}) + a.NoError(err) + + // save data into DB + l.accts.accountsWriting.Add(1) + l.accts.commitRound(2, 3, 0) + l.reloadLedger() + + // check first write + blk, err = l.Block(4) + a.NoError(err) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.GlobalDelta, "gk") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.GlobalDelta["gk"].Bytes, "global") + expectedAD = transactions.ApplyData{} + dec, err = hex.DecodeString("81a2647481a2676481a2676b82a2617401a26273a6676c6f62616c") + a.NoError(err) + err = protocol.Decode(dec, &expectedAD) + a.NoError(err) + a.Equal(expectedAD, blk.Payset[0].ApplyData) + + // check repeated write + blk, err = l.Block(5) + 
a.NoError(err) + a.NotContains(blk.Payset[0].ApplyData.EvalDelta.GlobalDelta, "gk") + expectedAD = transactions.ApplyData{} + dec, err = hex.DecodeString("80") + a.NoError(err) + err = protocol.Decode(dec, &expectedAD) + a.NoError(err) + a.Equal(expectedAD, blk.Payset[0].ApplyData) + + // check same key update in the same block + txHeader.Sender = userLocal + txHeader.Lease = [32]byte{2} + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.NoOpOC, + ApplicationID: appIdx, + ApplicationArgs: [][]byte{[]byte("local1")}, + } + appCall1 := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + + txHeader.Lease = [32]byte{3} + appCall2 := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + + stx1 := sign(initKeys, appCall1) + stx2 := sign(initKeys, appCall2) + + blk = makeNewEmptyBlock(t, l, genesisID, genesisInitState.Accounts) + ad1 := transactions.ApplyData{ + EvalDelta: basics.EvalDelta{ + LocalDeltas: map[uint64]basics.StateDelta{0: {"lk1": basics.ValueDelta{ + Action: basics.SetBytesAction, + Bytes: "local1", + }}}, + }, + } + txib1, err := blk.EncodeSignedTxn(stx1, ad1) + a.NoError(err) + txib2, err := blk.EncodeSignedTxn(stx2, transactions.ApplyData{}) + a.NoError(err) + blk.TxnCounter = blk.TxnCounter + 2 + blk.Payset = append(blk.Payset, txib1, txib2) + blk.TxnRoot, err = blk.PaysetCommit() + a.NoError(err) + err = l.appendUnvalidated(blk) + a.NoError(err) + + // first txn has delta + blk, err = l.Block(6) + a.NoError(err) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas, uint64(0)) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[0], "lk1") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[0]["lk1"].Bytes, "local1") + expectedAD = transactions.ApplyData{} + dec, err = hex.DecodeString("81a2647481a26c64810081a36c6b3182a2617401a26273a66c6f63616c31") + 
a.NoError(err) + err = protocol.Decode(dec, &expectedAD) + a.NoError(err) + a.Equal(expectedAD, blk.Payset[0].ApplyData) + + // second txn does not have delta (same key/value update) + a.Empty(blk.Payset[1].ApplyData.EvalDelta.LocalDeltas) + a.Equal(transactions.ApplyData{}, blk.Payset[1].ApplyData) +} diff --git a/protocol/consensus.go b/protocol/consensus.go index 7b2a507931..02e6e1e262 100644 --- a/protocol/consensus.go +++ b/protocol/consensus.go @@ -143,6 +143,11 @@ const ConsensusV26 = ConsensusVersion( "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff", ) +// ConsensusV27 updates ApplyDelta.EvalDelta.LocalDeltas format +const ConsensusV27 = ConsensusVersion( + "https://github.com/algorandfoundation/specs/tree/d050b3cade6d5c664df8bd729bf219f179812595", +) + // ConsensusFuture is a protocol that should not appear in any production // network, but is used to test features before they are released. const ConsensusFuture = ConsensusVersion( @@ -155,7 +160,7 @@ const ConsensusFuture = ConsensusVersion( // ConsensusCurrentVersion is the latest version and should be used // when a specific version is not provided. -const ConsensusCurrentVersion = ConsensusV26 +const ConsensusCurrentVersion = ConsensusV27 // Error is used to indicate that an unsupported protocol has been detected. 
type Error ConsensusVersion From 74025a05f1f8e37afbf4e0b223aa37931550c5b0 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 22 Apr 2021 19:22:55 -0400 Subject: [PATCH 185/215] Prevent empty accounts stored in DB after opting out from an app (#2091) Fix for empty accounts stored in DB after app clear state or app removal --- ledger/appcow.go | 21 ++- ledger/appcow_test.go | 12 ++ ledger/applications_test.go | 279 ++++++++++++++++++++++++++++++++++++ 3 files changed, 305 insertions(+), 7 deletions(-) diff --git a/ledger/appcow.go b/ledger/appcow.go index 1d66deb92a..96262c3d2a 100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -562,9 +562,12 @@ func applyStorageDelta(data basics.AccountData, aapp storagePtr, store *storageD // duplicate code in branches is proven to be a bit faster than // having basics.AppParams and basics.AppLocalState under a common interface with additional loops and type assertions if aapp.global { - owned := make(map[basics.AppIndex]basics.AppParams, len(data.AppParams)) - for k, v := range data.AppParams { - owned[k] = v + var owned map[basics.AppIndex]basics.AppParams + if len(data.AppParams) > 0 { + owned = make(map[basics.AppIndex]basics.AppParams, len(data.AppParams)) + for k, v := range data.AppParams { + owned[k] = v + } } switch store.action { @@ -600,9 +603,12 @@ func applyStorageDelta(data basics.AccountData, aapp storagePtr, store *storageD data.AppParams = owned } else { - owned := make(map[basics.AppIndex]basics.AppLocalState, len(data.AppLocalStates)) - for k, v := range data.AppLocalStates { - owned[k] = v + var owned map[basics.AppIndex]basics.AppLocalState + if len(data.AppLocalStates) > 0 { + owned = make(map[basics.AppIndex]basics.AppLocalState, len(data.AppLocalStates)) + for k, v := range data.AppLocalStates { + owned[k] = v + } } switch store.action { @@ -610,7 +616,8 @@ func applyStorageDelta(data basics.AccountData, aapp storagePtr, store *storageD 
delete(owned, aapp.aidx) case allocAction, remainAllocAction: // note: these should always exist because they were - // at least preceded by a call to Put? + // at least preceded by a call to Put (opting in), + // or the account has opted in before and local states are pre-allocated states, ok := owned[aapp.aidx] if !ok { return basics.AccountData{}, fmt.Errorf("could not find existing states for %v", aapp.aidx) diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go index 936645e002..162241fb1d 100644 --- a/ledger/appcow_test.go +++ b/ledger/appcow_test.go @@ -843,6 +843,18 @@ func TestApplyStorageDelta(t *testing.T) { data = applyAll(kv, &sdd) testDuplicateKeys(data.AppParams[1].GlobalState, data.AppParams[2].GlobalState) testDuplicateKeys(data.AppLocalStates[1].KeyValue, data.AppLocalStates[2].KeyValue) + + sd := storageDelta{action: deallocAction, kvCow: map[string]valueDelta{}} + data, err := applyStorageDelta(basics.AccountData{}, storagePtr{1, true}, &sd) + a.NoError(err) + a.Nil(data.AppParams) + a.Nil(data.AppLocalStates) + a.True(data.IsZero()) + data, err = applyStorageDelta(basics.AccountData{}, storagePtr{1, false}, &sd) + a.NoError(err) + a.Nil(data.AppParams) + a.Nil(data.AppLocalStates) + a.True(data.IsZero()) } func TestCowAllocated(t *testing.T) { diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 0ce59341e1..d208e33009 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -844,3 +844,282 @@ return` a.Empty(blk.Payset[1].ApplyData.EvalDelta.LocalDeltas) a.Equal(transactions.ApplyData{}, blk.Payset[1].ApplyData) } + +func TestAppEmptyAccountsLocal(t *testing.T) { + a := require.New(t) + source := `#pragma version 2 +txn ApplicationID +int 0 +== +bnz success +int 0 +byte "lk" +byte "local" +app_local_put +success: +int 1 +return` + + ops, err := logic.AssembleString(source) + a.NoError(err) + a.Greater(len(ops.Program), 1) + program := ops.Program + + proto := 
config.Consensus[protocol.ConsensusCurrentVersion] + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + + creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") + a.NoError(err) + userLocal, err := basics.UnmarshalChecksumAddress("UL5C6SRVLOROSB5FGAE6TY34VXPXVR7GNIELUB3DD5KTA4VT6JGOZ6WFAY") + a.NoError(err) + + a.Contains(genesisInitState.Accounts, creator) + a.Contains(genesisInitState.Accounts, userLocal) + + cfg := config.GetDefaultLocal() + l, err := OpenLedger(logging.Base(), "TestAppEmptyAccounts", true, genesisInitState, cfg) + a.NoError(err) + defer l.Close() + + genesisID := t.Name() + txHeader := transactions.Header{ + Sender: creator, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: genesisID, + GenesisHash: genesisInitState.GenesisHash, + } + + // create application + approvalProgram := program + clearStateProgram := []byte("\x02") // empty + appCreateFields := transactions.ApplicationCallTxnFields{ + ApprovalProgram: approvalProgram, + ClearStateProgram: clearStateProgram, + GlobalStateSchema: basics.StateSchema{NumByteSlice: 4}, + LocalStateSchema: basics.StateSchema{NumByteSlice: 2}, + } + appCreate := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCreateFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{}) + a.NoError(err) + + appIdx := basics.AppIndex(1) // first tnx => idx = 1 + + // opt-in, write to local + txHeader.Sender = userLocal + appCallFields := transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.OptInOC, + ApplicationID: appIdx, + } + appCall := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, 
genesisInitState.Accounts, initKeys, appCall, transactions.ApplyData{ + EvalDelta: basics.EvalDelta{ + LocalDeltas: map[uint64]basics.StateDelta{0: {"lk": basics.ValueDelta{ + Action: basics.SetBytesAction, + Bytes: "local", + }}}, + }, + }) + a.NoError(err) + + // close out + txHeader.Sender = userLocal + appCallFields = transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.CloseOutOC, + ApplicationID: appIdx, + } + appCall = transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + paymentFields := transactions.PaymentTxnFields{ + Amount: basics.MicroAlgos{Raw: 0}, + Receiver: creator, + CloseRemainderTo: creator, + } + payment := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: txHeader, + PaymentTxnFields: paymentFields, + } + + data := genesisInitState.Accounts[userLocal] + balance := basics.MicroAlgos{Raw: data.MicroAlgos.Raw - txHeader.Fee.Raw*3} + stx1 := sign(initKeys, appCall) + stx2 := sign(initKeys, payment) + + blk := makeNewEmptyBlock(t, l, genesisID, genesisInitState.Accounts) + txib1, err := blk.EncodeSignedTxn(stx1, transactions.ApplyData{}) + a.NoError(err) + txib2, err := blk.EncodeSignedTxn(stx2, transactions.ApplyData{ClosingAmount: balance}) + a.NoError(err) + blk.TxnCounter = blk.TxnCounter + 2 + blk.Payset = append(blk.Payset, txib1, txib2) + blk.TxnRoot, err = blk.PaysetCommit() + a.NoError(err) + err = l.appendUnvalidated(blk) + a.NoError(err) + + // save data into DB and write into local state + l.accts.accountsWriting.Add(1) + l.accts.commitRound(3, 0, 0) + l.reloadLedger() + + // check first write + blk, err = l.Block(2) + a.NoError(err) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas, uint64(0)) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[0], "lk") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[0]["lk"].Bytes, "local") + + // check close out + blk, err = l.Block(3) + a.NoError(err) + 
a.Empty(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas) + + pad, err := l.accts.accountsq.lookup(userLocal) + a.NoError(err) + a.Equal(basics.AccountData{}, pad.accountData) + a.Zero(pad.rowid) +} + +func TestAppEmptyAccountsGlobal(t *testing.T) { + a := require.New(t) + source := `#pragma version 2 +txn ApplicationID +int 0 +== +bnz success +byte "gk" +byte "global" +app_global_put +success: +int 1 +return` + + ops, err := logic.AssembleString(source) + a.NoError(err) + a.Greater(len(ops.Program), 1) + program := ops.Program + + proto := config.Consensus[protocol.ConsensusCurrentVersion] + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + + creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") + a.NoError(err) + userLocal, err := basics.UnmarshalChecksumAddress("UL5C6SRVLOROSB5FGAE6TY34VXPXVR7GNIELUB3DD5KTA4VT6JGOZ6WFAY") + a.NoError(err) + + a.Contains(genesisInitState.Accounts, creator) + a.Contains(genesisInitState.Accounts, userLocal) + + cfg := config.GetDefaultLocal() + l, err := OpenLedger(logging.Base(), "TestAppEmptyAccounts", true, genesisInitState, cfg) + a.NoError(err) + defer l.Close() + + genesisID := t.Name() + txHeader := transactions.Header{ + Sender: creator, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: genesisID, + GenesisHash: genesisInitState.GenesisHash, + } + + // create application + approvalProgram := program + clearStateProgram := []byte("\x02") // empty + appCreateFields := transactions.ApplicationCallTxnFields{ + ApprovalProgram: approvalProgram, + ClearStateProgram: clearStateProgram, + GlobalStateSchema: basics.StateSchema{NumByteSlice: 4}, + LocalStateSchema: basics.StateSchema{NumByteSlice: 2}, + } + appCreate := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCreateFields, + } + err = 
l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{}) + a.NoError(err) + + appIdx := basics.AppIndex(1) // first tnx => idx = 1 + + // destoy the app + txHeader.Sender = creator + appCallFields := transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.DeleteApplicationOC, + ApplicationID: appIdx, + } + appCall := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + paymentFields := transactions.PaymentTxnFields{ + Amount: basics.MicroAlgos{Raw: 0}, + Receiver: userLocal, + CloseRemainderTo: userLocal, + } + payment := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: txHeader, + PaymentTxnFields: paymentFields, + } + + data := genesisInitState.Accounts[creator] + balance := basics.MicroAlgos{Raw: data.MicroAlgos.Raw - txHeader.Fee.Raw*3} + stx1 := sign(initKeys, appCall) + stx2 := sign(initKeys, payment) + + blk := makeNewEmptyBlock(t, l, genesisID, genesisInitState.Accounts) + txib1, err := blk.EncodeSignedTxn(stx1, transactions.ApplyData{EvalDelta: basics.EvalDelta{ + GlobalDelta: basics.StateDelta{ + "gk": basics.ValueDelta{Action: basics.SetBytesAction, Bytes: "global"}, + }}, + }) + a.NoError(err) + txib2, err := blk.EncodeSignedTxn(stx2, transactions.ApplyData{ClosingAmount: balance}) + a.NoError(err) + blk.TxnCounter = blk.TxnCounter + 2 + blk.Payset = append(blk.Payset, txib1, txib2) + blk.TxnRoot, err = blk.PaysetCommit() + a.NoError(err) + err = l.appendUnvalidated(blk) + a.NoError(err) + + // save data into DB and write into local state + l.accts.accountsWriting.Add(1) + l.accts.commitRound(2, 0, 0) + l.reloadLedger() + + // check first write + blk, err = l.Block(1) + a.NoError(err) + a.Nil(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas) + a.Nil(blk.Payset[0].ApplyData.EvalDelta.GlobalDelta) + + // check deletion out + blk, err = l.Block(2) + a.NoError(err) + 
a.Nil(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.GlobalDelta, "gk") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.GlobalDelta["gk"].Bytes, "global") + + pad, err := l.accts.accountsq.lookup(creator) + a.NoError(err) + a.Equal(basics.AccountData{}, pad.accountData) + a.Zero(pad.rowid) +} From a5994a38b767204d9c254177b126f1596e5b8eb6 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 22 Apr 2021 23:56:37 -0400 Subject: [PATCH 186/215] Fix initial balances in ledger tests (#2092) * Some tests compare data against old released behavior and increasing initial balance breaks hard-coded records * Fix copy-paste error in tealdbg input parser --- cmd/tealdbg/local.go | 4 ++-- ledger/accountdb_test.go | 2 +- ledger/applications_test.go | 8 ++++---- ledger/catchupaccessor_test.go | 2 +- ledger/ledger_test.go | 26 +++++++++++++------------- 5 files changed, 21 insertions(+), 21 deletions(-) diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go index 7dba943332..f8edad06d7 100644 --- a/cmd/tealdbg/local.go +++ b/cmd/tealdbg/local.go @@ -61,14 +61,14 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err // 1. Attempt json - a single transaction var txn transactions.SignedTxn err1 := protocol.DecodeJSON(data, &txn) - if err == nil { + if err1 == nil { txnGroup = append(txnGroup, txn) return } // 2. 
Attempt json - array of transactions err2 := protocol.DecodeJSON(data, &txnGroup) - if err == nil { + if err2 == nil { return } diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go index 707117ca48..9cbb3060fe 100644 --- a/ledger/accountdb_test.go +++ b/ledger/accountdb_test.go @@ -1052,7 +1052,7 @@ func TestAccountsDbQueriesCreateClose(t *testing.T) { func benchmarkWriteCatchpointStagingBalancesSub(b *testing.B, ascendingOrder bool) { proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion) + genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100) const inMem = false log := logging.TestingLog(b) cfg := config.GetDefaultLocal() diff --git a/ledger/applications_test.go b/ledger/applications_test.go index d208e33009..256b17fc31 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -400,7 +400,7 @@ return` program := ops.Program proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") a.NoError(err) @@ -621,7 +621,7 @@ return` program := ops.Program proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") a.NoError(err) @@ -866,7 +866,7 @@ return` program := ops.Program proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + 
genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") a.NoError(err) @@ -1013,7 +1013,7 @@ return` program := ops.Program proto := config.Consensus[protocol.ConsensusCurrentVersion] - genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") a.NoError(err) diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go index a0f28b37d4..96d7e1b5f0 100644 --- a/ledger/catchupaccessor_test.go +++ b/ledger/catchupaccessor_test.go @@ -36,7 +36,7 @@ import ( ) func benchmarkRestoringFromCatchpointFileHelper(b *testing.B) { - genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion) + genesisInitState, _ := testGenerateInitState(b, protocol.ConsensusCurrentVersion, 100) const inMem = false log := logging.TestingLog(b) cfg := config.GetDefaultLocal() diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index bb1a94268a..c232ecaefa 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -66,7 +66,7 @@ func sign(secrets map[basics.Address]*crypto.SignatureSecrets, t transactions.Tr } } -func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) { +func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion, baseAlgoPerAccount int) (genesisInitState InitState, initKeys map[basics.Address]*crypto.SignatureSecrets) { params := config.Consensus[proto] poolAddr := testPoolAddr sinkAddr := testSinkAddr @@ -87,7 +87,7 @@ func testGenerateInitState(tb testing.TB, proto protocol.ConsensusVersion) (gene for i := range genaddrs { 
initKeys[genaddrs[i]] = gensecrets[i] // Give each account quite a bit more balance than MinFee or MinBalance - initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + 10000000000) * 100000)}) + initAccounts[genaddrs[i]] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: uint64((i + baseAlgoPerAccount) * 100000)}) } initKeys[poolAddr] = poolSecret initAccounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, basics.MicroAlgos{Raw: 1234567}) @@ -244,7 +244,7 @@ func (l *Ledger) addBlockTxns(t *testing.T, accounts map[basics.Address]basics.A } func TestLedgerBasic(t *testing.T) { - genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true @@ -257,7 +257,7 @@ func TestLedgerBasic(t *testing.T) { func TestLedgerBlockHeaders(t *testing.T) { a := require.New(t) - genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true @@ -401,7 +401,7 @@ func TestLedgerSingleTx(t *testing.T) { // V15 is the earliest protocol version in active use. 
// The genesis for betanet and testnet is at V15 // The genesis for mainnet is at V17 - genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV15) + genesisInitState, initSecrets := testGenerateInitState(t, protocol.ConsensusV15, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -599,7 +599,7 @@ func TestLedgerSingleTxV24(t *testing.T) { a := require.New(t) protoName := protocol.ConsensusV24 - genesisInitState, initSecrets := testGenerateInitState(t, protoName) + genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -768,7 +768,7 @@ func TestLedgerAppCrossRoundWrites(t *testing.T) { a := require.New(t) protoName := protocol.ConsensusV24 - genesisInitState, initSecrets := testGenerateInitState(t, protoName) + genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -904,7 +904,7 @@ func TestLedgerAppMultiTxnWrites(t *testing.T) { a := require.New(t) protoName := protocol.ConsensusV24 - genesisInitState, initSecrets := testGenerateInitState(t, protoName) + genesisInitState, initSecrets := testGenerateInitState(t, protoName, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -1069,7 +1069,7 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil) defer backlogPool.Shutdown() - genesisInitState, initSecrets := testGenerateInitState(t, version) + genesisInitState, initSecrets := testGenerateInitState(t, version, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -1324,7 +1324,7 @@ func TestLedgerRegressionFaultyLeaseFirstValidCheckFuture(t *testing.T) { func testLedgerRegressionFaultyLeaseFirstValidCheck2f3880f7(t *testing.T, version 
protocol.ConsensusVersion) { a := require.New(t) - genesisInitState, initSecrets := testGenerateInitState(t, version) + genesisInitState, initSecrets := testGenerateInitState(t, version, 100) const inMem = true cfg := config.GetDefaultLocal() cfg.Archival = true @@ -1445,7 +1445,7 @@ func TestLedgerReload(t *testing.T) { func TestGetLastCatchpointLabel(t *testing.T) { //initLedger - genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -1523,7 +1523,7 @@ func TestListAssetsAndApplications(t *testing.T) { numElementsPerSegement := 10 // This is multiplied by 10. see randomCreatables //initLedger - genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, _ := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 100) const inMem = true log := logging.TestingLog(t) cfg := config.GetDefaultLocal() @@ -1580,7 +1580,7 @@ func TestListAssetsAndApplications(t *testing.T) { func TestLedgerMemoryLeak(t *testing.T) { t.Skip() // for manual runs only dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) - genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion) + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusCurrentVersion, 10000000000) const inMem = false cfg := config.GetDefaultLocal() cfg.Archival = true From cee0bec485f0b677a4566ab6cf69d2eeaf5dca59 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Thu, 22 Apr 2021 23:57:24 -0400 Subject: [PATCH 187/215] Elimitate empty AccountData entries from DB (#2093) Delete empty entries, update merkleTrie hashes. 
--- ledger/accountdb.go | 41 +++++++++++++++++++++++++++++- ledger/acctupdates.go | 59 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 99 insertions(+), 1 deletion(-) diff --git a/ledger/accountdb.go b/ledger/accountdb.go index fd33308853..a7d3aae4fd 100644 --- a/ledger/accountdb.go +++ b/ledger/accountdb.go @@ -114,7 +114,7 @@ var accountsResetExprs = []string{ // accountDBVersion is the database version that this binary would know how to support and how to upgrade to. // details about the content of each of the versions can be found in the upgrade functions upgradeDatabaseSchemaXXXX // and their descriptions. -var accountDBVersion = int32(4) +var accountDBVersion = int32(5) // persistedAccountData is used for representing a single account stored on the disk. In addition to the // basics.AccountData, it also stores complete referencing information used to maintain the base accounts @@ -631,6 +631,45 @@ func accountsAddNormalizedBalance(tx *sql.Tx, proto config.ConsensusParams) erro return rows.Err() } +// removeEmptyAccountData removes empty AccountData msgp-encoded entries from accountbase table +// and optionally returns list of addresses that were eliminated +func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, addresses []basics.Address, err error) { + if queryAddresses { + rows, err := tx.Query("SELECT address FROM accountbase where length(data) = 1 and data = x'80'") // empty AccountData is 0x80 + if err != nil { + return 0, nil, err + } + defer rows.Close() + + for rows.Next() { + var addrbuf []byte + err = rows.Scan(&addrbuf) + if err != nil { + return 0, nil, err + } + var addr basics.Address + if len(addrbuf) != len(addr) { + err = fmt.Errorf("Account DB address length mismatch: %d != %d", len(addrbuf), len(addr)) + return 0, nil, err + } + copy(addr[:], addrbuf) + addresses = append(addresses, addr) + } + } + + result, err := tx.Exec("DELETE from accountbase where length(data) = 1 and data = x'80'") + if err != nil { + 
return 0, nil, err + } + num, err = result.RowsAffected() + if err != nil { + // something wrong on getting rows count but data deleted, ignore the error + num = int64(len(addresses)) + err = nil + } + return num, addresses, err +} + // accountDataToOnline returns the part of the AccountData that matters // for online accounts (to answer top-N queries). We store a subset of // the full AccountData because we need to store a large number of these diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 9c6f15e04c..d5a3cc6628 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1232,6 +1232,12 @@ func (au *accountUpdates) accountsInitialize(ctx context.Context, tx *sql.Tx) (b au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 3 : %v", err) return 0, err } + case 4: + dbVersion, err = au.upgradeDatabaseSchema4(ctx, tx) + if err != nil { + au.log.Warnf("accountsInitialize failed to upgrade accounts database (ledger.tracker.sqlite) from schema 4 : %v", err) + return 0, err + } default: return 0, fmt.Errorf("accountsInitialize unable to upgrade database from schema version %d", dbVersion) } @@ -1487,6 +1493,59 @@ func (au *accountUpdates) upgradeDatabaseSchema3(ctx context.Context, tx *sql.Tx return 4, nil } +// upgradeDatabaseSchema4 does not change the schema but migrates data: +// remove empty AccountData entries from accountbase table +func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx) (updatedDBVersion int32, err error) { + + queryAddresses := au.catchpointInterval != 0 + numDeleted, addresses, err := removeEmptyAccountData(tx, queryAddresses) + if err != nil { + return 0, err + } + + if queryAddresses { + mc, err := MakeMerkleCommitter(tx, false) + if err != nil { + // at this point record deleted and DB is pruned for account data + // if hash deletion fails just log it and do not about startup + au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle 
committer: %v", err) + goto done + } + trie, err := merkletrie.MakeTrie(mc, TrieMemoryConfig) + if err != nil { + au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle trie: %v", err) + goto done + } + + var totalHashesDeleted int + for _, addr := range addresses { + hash := accountHashBuilder(addr, basics.AccountData{}, []byte{0x80}) + deleted, err := trie.Delete(hash) + if err != nil { + au.log.Errorf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v: %v", hex.EncodeToString(hash), addr, err) + } else { + if !deleted { + au.log.Warnf("upgradeDatabaseSchema4: failed to delete hash '%s' from merkle trie for account %v", hex.EncodeToString(hash), addr) + } else { + totalHashesDeleted++ + } + } + } + trie.Commit() + au.log.Infof("upgradeDatabaseSchema4: deleted %d hashes", totalHashesDeleted) + } + +done: + au.log.Infof("upgradeDatabaseSchema4: deleted %d rows", numDeleted) + + // update version + _, err = db.SetUserVersion(ctx, tx, 5) + if err != nil { + return 0, fmt.Errorf("accountsInitialize unable to update database schema version from 4 to 5: %v", err) + } + return 5, nil +} + // deleteStoredCatchpoints iterates over the storedcatchpoints table and deletes all the files stored on disk. // once all the files have been deleted, it would go ahead and remove the entries from the table. func (au *accountUpdates) deleteStoredCatchpoints(ctx context.Context, dbQueries *accountsDbQueries) (err error) { From 0b885fe31be3cc0188e0262ea4653ecac5060a9c Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 23 Apr 2021 14:33:22 -0400 Subject: [PATCH 188/215] ledger: fix consensus version flushes incorrectly (#2096) Make sure mutex is released in commitRound. provide correct handling for zero-offset changes. 
--- ledger/acctupdates.go | 39 ++++++++++++++++++++++++++------------ ledger/acctupdates_test.go | 28 +++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 12 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index d5a3cc6628..ddbd1ef7f4 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -712,18 +712,7 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b offset = uint64(newBase - au.dbRound) - // check if this update chunk spans across multiple consensus versions. If so, break it so that each update would tackle only a single - // consensus version. - if au.versions[1] != au.versions[offset] { - // find the tip point. - tipPoint := sort.Search(int(offset), func(i int) bool { - // we're going to search here for version inequality, with the assumption that consensus versions won't repeat. - // that allow us to support [ver1, ver1, ..., ver2, ver2, ..., ver3, ver3] but not [ver1, ver1, ..., ver2, ver2, ..., ver1, ver3]. - return au.versions[1] != au.versions[1+i] - }) - // no need to handle the case of "no found", or tipPoint==int(offset), since we already know that it's there. - offset = uint64(tipPoint) - } + offset = au.consecutiveVersion(offset) // check to see if this is a catchpoint round isCatchpointRound = ((offset + uint64(lookback+au.dbRound)) > 0) && (au.catchpointInterval != 0) && (0 == (uint64((offset + uint64(lookback+au.dbRound))) % au.catchpointInterval)) @@ -759,6 +748,22 @@ func (au *accountUpdates) committedUpTo(committedRound basics.Round) (retRound b return } +func (au *accountUpdates) consecutiveVersion(offset uint64) uint64 { + // check if this update chunk spans across multiple consensus versions. If so, break it so that each update would tackle only a single + // consensus version. + if au.versions[1] != au.versions[offset] { + // find the tip point. 
+ tipPoint := sort.Search(int(offset), func(i int) bool { + // we're going to search here for version inequality, with the assumption that consensus versions won't repeat. + // that allow us to support [ver1, ver1, ..., ver2, ver2, ..., ver3, ver3] but not [ver1, ver1, ..., ver2, ver2, ..., ver1, ver3]. + return au.versions[1] != au.versions[1+i] + }) + // no need to handle the case of "no found", or tipPoint==int(offset), since we already know that it's there. + offset = uint64(tipPoint) + } + return offset +} + // newBlock is the accountUpdates implementation of the ledgerTracker interface. This is the "external" facing function // which invokes the internal implementation after taking the lock. func (au *accountUpdates) newBlock(blk bookkeeping.Block, delta ledgercore.StateDelta) { @@ -2045,6 +2050,15 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb // adjust the offset according to what happened meanwhile.. offset -= uint64(au.dbRound - dbRound) + + // if this iteration need to flush out zero rounds, just return right away. + // this usecase can happen when two subsequent calls to committedUpTo concludes that the same rounds range need to be + // flush, without the commitRound have a chance of committing these rounds. + if offset == 0 { + au.accountsMu.RUnlock() + return + } + dbRound = au.dbRound newBase := basics.Round(offset) + dbRound @@ -2061,6 +2075,7 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb // verify version correctness : all the entries in the au.versions[1:offset+1] should have the *same* version, and the committedUpTo should be enforcing that. 
if au.versions[1] != au.versions[offset] { + au.accountsMu.RUnlock() au.log.Errorf("attempted to commit series of rounds with non-uniform consensus versions") return } diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index f6b422e5ec..9657d9ce01 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -1892,3 +1892,31 @@ func TestSplittingConsensusVersionCommitsBoundry(t *testing.T) { au.waitAccountsWriting() require.Equal(t, basics.Round(initialRounds+2*extraRounds), au.dbRound) } + +// TestConsecutiveVersion tests the consecutiveVersion method correctness. +func TestConsecutiveVersion(t *testing.T) { + var au accountUpdates + au.versions = []protocol.ConsensusVersion{ + protocol.ConsensusV19, + protocol.ConsensusV20, + protocol.ConsensusV20, + protocol.ConsensusV20, + protocol.ConsensusV20, + protocol.ConsensusV21, + protocol.ConsensusV21, + protocol.ConsensusV21, + protocol.ConsensusV21, + protocol.ConsensusV21, + protocol.ConsensusV21, + protocol.ConsensusV22, + } + for offset := uint64(1); offset < uint64(len(au.versions)); offset++ { + co := au.consecutiveVersion(offset) + require.Equal(t, au.versions[1], au.versions[co]) + } + au.versions = []protocol.ConsensusVersion{ + protocol.ConsensusV19, + protocol.ConsensusV20, + protocol.ConsensusV21, + } +} From ba97924b8a400cb18e191317f4d2be3a696a6354 Mon Sep 17 00:00:00 2001 From: shiqizng <80276844+shiqizng@users.noreply.github.com> Date: Fri, 23 Apr 2021 14:48:35 -0400 Subject: [PATCH 189/215] Copy database files to nodes (#2094) This PR enables database files generation in create_and_deploy_recipe.sh and upload them to S3, and copies these files to all nodes during algonet apply update when available. 
--- cmd/netgoal/network.go | 9 ++-- netdeploy/remote/nodecfg/nodeConfigurator.go | 44 +++++++++++++++++--- scripts/create_and_deploy_recipe.sh | 6 ++- 3 files changed, 49 insertions(+), 10 deletions(-) diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go index 199460c249..05726f8397 100644 --- a/cmd/netgoal/network.go +++ b/cmd/netgoal/network.go @@ -142,16 +142,19 @@ func runBuildNetwork() (err error) { net.GenesisData.VersionModifier = networkGenesisVersionModifier } - if bootstrapLoadingFile { - fileTemplate, err := remote.LoadBootstrappedData(resolveFile(r.BootstrappedFile, templateBaseDir)) + bootstrappedFile := resolveFile(r.BootstrappedFile, templateBaseDir) + if util.FileExists(bootstrappedFile) && bootstrapLoadingFile { + fileTemplate, err := remote.LoadBootstrappedData(bootstrappedFile) if err != nil { return fmt.Errorf("error resolving bootstrap file: %v", err) } net.BootstrappedNet = fileTemplate + net.SetUseBoostrappedFiles(bootstrapLoadingFile) + } else { + net.SetUseBoostrappedFiles(false) } net.SetUseExistingGenesisFiles(networkUseGenesisFiles) - net.SetUseBoostrappedFiles(bootstrapLoadingFile) err = net.Validate(buildConfig, networkRootDir) if err != nil { return fmt.Errorf("error validating Network Config file: %v", err) diff --git a/netdeploy/remote/nodecfg/nodeConfigurator.go b/netdeploy/remote/nodecfg/nodeConfigurator.go index 09789cba44..acca0a5929 100644 --- a/netdeploy/remote/nodecfg/nodeConfigurator.go +++ b/netdeploy/remote/nodecfg/nodeConfigurator.go @@ -33,12 +33,14 @@ import ( ) type nodeConfigurator struct { - config remote.HostConfig - dnsName string - genesisFile string - genesisData bookkeeping.Genesis - relayEndpoints []srvEntry - metricsEndpoints []srvEntry + config remote.HostConfig + dnsName string + genesisFile string + genesisData bookkeeping.Genesis + bootstrappedBlockFile string + bootstrappedTrackerFile string + relayEndpoints []srvEntry + metricsEndpoints []srvEntry } type srvEntry struct { @@ -63,6 +65,19 @@ func 
ApplyConfigurationToHost(cfg remote.HostConfig, rootConfigDir, rootNodeDir // Copy node directories from configuration folder to the rootNodeDir // Then configure func (nc *nodeConfigurator) apply(rootConfigDir, rootNodeDir string) (err error) { + + blockFile := filepath.Join(rootConfigDir, "genesisdata", "bootstrapped.block.sqlite") + blockFileExists := util.FileExists(blockFile) + if blockFileExists { + nc.bootstrappedBlockFile = blockFile + } + + trackerFile := filepath.Join(rootConfigDir, "genesisdata", "bootstrapped.tracker.sqlite") + trackerFileExists := util.FileExists(trackerFile) + if trackerFileExists { + nc.bootstrappedTrackerFile = trackerFile + } + nc.genesisFile = filepath.Join(rootConfigDir, "genesisdata", config.GenesisJSONFile) nc.genesisData, err = bookkeeping.LoadGenesisFromFile(nc.genesisFile) nodeDirs, err := nc.prepareNodeDirs(nc.config.Nodes, rootConfigDir, rootNodeDir) @@ -134,6 +149,23 @@ func (nc *nodeConfigurator) prepareNodeDirs(configs []remote.NodeConfig, rootCon return } + // Copy the bootstrapped files into current ledger folder + if nc.bootstrappedBlockFile != "" && nc.bootstrappedTrackerFile != "" { + fmt.Fprintf(os.Stdout, "... copying block database file to ledger folder ...\n") + dest := filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.block.sqlite", config.LedgerFilenamePrefix)) + _, err = util.CopyFile(nc.bootstrappedBlockFile, dest) + if err != nil { + return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", "bootstrapped.block.sqlite", filepath.Dir(nc.bootstrappedBlockFile), dest, err) + } + fmt.Fprintf(os.Stdout, "... 
copying tracker database file to ledger folder ...\n") + dest = filepath.Join(nodeDest, genesisDir, fmt.Sprintf("%s.tracker.sqlite", config.LedgerFilenamePrefix)) + _, err = util.CopyFile(nc.bootstrappedTrackerFile, dest) + if err != nil { + return nil, fmt.Errorf("failed to copy database file %s from %s to %s : %w", "bootstrapped.tracker.sqlite", filepath.Dir(nc.bootstrappedBlockFile), dest, err) + } + + } + nodeDirs = append(nodeDirs, nodeDir{ NodeConfig: node, dataDir: nodeDest, diff --git a/scripts/create_and_deploy_recipe.sh b/scripts/create_and_deploy_recipe.sh index 9a0e01c982..4c55edaf01 100755 --- a/scripts/create_and_deploy_recipe.sh +++ b/scripts/create_and_deploy_recipe.sh @@ -40,6 +40,7 @@ FORCE_OPTION="" SCHEMA_MODIFIER="" BUCKET="" SKIP_BUILD="" +BOOTSTRAP="" while [ "$1" != "" ]; do case "$1" in @@ -73,6 +74,9 @@ while [ "$1" != "" ]; do shift BUCKET="$1" ;; + --skip-dbfiles) + BOOTSTRAP="false" + ;; --skip-build) SKIP_BUILD="true" ;; @@ -108,7 +112,7 @@ if [[ "${SKIP_BUILD}" != "true" || ! -f ${GOPATH}/bin/netgoal ]]; then fi # Generate the nodecfg package directory -${GOPATH}/bin/netgoal build -r "${ROOTDIR}" -n "${NETWORK}" --recipe "${RECIPEFILE}" "${FORCE_OPTION}" -m "${SCHEMA_MODIFIER}" +${GOPATH}/bin/netgoal build -r "${ROOTDIR}" -n "${NETWORK}" --recipe "${RECIPEFILE}" "${FORCE_OPTION}" -m "${SCHEMA_MODIFIER}" -b=${BOOTSTRAP:-true} # Package and upload the config package export S3_RELEASE_BUCKET="${S3_RELEASE_BUCKET}" From c39f91ac01d25aaa7fc13648419d0f5638ea070a Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Sat, 24 Apr 2021 08:53:11 -0400 Subject: [PATCH 190/215] Make sure to exit with non-zero when input go test fails. (#2100) Exit from the logfilter process with non-zero output when "FAIL" was detected on input stream. This allows us to pipe the input of the test without using -o pipefail. 
--- debug/logfilter/main.go | 7 ++++++- debug/logfilter/main_test.go | 6 +++++- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/debug/logfilter/main.go b/debug/logfilter/main.go index 78ac1222e4..914d9b90bc 100644 --- a/debug/logfilter/main.go +++ b/debug/logfilter/main.go @@ -35,6 +35,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { tests := make(map[string]test) currentTestName := "" + incomingFails := false // packageOutputBuffer is used to buffer messages that are package-oriented. i.e. TestMain() generated messages, // which are called before any test starts to run. packageOutputBuffer := "" @@ -83,6 +84,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { continue } if idx := strings.Index(line, "--- FAIL:"); idx >= 0 { + incomingFails = true var testName string fmt.Sscanf(line[idx:], "--- FAIL: %s", &testName) test, have := tests[testName] @@ -114,6 +116,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { continue } if strings.HasPrefix(line, "FAIL ") { + incomingFails = true if len(packageOutputBuffer) > 0 { fmt.Fprintf(outFile, line+"...\r\n%s\r\n", packageOutputBuffer) } @@ -130,7 +133,9 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { fmt.Fprint(outFile, tests[currentTestName].outputBuffer) } fmt.Fprintf(outFile, "logfilter: the following error received on the input stream : %v\r\n", scannerErr) - return 0 + } + if incomingFails { + return 1 } return 0 } diff --git a/debug/logfilter/main_test.go b/debug/logfilter/main_test.go index be173271cb..2697a88c0f 100644 --- a/debug/logfilter/main_test.go +++ b/debug/logfilter/main_test.go @@ -48,12 +48,16 @@ func TestLogFilterExamples(t *testing.T) { expectedOutFile := strings.Replace(exampleFileName, ".in", ".out.expected", 1) expectedOutBytes, err := ioutil.ReadFile(expectedOutFile) require.NoError(t, err) + expectedErrorCode := 0 + if strings.Contains(string(expectedOutBytes), "FAIL") { + expectedErrorCode = 1 + } inFile, err := 
os.Open(exampleFileName) require.NoError(t, err) writingBuffer := bytes.NewBuffer(nil) errCode := logFilter(inFile, writingBuffer) - require.Zero(t, errCode) + require.Equal(t, expectedErrorCode, errCode) require.Equal(t, string(expectedOutBytes), writingBuffer.String()) } } From 5bcbd7e33b8f213bdc39f1c2c1a7cca1fe7b9dcc Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 26 Apr 2021 10:31:23 -0400 Subject: [PATCH 191/215] ledger: improve error testing on database migration 4 (#2103) Improve error test conditions during database migration 4. --- ledger/accountdb.go | 5 +++++ ledger/acctupdates.go | 8 ++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/ledger/accountdb.go b/ledger/accountdb.go index a7d3aae4fd..d9e96dd444 100644 --- a/ledger/accountdb.go +++ b/ledger/accountdb.go @@ -655,6 +655,11 @@ func removeEmptyAccountData(tx *sql.Tx, queryAddresses bool) (num int64, address copy(addr[:], addrbuf) addresses = append(addresses, addr) } + + // if the above loop was abrupted by an error, test it now. 
+ if err = rows.Err(); err != nil { + return 0, nil, err + } } result, err := tx.Exec("DELETE from accountbase where length(data) = 1 and data = x'80'") diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index ddbd1ef7f4..75bb77b2a1 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1508,7 +1508,7 @@ func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx return 0, err } - if queryAddresses { + if queryAddresses && len(addresses) > 0 { mc, err := MakeMerkleCommitter(tx, false) if err != nil { // at this point record deleted and DB is pruned for account data @@ -1536,7 +1536,11 @@ func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx } } } - trie.Commit() + + if _, err = trie.Commit(); err != nil { + au.log.Errorf("upgradeDatabaseSchema4: failed to commit changes to merkle trie: %v", err) + } + au.log.Infof("upgradeDatabaseSchema4: deleted %d hashes", totalHashesDeleted) } From 6917c8799aa854ea1bce02f31590c31280834a5a Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 26 Apr 2021 11:40:39 -0400 Subject: [PATCH 192/215] Fix accessing int64 atomic variables on arm32 (#2102) Fix accessing int64 atomic variables on arm32, by aligning variables on 64-bit aligned address. --- crypto/merklearray/worker.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/crypto/merklearray/worker.go b/crypto/merklearray/worker.go index f890ac816e..67f6aa0c50 100644 --- a/crypto/merklearray/worker.go +++ b/crypto/merklearray/worker.go @@ -25,6 +25,11 @@ import ( // workerState describes a group of goroutines processing a sequential list // of maxidx elements starting from 0. type workerState struct { + // maxidx is the total number of elements to process, and nextidx + // is the next element that a worker should process. + maxidx uint64 + nextidx uint64 + // nworkers is the number of workers that can be started. 
// This field gets decremented once workers are launched, // and represents the number of remaining workers that can @@ -43,11 +48,6 @@ type workerState struct { // wg tracks outstanding workers, to determine when all workers // have finished their processing. wg sync.WaitGroup - - // maxidx is the total number of elements to process, and nextidx - // is the next element that a worker should process. - maxidx uint64 - nextidx uint64 } func newWorkerState(max uint64) *workerState { From 0a84ba97e021302ebbd94700d77465b2c92bf70e Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 26 Apr 2021 12:01:54 -0400 Subject: [PATCH 193/215] ledger: compatibility mode for account index calculation in eval delta (#2095) roundCowState now has compatibility mode accountIdx param is propagated from logic to GetKey/SetKey/DelKey Logic: - store account index for used in GetKey for this app - in SetKey/DelKey use store either the index from GetKey if exist or a current account index - if in compatibility mode, BuildEvalDelta uses indices mentioned above otherwise it calculates account index using `txn.Accounts` --- data/transactions/logic/eval.go | 12 +- data/transactions/logic/evalStateful_test.go | 6 +- ledger/appcow.go | 73 ++++++-- ledger/appcow_test.go | 167 ++++++++++++++--- ledger/applications.go | 24 +-- ledger/applications_test.go | 182 ++++++++++++++++++- ledger/cow.go | 30 ++- ledger/cow_test.go | 2 +- ledger/eval.go | 2 +- 9 files changed, 417 insertions(+), 81 deletions(-) diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index fd69df15cc..96e1fd5d4d 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -134,9 +134,9 @@ type LedgerForLogic interface { CreatorAddress() basics.Address OptedIn(addr basics.Address, appIdx basics.AppIndex) (bool, error) - GetLocal(addr basics.Address, appIdx basics.AppIndex, key string) (value basics.TealValue, exists bool, err 
error) - SetLocal(addr basics.Address, key string, value basics.TealValue) error - DelLocal(addr basics.Address, key string) error + GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (value basics.TealValue, exists bool, err error) + SetLocal(addr basics.Address, key string, value basics.TealValue, accountIdx uint64) error + DelLocal(addr basics.Address, key string, accountIdx uint64) error GetGlobal(appIdx basics.AppIndex, key string) (value basics.TealValue, exists bool, err error) SetGlobal(key string, value basics.TealValue) error @@ -2076,7 +2076,7 @@ func (cx *evalContext) appReadLocalKey(appIdx uint64, accountIdx uint64, key str if err != nil { return basics.TealValue{}, false, err } - return cx.Ledger.GetLocal(addr, basics.AppIndex(appIdx), key) + return cx.Ledger.GetLocal(addr, basics.AppIndex(appIdx), key, accountIdx) } // appWriteLocalKey writes value to local key/value cow @@ -2086,7 +2086,7 @@ func (cx *evalContext) appWriteLocalKey(accountIdx uint64, key string, tv basics if err != nil { return err } - return cx.Ledger.SetLocal(addr, key, tv) + return cx.Ledger.SetLocal(addr, key, tv, accountIdx) } // appDeleteLocalKey deletes a value from the key/value cow @@ -2096,7 +2096,7 @@ func (cx *evalContext) appDeleteLocalKey(accountIdx uint64, key string) error { if err != nil { return err } - return cx.Ledger.DelLocal(addr, key) + return cx.Ledger.DelLocal(addr, key, accountIdx) } func (cx *evalContext) appReadGlobalKey(foreignAppsIndex uint64, key string) (basics.TealValue, bool, error) { diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index af19c586ef..d5f262b3ac 100644 --- a/data/transactions/logic/evalStateful_test.go +++ b/data/transactions/logic/evalStateful_test.go @@ -258,7 +258,7 @@ func (l *testLedger) DelGlobal(key string) error { return nil } -func (l *testLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string) (basics.TealValue, bool, 
error) { +func (l *testLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) { if appIdx == 0 { appIdx = l.appID } @@ -285,7 +285,7 @@ func (l *testLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key s return val, ok, nil } -func (l *testLedger) SetLocal(addr basics.Address, key string, value basics.TealValue) error { +func (l *testLedger) SetLocal(addr basics.Address, key string, value basics.TealValue, accountIdx uint64) error { appIdx := l.appID br, ok := l.balances[addr] @@ -313,7 +313,7 @@ func (l *testLedger) SetLocal(addr basics.Address, key string, value basics.Teal return nil } -func (l *testLedger) DelLocal(addr basics.Address, key string) error { +func (l *testLedger) DelLocal(addr basics.Address, key string, accountIdx uint64) error { appIdx := l.appID br, ok := l.balances[addr] diff --git a/ledger/appcow.go b/ledger/appcow.go index 96262c3d2a..f0904a6ccf 100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -92,10 +92,15 @@ type storageDelta struct { kvCow stateDelta counts, maxCounts *basics.StateSchema + + // account index for an address that was first referenced as in app_local_get/app_local_put/app_local_del + // this is for backward compatibility with original implementation of applications + // it is set only once on storageDelta creation and used only for local delta generation + accountIdx uint64 } // ensureStorageDelta finds existing or allocate a new storageDelta for given {addr, aidx, global} -func (cb *roundCowState) ensureStorageDelta(addr basics.Address, aidx basics.AppIndex, global bool, defaultAction storageAction) (*storageDelta, error) { +func (cb *roundCowState) ensureStorageDelta(addr basics.Address, aidx basics.AppIndex, global bool, defaultAction storageAction, accountIdx uint64) (*storageDelta, error) { // If we already have a storageDelta, return it aapp := storagePtr{aidx, global} lsd, ok := cb.sdeltas[addr][aapp] @@ -122,6 +127,17 @@ func 
(cb *roundCowState) ensureStorageDelta(addr basics.Address, aidx basics.App maxCounts: &maxCounts, } + if cb.compatibilityMode && !global { + lsd.accountIdx = accountIdx + + // if there was previous getKey call for this app and address, use that index instead + if s, ok := cb.compatibilityGetKeyCache[addr]; ok { + if idx, ok := s[aapp]; ok { + lsd.accountIdx = idx + } + } + } + _, ok = cb.sdeltas[addr] if !ok { cb.sdeltas[addr] = make(map[storagePtr]*storageDelta) @@ -217,7 +233,7 @@ func (cb *roundCowState) Allocate(addr basics.Address, aidx basics.AppIndex, glo return err } - lsd, err := cb.ensureStorageDelta(addr, aidx, global, allocAction) + lsd, err := cb.ensureStorageDelta(addr, aidx, global, allocAction, 0) if err != nil { return err } @@ -240,7 +256,7 @@ func (cb *roundCowState) Deallocate(addr basics.Address, aidx basics.AppIndex, g return err } - lsd, err := cb.ensureStorageDelta(addr, aidx, global, deallocAction) + lsd, err := cb.ensureStorageDelta(addr, aidx, global, deallocAction, 0) if err != nil { return err } @@ -253,13 +269,13 @@ func (cb *roundCowState) Deallocate(addr basics.Address, aidx basics.AppIndex, g } // GetKey looks for a key in {addr, aidx, global} storage -func (cb *roundCowState) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) { - return cb.getKey(addr, aidx, global, key) +func (cb *roundCowState) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) { + return cb.getKey(addr, aidx, global, key, accountIdx) } // getKey looks for a key in {addr, aidx, global} storage // This is hierarchical lookup: if the key not in this cow cache, then request parent and all way down to ledger -func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) { +func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key 
string, accountIdx uint64) (basics.TealValue, bool, error) { // Check that account has allocated storage allocated, err := cb.allocated(addr, aidx, global) if err != nil { @@ -287,13 +303,28 @@ func (cb *roundCowState) getKey(addr basics.Address, aidx basics.AppIndex, globa } } + if cb.compatibilityMode && !global { + // if fetching a key first time for this app, + // cache account index, and use it later on lsd allocation + s, ok := cb.compatibilityGetKeyCache[addr] + if !ok { + s = map[storagePtr]uint64{{aidx, global}: accountIdx} + cb.compatibilityGetKeyCache[addr] = s + } else { + if _, ok := s[storagePtr{aidx, global}]; !ok { + s[storagePtr{aidx, global}] = accountIdx + cb.compatibilityGetKeyCache[addr] = s + } + } + } + // At this point, we know we're allocated, and we don't have a delta, // so we should check our parent. - return cb.lookupParent.getKey(addr, aidx, global, key) + return cb.lookupParent.getKey(addr, aidx, global, key, accountIdx) } // SetKey creates a new key-value in {addr, aidx, global} storage -func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue) error { +func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error { // Enforce maximum key length if len(key) > cb.proto.MaxAppKeyLen { return fmt.Errorf("key too long: length was %d, maximum is %d", len(key), cb.proto.MaxAppKeyLen) @@ -316,13 +347,13 @@ func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, globa } // Fetch the old value + presence so we know how to update - oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key) + oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key, accountIdx) if err != nil { return err } // Write the value delta associated with this key/value - lsd, err := cb.ensureStorageDelta(addr, aidx, global, remainAllocAction) + lsd, err := cb.ensureStorageDelta(addr, aidx, global, 
remainAllocAction, accountIdx) if err != nil { return err } @@ -347,7 +378,7 @@ func (cb *roundCowState) SetKey(addr basics.Address, aidx basics.AppIndex, globa } // DelKey removes a key from {addr, aidx, global} storage -func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) error { +func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error { // Check that account has allocated storage allocated, err := cb.allocated(addr, aidx, global) if err != nil { @@ -359,13 +390,13 @@ func (cb *roundCowState) DelKey(addr basics.Address, aidx basics.AppIndex, globa } // Fetch the old value + presence so we know how to update counts - oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key) + oldValue, oldOk, err := cb.GetKey(addr, aidx, global, key, accountIdx) if err != nil { return err } // Write the value delta associated with deleting this key - lsd, err := cb.ensureStorageDelta(addr, aidx, global, remainAllocAction) + lsd, err := cb.ensureStorageDelta(addr, aidx, global, remainAllocAction, accountIdx) if err != nil { return nil } @@ -457,19 +488,27 @@ func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions. if evalDelta.LocalDeltas == nil { evalDelta.LocalDeltas = make(map[uint64]basics.StateDelta) } + // It is impossible for there to be more than one local delta for // a particular (address, app ID) in sdeltas, because the appAddr // type consists only of (address, appID, global=false). So if // IndexByAddress is deterministic (and it is), there is no need // to check for duplicates here. 
var addrOffset uint64 - addrOffset, err = txn.IndexByAddress(addr, txn.Sender) - if err != nil { - return basics.EvalDelta{}, err + if cb.compatibilityMode { + addrOffset = sdelta.accountIdx + } else { + addrOffset, err = txn.IndexByAddress(addr, txn.Sender) + if err != nil { + return basics.EvalDelta{}, err + } } d := sdelta.kvCow.serialize() - if !cb.proto.NoEmptyLocalDeltas || len(d) != 0 { + // noEmptyDeltas restricts prodicing empty local deltas in general + // but allows it for a period of time when a buggy version was live + noEmptyDeltas := cb.proto.NoEmptyLocalDeltas || (cb.mods.Hdr.CurrentProtocol == protocol.ConsensusV24) && (cb.mods.Hdr.NextProtocol != protocol.ConsensusV26) + if !noEmptyDeltas || len(d) != 0 { evalDelta.LocalDeltas[addrOffset] = d } } diff --git a/ledger/appcow_test.go b/ledger/appcow_test.go index 162241fb1d..de0cbdc7d3 100644 --- a/ledger/appcow_test.go +++ b/ledger/appcow_test.go @@ -73,7 +73,7 @@ func (ml *emptyLedger) allocated(addr basics.Address, aidx basics.AppIndex, glob return false, nil } -func (ml *emptyLedger) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) { +func (ml *emptyLedger) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) { return basics.TealValue{}, false, nil } @@ -269,7 +269,7 @@ func TestCowStorage(t *testing.T) { actuallyAllocated := st.allocated(aapp) rkey := allKeys[rand.Intn(len(allKeys))] rval := allValues[rand.Intn(len(allValues))] - err := cow.SetKey(addr, sptr.aidx, sptr.global, rkey, rval) + err := cow.SetKey(addr, sptr.aidx, sptr.global, rkey, rval, 0) if actuallyAllocated { require.NoError(t, err) err = st.set(aapp, rkey, rval) @@ -284,7 +284,7 @@ func TestCowStorage(t *testing.T) { if rand.Float32() < 0.25 { actuallyAllocated := st.allocated(aapp) rkey := allKeys[rand.Intn(len(allKeys))] - err := cow.DelKey(addr, sptr.aidx, sptr.global, rkey) + err := 
cow.DelKey(addr, sptr.aidx, sptr.global, rkey, 0) if actuallyAllocated { require.NoError(t, err) err = st.del(aapp, rkey) @@ -326,7 +326,7 @@ func TestCowStorage(t *testing.T) { tval, tok, err := st.get(aapp, key) require.NoError(t, err) - cval, cok, err := cow.GetKey(addr, sptr.aidx, sptr.global, key) + cval, cok, err := cow.GetKey(addr, sptr.aidx, sptr.global, key, 0) require.NoError(t, err) require.Equal(t, tok, cok) require.Equal(t, tval, cval) @@ -407,8 +407,11 @@ func TestCowBuildDelta(t *testing.T) { a.Contains(err.Error(), "could not find offset") a.Empty(ed) - // check pre v27 behavior + // check v26 behavior for empty deltas txn.Sender = sender + cow.mods.Hdr = &bookkeeping.BlockHeader{ + UpgradeState: bookkeeping.UpgradeState{CurrentProtocol: protocol.ConsensusV25}, + } ed, err = cow.BuildEvalDelta(aidx, &txn) a.NoError(err) a.Equal( @@ -419,7 +422,8 @@ func TestCowBuildDelta(t *testing.T) { ed, ) - // check v27 behavior + // check v27 behavior for empty deltas + cow.mods.Hdr = nil cow.proto = config.Consensus[protocol.ConsensusCurrentVersion] ed, err = cow.BuildEvalDelta(aidx, &txn) a.NoError(err) @@ -531,6 +535,65 @@ func TestCowBuildDelta(t *testing.T) { }, ed, ) + + // check pre v26 behavior for account index ordering + txn.Sender = sender + txn.Accounts = append(txn.Accounts, sender) + cow.compatibilityMode = true + cow.compatibilityGetKeyCache = make(map[basics.Address]map[storagePtr]uint64) + cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{ + action: remainAllocAction, + kvCow: stateDelta{ + "key1": valueDelta{ + old: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + new: basics.TealValue{Type: basics.TealUintType, Uint: 2}, + oldExists: true, + newExists: true, + }, + }, + accountIdx: 1, + } + ed, err = cow.BuildEvalDelta(aidx, &txn) + a.NoError(err) + a.Equal( + basics.EvalDelta{ + GlobalDelta: basics.StateDelta(nil), + LocalDeltas: map[uint64]basics.StateDelta{ + 1: { + "key1": basics.ValueDelta{Action: basics.SetUintAction, 
Uint: 2}, + }, + }, + }, + ed, + ) + + // check v27 behavior for account ordering + cow.compatibilityMode = false + cow.sdeltas[sender][storagePtr{aidx, false}] = &storageDelta{ + action: remainAllocAction, + kvCow: stateDelta{ + "key1": valueDelta{ + old: basics.TealValue{Type: basics.TealUintType, Uint: 1}, + new: basics.TealValue{Type: basics.TealUintType, Uint: 2}, + oldExists: true, + newExists: true, + }, + }, + accountIdx: 1, + } + ed, err = cow.BuildEvalDelta(aidx, &txn) + a.NoError(err) + a.Equal( + basics.EvalDelta{ + GlobalDelta: basics.StateDelta(nil), + LocalDeltas: map[uint64]basics.StateDelta{ + 0: { + "key1": basics.ValueDelta{Action: basics.SetUintAction, Uint: 2}, + }, + }, + }, + ed, + ) } func TestCowDeltaSerialize(t *testing.T) { @@ -954,7 +1017,7 @@ func TestCowGetKey(t *testing.T) { c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}}, } - _, ok, err := c.GetKey(addr, aidx, true, "gkey") + _, ok, err := c.GetKey(addr, aidx, true, "gkey", 0) a.Error(err) a.False(ok) a.Contains(err.Error(), "cannot fetch key") @@ -962,10 +1025,10 @@ func TestCowGetKey(t *testing.T) { c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: {storagePtr{aidx, true}: &storageDelta{action: allocAction}}, } - _, ok, err = c.GetKey(addr, aidx, true, "gkey") + _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0) a.NoError(err) a.False(ok) - _, ok, err = c.GetKey(addr, aidx, true, "gkey") + _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0) a.NoError(err) a.False(ok) @@ -978,7 +1041,7 @@ func TestCowGetKey(t *testing.T) { }, }, } - _, ok, err = c.GetKey(addr, aidx, true, "gkey") + _, ok, err = c.GetKey(addr, aidx, true, "gkey", 0) a.NoError(err) a.False(ok) @@ -990,7 +1053,7 @@ func TestCowGetKey(t *testing.T) { }, }, } - val, ok, err := c.GetKey(addr, aidx, true, "gkey") + val, ok, err := c.GetKey(addr, aidx, true, "gkey", 0) a.NoError(err) a.True(ok) a.Equal(tv, val) @@ -1005,14 
+1068,14 @@ func TestCowGetKey(t *testing.T) { }, } - val, ok, err = c.GetKey(addr, aidx, false, "lkey") + val, ok, err = c.GetKey(addr, aidx, false, "lkey", 0) a.NoError(err) a.True(ok) a.Equal(tv, val) // ensure other requests go down to roundCowParent - a.Panics(func() { c.GetKey(getRandomAddress(a), aidx, false, "lkey") }) - a.Panics(func() { c.GetKey(addr, aidx+1, false, "lkey") }) + a.Panics(func() { c.GetKey(getRandomAddress(a), aidx, false, "lkey", 0) }) + a.Panics(func() { c.GetKey(addr, aidx+1, false, "lkey", 0) }) } func TestCowSetKey(t *testing.T) { @@ -1027,14 +1090,14 @@ func TestCowSetKey(t *testing.T) { key := strings.Repeat("key", 100) val := "val" tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val} - err := c.SetKey(addr, aidx, true, key, tv) + err := c.SetKey(addr, aidx, true, key, tv, 0) a.Error(err) a.Contains(err.Error(), "key too long") key = "key" val = strings.Repeat("val", 100) tv = basics.TealValue{Type: basics.TealBytesType, Bytes: val} - err = c.SetKey(addr, aidx, true, key, tv) + err = c.SetKey(addr, aidx, true, key, tv, 0) a.Error(err) a.Contains(err.Error(), "value too long") @@ -1043,7 +1106,7 @@ func TestCowSetKey(t *testing.T) { c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}}, } - err = c.SetKey(addr, aidx, true, key, tv) + err = c.SetKey(addr, aidx, true, key, tv, 0) a.Error(err) a.Contains(err.Error(), "cannot set key") @@ -1059,13 +1122,13 @@ func TestCowSetKey(t *testing.T) { }, }, } - err = c.SetKey(addr, aidx, true, key, tv) + err = c.SetKey(addr, aidx, true, key, tv, 0) a.Error(err) a.Contains(err.Error(), "exceeds schema bytes") counts = basics.StateSchema{NumUint: 1} maxCounts = basics.StateSchema{NumByteSlice: 1} - err = c.SetKey(addr, aidx, true, key, tv) + err = c.SetKey(addr, aidx, true, key, tv, 0) a.Error(err) a.Contains(err.Error(), "exceeds schema integer") @@ -1080,12 +1143,12 @@ func TestCowSetKey(t *testing.T) { }, }, } 
- err = c.SetKey(addr, aidx, true, key, tv) + err = c.SetKey(addr, aidx, true, key, tv, 0) a.NoError(err) counts = basics.StateSchema{NumUint: 1} maxCounts = basics.StateSchema{NumByteSlice: 1, NumUint: 1} - err = c.SetKey(addr, aidx, true, key, tv) + err = c.SetKey(addr, aidx, true, key, tv, 0) a.NoError(err) // check local @@ -1100,12 +1163,58 @@ func TestCowSetKey(t *testing.T) { }, }, } - err = c.SetKey(addr1, aidx, false, key, tv) + err = c.SetKey(addr1, aidx, false, key, tv, 0) a.NoError(err) // ensure other requests go down to roundCowParent - a.Panics(func() { c.SetKey(getRandomAddress(a), aidx, false, key, tv) }) - a.Panics(func() { c.SetKey(addr, aidx+1, false, key, tv) }) + a.Panics(func() { c.SetKey(getRandomAddress(a), aidx, false, key, tv, 0) }) + a.Panics(func() { c.SetKey(addr, aidx+1, false, key, tv, 0) }) +} + +func TestCowAccountIdx(t *testing.T) { + a := require.New(t) + + l := emptyLedger{} + addr := getRandomAddress(a) + aidx := basics.AppIndex(1) + c := getCow([]modsData{ + {addr, basics.CreatableIndex(aidx), basics.AppCreatable}, + }) + c.lookupParent = &l + c.compatibilityMode = true + + key := "key" + val := "val" + + c.sdeltas = make(map[basics.Address]map[storagePtr]*storageDelta) + tv := basics.TealValue{Type: basics.TealBytesType, Bytes: val} + sd, err := c.ensureStorageDelta(addr, aidx, true, remainAllocAction, 123) + a.NoError(err) + a.Equal(uint64(0), sd.accountIdx) + + c.sdeltas = make(map[basics.Address]map[storagePtr]*storageDelta) + sd, err = c.ensureStorageDelta(addr, aidx, false, remainAllocAction, 123) + a.NoError(err) + a.Equal(uint64(123), sd.accountIdx) + + counts := basics.StateSchema{} + maxCounts := basics.StateSchema{} + for _, global := range []bool{false, true} { + c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ + addr: { + storagePtr{aidx, global}: &storageDelta{ + action: allocAction, + kvCow: stateDelta{key: valueDelta{new: tv, newExists: true}}, + counts: &counts, + maxCounts: &maxCounts, + 
accountIdx: 123, + }, + }, + } + sd, err = c.ensureStorageDelta(addr, aidx, global, remainAllocAction, 456) + a.NoError(err) + a.Equal(uint64(123), sd.accountIdx) + } } func TestCowDelKey(t *testing.T) { @@ -1121,7 +1230,7 @@ func TestCowDelKey(t *testing.T) { c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ addr: {storagePtr{aidx, true}: &storageDelta{action: deallocAction}}, } - err := c.DelKey(addr, aidx, true, key) + err := c.DelKey(addr, aidx, true, key, 0) a.Error(err) a.Contains(err.Error(), "cannot del key") @@ -1137,7 +1246,7 @@ func TestCowDelKey(t *testing.T) { }, }, } - err = c.DelKey(addr, aidx, true, key) + err = c.DelKey(addr, aidx, true, key, 0) a.NoError(err) c.sdeltas = map[basics.Address]map[storagePtr]*storageDelta{ @@ -1150,10 +1259,10 @@ func TestCowDelKey(t *testing.T) { }, }, } - err = c.DelKey(addr, aidx, false, key) + err = c.DelKey(addr, aidx, false, key, 0) a.NoError(err) // ensure other requests go down to roundCowParent - a.Panics(func() { c.DelKey(getRandomAddress(a), aidx, false, key) }) - a.Panics(func() { c.DelKey(addr, aidx+1, false, key) }) + a.Panics(func() { c.DelKey(getRandomAddress(a), aidx, false, key, 0) }) + a.Panics(func() { c.DelKey(addr, aidx+1, false, key, 0) }) } diff --git a/ledger/applications.go b/ledger/applications.go index dae09990ff..54d87d6343 100644 --- a/ledger/applications.go +++ b/ledger/applications.go @@ -33,11 +33,11 @@ type logicLedger struct { type cowForLogicLedger interface { Get(addr basics.Address, withPendingRewards bool) (basics.AccountData, error) GetCreator(cidx basics.CreatableIndex, ctype basics.CreatableType) (basics.Address, bool, error) - GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) + GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) BuildEvalDelta(aidx basics.AppIndex, txn *transactions.Transaction) (basics.EvalDelta, error) - SetKey(addr 
basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue) error - DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) error + SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error + DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error round() basics.Round prevTimestamp() int64 @@ -152,19 +152,19 @@ func (al *logicLedger) OptedIn(addr basics.Address, appIdx basics.AppIndex) (boo return al.cow.allocated(addr, appIdx, false) } -func (al *logicLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string) (basics.TealValue, bool, error) { +func (al *logicLedger) GetLocal(addr basics.Address, appIdx basics.AppIndex, key string, accountIdx uint64) (basics.TealValue, bool, error) { if appIdx == basics.AppIndex(0) { appIdx = al.aidx } - return al.cow.GetKey(addr, appIdx, false, key) + return al.cow.GetKey(addr, appIdx, false, key, accountIdx) } -func (al *logicLedger) SetLocal(addr basics.Address, key string, value basics.TealValue) error { - return al.cow.SetKey(addr, al.aidx, false, key, value) +func (al *logicLedger) SetLocal(addr basics.Address, key string, value basics.TealValue, accountIdx uint64) error { + return al.cow.SetKey(addr, al.aidx, false, key, value, accountIdx) } -func (al *logicLedger) DelLocal(addr basics.Address, key string) error { - return al.cow.DelKey(addr, al.aidx, false, key) +func (al *logicLedger) DelLocal(addr basics.Address, key string, accountIdx uint64) error { + return al.cow.DelKey(addr, al.aidx, false, key, accountIdx) } func (al *logicLedger) fetchAppCreator(appIdx basics.AppIndex) (basics.Address, error) { @@ -188,15 +188,15 @@ func (al *logicLedger) GetGlobal(appIdx basics.AppIndex, key string) (basics.Tea if err != nil { return basics.TealValue{}, false, err } - return al.cow.GetKey(addr, appIdx, true, key) + return al.cow.GetKey(addr, appIdx, true, key, 0) 
} func (al *logicLedger) SetGlobal(key string, value basics.TealValue) error { - return al.cow.SetKey(al.creator, al.aidx, true, key, value) + return al.cow.SetKey(al.creator, al.aidx, true, key, value, 0) } func (al *logicLedger) DelGlobal(key string) error { - return al.cow.DelKey(al.creator, al.aidx, true, key) + return al.cow.DelKey(al.creator, al.aidx, true, key, 0) } func (al *logicLedger) GetDelta(txn *transactions.Transaction) (evalDelta basics.EvalDelta, err error) { diff --git a/ledger/applications_test.go b/ledger/applications_test.go index 256b17fc31..5465cdcccc 100644 --- a/ledger/applications_test.go +++ b/ledger/applications_test.go @@ -74,7 +74,7 @@ func (c *mockCowForLogicLedger) GetCreator(cidx basics.CreatableIndex, ctype bas return addr, found, nil } -func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) { +func (c *mockCowForLogicLedger) GetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) { kv, ok := c.stores[storeLocator{addr, aidx, global}] if !ok { return basics.TealValue{}, false, fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global) @@ -87,7 +87,7 @@ func (c *mockCowForLogicLedger) BuildEvalDelta(aidx basics.AppIndex, txn *transa return basics.EvalDelta{}, nil } -func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue) error { +func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, value basics.TealValue, accountIdx uint64) error { kv, ok := c.stores[storeLocator{addr, aidx, global}] if !ok { return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global) @@ -97,7 +97,7 @@ func (c *mockCowForLogicLedger) SetKey(addr basics.Address, aidx basics.AppIndex return nil } -func (c *mockCowForLogicLedger) DelKey(addr 
basics.Address, aidx basics.AppIndex, global bool, key string) error { +func (c *mockCowForLogicLedger) DelKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) error { kv, ok := c.stores[storeLocator{addr, aidx, global}] if !ok { return fmt.Errorf("no store for (%s %d %v) in mock cow", addr.String(), aidx, global) @@ -277,7 +277,7 @@ func TestLogicLedgerGetKey(t *testing.T) { // check local c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}} - val, ok, err = l.GetLocal(addr, aidx, "lkey") + val, ok, err = l.GetLocal(addr, aidx, "lkey", 0) a.NoError(err) a.True(ok) a.Equal(tv, val) @@ -307,7 +307,7 @@ func TestLogicLedgerSetKey(t *testing.T) { // check local c.stores = map[storeLocator]basics.TealKeyValue{{addr, aidx, false}: {"lkey": tv}} - err = l.SetLocal(addr, "lkey", tv2) + err = l.SetLocal(addr, "lkey", tv2, 0) a.NoError(err) } @@ -334,7 +334,7 @@ func TestLogicLedgerDelKey(t *testing.T) { addr1 := getRandomAddress(a) c.stores = map[storeLocator]basics.TealKeyValue{{addr1, aidx, false}: {"lkey": tv}} - err = l.DelLocal(addr1, "lkey") + err = l.DelLocal(addr1, "lkey", 0) a.NoError(err) } @@ -632,7 +632,7 @@ return` a.Contains(genesisInitState.Accounts, userLocal) cfg := config.GetDefaultLocal() - l, err := OpenLedger(logging.Base(), "TestAppAccountDelta", true, genesisInitState, cfg) + l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg) a.NoError(err) defer l.Close() @@ -877,7 +877,7 @@ return` a.Contains(genesisInitState.Accounts, userLocal) cfg := config.GetDefaultLocal() - l, err := OpenLedger(logging.Base(), "TestAppEmptyAccounts", true, genesisInitState, cfg) + l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg) a.NoError(err) defer l.Close() @@ -1024,7 +1024,7 @@ return` a.Contains(genesisInitState.Accounts, userLocal) cfg := config.GetDefaultLocal() - l, err := OpenLedger(logging.Base(), "TestAppEmptyAccounts", true, genesisInitState, cfg) 
+ l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg) a.NoError(err) defer l.Close() @@ -1123,3 +1123,167 @@ return` a.Equal(basics.AccountData{}, pad.accountData) a.Zero(pad.rowid) } + +func TestAppAccountDeltaIndicesCompatibility1(t *testing.T) { + source := `#pragma version 2 +txn ApplicationID +int 0 +== +bnz success +int 0 +byte "lk0" +byte "local0" +app_local_put +int 1 +byte "lk1" +byte "local1" +app_local_put +success: +int 1 +` + // put into sender account as idx 0, expect 0 + testAppAccountDeltaIndicesCompatibility(t, source, 0) +} + +func TestAppAccountDeltaIndicesCompatibility2(t *testing.T) { + source := `#pragma version 2 +txn ApplicationID +int 0 +== +bnz success +int 1 +byte "lk1" +byte "local1" +app_local_put +int 0 +byte "lk0" +byte "local0" +app_local_put +success: +int 1 +` + // put into sender account as idx 1, expect 1 + testAppAccountDeltaIndicesCompatibility(t, source, 1) +} + +func TestAppAccountDeltaIndicesCompatibility3(t *testing.T) { + source := `#pragma version 2 +txn ApplicationID +int 0 +== +bnz success +int 1 +byte "lk" +app_local_get +pop +int 0 +byte "lk0" +byte "local0" +app_local_put +int 1 +byte "lk1" +byte "local1" +app_local_put +success: +int 1 +` + // get sender account as idx 1 but put into sender account as idx 0, expect 1 + testAppAccountDeltaIndicesCompatibility(t, source, 1) +} + +func testAppAccountDeltaIndicesCompatibility(t *testing.T, source string, accountIdx uint64) { + a := require.New(t) + ops, err := logic.AssembleString(source) + a.NoError(err) + a.Greater(len(ops.Program), 1) + program := ops.Program + + // explicitly trigger compatibility mode + proto := config.Consensus[protocol.ConsensusV24] + genesisInitState, initKeys := testGenerateInitState(t, protocol.ConsensusV24, 100) + + creator, err := basics.UnmarshalChecksumAddress("3LN5DBFC2UTPD265LQDP3LMTLGZCQ5M3JV7XTVTGRH5CKSVNQVDFPN6FG4") + a.NoError(err) + userLocal, err := 
basics.UnmarshalChecksumAddress("UL5C6SRVLOROSB5FGAE6TY34VXPXVR7GNIELUB3DD5KTA4VT6JGOZ6WFAY") + a.NoError(err) + + a.Contains(genesisInitState.Accounts, creator) + a.Contains(genesisInitState.Accounts, userLocal) + + cfg := config.GetDefaultLocal() + l, err := OpenLedger(logging.Base(), t.Name(), true, genesisInitState, cfg) + a.NoError(err) + defer l.Close() + + genesisID := t.Name() + txHeader := transactions.Header{ + Sender: creator, + Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2}, + FirstValid: l.Latest() + 1, + LastValid: l.Latest() + 10, + GenesisID: genesisID, + GenesisHash: genesisInitState.GenesisHash, + } + + // create application + approvalProgram := program + clearStateProgram := []byte("\x02") // empty + appCreateFields := transactions.ApplicationCallTxnFields{ + ApprovalProgram: approvalProgram, + ClearStateProgram: clearStateProgram, + GlobalStateSchema: basics.StateSchema{NumByteSlice: 4}, + LocalStateSchema: basics.StateSchema{NumByteSlice: 2}, + } + appCreate := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCreateFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCreate, transactions.ApplyData{}) + a.NoError(err) + + appIdx := basics.AppIndex(1) // first tnx => idx = 1 + + // opt-in + txHeader.Sender = userLocal + appCallFields := transactions.ApplicationCallTxnFields{ + OnCompletion: transactions.OptInOC, + ApplicationID: appIdx, + Accounts: []basics.Address{userLocal}, + } + appCall := transactions.Transaction{ + Type: protocol.ApplicationCallTx, + Header: txHeader, + ApplicationCallTxnFields: appCallFields, + } + err = l.appendUnvalidatedTx(t, genesisInitState.Accounts, initKeys, appCall, transactions.ApplyData{ + EvalDelta: basics.EvalDelta{ + LocalDeltas: map[uint64]basics.StateDelta{ + accountIdx: { + "lk0": basics.ValueDelta{ + Action: basics.SetBytesAction, + Bytes: "local0", + }, + "lk1": basics.ValueDelta{ + Action: 
basics.SetBytesAction, + Bytes: "local1"}, + }, + }, + }, + }) + a.NoError(err) + + // save data into DB and write into local state + l.accts.accountsWriting.Add(1) + l.accts.commitRound(2, 0, 0) + l.reloadLedger() + + // check first write + blk, err := l.Block(2) + a.NoError(err) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas, accountIdx) + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[accountIdx], "lk0") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[accountIdx]["lk0"].Bytes, "local0") + a.Contains(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[accountIdx], "lk1") + a.Equal(blk.Payset[0].ApplyData.EvalDelta.LocalDeltas[accountIdx]["lk1"].Bytes, "local1") +} diff --git a/ledger/cow.go b/ledger/cow.go index 7f5edcfc2d..68d7943412 100644 --- a/ledger/cow.go +++ b/ledger/cow.go @@ -24,6 +24,7 @@ import ( "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger/ledgercore" + "github.com/algorand/go-algorand/protocol" ) // ___________________ @@ -47,7 +48,7 @@ type roundCowParent interface { // and is provided to optimize state schema lookups getStorageLimits(addr basics.Address, aidx basics.AppIndex, global bool) (basics.StateSchema, error) allocated(addr basics.Address, aidx basics.AppIndex, global bool) (bool, error) - getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) + getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) } type roundCowState struct { @@ -61,16 +62,33 @@ type roundCowState struct { // 2. 
Stateful TEAL evaluation (see SetKey/DelKey) // must be incorporated into mods.accts before passing deltas forward sdeltas map[basics.Address]map[storagePtr]*storageDelta + + // whether or not to maintain compatibility with original app refactoring behavior + // this is needed for generating old eval delta in new code + compatibilityMode bool + // cache maintaining accountIdx used in getKey for local keys access + compatibilityGetKeyCache map[basics.Address]map[storagePtr]uint64 } func makeRoundCowState(b roundCowParent, hdr bookkeeping.BlockHeader, prevTimestamp int64, hint int) *roundCowState { - return &roundCowState{ + cb := roundCowState{ lookupParent: b, commitParent: nil, proto: config.Consensus[hdr.CurrentProtocol], mods: ledgercore.MakeStateDelta(&hdr, prevTimestamp, hint, 0), sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta), } + + // compatibilityMode retains producing applications' eval deltas under the following rule: + // local delta has account index as specified in TEAL either in set/del key or prior get key calls. + // The predicate is that complex in order to cover all the blocks seen on testnet and mainnet. 
+ compatibilityMode := (hdr.CurrentProtocol == protocol.ConsensusV24) && + (hdr.NextProtocol != protocol.ConsensusV26 || (hdr.UpgradePropose == "" && hdr.UpgradeApprove == false && hdr.Round < hdr.UpgradeState.NextProtocolVoteBefore)) + if compatibilityMode { + cb.compatibilityMode = true + cb.compatibilityGetKeyCache = make(map[basics.Address]map[storagePtr]uint64) + } + return &cb } func (cb *roundCowState) deltas() ledgercore.StateDelta { @@ -196,13 +214,19 @@ func (cb *roundCowState) setCompactCertNext(rnd basics.Round) { } func (cb *roundCowState) child(hint int) *roundCowState { - return &roundCowState{ + ch := roundCowState{ lookupParent: cb, commitParent: cb, proto: cb.proto, mods: ledgercore.MakeStateDelta(cb.mods.Hdr, cb.mods.PrevTimestamp, hint, cb.mods.CompactCertNext), sdeltas: make(map[basics.Address]map[storagePtr]*storageDelta), } + + if cb.compatibilityMode { + ch.compatibilityMode = cb.compatibilityMode + ch.compatibilityGetKeyCache = make(map[basics.Address]map[storagePtr]uint64) + } + return &ch } func (cb *roundCowState) commitToParent() { diff --git a/ledger/cow_test.go b/ledger/cow_test.go index 307f4f3583..a4df0f1c1f 100644 --- a/ledger/cow_test.go +++ b/ledger/cow_test.go @@ -63,7 +63,7 @@ func (ml *mockLedger) allocated(addr basics.Address, aidx basics.AppIndex, globa return true, nil } -func (ml *mockLedger) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) { +func (ml *mockLedger) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) { return basics.TealValue{}, false, nil } diff --git a/ledger/eval.go b/ledger/eval.go index 398777782c..48e73ac08a 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -132,7 +132,7 @@ func (x *roundCowBase) allocated(addr basics.Address, aidx basics.AppIndex, glob // getKey gets the value for a particular key in some storage // associated with an application globally or locally 
-func (x *roundCowBase) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string) (basics.TealValue, bool, error) { +func (x *roundCowBase) getKey(addr basics.Address, aidx basics.AppIndex, global bool, key string, accountIdx uint64) (basics.TealValue, bool, error) { ad, _, err := x.l.LookupWithoutRewards(x.rnd, addr) if err != nil { return basics.TealValue{}, false, err } From b188af3fa5ff50f30f33a8e1cc572486987224b8 Mon Sep 17 00:00:00 2001 From: Pavel Zbitskiy <65323360+algorandskiy@users.noreply.github.com> Date: Mon, 26 Apr 2021 17:16:55 -0400 Subject: [PATCH 194/215] catchpoint: truncate output file (#2106) The dump utility did not truncate the output dump file, so it might contain some unexpected data. --- cmd/catchpointdump/database.go | 2 +- cmd/catchpointdump/file.go | 2 +- cmd/catchpointdump/net.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/cmd/catchpointdump/database.go b/cmd/catchpointdump/database.go index 1330af7a27..334063fe2c 100644 --- a/cmd/catchpointdump/database.go +++ b/cmd/catchpointdump/database.go @@ -52,7 +52,7 @@ var databaseCmd = &cobra.Command{ outFile := os.Stdout var err error if outFileName != "" { - outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_CREATE, 0755) + outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755) if err != nil { reportErrorf("Unable to create file '%s' : %v", outFileName, err) } diff --git a/cmd/catchpointdump/file.go b/cmd/catchpointdump/file.go index 9201d504cd..15eb857ce1 100644 --- a/cmd/catchpointdump/file.go +++ b/cmd/catchpointdump/file.go @@ -91,7 +91,7 @@ var fileCmd = &cobra.Command{ outFile := os.Stdout if outFileName != "" { - outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_CREATE, 0755) + outFile, err = os.OpenFile(outFileName, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755) if err != nil { reportErrorf("Unable to create file '%s' : %v", outFileName, err) } diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go index 
66fad3cfc2..354d2c360a 100644 --- a/cmd/catchpointdump/net.go +++ b/cmd/catchpointdump/net.go @@ -300,7 +300,7 @@ func makeFileDump(addr string, catchpointFileBytes []byte) error { } dirName := "./" + strings.Split(networkName, ".")[0] + "/" + strings.Split(addr, ".")[0] - outFile, err := os.OpenFile(dirName+"/"+strconv.FormatUint(uint64(round), 10)+".dump", os.O_RDWR|os.O_CREATE, 0755) + outFile, err := os.OpenFile(dirName+"/"+strconv.FormatUint(uint64(round), 10)+".dump", os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0755) if err != nil { return err } From ce994290fe6e9a1a4e608fda2cc013bc2cda3d34 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 26 Apr 2021 17:40:40 -0400 Subject: [PATCH 195/215] fix typos in comments. (#2107) fix typos in comments --- ledger/acctupdates.go | 2 +- ledger/appcow.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 75bb77b2a1..491d94c78e 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -1512,7 +1512,7 @@ func (au *accountUpdates) upgradeDatabaseSchema4(ctx context.Context, tx *sql.Tx mc, err := MakeMerkleCommitter(tx, false) if err != nil { // at this point record deleted and DB is pruned for account data - // if hash deletion fails just log it and do not about startup + // if hash deletion fails just log it and do not abort startup au.log.Errorf("upgradeDatabaseSchema4: failed to create merkle committer: %v", err) goto done } diff --git a/ledger/appcow.go b/ledger/appcow.go index f0904a6ccf..53cb3a91fc 100644 --- a/ledger/appcow.go +++ b/ledger/appcow.go @@ -505,7 +505,7 @@ func (cb *roundCowState) BuildEvalDelta(aidx basics.AppIndex, txn *transactions. 
} d := sdelta.kvCow.serialize() - // noEmptyDeltas restricts prodicing empty local deltas in general + // noEmptyDeltas restricts producing empty local deltas in general but allows it for a period of time when a buggy version was live noEmptyDeltas := cb.proto.NoEmptyLocalDeltas || (cb.mods.Hdr.CurrentProtocol == protocol.ConsensusV24) && (cb.mods.Hdr.NextProtocol != protocol.ConsensusV26) if !noEmptyDeltas || len(d) != 0 { From cdc1ab11e2be744f897fcb718cbcfcac58a574a0 Mon Sep 17 00:00:00 2001 From: shiqizng <80276844+shiqizng@users.noreply.github.com> Date: Mon, 26 Apr 2021 17:46:00 -0400 Subject: [PATCH 196/215] Fix insufficient account with preloaded db files (#2098) Not all accounts from genesis.json were added to the db file when creating the initBlock. This caused pingpong to return the following error: ``` ensure accounts failed no accounts found with sufficient stake (> 100000) Error preparing accounts for transfers: no accounts found with sufficient stake (> 100000) ``` This PR fixes this issue. 
--- netdeploy/remote/deployedNetwork.go | 57 ++++++++++++++--------------- 1 file changed, 28 insertions(+), 29 deletions(-) diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go index 48ff12c157..331a101c0b 100644 --- a/netdeploy/remote/deployedNetwork.go +++ b/netdeploy/remote/deployedNetwork.go @@ -357,34 +357,35 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g return err } - srcWallet := getGenesisAlloc(fileCfgs.SourceWalletName, genesis.Allocation) - if srcWallet.Address == "" { - return fmt.Errorf("error finding source wallet address") - } + var src basics.Address + var addr basics.Address + var poolAddr basics.Address + var sinkAddr basics.Address - rewardsPool := getGenesisAlloc("RewardsPool", genesis.Allocation) - if rewardsPool.Address == "" { - return fmt.Errorf("error finding source rewards ppol address") - } + srcWalletName := strings.ToLower(fileCfgs.SourceWalletName) + + for _, alloc := range genesis.Allocation { + comment := strings.ToLower(alloc.Comment) + addr, err = basics.UnmarshalChecksumAddress(alloc.Address) + if err != nil { + return fmt.Errorf("failed to unmarshal '%s' address '%v' %w", alloc.Comment, alloc.Address, err) + } + switch comment { + case srcWalletName: + src = addr + case "feesink": + poolAddr = addr + case "rewardspool": + sinkAddr = addr + default: + } + + accounts[addr] = alloc.State - feeSink := getGenesisAlloc("FeeSink", genesis.Allocation) - if feeSink.Address == "" { - return fmt.Errorf("error finding fee sink address") - } - src, err := basics.UnmarshalChecksumAddress(srcWallet.Address) - if err != nil { - return fmt.Errorf("failed to unmarshal src address : %w", err) - } - poolAddr, err := basics.UnmarshalChecksumAddress(rewardsPool.Address) - if err != nil { - return fmt.Errorf("failed to unmarshal rewards pool address %w", err) - } - sinkAddr, err := basics.UnmarshalChecksumAddress(feeSink.Address) - if err != nil { - return fmt.Errorf("failed to 
unmarshal fee sink address %w", err) } //initial state + bootstrappedNet := netState{ nAssets: fileCfgs.GeneratedAssetsCount, nApplications: fileCfgs.GeneratedApplicationCount, @@ -393,6 +394,8 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g round: basics.Round(0), genesisID: genesis.ID(), genesisHash: crypto.HashObj(genesis), + poolAddr: poolAddr, + sinkAddr: sinkAddr, } var params config.ConsensusParams @@ -409,16 +412,12 @@ func (cfg DeployedNetwork) GenerateDatabaseFiles(fileCfgs BootstrappedNetwork, g } else { bootstrappedNet.nAccounts = nAccounts } - accounts[poolAddr] = basics.MakeAccountData(basics.NotParticipating, rewardsPool.State.MicroAlgos) - accounts[sinkAddr] = basics.MakeAccountData(basics.NotParticipating, feeSink.State.MicroAlgos) + //fund src account with enough funding bootstrappedNet.fundPerAccount = basics.MicroAlgos{Raw: uint64(bootstrappedNet.nAssets) * params.MinBalance * 2} - totalFunds := srcWallet.State.MicroAlgos.Raw + bootstrappedNet.fundPerAccount.Raw*bootstrappedNet.nAccounts + bootstrappedNet.roundTxnCnt*fileCfgs.NumRounds + totalFunds := accounts[src].MicroAlgos.Raw + bootstrappedNet.fundPerAccount.Raw*bootstrappedNet.nAccounts + bootstrappedNet.roundTxnCnt*fileCfgs.NumRounds accounts[src] = basics.MakeAccountData(basics.Online, basics.MicroAlgos{Raw: totalFunds}) - bootstrappedNet.poolAddr = poolAddr - bootstrappedNet.sinkAddr = sinkAddr - //init block initState, err := generateInitState(accounts, &bootstrappedNet) if err != nil { From b74014d5d83ea3fc80f03914c666877dd6c9f637 Mon Sep 17 00:00:00 2001 From: shiqizng <80276844+shiqizng@users.noreply.github.com> Date: Thu, 29 Apr 2021 19:34:55 -0400 Subject: [PATCH 197/215] Add performance metrics for account updates (#2115) This PR enables metrics logging configurations for account updates' commitRound(...). 
--- config/config.go | 7 + config/local_defaults.go | 2 + installer/config.json.example | 2 + ledger/acctupdates.go | 74 ++++++ logging/telemetryspec/metric.go | 22 ++ test/testdata/configs/config-v17.json | 2 + .../recipes/bootstrappedScenario/net.json | 216 +++++++++--------- .../recipes/bootstrappedScenario/node.json | 4 +- .../recipes/bootstrappedScenario/relay.json | 2 +- .../recipes/scenario1/node.json | 4 +- .../recipes/scenario1/relay.json | 2 +- .../recipes/scenario2/node.json | 2 +- .../recipes/scenario2/relay.json | 2 +- .../recipes/scenario3/node.json | 4 +- .../recipes/scenario3/relay.json | 2 +- 15 files changed, 228 insertions(+), 119 deletions(-) diff --git a/config/config.go b/config/config.go index 775f9ae0f7..1574ddb88d 100644 --- a/config/config.go +++ b/config/config.go @@ -402,6 +402,13 @@ type Local struct { // 12 : perform all validation methods (normal and additional). These extra tests helps to verify the integrity of the compiled executable against // previously used executabled, and would not provide any additional security guarantees. CatchupBlockValidateMode int `version[17]:"0"` + + // Generate AccountUpdates telemetry event + EnableAccountUpdatesStats bool `version[17]:"false"` + + // + // Time interval in ns for generating accountUpdates telemetry event + AccountUpdatesStatsInterval time.Duration `version[17]:"5000000000"` } // Filenames of config files within the configdir (e.g. 
~/.algorand) diff --git a/config/local_defaults.go b/config/local_defaults.go index f186363b93..b84312c1ab 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -21,6 +21,7 @@ package config var defaultLocal = Local{ Version: 17, + AccountUpdatesStatsInterval: 5000000000, AccountsRebuildSynchronousMode: 1, AnnounceParticipationKey: true, Archival: false, @@ -45,6 +46,7 @@ var defaultLocal = Local{ DeadlockDetection: 0, DisableLocalhostConnectionRateLimit: true, DisableOutgoingConnectionThrottling: false, + EnableAccountUpdatesStats: false, EnableAgreementReporting: false, EnableAgreementTimeMetrics: false, EnableAssembleStats: false, diff --git a/installer/config.json.example b/installer/config.json.example index 2397cee029..902065ee78 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -1,5 +1,6 @@ { "Version": 17, + "AccountUpdatesStatsInterval": 5000000000, "AccountsRebuildSynchronousMode": 1, "AnnounceParticipationKey": true, "Archival": false, @@ -24,6 +25,7 @@ "DeadlockDetection": 0, "DisableLocalhostConnectionRateLimit": true, "DisableOutgoingConnectionThrottling": false, + "EnableAccountUpdatesStats": false, "EnableAgreementReporting": false, "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go index 491d94c78e..5dc94b9661 100644 --- a/ledger/acctupdates.go +++ b/ledger/acctupdates.go @@ -249,6 +249,15 @@ type accountUpdates struct { // the synchronous mode that would be used while the accounts database is being rebuilt. 
accountsRebuildSynchronousMode db.SynchronousMode + + // logAccountUpdatesMetrics is a flag for enable/disable metrics logging + logAccountUpdatesMetrics bool + + // logAccountUpdatesInterval sets a time interval for metrics logging + logAccountUpdatesInterval time.Duration + + // lastMetricsLogTime is the time when the previous metrics logging occurred + lastMetricsLogTime time.Time } type deferredCommit struct { @@ -324,6 +333,11 @@ func (au *accountUpdates) initialize(cfg config.Local, dbPathPrefix string, gene au.accountsReadCond = sync.NewCond(au.accountsMu.RLocker()) au.synchronousMode = db.SynchronousMode(cfg.LedgerSynchronousMode) au.accountsRebuildSynchronousMode = db.SynchronousMode(cfg.AccountsRebuildSynchronousMode) + + // log metrics + au.logAccountUpdatesMetrics = cfg.EnableAccountUpdatesStats + au.logAccountUpdatesInterval = cfg.AccountUpdatesStatsInterval + } // loadFromDisk is the 2nd level initialization, and is required before the accountUpdates becomes functional @@ -2034,6 +2048,17 @@ func (au *accountUpdates) commitSyncer(deferedCommits chan deferredCommit) { // commitRound write to the database a "chunk" of rounds, and update the dbRound accordingly. 
func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookback basics.Round) { + var stats telemetryspec.AccountsUpdateMetrics + var updateStats bool + + if au.logAccountUpdatesMetrics { + now := time.Now() + if now.Sub(au.lastMetricsLogTime) >= au.logAccountUpdatesInterval { + updateStats = true + au.lastMetricsLogTime = now + } + } + defer au.accountsWriting.Done() au.accountsMu.RLock() @@ -2116,6 +2141,9 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb start := time.Now() ledgerCommitroundCount.Inc(nil) var updatedPersistedAccounts []persistedAccountData + if updateStats { + stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) + } err := au.dbs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) { treeTargetRound := basics.Round(0) if au.catchpointInterval > 0 { @@ -2138,21 +2166,39 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb db.ResetTransactionWarnDeadline(ctx, tx, time.Now().Add(accountsUpdatePerRoundHighWatermark*time.Duration(offset))) + if updateStats { + stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) + } + err = compactDeltas.accountsLoadOld(tx) if err != nil { return err } + if updateStats { + stats.OldAccountPreloadDuration = time.Duration(time.Now().UnixNano()) - stats.OldAccountPreloadDuration + } + err = totalsNewRounds(tx, deltas[:offset], compactDeltas, roundTotals[1:offset+1], config.Consensus[consensusVersion]) if err != nil { return err } + if updateStats { + stats.MerkleTrieUpdateDuration = time.Duration(time.Now().UnixNano()) + } + err = au.accountsUpdateBalances(compactDeltas) if err != nil { return err } + if updateStats { + now := time.Duration(time.Now().UnixNano()) + stats.MerkleTrieUpdateDuration = now - stats.MerkleTrieUpdateDuration + stats.AccountsWritingDuration = now + } + // the updates of the actual account data is done last since the accountsNewRound would modify the compactDeltas old 
values // so that we can update the base account back. updatedPersistedAccounts, err = accountsNewRound(tx, compactDeltas, compactCreatableDeltas, genesisProto, dbRound+basics.Round(offset)) @@ -2160,6 +2206,10 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb return err } + if updateStats { + stats.AccountsWritingDuration = time.Duration(time.Now().UnixNano()) - stats.AccountsWritingDuration + } + err = updateAccountsRound(tx, dbRound+basics.Round(offset), treeTargetRound) if err != nil { return err @@ -2180,6 +2230,10 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb return } + if updateStats { + stats.DatabaseCommitDuration = time.Duration(time.Now().UnixNano()) - stats.DatabaseCommitDuration - stats.AccountsWritingDuration - stats.MerkleTrieUpdateDuration - stats.OldAccountPreloadDuration + } + if isCatchpointRound { catchpointLabel, err = au.accountsCreateCatchpointLabel(dbRound+basics.Round(offset)+lookback, roundTotals[offset], committedRoundDigest, trieBalancesHash) if err != nil { @@ -2198,6 +2252,9 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb } updatingBalancesDuration := time.Now().Sub(beforeUpdatingBalancesTime) + if updateStats { + stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) + } au.accountsMu.Lock() // Drop reference counts to modified accounts, and evict them // from in-memory cache when no references remain. 
@@ -2250,6 +2307,11 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb au.lastFlushTime = flushTime au.accountsMu.Unlock() + + if updateStats { + stats.MemoryUpdatesDuration = time.Duration(time.Now().UnixNano()) - stats.MemoryUpdatesDuration + } + au.accountsReadCond.Broadcast() if isCatchpointRound && au.archivalLedger && catchpointLabel != "" { @@ -2258,6 +2320,18 @@ func (au *accountUpdates) commitRound(offset uint64, dbRound basics.Round, lookb au.generateCatchpoint(basics.Round(offset)+dbRound+lookback, catchpointLabel, committedRoundDigest, updatingBalancesDuration) } + // log telemetry event + if updateStats { + stats.StartRound = uint64(dbRound) + stats.RoundsCount = offset + stats.UpdatedAccountsCount = uint64(len(updatedPersistedAccounts)) + stats.UpdatedCreatablesCount = uint64(len(compactCreatableDeltas)) + + var details struct { + } + au.log.Metrics(telemetryspec.Accounts, stats, details) + } + } // compactCreatableDeltas takes an array of creatables map deltas ( one array entry per round ), and compact the array into a single diff --git a/logging/telemetryspec/metric.go b/logging/telemetryspec/metric.go index ff82bb5ad4..349d3f9724 100644 --- a/logging/telemetryspec/metric.go +++ b/logging/telemetryspec/metric.go @@ -140,6 +140,28 @@ func (m RoundTimingMetrics) Identifier() Metric { return roundTimingMetricsIdentifier } +//------------------------------------------------------- +// AccountsUpdate +const accountsUpdateMetricsIdentifier Metric = "AccountsUpdate" + +// AccountsUpdateMetrics is the set of metrics captured when we process accountUpdates.commitRound +type AccountsUpdateMetrics struct { + StartRound uint64 + RoundsCount uint64 + OldAccountPreloadDuration time.Duration + MerkleTrieUpdateDuration time.Duration + AccountsWritingDuration time.Duration + DatabaseCommitDuration time.Duration + MemoryUpdatesDuration time.Duration + UpdatedAccountsCount uint64 + UpdatedCreatablesCount uint64 +} + +// Identifier 
implements the required MetricDetails interface, retrieving the Identifier for this set of metrics. +func (m AccountsUpdateMetrics) Identifier() Metric { + return accountsUpdateMetricsIdentifier +} + type transactionProcessingTimeDistibution struct { // 10 buckets: 0-100Kns, 100Kns-200Kns .. 900Kns-1ms // 9 buckets: 1ms-2ms .. 9ms-10ms diff --git a/test/testdata/configs/config-v17.json b/test/testdata/configs/config-v17.json index 2397cee029..902065ee78 100644 --- a/test/testdata/configs/config-v17.json +++ b/test/testdata/configs/config-v17.json @@ -1,5 +1,6 @@ { "Version": 17, + "AccountUpdatesStatsInterval": 5000000000, "AccountsRebuildSynchronousMode": 1, "AnnounceParticipationKey": true, "Archival": false, @@ -24,6 +25,7 @@ "DeadlockDetection": 0, "DisableLocalhostConnectionRateLimit": true, "DisableOutgoingConnectionThrottling": false, + "EnableAccountUpdatesStats": false, "EnableAgreementReporting": false, "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json index b6d25d3804..b971cdce54 100644 --- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/net.json @@ -16,7 +16,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", 
\"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -36,7 +36,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -56,7 +56,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": 
\".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -76,7 +76,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -96,7 +96,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -116,7 +116,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -136,7 +136,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -156,7 +156,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -179,7 +179,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node21", @@ -196,7 +196,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node41", @@ -213,7 +213,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node61", @@ -230,7 +230,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, 
\"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node81", @@ -247,7 +247,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -270,7 +270,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node22", @@ -287,7 +287,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node42", @@ -304,7 +304,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node62", @@ -321,7 +321,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node82", @@ -338,7 +338,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, 
\"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -361,7 +361,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node23", @@ -378,7 +378,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node43", @@ -395,7 +395,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, 
\"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node63", @@ -412,7 +412,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node83", @@ -429,7 +429,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" } ] }, @@ -452,7 +452,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, 
"EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node24", @@ -469,7 +469,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node44", @@ -486,7 +486,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node64", @@ -503,7 +503,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, 
"EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node84", @@ -520,7 +520,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -543,7 +543,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node25", @@ -560,7 +560,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - 
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node45", @@ -577,7 +577,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node65", @@ -594,7 +594,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, 
\"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node85", @@ -611,7 +611,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -634,7 +634,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node26", @@ -651,7 +651,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node46", @@ -668,7 +668,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node66", @@ -685,7 +685,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node86", @@ -702,7 +702,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -725,7 +725,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node27", @@ -742,7 +742,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node47", @@ -759,7 +759,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node67", @@ -776,7 +776,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node87", @@ -793,7 +793,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -816,7 +816,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, 
\"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node28", @@ -833,7 +833,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node48", @@ -850,7 +850,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node68", @@ 
-867,7 +867,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node88", @@ -884,7 +884,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -907,7 +907,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, 
\"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node29", @@ -924,7 +924,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node49", @@ -941,7 +941,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node69", @@ -958,7 +958,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node89", @@ -975,7 +975,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" } ] }, @@ -998,7 +998,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node30", @@ -1015,7 +1015,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 
1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node50", @@ -1032,7 +1032,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node70", @@ -1049,7 +1049,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node90", @@ -1066,7 +1066,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, 
\"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1089,7 +1089,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node31", @@ -1106,7 +1106,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node51", @@ -1123,7 +1123,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": 
\".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node71", @@ -1140,7 +1140,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node91", @@ -1157,7 +1157,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, 
\"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1180,7 +1180,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node32", @@ -1197,7 +1197,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node52", @@ -1214,7 +1214,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": 
true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node72", @@ -1231,7 +1231,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node92", @@ -1248,7 +1248,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1271,7 +1271,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, 
\"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node33", @@ -1288,7 +1288,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node53", @@ -1305,7 +1305,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node73", @@ -1322,7 +1322,7 @@ "MetricsURI": "{{MetricsURI}}", 
"EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node93", @@ -1339,7 +1339,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1362,7 +1362,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node34", @@ -1379,7 +1379,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, 
"EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node54", @@ -1396,7 +1396,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node74", @@ -1413,7 +1413,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", 
\"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node94", @@ -1430,7 +1430,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1453,7 +1453,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node35", @@ -1470,7 +1470,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": 
-1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node55", @@ -1487,7 +1487,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node75", @@ -1504,7 +1504,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node95", @@ -1521,7 +1521,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" 
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" } ] }, @@ -1544,7 +1544,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node36", @@ -1561,7 +1561,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 
0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node56", @@ -1578,7 +1578,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node76", @@ -1595,7 +1595,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node96", @@ -1612,7 +1612,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, 
\"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1635,7 +1635,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node37", @@ -1652,7 +1652,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node57", @@ -1669,7 +1669,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 
0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node77", @@ -1686,7 +1686,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node97", @@ -1703,7 +1703,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1726,7 +1726,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + 
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node38", @@ -1743,7 +1743,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" }, { "Name": "node58", @@ -1760,7 +1760,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node78", @@ -1777,7 +1777,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, 
"EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node98", @@ -1794,7 +1794,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": true, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }" } ] }, @@ -1817,7 +1817,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": 
-1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node39", @@ -1834,7 +1834,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node59", @@ -1851,7 +1851,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node79", @@ -1868,7 +1868,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node99", @@ -1885,7 +1885,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, @@ -1908,7 +1908,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node40", @@ -1925,7 +1925,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node60", @@ -1942,7 +1942,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node80", @@ -1959,7 +1959,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" }, { "Name": "node100", @@ -1976,7 +1976,7 @@ "MetricsURI": "{{MetricsURI}}", "EnableService": false, "EnableBlockStats": false, - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } ] }, diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json index 0788913ab9..412f2937a0 100644 --- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/node.json @@ -5,7 +5,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}", "AltConfigs": [ { "APIToken": "{{APIToken}}", @@ -14,7 +14,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, 
\"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0,\"EnableAccountUpdatesStats\": true }", "FractionApply": 0.2 } ] diff --git a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json index 25bb6b5a26..db8fb939d8 100644 --- a/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json +++ b/test/testdata/deployednettemplates/recipes/bootstrappedScenario/relay.json @@ -7,5 +7,5 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } diff --git a/test/testdata/deployednettemplates/recipes/scenario1/node.json b/test/testdata/deployednettemplates/recipes/scenario1/node.json index 0788913ab9..2d08bcf070 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1/node.json +++ b/test/testdata/deployednettemplates/recipes/scenario1/node.json @@ -5,7 +5,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ 
\"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}", "AltConfigs": [ { "APIToken": "{{APIToken}}", @@ -14,7 +14,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }", "FractionApply": 0.2 } ] diff --git a/test/testdata/deployednettemplates/recipes/scenario1/relay.json b/test/testdata/deployednettemplates/recipes/scenario1/relay.json index 25bb6b5a26..db8fb939d8 100644 --- a/test/testdata/deployednettemplates/recipes/scenario1/relay.json +++ b/test/testdata/deployednettemplates/recipes/scenario1/relay.json @@ -7,5 +7,5 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 
1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } diff --git a/test/testdata/deployednettemplates/recipes/scenario2/node.json b/test/testdata/deployednettemplates/recipes/scenario2/node.json index 6b3849736d..3641b0f6d2 100644 --- a/test/testdata/deployednettemplates/recipes/scenario2/node.json +++ b/test/testdata/deployednettemplates/recipes/scenario2/node.json @@ -5,5 +5,5 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": 
true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}" } diff --git a/test/testdata/deployednettemplates/recipes/scenario2/relay.json b/test/testdata/deployednettemplates/recipes/scenario2/relay.json index 25bb6b5a26..db8fb939d8 100644 --- a/test/testdata/deployednettemplates/recipes/scenario2/relay.json +++ b/test/testdata/deployednettemplates/recipes/scenario2/relay.json @@ -7,5 +7,5 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" } diff --git a/test/testdata/deployednettemplates/recipes/scenario3/node.json b/test/testdata/deployednettemplates/recipes/scenario3/node.json index aec502ef53..b60d95e0c6 100644 --- a/test/testdata/deployednettemplates/recipes/scenario3/node.json +++ b/test/testdata/deployednettemplates/recipes/scenario3/node.json @@ -5,7 +5,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": false, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, 
\"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true }", "AltConfigs": [ { "APIToken": "{{APIToken}}", @@ -14,7 +14,7 @@ "TelemetryURI": "{{TelemetryURI}}", "EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0 }", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true}", "FractionApply": 0.01 } ] diff --git a/test/testdata/deployednettemplates/recipes/scenario3/relay.json b/test/testdata/deployednettemplates/recipes/scenario3/relay.json index f568eb3ded..f0d447a819 100644 --- a/test/testdata/deployednettemplates/recipes/scenario3/relay.json +++ b/test/testdata/deployednettemplates/recipes/scenario3/relay.json @@ -7,5 +7,5 @@ "TelemetryURI": "{{TelemetryURI}}", 
"EnableMetrics": true, "MetricsURI": "{{MetricsURI}}", - "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true }" + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 3, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true }" } From 559d087b442fa5d94429561e57356ea71251e10a Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 3 May 2021 16:07:19 -0400 Subject: [PATCH 198/215] add support for a single node private network (#2122) This PR adds the support for creating a single node private network. On such a node, the entire network functionality need to be disabled, since there is no "other" parties to communicate with. 
--- catchup/service.go | 8 ++- config/config.go | 9 ++- config/local_defaults.go | 1 + installer/config.json.example | 1 + netdeploy/network.go | 4 +- netdeploy/networkTemplate.go | 29 +++++--- network/wsNetwork.go | 5 ++ node/node.go | 10 ++- .../cli/goal/expect/goalExpectCommon.exp | 14 +++- .../cli/goal/expect/singleNodeNetworkTest.exp | 72 +++++++++++++++++++ test/testdata/configs/config-v17.json | 1 + .../nettemplates/SingleNodeNetwork.json | 33 +++++++++ tools/network/telemetryURIUpdateService.go | 2 +- 13 files changed, 168 insertions(+), 21 deletions(-) create mode 100644 test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp create mode 100644 test/testdata/nettemplates/SingleNodeNetwork.json diff --git a/catchup/service.go b/catchup/service.go index 3de7809b91..21a0d3d061 100644 --- a/catchup/service.go +++ b/catchup/service.go @@ -480,7 +480,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) { func (s *Service) periodicSync() { defer close(s.done) // if the catchup is disabled in the config file, just skip it. - if s.parallelBlocks != 0 { + if s.parallelBlocks != 0 && !s.cfg.DisableNetworking { s.sync() } stuckInARow := 0 @@ -498,7 +498,7 @@ func (s *Service) periodicSync() { sleepDuration = time.Duration(crypto.RandUint63()) % s.deadlineTimeout continue case <-time.After(sleepDuration): - if sleepDuration < s.deadlineTimeout { + if sleepDuration < s.deadlineTimeout || s.cfg.DisableNetworking { sleepDuration = s.deadlineTimeout continue } @@ -516,6 +516,10 @@ func (s *Service) periodicSync() { s.sync() case cert := <-s.unmatchedPendingCertificates: // the agreement service has a valid certificate for a block, but not the block itself. 
+ if s.cfg.DisableNetworking { + s.log.Warnf("the local node is missing block %d, however, the catchup would not be able to provide it when the network is disabled.", cert.Cert.Round) + continue + } s.syncCert(&cert) } diff --git a/config/config.go b/config/config.go index 1574ddb88d..71cf4fe7e7 100644 --- a/config/config.go +++ b/config/config.go @@ -406,9 +406,14 @@ type Local struct { // Generate AccountUpdates telemetry event EnableAccountUpdatesStats bool `version[17]:"false"` - // - // Time interval in ns for generating accountUpdates telemetry event + // Time interval in nanoseconds for generating accountUpdates telemetry event AccountUpdatesStatsInterval time.Duration `version[17]:"5000000000"` + + // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful + // when we have a single-node private network, where there are no other nodes that need to be communicated with. + // Features like catchpoint catchup would be rendered completely non-operational, and many of the node's inner + // workings would be completely dysfunctional. + DisableNetworking bool `version[17]:"false"` } // Filenames of config files within the configdir (e.g.
~/.algorand) diff --git a/config/local_defaults.go b/config/local_defaults.go index b84312c1ab..a645dae85b 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -45,6 +45,7 @@ var defaultLocal = Local{ DNSSecurityFlags: 1, DeadlockDetection: 0, DisableLocalhostConnectionRateLimit: true, + DisableNetworking: false, DisableOutgoingConnectionThrottling: false, EnableAccountUpdatesStats: false, EnableAgreementReporting: false, diff --git a/installer/config.json.example b/installer/config.json.example index 902065ee78..6c2265d6c8 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -24,6 +24,7 @@ "DNSSecurityFlags": 1, "DeadlockDetection": 0, "DisableLocalhostConnectionRateLimit": true, + "DisableNetworking": false, "DisableOutgoingConnectionThrottling": false, "EnableAccountUpdatesStats": false, "EnableAgreementReporting": false, diff --git a/netdeploy/network.go b/netdeploy/network.go index 1260697fdc..567414f364 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -227,7 +227,7 @@ func (n *Network) scanForNodes() error { genesisFile := filepath.Join(n.getNodeFullPath(nodeName), genesisFileName) fileExists := util.FileExists(genesisFile) if fileExists { - isPrimeDir := strings.EqualFold(nodeName, n.cfg.RelayDirs[0]) + isPrimeDir := len(n.cfg.RelayDirs) > 0 && strings.EqualFold(nodeName, n.cfg.RelayDirs[0]) if isPrimeDir { sawPrimeDir = true } else { @@ -235,7 +235,7 @@ func (n *Network) scanForNodes() error { } } } - if !sawPrimeDir { + if !sawPrimeDir && len(nodes) > 1 { return fmt.Errorf("primary relay directory (%s) invalid - can't run", n.cfg.RelayDirs[0]) } n.nodeDirs = nodes diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index c9fe871a06..36f53a9a90 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -71,6 +71,8 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin } genesisVer = strings.TrimSpace(genesisVer) + relaysCount 
:= countRelayNodes(t.Nodes) + for _, cfg := range t.Nodes { nodeDir := filepath.Join(targetFolder, cfg.Name) err = os.Mkdir(nodeDir, os.ModePerm) @@ -138,7 +140,7 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin // Create any necessary config.json file for this node nodeCfg := filepath.Join(nodeDir, config.ConfigFilename) - err = createConfigFile(cfg, nodeCfg, len(t.Nodes)-1) // minus 1 to avoid counting self + err = createConfigFile(cfg, nodeCfg, len(t.Nodes)-1, relaysCount) // minus 1 to avoid counting self if err != nil { return } @@ -200,11 +202,7 @@ func (t NetworkTemplate) Validate() error { // No wallet can be assigned to more than one node // At least one relay is required wallets := make(map[string]bool) - relayCount := 0 for _, cfg := range t.Nodes { - if cfg.IsRelay { - relayCount++ - } for _, wallet := range cfg.Wallets { upperWallet := strings.ToUpper(wallet.Name) if _, found := wallets[upperWallet]; found { @@ -213,20 +211,33 @@ func (t NetworkTemplate) Validate() error { wallets[upperWallet] = true } } - if relayCount == 0 { - return fmt.Errorf("invalid template: at least one relay is required") - } + if len(t.Nodes) > 1 && countRelayNodes(t.Nodes) == 0 { + return fmt.Errorf("invalid template: at least one relay is required when more than a single node presents") + } return nil } -func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes int) error { +// countRelayNodes counts the total number of relays +func countRelayNodes(nodeCfgs []remote.NodeConfigGoal) (relayCount int) { + for _, cfg := range nodeCfgs { + if cfg.IsRelay { + relayCount++ + } + } + return +} + +func createConfigFile(node remote.NodeConfigGoal, configFile string, numNodes int, relaysCount int) error { cfg := config.GetDefaultLocal() cfg.GossipFanout = numNodes // Override default :8080 REST endpoint, and disable SRV lookup cfg.EndpointAddress = "127.0.0.1:0" cfg.DNSBootstrapID = "" cfg.EnableProfiler = true + if relaysCount == 
0 { + cfg.DisableNetworking = true + } if node.IsRelay { // Have relays listen on any localhost port diff --git a/network/wsNetwork.go b/network/wsNetwork.go index 4fa96d21f8..c9ce6d0523 100644 --- a/network/wsNetwork.go +++ b/network/wsNetwork.go @@ -455,6 +455,10 @@ func (wn *WebsocketNetwork) Broadcast(ctx context.Context, tag protocol.Tag, dat // if wait is true then the call blocks until the packet has actually been sent to all neighbors. // TODO: add `priority` argument so that we don't have to guess it based on tag func (wn *WebsocketNetwork) BroadcastArray(ctx context.Context, tags []protocol.Tag, data [][]byte, wait bool, except Peer) error { + if wn.config.DisableNetworking { + return nil + } + if len(tags) != len(data) { return errBcastInvalidArray } @@ -827,6 +831,7 @@ func (wn *WebsocketNetwork) innerStop() { // Stop blocks until all activity on this node is done. func (wn *WebsocketNetwork) Stop() { wn.handlers.ClearHandlers([]Tag{}) + wn.innerStop() var listenAddr string if wn.listener != nil { diff --git a/node/node.go b/node/node.go index 92aad1e133..267458eeb5 100644 --- a/node/node.go +++ b/node/node.go @@ -342,8 +342,10 @@ func (node *AlgorandFullNode) Start() { // Set up a context we can use to cancel goroutines on Stop() node.ctx, node.cancelCtx = context.WithCancel(context.Background()) - // start accepting connections - node.net.Start() + if !node.config.DisableNetworking { + // start accepting connections + node.net.Start() + } node.config.NetAddress, _ = node.net.Address() if node.catchpointCatchupService != nil { @@ -417,7 +419,9 @@ func (node *AlgorandFullNode) Stop() { }() node.net.ClearHandlers() - node.net.Stop() + if !node.config.DisableNetworking { + node.net.Stop() + } if node.catchpointCatchupService != nil { node.catchpointCatchupService.Stop() } else { diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp index 3d77d9065d..8a4ee7305c 100644 --- 
a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp +++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp @@ -429,11 +429,21 @@ proc ::AlgorandGoal::GetAccountRewards { WALLET_NAME ACCOUNT_ADDRESS TEST_PRIMAR # Account Transfer proc ::AlgorandGoal::AccountTransfer { FROM_WALLET_NAME FROM_WALLET_PASSWORD FROM_ACCOUNT_ADDRESS TRANSFER_AMOUNT TO_ACCOUNT_ADDRESS FEE_AMOUNT TEST_PRIMARY_NODE_DIR OUT_FILE } { + set TRANSACTION_ID [::AlgorandGoal::AccountTransferWait $FROM_WALLET_NAME $FROM_WALLET_PASSWORD $FROM_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $TO_ACCOUNT_ADDRESS $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR $OUT_FILE "true"] + return $TRANSACTION_ID +} + +# Account Transfer ( with optional wait flag) +proc ::AlgorandGoal::AccountTransferWait { FROM_WALLET_NAME FROM_WALLET_PASSWORD FROM_ACCOUNT_ADDRESS TRANSFER_AMOUNT TO_ACCOUNT_ADDRESS FEE_AMOUNT TEST_PRIMARY_NODE_DIR OUT_FILE WAIT} { set timeout 60 if { [ catch { set TRANSACTION_ID "NOT SET" if { $OUT_FILE == "" } { - spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR + if { $WAIT == "" } { + spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --no-wait + } else { + spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR + } } else { spawn goal clerk send --fee $FEE_AMOUNT --wallet $FROM_WALLET_NAME --amount $TRANSFER_AMOUNT --from $FROM_ACCOUNT_ADDRESS --to $TO_ACCOUNT_ADDRESS -d $TEST_PRIMARY_NODE_DIR --out $OUT_FILE } @@ -809,7 +819,7 @@ proc ::AlgorandGoal::DeleteMultisigAccount { MULTISIG_ADDRESS TEST_PRIMARY_NODE_ } } -# Wait for node to reach a specific round +# Retrieve the node last catchpoint proc ::AlgorandGoal::GetNodeLastCatchpoint { NODE_DATA_DIR } { set 
CATCHPOINT "" if { [catch { diff --git a/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp b/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp new file mode 100644 index 0000000000..e0990ce12a --- /dev/null +++ b/test/e2e-go/cli/goal/expect/singleNodeNetworkTest.exp @@ -0,0 +1,72 @@ +#!/usr/bin/expect -f +#exp_internal 1 +set err 0 +log_user 1 + +if { [catch { + + source goalExpectCommon.exp + set TEST_ALGO_DIR [lindex $argv 0] + set TEST_DATA_DIR [lindex $argv 1] + + puts "TEST_ALGO_DIR: $TEST_ALGO_DIR" + puts "TEST_DATA_DIR: $TEST_DATA_DIR" + + set TIME_STAMP [clock seconds] + + set TEST_ROOT_DIR $TEST_ALGO_DIR/root + set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ + set NETWORK_NAME test_net_expect_$TIME_STAMP + set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/SingleNodeNetwork.json" + + # Create network + ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR + + # Start network + ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR + + # Determine primary account + set PRIMARY_WALLET_NAME unencrypted-default-wallet + set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR] + + # Check the primary account balance. + set INITIAL_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + + # set the destination as a random address + set DEST_ACCOUNT "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ" + + # send 1000 messages and wait for them to get applied. 
+ set EXPECTED_BALANCE $INITIAL_ACCOUNT_BALANCE + set TRANSFER_AMOUNT_BASE 1000000 + set FEE_AMOUNT 1000 + set TRANSACTION_COUNT 10 + for {set txIdx 0} {$txIdx < $TRANSACTION_COUNT} {incr txIdx 1} { + set TRANSFER_AMOUNT [expr $TRANSFER_AMOUNT_BASE+$txIdx] + set TRANSACTION_ID [::AlgorandGoal::AccountTransferWait $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS $TRANSFER_AMOUNT $DEST_ACCOUNT $FEE_AMOUNT $TEST_PRIMARY_NODE_DIR "" ""] + set EXPECTED_BALANCE [expr $EXPECTED_BALANCE-$FEE_AMOUNT-$TRANSFER_AMOUNT] + } + + # Get node last committed round + set LAST_COMMITTED_ROUND [::AlgorandGoal::GetNodeLastCommittedBlock $TEST_PRIMARY_NODE_DIR] + + # Wait for node to advance to next round. + ::AlgorandGoal::WaitForRound [expr $LAST_COMMITTED_ROUND+2] $TEST_PRIMARY_NODE_DIR + + # Check the primary account balance. + set ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR] + + # Shutdown the network + ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR + + if { $EXPECTED_BALANCE != $ACCOUNT_BALANCE } { + puts "Node was supposed to have a balance of $EXPECTED_BALANCE but ended up with a balance of $ACCOUNT_BALANCE" + exit 1 + } + + puts "Single Node Network Goal Test Successful" + + exit 0 +} EXCEPTION] } { + puts "ERROR in singleNodeNetworkTest: $EXCEPTION" + exit 1 +} diff --git a/test/testdata/configs/config-v17.json b/test/testdata/configs/config-v17.json index 902065ee78..6c2265d6c8 100644 --- a/test/testdata/configs/config-v17.json +++ b/test/testdata/configs/config-v17.json @@ -24,6 +24,7 @@ "DNSSecurityFlags": 1, "DeadlockDetection": 0, "DisableLocalhostConnectionRateLimit": true, + "DisableNetworking": false, "DisableOutgoingConnectionThrottling": false, "EnableAccountUpdatesStats": false, "EnableAgreementReporting": false, diff --git a/test/testdata/nettemplates/SingleNodeNetwork.json b/test/testdata/nettemplates/SingleNodeNetwork.json new file mode 100644 index 0000000000..359b75067b --- 
/dev/null +++ b/test/testdata/nettemplates/SingleNodeNetwork.json @@ -0,0 +1,33 @@ +{ + "Genesis": { + "NetworkName": "snn", + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 50, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 25, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 25, + "Online": true + } + ] + }, + "Nodes": [ + { + "Name": "Primary", + "IsRelay": false, + "Wallets": [ + { "Name": "Wallet1", "ParticipationOnly": false }, + { "Name": "Wallet2", "ParticipationOnly": false }, + { "Name": "Wallet3", "ParticipationOnly": false } + ] + } + ] +} diff --git a/tools/network/telemetryURIUpdateService.go b/tools/network/telemetryURIUpdateService.go index ddc972e292..a6e0da3072 100644 --- a/tools/network/telemetryURIUpdateService.go +++ b/tools/network/telemetryURIUpdateService.go @@ -60,7 +60,7 @@ func (t *telemetryURIUpdater) Start() { updateTelemetryURI := func() { endpointURL := t.lookupTelemetryURL() - if endpointURL != nil && endpointURL.String() != t.log.GetTelemetryURI() { + if endpointURL != nil && endpointURL.String() != t.log.GetTelemetryURI() && false == t.cfg.DisableNetworking { err := t.log.UpdateTelemetryURI(endpointURL.String()) if err != nil { t.log.Warnf("Unable to update telemetry URI to '%s' : %v", endpointURL.String(), err) From 1341f39d0fda971e6fccfbd9d560d6182db7a6bb Mon Sep 17 00:00:00 2001 From: egieseke Date: Tue, 4 May 2021 15:01:21 -0400 Subject: [PATCH 199/215] Update GOPROXY, replace https://gocenter.io with https://pkg.go.dev (#2127) For GOPROXY, replace the use of https://gocenter.io with https://pkg.go.dev, since gocenter.io is at end of life. 
--- docker/build/Dockerfile | 2 +- docker/build/Dockerfile-deploy | 2 +- docker/build/cicd.alpine.Dockerfile | 2 +- docker/build/cicd.centos.Dockerfile | 2 +- docker/build/cicd.ubuntu.Dockerfile | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docker/build/Dockerfile b/docker/build/Dockerfile index 9a1bf07a2e..6228677f76 100644 --- a/docker/build/Dockerfile +++ b/docker/build/Dockerfile @@ -6,7 +6,7 @@ WORKDIR /root RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local ENV GOROOT=/usr/local/go \ GOPATH=$HOME/go \ - GOPROXY=https://gocenter.io,https://goproxy.io,direct + GOPROXY=https://pkg.go.dev,https://goproxy.io,direct RUN mkdir -p $GOPATH/src/github.com/algorand WORKDIR $GOPATH/src/github.com/algorand COPY ./go-algorand ./go-algorand/ diff --git a/docker/build/Dockerfile-deploy b/docker/build/Dockerfile-deploy index 63eb7cb5da..6f1ed8c85c 100644 --- a/docker/build/Dockerfile-deploy +++ b/docker/build/Dockerfile-deploy @@ -6,7 +6,7 @@ WORKDIR /root RUN wget --quiet https://dl.google.com/go/go${GOLANG_VERSION}.linux-amd64.tar.gz && tar -xvf go${GOLANG_VERSION}.linux-amd64.tar.gz && mv go /usr/local ENV GOROOT=/usr/local/go \ GOPATH=$HOME/go \ - GOPROXY=https://gocenter.io,https://goproxy.io,direct + GOPROXY=https://pkg.go.dev,https://goproxy.io,direct RUN mkdir -p $GOPATH/src/github.com/algorand WORKDIR $GOPATH/src/github.com/algorand COPY . ./go-algorand/ diff --git a/docker/build/cicd.alpine.Dockerfile b/docker/build/cicd.alpine.Dockerfile index 228a71ee88..e449d0b913 100644 --- a/docker/build/cicd.alpine.Dockerfile +++ b/docker/build/cicd.alpine.Dockerfile @@ -25,7 +25,7 @@ RUN apk add dpkg && \ COPY . 
$GOPATH/src/github.com/algorand/go-algorand WORKDIR $GOPATH/src/github.com/algorand/go-algorand ENV GCC_CONFIG="--with-arch=armv6" \ - GOPROXY=https://gocenter.io,https://goproxy.io,direct + GOPROXY=https://pkg.go.dev,https://goproxy.io,direct RUN make ci-deps && make clean RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ mkdir -p $GOPATH/src/github.com/algorand/go-algorand diff --git a/docker/build/cicd.centos.Dockerfile b/docker/build/cicd.centos.Dockerfile index b36ceca937..07c1e6fcb6 100644 --- a/docker/build/cicd.centos.Dockerfile +++ b/docker/build/cicd.centos.Dockerfile @@ -18,7 +18,7 @@ ENV GOROOT=/usr/local/go \ RUN mkdir -p $GOPATH/src/github.com/algorand COPY . $GOPATH/src/github.com/algorand/go-algorand ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ - GOPROXY=https://gocenter.io + GOPROXY=https://pkg.go.dev WORKDIR $GOPATH/src/github.com/algorand/go-algorand RUN make ci-deps && make clean RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ diff --git a/docker/build/cicd.ubuntu.Dockerfile b/docker/build/cicd.ubuntu.Dockerfile index f30ec85ccb..c12f3da602 100644 --- a/docker/build/cicd.ubuntu.Dockerfile +++ b/docker/build/cicd.ubuntu.Dockerfile @@ -15,7 +15,7 @@ ENV GOROOT=/usr/local/go \ RUN mkdir -p $GOPATH/src/github.com/algorand COPY . $GOPATH/src/github.com/algorand/go-algorand ENV PATH=$GOPATH/bin:$GOROOT/bin:$PATH \ - GOPROXY=https://gocenter.io + GOPROXY=https://pkg.go.dev WORKDIR $GOPATH/src/github.com/algorand/go-algorand RUN make ci-deps && make clean RUN rm -rf $GOPATH/src/github.com/algorand/go-algorand && \ From 76aaa2dfa8bfcb5c62e852b7412465bcf22a61b4 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 5 May 2021 07:56:19 -0400 Subject: [PATCH 200/215] port few small changes off the main PR into this one, for faster reviewing. (#2129) This PR adds a ParticipationKeysRefreshInterval config option that would replace the hard-coded 60-second value used. 
--- agreement/fuzzer/fuzzer_test.go | 4 ++-- config/config.go | 4 ++++ config/local_defaults.go | 1 + data/account/participation.go | 14 +++++++++----- installer/config.json.example | 1 + libgoal/transactions.go | 2 +- netdeploy/network.go | 2 ++ node/node.go | 4 +--- test/testdata/configs/config-v17.json | 1 + 9 files changed, 22 insertions(+), 11 deletions(-) diff --git a/agreement/fuzzer/fuzzer_test.go b/agreement/fuzzer/fuzzer_test.go index b2e1960d8c..bd3b6bb14a 100644 --- a/agreement/fuzzer/fuzzer_test.go +++ b/agreement/fuzzer/fuzzer_test.go @@ -84,7 +84,7 @@ func MakeFuzzer(config FuzzerConfig) *Fuzzer { crashAccessors: make([]db.Accessor, config.NodesCount), accounts: make([]account.Participation, config.NodesCount), balances: make(map[basics.Address]basics.AccountData), - accountAccessors: make([]db.Accessor, config.NodesCount*2), + accountAccessors: make([]db.Accessor, config.NodesCount), ledgers: make([]*testLedger, config.NodesCount), agreementParams: make([]agreement.Parameters, config.NodesCount), tickGranularity: time.Millisecond * 300, @@ -196,7 +196,7 @@ func (n *Fuzzer) initAccountsAndBalances(rootSeed []byte, onlineNodes []bool) er if err != nil { return err } - n.accountAccessors[i*2+0] = rootAccess + n.accountAccessors[i] = rootAccess seed = sha256.Sum256(seed[:]) root, err := account.ImportRoot(rootAccess, seed) diff --git a/config/config.go b/config/config.go index 71cf4fe7e7..89865d1ae2 100644 --- a/config/config.go +++ b/config/config.go @@ -409,6 +409,10 @@ type Local struct { // Time interval in nanoseconds for generating accountUpdates telemetry event AccountUpdatesStatsInterval time.Duration `version[17]:"5000000000"` + // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation + // keys have been placed on the genesis directory. 
+ ParticipationKeysRefreshInterval time.Duration `version[17]:"60000000000"` + // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful // when we have a single-node private network, where there is no other nodes that need to be communicated with. // features like catchpoint catchup would be rendered completly non-operational, and many of the node inner diff --git a/config/local_defaults.go b/config/local_defaults.go index a645dae85b..c5b26f65c9 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -88,6 +88,7 @@ var defaultLocal = Local{ OptimizeAccountsDatabaseOnStartup: false, OutgoingMessageFilterBucketCount: 3, OutgoingMessageFilterBucketSize: 128, + ParticipationKeysRefreshInterval: 60000000000, PeerConnectionsUpdateInterval: 3600, PeerPingPeriodSeconds: 0, PriorityPeers: map[string]bool{}, diff --git a/data/account/participation.go b/data/account/participation.go index 2883366f99..269163c998 100644 --- a/data/account/participation.go +++ b/data/account/participation.go @@ -106,7 +106,7 @@ func (part Participation) VotingSigner() crypto.OneTimeSigner { } // GenerateRegistrationTransaction returns a transaction object for registering a Participation with its parent. 
-func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte, params config.ConsensusParams) transactions.Transaction { +func (part Participation) GenerateRegistrationTransaction(fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) transactions.Transaction { t := transactions.Transaction{ Type: protocol.KeyRegistrationTx, Header: transactions.Header{ @@ -191,7 +191,6 @@ func FillDBWithParticipationKeys(store db.Accessor, address basics.Address, firs }, Store: store, } - // Persist the Participation into the database err = part.Persist() return part, err @@ -203,19 +202,24 @@ func (part PersistedParticipation) Persist() error { voting := part.Voting.Snapshot() rawVoting := protocol.Encode(&voting) - return part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error { + err := part.Store.Atomic(func(ctx context.Context, tx *sql.Tx) error { err := partInstallDatabase(tx) if err != nil { - return fmt.Errorf("Participation.persist: failed to install database: %v", err) + return fmt.Errorf("failed to install database: %w", err) } _, err = tx.Exec("INSERT INTO ParticipationAccount (parent, vrf, voting, firstValid, lastValid, keyDilution) VALUES (?, ?, ?, ?, ?, ?)", part.Parent[:], rawVRF, rawVoting, part.FirstValid, part.LastValid, part.KeyDilution) if err != nil { - return fmt.Errorf("Participation.persist: failed to insert account: %v", err) + return fmt.Errorf("failed to insert account: %w", err) } return nil }) + + if err != nil { + err = fmt.Errorf("PersistedParticipation.Persist: %w", err) + } + return err } // Migrate is called when loading participation keys. 
diff --git a/installer/config.json.example b/installer/config.json.example index 6c2265d6c8..a455ad2ec0 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -67,6 +67,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "ParticipationKeysRefreshInterval": 60000000000, "PeerConnectionsUpdateInterval": 3600, "PeerPingPeriodSeconds": 0, "PriorityPeers": {}, diff --git a/libgoal/transactions.go b/libgoal/transactions.go index b35b06cd66..8726daff50 100644 --- a/libgoal/transactions.go +++ b/libgoal/transactions.go @@ -229,7 +229,7 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participat parsedLastValid := basics.Round(lastValid) parsedFee := basics.MicroAlgos{Raw: fee} - goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes, cparams) + goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes) if cparams.SupportGenesisHash { var genHash crypto.Digest copy(genHash[:], params.GenesisHash) diff --git a/netdeploy/network.go b/netdeploy/network.go index 567414f364..513d464651 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -22,6 +22,7 @@ import ( "io/ioutil" "os" "path/filepath" + "sort" "strings" "time" @@ -146,6 +147,7 @@ func (n Network) NodeDataDirs() []string { for _, nodeDir := range n.nodeDirs { directories = append(directories, n.getNodeFullPath(nodeDir)) } + sort.Strings(directories) return directories } diff --git a/node/node.go b/node/node.go index 267458eeb5..bc6e2d20f8 100644 --- a/node/node.go +++ b/node/node.go @@ -55,8 +55,6 @@ import ( "github.com/algorand/go-deadlock" ) -const participationKeyCheckSecs = 60 - // StatusReport represents the current basic status of the node type StatusReport struct { LastRound basics.Round @@ -704,7 +702,7 @@ func (node *AlgorandFullNode) 
GetPendingTxnsFromPool() ([]transactions.SignedTxn // Reload participation keys from disk periodically func (node *AlgorandFullNode) checkForParticipationKeys() { defer node.monitoringRoutinesWaitGroup.Done() - ticker := time.NewTicker(participationKeyCheckSecs * time.Second) + ticker := time.NewTicker(node.config.ParticipationKeysRefreshInterval) for { select { case <-ticker.C: diff --git a/test/testdata/configs/config-v17.json b/test/testdata/configs/config-v17.json index 6c2265d6c8..a455ad2ec0 100644 --- a/test/testdata/configs/config-v17.json +++ b/test/testdata/configs/config-v17.json @@ -67,6 +67,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "ParticipationKeysRefreshInterval": 60000000000, "PeerConnectionsUpdateInterval": 3600, "PeerPingPeriodSeconds": 0, "PriorityPeers": {}, From 50b29793299d51040195b253e59147df04c5a880 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Wed, 5 May 2021 19:24:02 -0400 Subject: [PATCH 201/215] bugfix: netgoal errors when bootstrap files was missing (#2134) netgoal was failing to build network when the bootstrap files was missing. 
--- cmd/netgoal/network.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/cmd/netgoal/network.go b/cmd/netgoal/network.go index 05726f8397..9e2f12a0fb 100644 --- a/cmd/netgoal/network.go +++ b/cmd/netgoal/network.go @@ -142,7 +142,10 @@ func runBuildNetwork() (err error) { net.GenesisData.VersionModifier = networkGenesisVersionModifier } - bootstrappedFile := resolveFile(r.BootstrappedFile, templateBaseDir) + var bootstrappedFile string + if r.BootstrappedFile != "" { + bootstrappedFile = resolveFile(r.BootstrappedFile, templateBaseDir) + } if util.FileExists(bootstrappedFile) && bootstrapLoadingFile { fileTemplate, err := remote.LoadBootstrappedData(bootstrappedFile) if err != nil { @@ -201,7 +204,6 @@ func resolveFile(filename string, baseDir string) string { if filepath.IsAbs(filename) { return filename } - // Assume path is relative to the directory of the template file return filepath.Join(baseDir, filename) } From 3bd5d051b6102cc24ce084beac7e6aa63e94c213 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 6 May 2021 11:12:32 -0400 Subject: [PATCH 202/215] Provide proper handling for panicing unit tests. (#2135) The logfilter utility was not handling unit test panics correctly - the panic result was omitted from the output. 
--- debug/logfilter/example8.in | 47 +++++++++++++++++++++++++++ debug/logfilter/example8.out.expected | 30 +++++++++++++++++ debug/logfilter/main.go | 2 ++ 3 files changed, 79 insertions(+) create mode 100644 debug/logfilter/example8.in create mode 100644 debug/logfilter/example8.out.expected diff --git a/debug/logfilter/example8.in b/debug/logfilter/example8.in new file mode 100644 index 0000000000..b538811e09 --- /dev/null +++ b/debug/logfilter/example8.in @@ -0,0 +1,47 @@ +=== RUN TestParticipationKeyOnlyAccountParticipatesCorrectly +=== PAUSE TestParticipationKeyOnlyAccountParticipatesCorrectly +=== RUN TestNewAccountCanGoOnlineAndParticipate + onlineOfflineParticipation_test.go:105: +--- SKIP: TestNewAccountCanGoOnlineAndParticipate (0.00s) +=== RUN TestOverlappingParticipationKeys +=== PAUSE TestOverlappingParticipationKeys +=== RUN TestOnlineOfflineRewards +=== PAUSE TestOnlineOfflineRewards +=== RUN TestPartkeyOnlyRewards + participationRewards_test.go:139: +--- SKIP: TestPartkeyOnlyRewards (0.00s) +=== RUN TestRewardUnitThreshold +=== PAUSE TestRewardUnitThreshold +=== RUN TestRewardRateRecalculation +=== PAUSE TestRewardRateRecalculation +=== CONT TestOverlappingParticipationKeys +=== CONT TestRewardRateRecalculation +--- FAIL: TestOverlappingParticipationKeys (0.00s) +panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory [recovered] + panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory + +goroutine 119 [running]: +testing.tRunner.func1.1(0x1515200, 0xc0002f0880) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:988 +0x452 +testing.tRunner.func1(0xc0002377a0) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:991 +0x600 +panic(0x1515200, 
0xc0002f0880) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/runtime/panic.go:975 +0x3e3 +github.com/algorand/go-algorand/test/framework/fixtures.(*baseFixture).failOnError(...) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/baseFixture.go:69 +github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).failOnError(0xc0001d6800, 0x19ee400, 0xc0002db290, 0x16d71fc, 0x24) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:329 +0x1d8 +github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).setup(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0x16d25f5, 0x20, 0xc000104ea0, 0x28, 0xc000091000) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:97 +0x490 +github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).SetupNoStart(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0xc000104ea0, 0x28) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:73 +0x92 +github.com/algorand/go-algorand/test/framework/fixtures.(*RestClientFixture).SetupNoStart(...) 
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/restClientFixture.go:50 +github.com/algorand/go-algorand/test/e2e-go/features/participation.TestOverlappingParticipationKeys(0xc0002377a0) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/e2e-go/features/participation/overlappingParticipationKeys_test.go:58 +0x3a0 +testing.tRunner(0xc0002377a0, 0x17230a0) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1039 +0x1ec +created by testing.(*T).Run + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1090 +0x701 +FAIL github.com/algorand/go-algorand/test/e2e-go/features/participation 0.069s + diff --git a/debug/logfilter/example8.out.expected b/debug/logfilter/example8.out.expected new file mode 100644 index 0000000000..68ea01a6bf --- /dev/null +++ b/debug/logfilter/example8.out.expected @@ -0,0 +1,30 @@ + +--- FAIL: TestOverlappingParticipationKeys (0.00s) +FAIL github.com/algorand/go-algorand/test/e2e-go/features/participation 0.069s... +panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory [recovered] + panic: CreateNetworkFromTemplate failed: open /home/travis/gopath/src/github.com/algorand/go-algorand/test/testdata/nettemplates/ShortParticipationKeys.json: no such file or directory +goroutine 119 [running]: +testing.tRunner.func1.1(0x1515200, 0xc0002f0880) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:988 +0x452 +testing.tRunner.func1(0xc0002377a0) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:991 +0x600 +panic(0x1515200, 0xc0002f0880) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/runtime/panic.go:975 +0x3e3 +github.com/algorand/go-algorand/test/framework/fixtures.(*baseFixture).failOnError(...) 
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/baseFixture.go:69 +github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).failOnError(0xc0001d6800, 0x19ee400, 0xc0002db290, 0x16d71fc, 0x24) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:329 +0x1d8 +github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).setup(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0x16d25f5, 0x20, 0xc000104ea0, 0x28, 0xc000091000) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:97 +0x490 +github.com/algorand/go-algorand/test/framework/fixtures.(*LibGoalFixture).SetupNoStart(0xc0001d6800, 0x1a19280, 0xc0002377a0, 0xc000104ea0, 0x28) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/libgoalFixture.go:73 +0x92 +github.com/algorand/go-algorand/test/framework/fixtures.(*RestClientFixture).SetupNoStart(...) 
+ /home/travis/gopath/src/github.com/algorand/go-algorand/test/framework/fixtures/restClientFixture.go:50 +github.com/algorand/go-algorand/test/e2e-go/features/participation.TestOverlappingParticipationKeys(0xc0002377a0) + /home/travis/gopath/src/github.com/algorand/go-algorand/test/e2e-go/features/participation/overlappingParticipationKeys_test.go:58 +0x3a0 +testing.tRunner(0xc0002377a0, 0x17230a0) + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1039 +0x1ec +created by testing.(*T).Run + /home/travis/.gimme/versions/go1.14.7.linux.amd64/src/testing/testing.go:1090 +0x701 + +FAIL github.com/algorand/go-algorand/test/e2e-go/features/participation 0.069s diff --git a/debug/logfilter/main.go b/debug/logfilter/main.go index 914d9b90bc..265c501fe7 100644 --- a/debug/logfilter/main.go +++ b/debug/logfilter/main.go @@ -80,6 +80,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { } else { fmt.Fprintf(outFile, line+"\r\n") delete(tests, testName) + currentTestName = "" } continue } @@ -96,6 +97,7 @@ func logFilter(inFile io.Reader, outFile io.Writer) int { fmt.Fprintf(outFile, line+"\r\n") test.outputBuffer = "" tests[testName] = test + currentTestName = "" } continue } From 490180bcb3a91bdb0398f34f9922f215bc2b5f07 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Thu, 6 May 2021 16:42:05 -0400 Subject: [PATCH 203/215] improve the support for overlapping participation keys (#2125) Improve the support for overlapping participation keys, and correct the participation key uniqueness. Highlights: * Keys used to be identified by account public address, first and last valid round. This wasn't that great. If you were to generate multiple participation keys for the same account, you could easily have few of them share the same parameters - making the algod vote with only one of them ( arbitrarily ). * When sending the votes, we were attempting to send the votes for all the participation keys that we had. 
If we had two participation keys valid with an overlapping range, we would vote for both of them ( and for the same account! ). This would not generate an issue on its own, as we had proper checking for the Vote and Selection keys on the verification side, but could lead to malicious-looking notifications in the log file. * Similar to the above, a newly added participation key would be used to vote right away, even before the 320 rounds have passed. This would cause the network to reject this vote. In this PR, I have changed the following: 1. `HasLiveKeys` - was removed. The method was not used anywhere in the codebase. 2. `Keys` was given a round number parameter. This round number would be used for filtering a participation key that has the given round number between its first and last valid range. 3. The agreement `asyncPseudonode` would now cache the participation keys for a single round. This optimization wasn't done in a vacuum. The need for this came since we would add a database lookup for each account prior to returning the corresponding participation key. 4. The agreement now expects the `KeyManager` to have the `VotingKeys` method instead of the `Keys` method. This allows us to customize the node to provide a database lookup and ensure the voting keys available for the node are the ones that would be accepted by the network. 
--- agreement/abstractions.go | 11 +- agreement/agreementtest/keyManager.go | 19 +- agreement/cryptoVerifier_test.go | 3 +- agreement/fuzzer/keyManager_test.go | 13 +- agreement/pseudonode.go | 88 ++++---- agreement/pseudonode_test.go | 78 ++++++- agreement/service_test.go | 17 +- compactcert/abstractions.go | 2 +- compactcert/signer.go | 12 +- compactcert/worker_test.go | 9 +- data/account/account.go | 9 - data/accountManager.go | 45 ++-- netdeploy/network.go | 3 +- netdeploy/networkTemplate.go | 6 +- node/netprio.go | 7 +- node/node.go | 53 ++++- .../overlappingParticipationKeys_test.go | 209 ++++++++++++++++++ .../nettemplates/ShortParticipationKeys.json | 55 +++++ 18 files changed, 509 insertions(+), 130 deletions(-) create mode 100644 test/e2e-go/features/participation/overlappingParticipationKeys_test.go create mode 100644 test/testdata/nettemplates/ShortParticipationKeys.json diff --git a/agreement/abstractions.go b/agreement/abstractions.go index ee0cf43b62..2384ef50ef 100644 --- a/agreement/abstractions.go +++ b/agreement/abstractions.go @@ -225,13 +225,10 @@ type LedgerWriter interface { // A KeyManager stores and deletes participation keys. type KeyManager interface { - // Keys returns an immutable array of participation intervals to - // participating accounts. - Keys() []account.Participation - - // HasLiveKeys returns true if we have any Participation - // keys valid for the specified round range (inclusive) - HasLiveKeys(from, to basics.Round) bool + // VotingKeys returns an immutable array of voting keys that are + // valid for the provided votingRound, and were available at + // keysRound. + VotingKeys(votingRound, keysRound basics.Round) []account.Participation } // MessageHandle is an ID referring to a specific message. 
diff --git a/agreement/agreementtest/keyManager.go b/agreement/agreementtest/keyManager.go index 6b3b968f68..384fba8cd5 100644 --- a/agreement/agreementtest/keyManager.go +++ b/agreement/agreementtest/keyManager.go @@ -24,24 +24,15 @@ import ( // SimpleKeyManager provides a simple implementation of a KeyManager. type SimpleKeyManager []account.Participation -// Keys implements KeyManager.Keys. -func (m SimpleKeyManager) Keys() []account.Participation { +// VotingKeys implements KeyManager.VotingKeys. +func (m SimpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation { var km []account.Participation for _, acc := range m { - km = append(km, acc) - } - return km -} - -// HasLiveKeys returns true if we have any Participation -// keys valid for the specified round range (inclusive) -func (m SimpleKeyManager) HasLiveKeys(from, to basics.Round) bool { - for _, acc := range m { - if acc.OverlapsInterval(from, to) { - return true + if acc.OverlapsInterval(votingRound, votingRound) { + km = append(km, acc) } } - return false + return km } // DeleteOldKeys implements KeyManager.DeleteOldKeys. 
diff --git a/agreement/cryptoVerifier_test.go b/agreement/cryptoVerifier_test.go index 124452454b..4f9e1edb9c 100644 --- a/agreement/cryptoVerifier_test.go +++ b/agreement/cryptoVerifier_test.go @@ -317,7 +317,8 @@ func BenchmarkCryptoVerifierProposalVertification(b *testing.B) { } Period := period(0) - participation := pn.getParticipations("BenchmarkCryptoVerifierProposalVertification", ledger.NextRound()) + pn.loadRoundParticipationKeys(ledger.NextRound()) + participation := pn.participationKeys proposals, _ := pn.makeProposals(ledger.NextRound(), Period, participation) diff --git a/agreement/fuzzer/keyManager_test.go b/agreement/fuzzer/keyManager_test.go index 1686552c0e..c888b4955e 100644 --- a/agreement/fuzzer/keyManager_test.go +++ b/agreement/fuzzer/keyManager_test.go @@ -23,15 +23,12 @@ import ( type simpleKeyManager []account.Participation -func (m simpleKeyManager) Keys() []account.Participation { - return m -} - -func (m simpleKeyManager) HasLiveKeys(from, to basics.Round) bool { +func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation { + var km []account.Participation for _, acc := range m { - if acc.OverlapsInterval(from, to) { - return true + if acc.OverlapsInterval(votingRound, votingRound) { + km = append(km, acc) } } - return false + return km } diff --git a/agreement/pseudonode.go b/agreement/pseudonode.go index 66cc2ae558..e2809fb134 100644 --- a/agreement/pseudonode.go +++ b/agreement/pseudonode.go @@ -67,14 +67,16 @@ type pseudonode interface { // asyncPseudonode creates proposals and votes asynchronously. type asyncPseudonode struct { - factory BlockFactory - validator BlockValidator - keys KeyManager - ledger Ledger - log serviceLogger - quit chan struct{} // a quit signal for the verifier goroutines - closeWg *sync.WaitGroup // frontend waitgroup to get notified when all the verifier goroutines are done. 
- monitor *coserviceMonitor + factory BlockFactory + validator BlockValidator + keys KeyManager + ledger Ledger + log serviceLogger + quit chan struct{} // a quit signal for the verifier goroutines + closeWg *sync.WaitGroup // frontend waitgroup to get notified when all the verifier goroutines are done. + monitor *coserviceMonitor + participationKeysRound basics.Round // the round to which the participationKeys matches + participationKeys []account.Participation // the list of the participation keys for round participationKeysRound proposalsVerifier *pseudonodeVerifier // dynamically generated verifier goroutine that manages incoming proposals making request. votesVerifier *pseudonodeVerifier // dynamically generated verifier goroutine that manages incoming votes making request. @@ -193,34 +195,53 @@ func (n asyncPseudonode) MakeVotes(ctx context.Context, r round, p period, s ste } } -func (n asyncPseudonode) makeProposalsTask(ctx context.Context, r round, p period) pseudonodeProposalsTask { - participation := n.getParticipations("asyncPseudonode.makeProposalsTask", r) +// load the participation keys from the account manager ( as needed ) for the +// current round. +func (n *asyncPseudonode) loadRoundParticipationKeys(voteRound basics.Round) []account.Participation { + // if we've already loaded up the keys, then just skip loading them. + if n.participationKeysRound == voteRound { + return n.participationKeys + } + cparams, err := n.ledger.ConsensusParams(ParamsRound(voteRound)) + if err != nil { + // if we cannot figure out the balance round number, reset the parameters so that we won't be sending + // any vote. + n.log.Warnf("asyncPseudonode: unable to retrieve consensus parameters for voting round %d : %v", voteRound, err) + n.participationKeysRound = basics.Round(0) + n.participationKeys = nil + return nil + } + balanceRound := balanceRound(voteRound, cparams) + + // otherwise, we want to load the participation keys. 
+ n.participationKeys = n.keys.VotingKeys(voteRound, balanceRound) + n.participationKeysRound = voteRound + return n.participationKeys +} + +func (n asyncPseudonode) makeProposalsTask(ctx context.Context, r round, p period) pseudonodeProposalsTask { pt := pseudonodeProposalsTask{ pseudonodeBaseTask: pseudonodeBaseTask{ - node: &n, - context: ctx, - participation: participation, - out: make(chan externalEvent), + node: &n, + context: ctx, + out: make(chan externalEvent), }, round: r, period: p, } - if len(participation) == 0 { + if !pt.populateParticipationKeys(r) { close(pt.out) } return pt } func (n asyncPseudonode) makeVotesTask(ctx context.Context, r round, p period, s step, prop proposalValue, persistStateDone chan error) pseudonodeVotesTask { - participation := n.getParticipations("asyncPseudonode.makeVotesTask", r) - pvt := pseudonodeVotesTask{ pseudonodeBaseTask: pseudonodeBaseTask{ - node: &n, - context: ctx, - participation: participation, - out: make(chan externalEvent), + node: &n, + context: ctx, + out: make(chan externalEvent), }, round: r, period: p, @@ -228,7 +249,7 @@ func (n asyncPseudonode) makeVotesTask(ctx context.Context, r round, p period, s prop: prop, persistStateDone: persistStateDone, } - if len(participation) == 0 { + if !pvt.populateParticipationKeys(r) { close(pvt.out) } return pvt @@ -244,21 +265,6 @@ func (n asyncPseudonode) makePseudonodeVerifier(voteVerifier *AsyncVoteVerifier) return pv } -// getParticipations retrieves the participation accounts for a given round. 
-func (n asyncPseudonode) getParticipations(procName string, round basics.Round) []account.Participation { - keys := n.keys.Keys() - participations := make([]account.Participation, 0, len(keys)) - for _, part := range keys { - firstValid, lastValid := part.ValidInterval() - if round < firstValid || round > lastValid { - n.log.Debugf("%v (round=%v): Account %v not participating: %v not in [%v, %v]", procName, round, part.Address(), round, firstValid, lastValid) - continue - } - participations = append(participations, part) - } - return participations -} - // makeProposals creates a slice of block proposals for the given round and period. func (n asyncPseudonode) makeProposals(round basics.Round, period period, accounts []account.Participation) ([]proposal, []unauthenticatedVote) { deadline := time.Now().Add(config.ProposalAssemblyTime) @@ -334,6 +340,14 @@ func (pv *pseudonodeVerifier) verifierLoop(n *asyncPseudonode) { } } +// populateParticipationKeys refreshes the participation key cache ( as needed ), and updates the +// task with the loaded participation keys. It returns whether we have any participation keys +// for the given round. 
+func (t *pseudonodeBaseTask) populateParticipationKeys(r round) bool { + t.participation = t.node.loadRoundParticipationKeys(r) + return len(t.participation) > 0 +} + func (t pseudonodeBaseTask) outputChannel() chan externalEvent { return t.out } diff --git a/agreement/pseudonode_test.go b/agreement/pseudonode_test.go index b74b83fd35..ca6b11c426 100644 --- a/agreement/pseudonode_test.go +++ b/agreement/pseudonode_test.go @@ -23,7 +23,10 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/protocol" ) @@ -131,14 +134,13 @@ func compareEventChannels(t *testing.T, ch1, ch2 <-chan externalEvent) bool { func TestPseudonode(t *testing.T) { t.Parallel() - logging.Base().SetLevel(logging.Warn) - // generate a nice, fixed hash. rootSeed := sha256.Sum256([]byte(t.Name())) accounts, balances := createTestAccountsAndBalances(t, 10, rootSeed[:]) ledger := makeTestLedger(balances) - sLogger := serviceLogger{logging.Base()} + sLogger := serviceLogger{logging.NewLogger()} + sLogger.SetLevel(logging.Warn) keyManager := simpleKeyManager(accounts) pb := makePseudonode(pseudonodeParams{ @@ -281,7 +283,8 @@ func (n serializedPseudonode) MakeProposals(ctx context.Context, r round, p peri verifier := makeCryptoVerifier(n.ledger, n.validator, MakeAsyncVoteVerifier(nil), n.log) defer verifier.Quit() - participation := n.getParticipations("serializedPseudonode.MakeProposals", r) + n.loadRoundParticipationKeys(n.ledger.NextRound()) + participation := n.participationKeys proposals, votes := n.makeProposals(r, p, participation) @@ -337,7 +340,8 @@ func (n serializedPseudonode) MakeVotes(ctx context.Context, r round, p period, verifier := makeCryptoVerifier(n.ledger, n.validator, MakeAsyncVoteVerifier(nil), n.log) defer verifier.Quit() - participation := 
n.getParticipations("serializedPseudonode.MakeVotes", r) + n.loadRoundParticipationKeys(r) + participation := n.participationKeys votes := n.makeVotes(r, p, s, prop, participation) @@ -374,3 +378,67 @@ func (n serializedPseudonode) MakeVotes(ctx context.Context, r round, p period, func (n serializedPseudonode) Quit() { // nothing to do ! this serializedPseudonode is so simplified that no destructor is needed. } + +type KeyManagerProxy struct { + target func(basics.Round, basics.Round) []account.Participation +} + +func (k *KeyManagerProxy) VotingKeys(votingRound, balanceRound basics.Round) []account.Participation { + return k.target(votingRound, balanceRound) +} + +func TestPseudonodeLoadingOfParticipationKeys(t *testing.T) { + t.Parallel() + + // generate a nice, fixed hash. + rootSeed := sha256.Sum256([]byte(t.Name())) + accounts, balances := createTestAccountsAndBalances(t, 10, rootSeed[:]) + ledger := makeTestLedger(balances) + + sLogger := serviceLogger{logging.NewLogger()} + sLogger.SetLevel(logging.Warn) + + keyManager := simpleKeyManager(accounts) + pb := makePseudonode(pseudonodeParams{ + factory: testBlockFactory{Owner: 0}, + validator: testBlockValidator{}, + keys: keyManager, + ledger: ledger, + voteVerifier: MakeAsyncVoteVerifier(nil), + log: sLogger, + monitor: nil, + }).(asyncPseudonode) + // verify start condition - + require.Zero(t, pb.participationKeysRound) + require.Empty(t, pb.participationKeys) + + // check after round 1 + pb.loadRoundParticipationKeys(basics.Round(1)) + require.Equal(t, basics.Round(1), pb.participationKeysRound) + require.NotEmpty(t, pb.participationKeys) + + // check the participationKeys retain their prev valud after a call to loadRoundParticipationKeys with 1. + pb.participationKeys = nil + pb.loadRoundParticipationKeys(basics.Round(1)) + require.Equal(t, basics.Round(1), pb.participationKeysRound) + require.Nil(t, pb.participationKeys) + + // check that it's being updated when asked with a different round number. 
+ returnedPartKeys := pb.loadRoundParticipationKeys(basics.Round(2)) + require.Equal(t, basics.Round(2), pb.participationKeysRound) + require.NotEmpty(t, pb.participationKeys) + require.Equal(t, pb.participationKeys, returnedPartKeys) + + // test to see that loadRoundParticipationKeys is calling VotingKeys with the correct parameters. + keyManagerProxy := &KeyManagerProxy{} + pb.keys = keyManagerProxy + cparams, _ := ledger.ConsensusParams(0) + for rnd := basics.Round(3); rnd < 1000; rnd += 43 { + keyManagerProxy.target = func(votingRound, balanceRnd basics.Round) []account.Participation { + require.Equal(t, rnd, votingRound) + require.Equal(t, balanceRound(rnd, cparams), balanceRnd) + return keyManager.VotingKeys(votingRound, balanceRnd) + } + pb.loadRoundParticipationKeys(basics.Round(rnd)) + } +} diff --git a/agreement/service_test.go b/agreement/service_test.go index b8ff67f5a9..f3db45a859 100644 --- a/agreement/service_test.go +++ b/agreement/service_test.go @@ -106,23 +106,14 @@ func (c *testingClock) fire(d time.Duration) { type simpleKeyManager []account.Participation -func (m simpleKeyManager) Keys() []account.Participation { +func (m simpleKeyManager) VotingKeys(votingRound, _ basics.Round) []account.Participation { var km []account.Participation for _, acc := range m { - km = append(km, acc) - } - return km -} - -// HasLiveKeys returns true if we have any Participation -// keys valid for the specified round range (inclusive) -func (m simpleKeyManager) HasLiveKeys(from, to basics.Round) bool { - for _, acc := range m { - if acc.OverlapsInterval(from, to) { - return true + if acc.OverlapsInterval(votingRound, votingRound) { + km = append(km, acc) } } - return false + return km } func (m simpleKeyManager) DeleteOldKeys(basics.Round) { diff --git a/compactcert/abstractions.go b/compactcert/abstractions.go index faa51fd6a2..6a369dee17 100644 --- a/compactcert/abstractions.go +++ b/compactcert/abstractions.go @@ -54,5 +54,5 @@ type Network interface { // 
Accounts captures the aspects of the AccountManager that are used by // this package. type Accounts interface { - Keys() []account.Participation + Keys(basics.Round) []account.Participation } diff --git a/compactcert/signer.go b/compactcert/signer.go index ef2ec67e09..7672111304 100644 --- a/compactcert/signer.go +++ b/compactcert/signer.go @@ -93,17 +93,17 @@ func (ccw *Worker) signBlock(hdr bookkeeping.BlockHeader) { return } - keys := ccw.accts.Keys() - if len(keys) == 0 { - // No keys, nothing to do. - return - } - // Compact cert gets signed by the next round after the block, // because by the time agreement is reached on the block, // ephemeral keys for that round could be deleted. sigKeyRound := hdr.Round + 1 + keys := ccw.accts.Keys(sigKeyRound) + if len(keys) == 0 { + // No keys, nothing to do. + return + } + // votersRound is the round containing the merkle root commitment // for the voters that are going to sign this block. votersRound := hdr.Round.SubSaturate(basics.Round(proto.CompactCertRounds)) diff --git a/compactcert/worker_test.go b/compactcert/worker_test.go index a82a6cd282..cd3351e822 100644 --- a/compactcert/worker_test.go +++ b/compactcert/worker_test.go @@ -96,8 +96,13 @@ func (s *testWorkerStubs) addBlock(ccNextRound basics.Round) { } } -func (s *testWorkerStubs) Keys() (out []account.Participation) { - return s.keys +func (s *testWorkerStubs) Keys(rnd basics.Round) (out []account.Participation) { + for _, part := range s.keys { + if part.OverlapsInterval(rnd, rnd) { + out = append(out, part) + } + } + return } func (s *testWorkerStubs) BlockHdr(r basics.Round) (bookkeeping.BlockHeader, error) { diff --git a/data/account/account.go b/data/account/account.go index 6d33331963..f9cd269441 100644 --- a/data/account/account.go +++ b/data/account/account.go @@ -181,12 +181,3 @@ func RestoreParticipation(store db.Accessor) (acc PersistedParticipation, err er acc.Store = store return acc, nil } - -// A ParticipationInterval defines an interval for 
which a participation account is valid. -type ParticipationInterval struct { - basics.Address - - // FirstValid and LastValid are inclusive. - FirstValid basics.Round - LastValid basics.Round -} diff --git a/data/accountManager.go b/data/accountManager.go index bdde332467..79a57287b1 100644 --- a/data/accountManager.go +++ b/data/accountManager.go @@ -22,6 +22,7 @@ import ( "github.com/algorand/go-deadlock" "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" @@ -30,11 +31,23 @@ import ( "github.com/algorand/go-algorand/protocol" ) +// A ParticipationKeyIdentity defines the parameters that makes a pariticpation key unique. +type ParticipationKeyIdentity struct { + basics.Address // the address this participation key is used to vote for. + + // FirstValid and LastValid are inclusive. + FirstValid basics.Round + LastValid basics.Round + + VoteID crypto.OneTimeSignatureVerifier + SelectionID crypto.VrfPubkey +} + // AccountManager loads and manages accounts for the node type AccountManager struct { mu deadlock.Mutex - partIntervals map[account.ParticipationInterval]account.PersistedParticipation + partKeys map[ParticipationKeyIdentity]account.PersistedParticipation // Map to keep track of accounts for which we've sent // AccountRegistered telemetry events @@ -47,19 +60,21 @@ type AccountManager struct { func MakeAccountManager(log logging.Logger) *AccountManager { manager := &AccountManager{} manager.log = log - manager.partIntervals = make(map[account.ParticipationInterval]account.PersistedParticipation) + manager.partKeys = make(map[ParticipationKeyIdentity]account.PersistedParticipation) manager.registeredAccounts = make(map[string]bool) return manager } // Keys returns a list of Participation accounts. 
-func (manager *AccountManager) Keys() (out []account.Participation) { +func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participation) { manager.mu.Lock() defer manager.mu.Unlock() - for _, part := range manager.partIntervals { - out = append(out, part.Participation) + for _, part := range manager.partKeys { + if part.OverlapsInterval(rnd, rnd) { + out = append(out, part.Participation) + } } return out } @@ -70,7 +85,7 @@ func (manager *AccountManager) HasLiveKeys(from, to basics.Round) bool { manager.mu.Lock() defer manager.mu.Unlock() - for _, part := range manager.partIntervals { + for _, part := range manager.partKeys { if part.OverlapsInterval(from, to) { return true } @@ -88,19 +103,21 @@ func (manager *AccountManager) AddParticipation(participation account.PersistedP address := participation.Address() first, last := participation.ValidInterval() - interval := account.ParticipationInterval{ - Address: address, - FirstValid: first, - LastValid: last, + partkeyID := ParticipationKeyIdentity{ + Address: address, + FirstValid: first, + LastValid: last, + VoteID: participation.Voting.OneTimeSignatureVerifier, + SelectionID: participation.VRF.PK, } // Check if we already have participation keys for this address in this interval - _, alreadyPresent := manager.partIntervals[interval] + _, alreadyPresent := manager.partKeys[partkeyID] if alreadyPresent { return false } - manager.partIntervals[interval] = participation + manager.partKeys[partkeyID] = participation addressString := address.String() manager.log.EventWithDetails(telemetryspec.Accounts, telemetryspec.PartKeyRegisteredEvent, telemetryspec.PartKeyRegisteredEventDetails{ @@ -127,10 +144,10 @@ func (manager *AccountManager) DeleteOldKeys(latestHdr bookkeeping.BlockHeader, latestProto := config.Consensus[latestHdr.CurrentProtocol] manager.mu.Lock() - pendingItems := make(map[string]<-chan error, len(manager.partIntervals)) + pendingItems := make(map[string]<-chan error, 
len(manager.partKeys)) func() { defer manager.mu.Unlock() - for _, part := range manager.partIntervals { + for _, part := range manager.partKeys { // We need a key for round r+1 for agreement. nextRound := latestHdr.Round + 1 diff --git a/netdeploy/network.go b/netdeploy/network.go index 513d464651..3295f5116d 100644 --- a/netdeploy/network.go +++ b/netdeploy/network.go @@ -86,10 +86,11 @@ func CreateNetworkFromTemplate(name, rootDir, templateFile, binDir string, impor return n, err } - n.cfg.RelayDirs, n.nodeDirs, n.gen, err = template.createNodeDirectories(rootDir, binDir, importKeys) + n.cfg.RelayDirs, n.nodeDirs, err = template.createNodeDirectories(rootDir, binDir, importKeys) if err != nil { return n, err } + n.gen = template.Genesis err = n.Save(rootDir) n.SetConsensus(binDir, consensus) diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go index 36f53a9a90..ecc7affdc2 100644 --- a/netdeploy/networkTemplate.go +++ b/netdeploy/networkTemplate.go @@ -55,12 +55,8 @@ func (t NetworkTemplate) generateGenesisAndWallets(targetFolder, networkName, bi // Create data folders for all NodeConfigs, configuring relays appropriately and // returning the full path to the 'prime' relay and node folders (the first one created) and the genesis data used in this network. 
-func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir string, importKeys bool) (relayDirs []string, nodeDirs map[string]string, genData gen.GenesisData, err error) { +func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir string, importKeys bool) (relayDirs []string, nodeDirs map[string]string, err error) { genesisFile := filepath.Join(targetFolder, genesisFileName) - genData, err = gen.LoadGenesisData(genesisFile) - if err != nil { - return - } nodeDirs = make(map[string]string) getGenesisVerCmd := filepath.Join(binDir, "algod") diff --git a/node/netprio.go b/node/netprio.go index 4df0dcc916..c65db60d4f 100644 --- a/node/netprio.go +++ b/node/netprio.go @@ -78,12 +78,7 @@ func (node *AlgorandFullNode) MakePrioResponse(challenge string) []byte { // Use the participation key for 2 rounds in the future, so that // it's unlikely to be deleted from underneath of us. voteRound := latest + 2 - for _, part := range node.accountManager.Keys() { - firstValid, lastValid := part.ValidInterval() - if voteRound < firstValid || voteRound > lastValid { - continue - } - + for _, part := range node.accountManager.Keys(voteRound) { parent := part.Address() data, err := node.ledger.Lookup(latest, parent) if err != nil { diff --git a/node/node.go b/node/node.go index bc6e2d20f8..ebd4ebb6b0 100644 --- a/node/node.go +++ b/node/node.go @@ -248,7 +248,7 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd Ledger: agreementLedger, BlockFactory: node, BlockValidator: blockValidator, - KeyManager: node.accountManager, + KeyManager: node, RandomSource: node, BacklogPool: node.highPriorityCryptoVerificationPool, } @@ -1062,3 +1062,54 @@ func (node *AlgorandFullNode) AssembleBlock(round basics.Round, deadline time.Ti } return validatedBlock{vb: lvb}, nil } + +// VotingKeys implements the key maanger's VotingKeys method, and provides additional validation with the ledger. 
+// that allows us to load multiple overlapping keys for the same account, and filter these per-round basis. +func (node *AlgorandFullNode) VotingKeys(votingRound, keysRound basics.Round) []account.Participation { + keys := node.accountManager.Keys(votingRound) + + participations := make([]account.Participation, 0, len(keys)) + accountsData := make(map[basics.Address]basics.AccountData, len(keys)) + matchingAccountsKeys := make(map[basics.Address]bool) + mismatchingAccountsKeys := make(map[basics.Address]int) + const bitMismatchingVotingKey = 1 + const bitMismatchingSelectionKey = 2 + for _, part := range keys { + acctData, hasAccountData := accountsData[part.Parent] + if !hasAccountData { + var err error + acctData, _, err = node.ledger.LookupWithoutRewards(keysRound, part.Parent) + if err != nil { + node.log.Warnf("node.VotingKeys: Account %v not participating: cannot locate account for round %d : %v", part.Address(), keysRound, err) + continue + } + accountsData[part.Parent] = acctData + } + + if acctData.VoteID != part.Voting.OneTimeSignatureVerifier { + mismatchingAccountsKeys[part.Address()] = mismatchingAccountsKeys[part.Address()] | bitMismatchingVotingKey + continue + } + if acctData.SelectionID != part.VRF.PK { + mismatchingAccountsKeys[part.Address()] = mismatchingAccountsKeys[part.Address()] | bitMismatchingSelectionKey + continue + } + participations = append(participations, part) + matchingAccountsKeys[part.Address()] = true + } + // write the warnings per account only if we couldn't find a single valid key for that account. 
+ for mismatchingAddr, warningFlags := range mismatchingAccountsKeys { + if matchingAccountsKeys[mismatchingAddr] { + continue + } + if warningFlags&bitMismatchingVotingKey == bitMismatchingVotingKey { + node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain voting key differ from participation voting key for round %d", mismatchingAddr, votingRound, keysRound) + continue + } + if warningFlags&bitMismatchingSelectionKey == bitMismatchingSelectionKey { + node.log.Warnf("node.VotingKeys: Account %v not participating on round %d: on chain selection key differ from participation selection key for round %d", mismatchingAddr, votingRound, keysRound) + continue + } + } + return participations +} diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go new file mode 100644 index 0000000000..e64fecfada --- /dev/null +++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go @@ -0,0 +1,209 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package participation + +import ( + "context" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/data/account" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/data/transactions" + "github.com/algorand/go-algorand/protocol" + "github.com/algorand/go-algorand/test/framework/fixtures" + "github.com/algorand/go-algorand/util/db" +) + +func TestOverlappingParticipationKeys(t *testing.T) { + t.Parallel() + a := require.New(fixtures.SynchronizedTest(t)) + + consensus := make(config.ConsensusProtocols) + shortPartKeysProtocol := config.Consensus[protocol.ConsensusCurrentVersion] + shortPartKeysProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} + shortPartKeysProtocol.SeedLookback = 2 + shortPartKeysProtocol.SeedRefreshInterval = 8 + if runtime.GOARCH == "amd64" { + // amd64 platforms are generally quite capable, so accelerate the round times to make the test run faster. 
+ shortPartKeysProtocol.AgreementFilterTimeoutPeriod0 = 1 * time.Second + shortPartKeysProtocol.AgreementFilterTimeout = 1 * time.Second + } + consensus[protocol.ConsensusVersion("shortpartkeysprotocol")] = shortPartKeysProtocol + + var fixture fixtures.RestClientFixture + fixture.SetConsensus(consensus) + fixture.SetupNoStart(t, filepath.Join("nettemplates", "ShortParticipationKeys.json")) + defer fixture.Shutdown() + + accountsNum := len(fixture.NodeDataDirs()) + for _, dataDir := range fixture.NodeDataDirs() { + cfg, err := config.LoadConfigFromDisk(dataDir) + a.NoError(err) + cfg.ParticipationKeysRefreshInterval = 500 * time.Millisecond + err = cfg.SaveToDisk(dataDir) + a.NoError(err) + } + + genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(fixture.PrimaryDataDir(), "genesis.json")) + a.NoError(err) + genesisHash := crypto.HashObj(genesis) + rootKeys := make(map[int]*account.Root) + regTransactions := make(map[int]transactions.SignedTxn) + lastRound := uint64(64) + + // prepare the participation keys ahead of time. 
+ for round := uint64(1); round < lastRound; round++ { + if (round-1)%10 >= uint64(accountsNum) { + continue + } + acctIdx := (round - 1) % 10 + txStartRound := round + txEndRound := txStartRound + 36 + 10 + regStartRound := round + 32 + regEndRound := regStartRound + 11 + err = prepareParticipationKey(a, &fixture, acctIdx, txStartRound, txEndRound, regStartRound, regEndRound, genesisHash, rootKeys, regTransactions) + a.NoError(err) + } + + fixture.Start() + currentRound := uint64(0) + fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.NC) + for { + err := fixture.WaitForRoundWithTimeout(currentRound + 1) + a.NoError(err) + currentRound++ + if (currentRound-1)%10 < uint64(accountsNum) { + acctIdx := (currentRound - 1) % 10 + startRound := currentRound + 2 + endRound := startRound + 36 + 10 - 2 + regStartRound := currentRound + 32 + regEndRound := regStartRound + 11 + err = addParticipationKey(a, &fixture, acctIdx, startRound, endRound, regTransactions) + a.NoError(err) + t.Logf("[.] Round %d, Added reg key for node %d range [%d..%d]\n", currentRound, acctIdx, regStartRound, regEndRound) + } else { + t.Logf("[.] Round %d\n", currentRound) + } + + if currentRound == lastRound { + break + } + } + +} + +func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, startRound, endRound uint64, regTransactions map[int]transactions.SignedTxn) error { + dataDir := fixture.NodeDataDirs()[acctNum] + nc := fixture.GetNodeControllerForDataDir(dataDir) + genesisDir, err := nc.GetGenesisDir() + + partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", startRound, endRound)) + partKeyNameTarget := filepath.Join(genesisDir, config.PartKeyFilename("Wallet", startRound, endRound)) + + // make the rename in the background to ensure it won't take too long. We have ~32 rounds to complete this. 
+ go os.Rename(partKeyName, partKeyNameTarget) + + signedTxn := regTransactions[int(startRound-2)] + a.NotEmpty(signedTxn.Sig) + _, err = fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn) + if err != nil { + a.NoError(err) + return err + } + return err +} + +func prepareParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, txStartRound, txEndRound, regStartRound, regEndRound uint64, genesisHash crypto.Digest, rootKeys map[int]*account.Root, regTransactions map[int]transactions.SignedTxn) error { + dataDir := fixture.NodeDataDirs()[acctNum] + + nc := fixture.GetNodeControllerForDataDir(dataDir) + genesisDir, err := nc.GetGenesisDir() + if err != nil { + a.NoError(err) + return err + } + var rootAccount account.Root + if _, have := rootKeys[int(acctNum)]; !have { + var rootKeyFilename string + err = filepath.Walk(genesisDir, func(path string, f os.FileInfo, errIn error) error { + if errIn != nil { + return errIn + } + if f.IsDir() { + return nil + } + if config.IsRootKeyFilename(f.Name()) { + rootKeyFilename = path + } + return nil + }) + if err != nil { + a.NoError(err) + return err + } + + rootKeyHandle, err := db.MakeAccessor(rootKeyFilename, false, false) + if err != nil { + a.NoError(err) + return err + } + + // generate a new participation key. 
+ rootAccount, err = account.RestoreRoot(rootKeyHandle) + if err != nil { + a.NoError(err) + return err + } + rootKeys[int(acctNum)] = &rootAccount + rootKeyHandle.Close() + } + rootAccount = *rootKeys[int(acctNum)] + + partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", txStartRound+2, txEndRound)) + + partkeyHandle, err := db.MakeAccessor(partKeyName, false, false) + if err != nil { + a.NoError(err) + return err + } + + persistedPerticipation, err := account.FillDBWithParticipationKeys(partkeyHandle, rootAccount.Address(), basics.Round(regStartRound), basics.Round(regEndRound), fixture.LibGoalFixture.Genesis().PartKeyDilution) + if err != nil { + a.NoError(err) + return err + } + partkeyHandle.Vacuum(context.Background()) + persistedPerticipation.Close() + + unsignedTxn := persistedPerticipation.GenerateRegistrationTransaction(basics.MicroAlgos{Raw: 1000}, basics.Round(txStartRound), basics.Round(txEndRound), [32]byte{}) + copy(unsignedTxn.GenesisHash[:], genesisHash[:]) + if err != nil { + a.NoError(err) + return err + } + regTransactions[int(txStartRound)] = unsignedTxn.Sign(rootAccount.Secrets()) + return err +} diff --git a/test/testdata/nettemplates/ShortParticipationKeys.json b/test/testdata/nettemplates/ShortParticipationKeys.json new file mode 100644 index 0000000000..28d06324f2 --- /dev/null +++ b/test/testdata/nettemplates/ShortParticipationKeys.json @@ -0,0 +1,55 @@ +{ + "Genesis": { + "NetworkName": "shortpartkeys", + "ConsensusProtocol": "shortpartkeysprotocol", + "FirstPartKeyRound": 0, + "LastPartKeyRound": 36, + "PartKeyDilution": 8, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 33, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 33, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 34, + "Online": true + } + ] + }, + "Nodes": [ + { + "Name": "Primary", + "IsRelay": true, + "Wallets": [ + ] + }, + { + "Name": "Node1", + "Wallets": [ + { "Name": "Wallet1", + "ParticipationOnly": false } + ] + }, + { + 
"Name": "Node2", + "Wallets": [ + { "Name": "Wallet2", + "ParticipationOnly": false } + ] + }, + { + "Name": "Node3", + "Wallets": [ + { "Name": "Wallet3", + "ParticipationOnly": false } + ] + } + ] +} From ade1fead41c1dc298fc7c7640137d37825e1a7ae Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Thu, 6 May 2021 21:16:29 -0400 Subject: [PATCH 204/215] Add tests for ledger/compactcert.go (#2128) Add unit tests to ledger/compactcert --- compactcert/builder.go | 3 +- ledger/compactcert.go | 12 +-- ledger/compactcert_test.go | 168 +++++++++++++++++++++++++++++++++++++ 3 files changed, 177 insertions(+), 6 deletions(-) create mode 100644 ledger/compactcert_test.go diff --git a/compactcert/builder.go b/compactcert/builder.go index 27b2c0117c..f6adbfafb0 100644 --- a/compactcert/builder.go +++ b/compactcert/builder.go @@ -27,6 +27,7 @@ import ( "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/transactions" "github.com/algorand/go-algorand/ledger" + "github.com/algorand/go-algorand/logging" "github.com/algorand/go-algorand/network" "github.com/algorand/go-algorand/protocol" ) @@ -321,7 +322,7 @@ func (ccw *Worker) tryBuilding() { for rnd, b := range ccw.builders { firstValid := ccw.ledger.Latest() + 1 - acceptableWeight := ledger.AcceptableCompactCertWeight(b.votersHdr, firstValid) + acceptableWeight := ledger.AcceptableCompactCertWeight(b.votersHdr, firstValid, logging.Base()) if b.SignedWeight() < acceptableWeight { // Haven't signed enough to build the cert at this time.. continue diff --git a/ledger/compactcert.go b/ledger/compactcert.go index 714a053abf..9d2f2d66f8 100644 --- a/ledger/compactcert.go +++ b/ledger/compactcert.go @@ -33,7 +33,9 @@ import ( // votersHdr specifies the block that contains the Merkle commitment of // the voters for this compact cert (and thus the compact cert is for // votersHdr.Round() + CompactCertRounds). 
-func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round) uint64 { +// +// logger must not be nil; use at least logging.Base() +func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid basics.Round, logger logging.Logger) uint64 { proto := config.Consensus[votersHdr.CurrentProtocol] certRound := votersHdr.Round + basics.Round(proto.CompactCertRounds) total := votersHdr.CompactCert[protocol.CompactCertBasic].CompactCertVotersTotal @@ -71,7 +73,7 @@ func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid b provenWeight, overflowed := basics.Muldiv(total.ToUint64(), uint64(proto.CompactCertWeightThreshold), 1<<32) if overflowed || provenWeight > total.ToUint64() { // Shouldn't happen, but a safe fallback is to accept a larger cert. - logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight", + logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight", total, proto.CompactCertRounds, certRound, firstValid) return 0 } @@ -83,7 +85,7 @@ func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid b scaledWeight, overflowed := basics.Muldiv(total.ToUint64()-provenWeight, proto.CompactCertRounds/2-uint64(offset), proto.CompactCertRounds/2) if overflowed { // Shouldn't happen, but a safe fallback is to accept a larger cert. - logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow scaledWeight", + logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow scaledWeight", total, proto.CompactCertRounds, certRound, firstValid) return 0 } @@ -91,7 +93,7 @@ func AcceptableCompactCertWeight(votersHdr bookkeeping.BlockHeader, firstValid b w, overflowed := basics.OAdd(provenWeight, scaledWeight) if overflowed { // Shouldn't happen, but a safe fallback is to accept a larger cert. 
- logging.Base().Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)", + logger.Warnf("AcceptableCompactCertWeight(%d, %d, %d, %d) overflow provenWeight (%d) + scaledWeight (%d)", total, proto.CompactCertRounds, certRound, firstValid, provenWeight, scaledWeight) return 0 } @@ -161,7 +163,7 @@ func validateCompactCert(certHdr bookkeeping.BlockHeader, cert compactcert.Cert, nextCertRnd, certHdr.Round, votersRound) } - acceptableWeight := AcceptableCompactCertWeight(votersHdr, atRound) + acceptableWeight := AcceptableCompactCertWeight(votersHdr, atRound, logging.Base()) if cert.SignedWeight < acceptableWeight { return fmt.Errorf("insufficient weight at %d: %d < %d", atRound, cert.SignedWeight, acceptableWeight) diff --git a/ledger/compactcert_test.go b/ledger/compactcert_test.go new file mode 100644 index 0000000000..6c1c22ad4a --- /dev/null +++ b/ledger/compactcert_test.go @@ -0,0 +1,168 @@ +// Copyright (C) 2019-2021 Algorand, Inc. +// This file is part of go-algorand +// +// go-algorand is free software: you can redistribute it and/or modify +// it under the terms of the GNU Affero General Public License as +// published by the Free Software Foundation, either version 3 of the +// License, or (at your option) any later version. +// +// go-algorand is distributed in the hope that it will be useful, +// but WITHOUT ANY WARRANTY; without even the implied warranty of +// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +// GNU Affero General Public License for more details. +// +// You should have received a copy of the GNU Affero General Public License +// along with go-algorand. If not, see . 
+ +package ledger + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/algorand/go-algorand/config" + "github.com/algorand/go-algorand/crypto/compactcert" + "github.com/algorand/go-algorand/data/basics" + "github.com/algorand/go-algorand/data/bookkeeping" + "github.com/algorand/go-algorand/logging" + "github.com/algorand/go-algorand/protocol" +) + +func TestValidateCompactCert(t *testing.T) { + var certHdr bookkeeping.BlockHeader + var cert compactcert.Cert + var votersHdr bookkeeping.BlockHeader + var nextCertRnd basics.Round + var atRound basics.Round + + // will definitely fail with nothing set up + err := validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + t.Log(err) + require.NotNil(t, err) + + certHdr.CurrentProtocol = "TestValidateCompactCert" + certHdr.Round = 1 + proto := config.Consensus[certHdr.CurrentProtocol] + proto.CompactCertRounds = 2 + config.Consensus[certHdr.CurrentProtocol] = proto + + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + certHdr.Round = 4 + votersHdr.Round = 4 + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + votersHdr.Round = 2 + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + nextCertRnd = 4 + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + votersHdr.CurrentProtocol = certHdr.CurrentProtocol + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + votersHdr.CompactCert = 
make(map[protocol.CompactCertType]bookkeeping.CompactCertState) + cc := votersHdr.CompactCert[protocol.CompactCertBasic] + cc.CompactCertVotersTotal.Raw = 100 + votersHdr.CompactCert[protocol.CompactCertBasic] = cc + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + cert.SignedWeight = 101 + err = validateCompactCert(certHdr, cert, votersHdr, nextCertRnd, atRound) + // still err, but a different err case to cover + t.Log(err) + require.NotNil(t, err) + + // Above cases leave validateCompactCert() with 100% coverage. + // crypto/compactcert.Verify has its own tests +} + +func TestAcceptableCompactCertWeight(t *testing.T) { + var votersHdr bookkeeping.BlockHeader + var firstValid basics.Round + logger := logging.TestingLog(t) + + votersHdr.CurrentProtocol = "TestAcceptableCompactCertWeight" + proto := config.Consensus[votersHdr.CurrentProtocol] + proto.CompactCertRounds = 2 + config.Consensus[votersHdr.CurrentProtocol] = proto + out := AcceptableCompactCertWeight(votersHdr, firstValid, logger) + require.Equal(t, uint64(0), out) + + votersHdr.CompactCert = make(map[protocol.CompactCertType]bookkeeping.CompactCertState) + cc := votersHdr.CompactCert[protocol.CompactCertBasic] + cc.CompactCertVotersTotal.Raw = 100 + votersHdr.CompactCert[protocol.CompactCertBasic] = cc + out = AcceptableCompactCertWeight(votersHdr, firstValid, logger) + require.Equal(t, uint64(100), out) + + // this should exercise the second return case + firstValid = basics.Round(5) + out = AcceptableCompactCertWeight(votersHdr, firstValid, logger) + require.Equal(t, uint64(100), out) + + firstValid = basics.Round(6) + proto.CompactCertWeightThreshold = 999999999 + config.Consensus[votersHdr.CurrentProtocol] = proto + out = AcceptableCompactCertWeight(votersHdr, firstValid, logger) + require.Equal(t, uint64(0x17), out) + + proto.CompactCertRounds = 10000 + votersHdr.Round = 10000 + 
firstValid = basics.Round(29000) + config.Consensus[votersHdr.CurrentProtocol] = proto + cc.CompactCertVotersTotal.Raw = 0x7fffffffffffffff + votersHdr.CompactCert[protocol.CompactCertBasic] = cc + proto.CompactCertWeightThreshold = 0x7fffffff + config.Consensus[votersHdr.CurrentProtocol] = proto + out = AcceptableCompactCertWeight(votersHdr, firstValid, logger) + require.Equal(t, uint64(0x4cd35a85213a92a2), out) + + // Covers everything except "overflow that shouldn't happen" branches +} + +func TestCompactCertParams(t *testing.T) { + var votersHdr bookkeeping.BlockHeader + var hdr bookkeeping.BlockHeader + + res, err := CompactCertParams(votersHdr, hdr) + require.Error(t, err) // not enabled + + votersHdr.CurrentProtocol = "TestCompactCertParams" + proto := config.Consensus[votersHdr.CurrentProtocol] + proto.CompactCertRounds = 2 + config.Consensus[votersHdr.CurrentProtocol] = proto + votersHdr.Round = 1 + res, err = CompactCertParams(votersHdr, hdr) + require.Error(t, err) // wrong round + + votersHdr.Round = 2 + hdr.Round = 3 + res, err = CompactCertParams(votersHdr, hdr) + require.Error(t, err) // wrong round + + hdr.Round = 4 + res, err = CompactCertParams(votersHdr, hdr) + require.Equal(t, hdr.Round+1, res.SigRound) + + // Covers all cases except overflow +} From c0452b2d35273448ffdcca907c50998efdd7ab75 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 7 May 2021 15:10:05 -0400 Subject: [PATCH 205/215] rollback config v17 into v16 (#2140) Previous PRs were advancing the config version from v16 to v17 needlessly. ( i.e. needlessly since the rel/beta is currently at v15 ) This PR merges back the changes from v17 into v16, and removes v17. 
--- config/config.go | 12 ++-- config/local_defaults.go | 2 +- installer/config.json.example | 2 +- test/testdata/configs/config-v16.json | 5 ++ test/testdata/configs/config-v17.json | 92 --------------------------- 5 files changed, 13 insertions(+), 100 deletions(-) delete mode 100644 test/testdata/configs/config-v17.json diff --git a/config/config.go b/config/config.go index 89865d1ae2..0ab143b89b 100644 --- a/config/config.go +++ b/config/config.go @@ -63,7 +63,7 @@ type Local struct { // Version tracks the current version of the defaults so we can migrate old -> new // This is specifically important whenever we decide to change the default value // for an existing parameter. This field tag must be updated any time we add a new version. - Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16" version[17]:"17"` + Version uint32 `version[0]:"0" version[1]:"1" version[2]:"2" version[3]:"3" version[4]:"4" version[5]:"5" version[6]:"6" version[7]:"7" version[8]:"8" version[9]:"9" version[10]:"10" version[11]:"11" version[12]:"12" version[13]:"13" version[14]:"14" version[15]:"15" version[16]:"16"` // environmental (may be overridden) // When enabled, stores blocks indefinitally, otherwise, only the most recents blocks @@ -401,23 +401,23 @@ type Local struct { // 3 : speed up catchup by skipping necessary validations // 12 : perform all validation methods (normal and additional). These extra tests helps to verify the integrity of the compiled executable against // previously used executabled, and would not provide any additional security guarantees. 
- CatchupBlockValidateMode int `version[17]:"0"` + CatchupBlockValidateMode int `version[16]:"0"` // Generate AccountUpdates telemetry event - EnableAccountUpdatesStats bool `version[17]:"false"` + EnableAccountUpdatesStats bool `version[16]:"false"` // Time interval in nanoseconds for generating accountUpdates telemetry event - AccountUpdatesStatsInterval time.Duration `version[17]:"5000000000"` + AccountUpdatesStatsInterval time.Duration `version[16]:"5000000000"` // ParticipationKeysRefreshInterval is the duration between two consecutive checks to see if new participation // keys have been placed on the genesis directory. - ParticipationKeysRefreshInterval time.Duration `version[17]:"60000000000"` + ParticipationKeysRefreshInterval time.Duration `version[16]:"60000000000"` // DisableNetworking disables all the incoming and outgoing communication a node would perform. This is useful // when we have a single-node private network, where there is no other nodes that need to be communicated with. // features like catchpoint catchup would be rendered completly non-operational, and many of the node inner // working would be completly dis-functional. - DisableNetworking bool `version[17]:"false"` + DisableNetworking bool `version[16]:"false"` } // Filenames of config files within the configdir (e.g. 
~/.algorand) diff --git a/config/local_defaults.go b/config/local_defaults.go index c5b26f65c9..725409e738 100644 --- a/config/local_defaults.go +++ b/config/local_defaults.go @@ -20,7 +20,7 @@ package config var defaultLocal = Local{ - Version: 17, + Version: 16, AccountUpdatesStatsInterval: 5000000000, AccountsRebuildSynchronousMode: 1, AnnounceParticipationKey: true, diff --git a/installer/config.json.example b/installer/config.json.example index a455ad2ec0..42b6361bd7 100644 --- a/installer/config.json.example +++ b/installer/config.json.example @@ -1,5 +1,5 @@ { - "Version": 17, + "Version": 16, "AccountUpdatesStatsInterval": 5000000000, "AccountsRebuildSynchronousMode": 1, "AnnounceParticipationKey": true, diff --git a/test/testdata/configs/config-v16.json b/test/testdata/configs/config-v16.json index e7201905da..42b6361bd7 100644 --- a/test/testdata/configs/config-v16.json +++ b/test/testdata/configs/config-v16.json @@ -1,5 +1,6 @@ { "Version": 16, + "AccountUpdatesStatsInterval": 5000000000, "AccountsRebuildSynchronousMode": 1, "AnnounceParticipationKey": true, "Archival": false, @@ -11,6 +12,7 @@ "CatchpointInterval": 10000, "CatchpointTracking": 0, "CatchupBlockDownloadRetryAttempts": 1000, + "CatchupBlockValidateMode": 0, "CatchupFailurePeerRefreshRate": 10, "CatchupGossipBlockFetchTimeoutSec": 4, "CatchupHTTPBlockFetchTimeoutSec": 4, @@ -22,7 +24,9 @@ "DNSSecurityFlags": 1, "DeadlockDetection": 0, "DisableLocalhostConnectionRateLimit": true, + "DisableNetworking": false, "DisableOutgoingConnectionThrottling": false, + "EnableAccountUpdatesStats": false, "EnableAgreementReporting": false, "EnableAgreementTimeMetrics": false, "EnableAssembleStats": false, @@ -63,6 +67,7 @@ "OptimizeAccountsDatabaseOnStartup": false, "OutgoingMessageFilterBucketCount": 3, "OutgoingMessageFilterBucketSize": 128, + "ParticipationKeysRefreshInterval": 60000000000, "PeerConnectionsUpdateInterval": 3600, "PeerPingPeriodSeconds": 0, "PriorityPeers": {}, diff --git 
a/test/testdata/configs/config-v17.json b/test/testdata/configs/config-v17.json deleted file mode 100644 index a455ad2ec0..0000000000 --- a/test/testdata/configs/config-v17.json +++ /dev/null @@ -1,92 +0,0 @@ -{ - "Version": 17, - "AccountUpdatesStatsInterval": 5000000000, - "AccountsRebuildSynchronousMode": 1, - "AnnounceParticipationKey": true, - "Archival": false, - "BaseLoggerDebugLevel": 4, - "BlockServiceCustomFallbackEndpoints": "", - "BroadcastConnectionsLimit": -1, - "CadaverSizeTarget": 1073741824, - "CatchpointFileHistoryLength": 365, - "CatchpointInterval": 10000, - "CatchpointTracking": 0, - "CatchupBlockDownloadRetryAttempts": 1000, - "CatchupBlockValidateMode": 0, - "CatchupFailurePeerRefreshRate": 10, - "CatchupGossipBlockFetchTimeoutSec": 4, - "CatchupHTTPBlockFetchTimeoutSec": 4, - "CatchupLedgerDownloadRetryAttempts": 50, - "CatchupParallelBlocks": 16, - "ConnectionsRateLimitingCount": 60, - "ConnectionsRateLimitingWindowSeconds": 1, - "DNSBootstrapID": ".algorand.network", - "DNSSecurityFlags": 1, - "DeadlockDetection": 0, - "DisableLocalhostConnectionRateLimit": true, - "DisableNetworking": false, - "DisableOutgoingConnectionThrottling": false, - "EnableAccountUpdatesStats": false, - "EnableAgreementReporting": false, - "EnableAgreementTimeMetrics": false, - "EnableAssembleStats": false, - "EnableBlockService": false, - "EnableBlockServiceFallbackToArchiver": true, - "EnableCatchupFromArchiveServers": false, - "EnableDeveloperAPI": false, - "EnableGossipBlockService": true, - "EnableIncomingMessageFilter": false, - "EnableLedgerService": false, - "EnableMetricReporting": false, - "EnableOutgoingNetworkMessageFiltering": true, - "EnablePingHandler": true, - "EnableProcessBlockStats": false, - "EnableProfiler": false, - "EnableRequestLogger": false, - "EnableTopAccountsReporting": false, - "EndpointAddress": "127.0.0.1:0", - "FallbackDNSResolverAddress": "", - "ForceRelayMessages": false, - "GossipFanout": 4, - "IncomingConnectionsLimit": 10000, 
- "IncomingMessageFilterBucketCount": 5, - "IncomingMessageFilterBucketSize": 512, - "IsIndexerActive": false, - "LedgerSynchronousMode": 2, - "LogArchiveMaxAge": "", - "LogArchiveName": "node.archive.log", - "LogSizeLimit": 1073741824, - "MaxCatchpointDownloadDuration": 7200000000000, - "MaxConnectionsPerIP": 30, - "MinCatchpointFileDownloadBytesPerSecond": 20480, - "NetAddress": "", - "NetworkMessageTraceServer": "", - "NetworkProtocolVersion": "", - "NodeExporterListenAddress": ":9100", - "NodeExporterPath": "./node_exporter", - "OptimizeAccountsDatabaseOnStartup": false, - "OutgoingMessageFilterBucketCount": 3, - "OutgoingMessageFilterBucketSize": 128, - "ParticipationKeysRefreshInterval": 60000000000, - "PeerConnectionsUpdateInterval": 3600, - "PeerPingPeriodSeconds": 0, - "PriorityPeers": {}, - "PublicAddress": "", - "ReconnectTime": 60000000000, - "ReservedFDs": 256, - "RestReadTimeoutSeconds": 15, - "RestWriteTimeoutSeconds": 120, - "RunHosted": false, - "SuggestedFeeBlockHistory": 3, - "SuggestedFeeSlidingWindowSize": 50, - "TLSCertFile": "", - "TLSKeyFile": "", - "TelemetryToLog": true, - "TxPoolExponentialIncreaseFactor": 2, - "TxPoolSize": 15000, - "TxSyncIntervalSeconds": 60, - "TxSyncServeResponseSize": 1000000, - "TxSyncTimeoutSeconds": 30, - "UseXForwardedForAddressField": "", - "VerifiedTranscationsCacheSize": 30000 -} From 88c9f49f14d04f9f209b2acbd42c5864cae9bdc6 Mon Sep 17 00:00:00 2001 From: Brian Olson Date: Fri, 7 May 2021 18:02:38 -0400 Subject: [PATCH 206/215] test coverage for ledger/eval.go funcs (#2130) Test coverage for some functions in ledger/eval.go --- data/pools/transactionPool.go | 5 +- data/transactions/signedtxn.go | 10 ++ ledger/cow_test.go | 11 +- ledger/eval_test.go | 279 ++++++++++++++++++++++++++++++++- ledger/perf_test.go | 9 +- 5 files changed, 304 insertions(+), 10 deletions(-) diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index 58cf837c7e..a07627d024 100644 --- a/data/pools/transactionPool.go 
+++ b/data/pools/transactionPool.go @@ -556,10 +556,7 @@ func (pool *TransactionPool) addToPendingBlockEvaluatorOnce(txgroup []transactio } } - txgroupad := make([]transactions.SignedTxnWithAD, len(txgroup)) - for i, tx := range txgroup { - txgroupad[i].SignedTxn = tx - } + txgroupad := transactions.WrapSignedTxnsWithAD(txgroup) transactionGroupStartsTime := time.Time{} if recomputing { diff --git a/data/transactions/signedtxn.go b/data/transactions/signedtxn.go index 7a20c3d8a2..8b54300d9a 100644 --- a/data/transactions/signedtxn.go +++ b/data/transactions/signedtxn.go @@ -119,3 +119,13 @@ func (s *SignedTxnInBlock) Hash() crypto.Digest { defer protocol.PutEncodingBuf(enc) return crypto.Hash(enc) } + +// WrapSignedTxnsWithAD takes an array SignedTxn and returns the same as SignedTxnWithAD +// Each txn's ApplyData is the default empty state. +func WrapSignedTxnsWithAD(txgroup []SignedTxn) []SignedTxnWithAD { + txgroupad := make([]SignedTxnWithAD, len(txgroup)) + for i, tx := range txgroup { + txgroupad[i].SignedTxn = tx + } + return txgroupad +} diff --git a/ledger/cow_test.go b/ledger/cow_test.go index a4df0f1c1f..b09c479e82 100644 --- a/ledger/cow_test.go +++ b/ledger/cow_test.go @@ -29,6 +29,8 @@ import ( type mockLedger struct { balanceMap map[basics.Address]basics.AccountData + blocks map[basics.Round]bookkeeping.BlockHeader + blockErr map[basics.Round]error } func (ml *mockLedger) lookup(addr basics.Address) (basics.AccountData, error) { @@ -75,8 +77,13 @@ func (ml *mockLedger) compactCertNext() basics.Round { return 0 } -func (ml *mockLedger) blockHdr(_ basics.Round) (bookkeeping.BlockHeader, error) { - return bookkeeping.BlockHeader{}, nil +func (ml *mockLedger) blockHdr(rnd basics.Round) (bookkeeping.BlockHeader, error) { + err, hit := ml.blockErr[rnd] + if hit { + return bookkeeping.BlockHeader{}, err + } + hdr := ml.blocks[rnd] // default struct is fine if nothing found + return hdr, nil } func checkCow(t *testing.T, cow *roundCowState, accts 
map[basics.Address]basics.AccountData) { diff --git a/ledger/eval_test.go b/ledger/eval_test.go index cc27fa0836..1035c6ef9a 100644 --- a/ledger/eval_test.go +++ b/ledger/eval_test.go @@ -18,6 +18,7 @@ package ledger import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -31,6 +32,7 @@ import ( "github.com/algorand/go-algorand/agreement" "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/compactcert" "github.com/algorand/go-algorand/data/basics" "github.com/algorand/go-algorand/data/bookkeeping" "github.com/algorand/go-algorand/data/transactions" @@ -85,6 +87,52 @@ func TestBlockEvaluator(t *testing.T) { err = eval.Transaction(st, transactions.ApplyData{}) require.NoError(t, err) + // Broken signature should fail + stbad := st + st.Sig[2] ^= 8 + txgroup := []transactions.SignedTxn{stbad} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + + // Repeat should fail + txgroup = []transactions.SignedTxn{st} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + err = eval.Transaction(st, transactions.ApplyData{}) + require.Error(t, err) + + // out of range should fail + btxn := txn + btxn.FirstValid++ + btxn.LastValid += 2 + st = btxn.Sign(keys[0]) + txgroup = []transactions.SignedTxn{st} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + err = eval.Transaction(st, transactions.ApplyData{}) + require.Error(t, err) + + // bogus group should fail + btxn = txn + btxn.Group[1] = 1 + st = btxn.Sign(keys[0]) + txgroup = []transactions.SignedTxn{st} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + err = eval.Transaction(st, transactions.ApplyData{}) + require.Error(t, err) + + // mixed fields should fail + btxn = txn + btxn.XferAsset = 3 + st = btxn.Sign(keys[0]) + txgroup = []transactions.SignedTxn{st} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + // We don't test eval.Transaction() here 
because it doesn't check txn.WellFormed(), instead relying on that to have already been checked by the transaction pool. + // err = eval.Transaction(st, transactions.ApplyData{}) + // require.Error(t, err) + selfTxn := transactions.Transaction{ Type: protocol.PaymentTx, Header: transactions.Header{ @@ -99,9 +147,58 @@ func TestBlockEvaluator(t *testing.T) { Amount: basics.MicroAlgos{Raw: 100}, }, } - err = eval.Transaction(selfTxn.Sign(keys[2]), transactions.ApplyData{}) + stxn := selfTxn.Sign(keys[2]) + + // TestTransactionGroup() and Transaction() should have the same outcome, but work slightly different code paths. + txgroup = []transactions.SignedTxn{stxn} + err = eval.TestTransactionGroup(txgroup) + require.NoError(t, err) + + err = eval.Transaction(stxn, transactions.ApplyData{}) require.NoError(t, err) + t3 := txn + t3.Amount.Raw++ + t4 := selfTxn + t4.Amount.Raw++ + + // a group without .Group should fail + s3 := t3.Sign(keys[0]) + s4 := t4.Sign(keys[2]) + txgroup = []transactions.SignedTxn{s3, s4} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + txgroupad := transactions.WrapSignedTxnsWithAD(txgroup) + err = eval.TransactionGroup(txgroupad) + require.Error(t, err) + + // Test a group that should work + var group transactions.TxGroup + group.TxGroupHashes = []crypto.Digest{crypto.HashObj(t3), crypto.HashObj(t4)} + t3.Group = crypto.HashObj(group) + t4.Group = t3.Group + s3 = t3.Sign(keys[0]) + s4 = t4.Sign(keys[2]) + txgroup = []transactions.SignedTxn{s3, s4} + err = eval.TestTransactionGroup(txgroup) + require.NoError(t, err) + + // disagreement on Group id should fail + t4bad := t4 + t4bad.Group[3] ^= 3 + s4bad := t4bad.Sign(keys[2]) + txgroup = []transactions.SignedTxn{s3, s4bad} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + txgroupad = transactions.WrapSignedTxnsWithAD(txgroup) + err = eval.TransactionGroup(txgroupad) + require.Error(t, err) + + // missing part of the group should fail + txgroup = 
[]transactions.SignedTxn{s3} + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) + validatedBlock, err := eval.GenerateBlock() require.NoError(t, err) @@ -420,6 +517,11 @@ ok: }}, }, } + txgroup := []transactions.SignedTxn{stxn1, stxn2} + err = eval.TestTransactionGroup(txgroup) + if err != nil { + return eval, addrs[0], err + } err = eval.transactionGroup(g) return eval, addrs[0], err } @@ -572,3 +674,178 @@ func benchmarkBlockEvaluator(b *testing.B, inMem bool, withCrypto bool) { b.StopTimer() } + +func TestCowCompactCert(t *testing.T) { + var certRnd basics.Round + var certType protocol.CompactCertType + var cert compactcert.Cert + var atRound basics.Round + var validate bool + accts0 := randomAccounts(20, true) + blocks := make(map[basics.Round]bookkeeping.BlockHeader) + blockErr := make(map[basics.Round]error) + ml := mockLedger{balanceMap: accts0, blocks: blocks, blockErr: blockErr} + c0 := makeRoundCowState(&ml, bookkeeping.BlockHeader{}, 0, 0) + + certType = protocol.CompactCertType(1234) // bad cert type + err := c0.compactCert(certRnd, certType, cert, atRound, validate) + require.Error(t, err) + + // no certRnd block + certType = protocol.CompactCertBasic + noBlockErr := errors.New("no block") + blockErr[3] = noBlockErr + certRnd = 3 + err = c0.compactCert(certRnd, certType, cert, atRound, validate) + require.Error(t, err) + + // no votersRnd block + // this is slightly a mess of things that don't quite line up with likely usage + validate = true + var certHdr bookkeeping.BlockHeader + certHdr.CurrentProtocol = "TestCowCompactCert" + certHdr.Round = 1 + proto := config.Consensus[certHdr.CurrentProtocol] + proto.CompactCertRounds = 2 + config.Consensus[certHdr.CurrentProtocol] = proto + blocks[certHdr.Round] = certHdr + + certHdr.Round = 15 + blocks[certHdr.Round] = certHdr + certRnd = certHdr.Round + blockErr[13] = noBlockErr + err = c0.compactCert(certRnd, certType, cert, atRound, validate) + require.Error(t, err) + + // validate fail + 
certHdr.Round = 1 + certRnd = certHdr.Round + err = c0.compactCert(certRnd, certType, cert, atRound, validate) + require.Error(t, err) + + // fall through to no err + validate = false + err = c0.compactCert(certRnd, certType, cert, atRound, validate) + require.NoError(t, err) + + // 100% coverage +} + +// a couple trivial tests that don't need setup +// see TestBlockEvaluator for more +func TestTestTransactionGroup(t *testing.T) { + var txgroup []transactions.SignedTxn + eval := BlockEvaluator{} + err := eval.TestTransactionGroup(txgroup) + require.NoError(t, err) // nothing to do, no problem + + eval.proto = config.Consensus[protocol.ConsensusCurrentVersion] + txgroup = make([]transactions.SignedTxn, eval.proto.MaxTxGroupSize+1) + err = eval.TestTransactionGroup(txgroup) + require.Error(t, err) // too many +} + +// test BlockEvaluator.transactionGroup() +// some trivial checks that require no setup +func TestPrivateTransactionGroup(t *testing.T) { + var txgroup []transactions.SignedTxnWithAD + eval := BlockEvaluator{} + err := eval.transactionGroup(txgroup) + require.NoError(t, err) // nothing to do, no problem + + eval.proto = config.Consensus[protocol.ConsensusCurrentVersion] + txgroup = make([]transactions.SignedTxnWithAD, eval.proto.MaxTxGroupSize+1) + err = eval.transactionGroup(txgroup) + require.Error(t, err) // too many +} + +// BlockEvaluator.workaroundOverspentRewards() fixed a couple issues on testnet. +// This is now part of history and has to be re-created when running catchup on testnet. So, test to ensure it keeps happenning. 
+func TestTestnetFixup(t *testing.T) { + eval := &BlockEvaluator{} + var rewardPoolBalance basics.AccountData + rewardPoolBalance.MicroAlgos.Raw = 1234 + var headerRound basics.Round + testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA") + + // not a fixup round, no change + headerRound = 1 + poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound) + require.Equal(t, rewardPoolBalance, poolOld) + require.NoError(t, err) + + eval.genesisHash = testnetGenesisHash + eval.genesisHash[3]++ + + specialRounds := []basics.Round{1499995, 2926564} + for _, headerRound = range specialRounds { + poolOld, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound) + require.Equal(t, rewardPoolBalance, poolOld) + require.NoError(t, err) + } + + for _, headerRound = range specialRounds { + testnetFixupExecution(t, headerRound, 20000000000) + } + // do all the setup and do nothing for not a special round + testnetFixupExecution(t, specialRounds[0]+1, 0) +} + +func testnetFixupExecution(t *testing.T, headerRound basics.Round, poolBonus uint64) { + testnetGenesisHash, _ := crypto.DigestFromString("JBR3KGFEWPEE5SAQ6IWU6EEBZMHXD4CZU6WCBXWGF57XBZIJHIRA") + // big setup so we can move some algos + // boilerplate like TestBlockEvaluator, but pretend to be testnet + genesisInitState, addrs, keys := genesis(10) + genesisInitState.Block.BlockHeader.GenesisHash = testnetGenesisHash + genesisInitState.Block.BlockHeader.GenesisID = "testnet" + genesisInitState.GenesisHash = testnetGenesisHash + + // for addr, adata := range genesisInitState.Accounts { + // t.Logf("%s: %+v", addr.String(), adata) + // } + rewardPoolBalance := genesisInitState.Accounts[testPoolAddr] + nextPoolBalance := rewardPoolBalance.MicroAlgos.Raw + poolBonus + + dbName := fmt.Sprintf("%s.%d", t.Name(), crypto.RandUint64()) + const inMem = true + cfg := config.GetDefaultLocal() + cfg.Archival = true + l, err := OpenLedger(logging.Base(), 
dbName, inMem, genesisInitState, cfg) + require.NoError(t, err) + defer l.Close() + + newBlock := bookkeeping.MakeBlock(genesisInitState.Block.BlockHeader) + eval, err := l.StartEvaluator(newBlock.BlockHeader, 0) + require.NoError(t, err) + + // won't work before funding bank + if poolBonus > 0 { + _, err = eval.workaroundOverspentRewards(rewardPoolBalance, headerRound) + require.Error(t, err) + } + + bankAddr, _ := basics.UnmarshalChecksumAddress("GD64YIY3TWGDMCNPP553DZPPR6LDUSFQOIJVFDPPXWEG3FVOJCCDBBHU5A") + + // put some algos in the bank so that fixup can pull from this account + txn := transactions.Transaction{ + Type: protocol.PaymentTx, + Header: transactions.Header{ + Sender: addrs[0], + Fee: minFee, + FirstValid: newBlock.Round(), + LastValid: newBlock.Round(), + GenesisHash: testnetGenesisHash, + }, + PaymentTxnFields: transactions.PaymentTxnFields{ + Receiver: bankAddr, + Amount: basics.MicroAlgos{Raw: 20000000000 * 10}, + }, + } + st := txn.Sign(keys[0]) + err = eval.Transaction(st, transactions.ApplyData{}) + require.NoError(t, err) + + poolOld, err := eval.workaroundOverspentRewards(rewardPoolBalance, headerRound) + require.Equal(t, nextPoolBalance, poolOld.MicroAlgos.Raw) + require.NoError(t, err) +} diff --git a/ledger/perf_test.go b/ledger/perf_test.go index c4aee9c2dd..908a90f0a4 100644 --- a/ledger/perf_test.go +++ b/ledger/perf_test.go @@ -47,6 +47,9 @@ func genesis(naccts int) (InitState, []basics.Address, []*crypto.SignatureSecret keys := []*crypto.SignatureSecrets{} accts := make(map[basics.Address]basics.AccountData) + // 10 billion microalgos, across N accounts and pool and sink + amount := 10 * 1000000000 * 1000000 / uint64(naccts+2) + for i := 0; i < naccts; i++ { var seed crypto.Seed crypto.RandBytes(seed[:]) @@ -57,17 +60,17 @@ func genesis(naccts int) (InitState, []basics.Address, []*crypto.SignatureSecret addrs = append(addrs, addr) adata := basics.AccountData{} - adata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 / uint64(naccts) + 
adata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 / uint64(naccts) accts[addr] = adata } pooldata := basics.AccountData{} - pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 + pooldata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 pooldata.Status = basics.NotParticipating accts[testPoolAddr] = pooldata sinkdata := basics.AccountData{} - sinkdata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000 + sinkdata.MicroAlgos.Raw = amount //1000 * 1000 * 1000 * 1000 sinkdata.Status = basics.NotParticipating accts[testSinkAddr] = sinkdata From d9b223f2f908b4417915a028f08e7e4b7ea77505 Mon Sep 17 00:00:00 2001 From: Tolik Zinovyev Date: Sat, 8 May 2021 22:12:06 -0400 Subject: [PATCH 207/215] Add support for Arch Linux (#2073) `scripts/configure_dev.sh` can now install dependencies for Arch Linux. --- README.md | 6 ++++-- scripts/configure_dev.sh | 9 +++------ scripts/install_linux_deps.sh | 15 +++++++++++++++ 3 files changed, 22 insertions(+), 8 deletions(-) create mode 100755 scripts/install_linux_deps.sh diff --git a/README.md b/README.md index 51a659c6de..bfca903554 100644 --- a/README.md +++ b/README.md @@ -22,8 +22,10 @@ the [official Go documentation website](https://golang.org/doc/). ### Linux / OSX ### We currently strive to support Debian based distributions with Ubuntu 18.04 -being our official release target. Our core engineering team uses Linux and OSX, -so both environments are well supported for development. +being our official release target. +Building on Arch Linux works as well. +Our core engineering team uses Linux and OSX, so both environments are well +supported for development. OSX only: [Homebrew (brew)](https://brew.sh) must be installed before continuing. 
[Here](https://docs.brew.sh/Installation) are the installation diff --git a/scripts/configure_dev.sh b/scripts/configure_dev.sh index d22468368d..83e06af8f6 100755 --- a/scripts/configure_dev.sh +++ b/scripts/configure_dev.sh @@ -70,12 +70,10 @@ function install_windows_shellcheck() { if [ "${OS}" = "linux" ]; then if ! which sudo > /dev/null; then - apt-get update - apt-get -y install sudo + "$SCRIPTPATH/install_linux_deps.sh" + else + sudo "$SCRIPTPATH/install_linux_deps.sh" fi - - sudo apt-get update - sudo apt-get install -y libboost-all-dev expect jq autoconf shellcheck sqlite3 python3-venv elif [ "${OS}" = "darwin" ]; then brew update brew tap homebrew/cask @@ -103,4 +101,3 @@ if ${SKIP_GO_DEPS}; then fi "$SCRIPTPATH/configure_dev-deps.sh" - diff --git a/scripts/install_linux_deps.sh b/scripts/install_linux_deps.sh new file mode 100755 index 0000000000..ec71513786 --- /dev/null +++ b/scripts/install_linux_deps.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +set -e + +. /etc/os-release +DISTRIB=$ID + +ARCH_DEPS="boost boost-libs expect jq autoconf shellcheck sqlite python-virtualenv" +UBUNTU_DEPS="libboost-all-dev expect jq autoconf shellcheck sqlite3 python3-venv" + +if [ "${DISTRIB}" = "arch" ]; then + pacman -S --refresh --needed --noconfirm $ARCH_DEPS +else + apt-get update + apt-get -y install $UBUNTU_DEPS +fi From 3021d15a8424a34a26064b3e6bd4dabc962035b1 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Fri, 14 May 2021 08:51:51 -0400 Subject: [PATCH 208/215] Fix incorrect datatype testing in recomputeBlockEvaluator (#2155) When the recomputeBlockEvaluator runs, it tests all the pending transactions against the ledger to detect duplicate transactions ( which happen all the time; these aren't an issue ). The block evaluator reports the error by returning `*ledgercore.TransactionInLedgerError` and not `ledgercore.TransactionInLedgerError` as the code was testing against. 
--- data/pools/transactionPool.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go index a07627d024..c44d127d0b 100644 --- a/data/pools/transactionPool.go +++ b/data/pools/transactionPool.go @@ -695,7 +695,7 @@ func (pool *TransactionPool) recomputeBlockEvaluator(committedTxIds map[transact } switch err.(type) { - case ledgercore.TransactionInLedgerError: + case *ledgercore.TransactionInLedgerError: asmStats.CommittedCount++ stats.RemovedInvalidCount++ case transactions.TxnDeadError: From 009d57f58ceebf787c2900a443770509f56cb369 Mon Sep 17 00:00:00 2001 From: John Jannotti Date: Fri, 14 May 2021 15:23:51 -0400 Subject: [PATCH 209/215] Dynamic teal (#2126) Add support for dynamic TEAL. --- cmd/goal/clerk.go | 7 +- config/consensus.go | 3 + daemon/algod/api/server/v2/dryrun_test.go | 5 +- data/transactions/logic/.gitignore | 2 + data/transactions/logic/README.md | 17 +- data/transactions/logic/README_in.md | 14 +- data/transactions/logic/TEAL_opcodes.md | 44 +- data/transactions/logic/assembler.go | 124 ++--- data/transactions/logic/assembler_test.go | 134 +++-- .../transactions/logic/backwardCompat_test.go | 70 ++- data/transactions/logic/doc.go | 18 +- data/transactions/logic/doc_test.go | 2 +- data/transactions/logic/eval.go | 285 ++++++---- data/transactions/logic/evalStateful_test.go | 86 ++- data/transactions/logic/eval_test.go | 505 ++++++++++++------ data/transactions/logic/opcodes.go | 17 +- data/transactions/verify/txn.go | 5 +- ledger/apply/application.go | 17 +- ledger/apply/application_test.go | 24 +- .../cli/goal/expect/tealConsensusTest.exp | 13 +- test/scripts/e2e_subs/v26/teal-v3-only.sh | 99 ++++ .../nettemplates/TwoNodes50EachV26.json | 29 + 22 files changed, 986 insertions(+), 534 deletions(-) create mode 100644 data/transactions/logic/.gitignore create mode 100755 test/scripts/e2e_subs/v26/teal-v3-only.sh create mode 100644 
test/testdata/nettemplates/TwoNodes50EachV26.json diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go index 0f9a32a972..332d245b15 100644 --- a/cmd/goal/clerk.go +++ b/cmd/goal/clerk.go @@ -1080,13 +1080,10 @@ var dryrunCmd = &cobra.Command{ reportErrorf("program size too large: %d > %d", len(txn.Lsig.Logic), params.LogicSigMaxSize) } ep := logic.EvalParams{Txn: &txn, Proto: ¶ms, GroupIndex: i, TxnGroup: txgroup} - cost, err := logic.Check(txn.Lsig.Logic, ep) + err := logic.Check(txn.Lsig.Logic, ep) if err != nil { reportErrorf("program failed Check: %s", err) } - if uint64(cost) > params.LogicSigMaxCost { - reportErrorf("program cost too large: %d > %d", cost, params.LogicSigMaxCost) - } sb := strings.Builder{} ep = logic.EvalParams{ Txn: &txn, @@ -1097,7 +1094,7 @@ var dryrunCmd = &cobra.Command{ } pass, err := logic.Eval(txn.Lsig.Logic, ep) // TODO: optionally include `inspect` output here? - fmt.Fprintf(os.Stdout, "tx[%d] cost=%d trace:\n%s\n", i, cost, sb.String()) + fmt.Fprintf(os.Stdout, "tx[%d] trace:\n%s\n", i, sb.String()) if pass { fmt.Fprintf(os.Stdout, " - pass -\n") } else { diff --git a/config/consensus.go b/config/consensus.go index bf1431c492..b09acf4d0e 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -910,6 +910,9 @@ func initConsensusProtocols() { // Enable transaction Merkle tree. 
vFuture.PaysetCommit = PaysetCommitMerkle + // Enable TEAL 4 + vFuture.LogicSigVersion = 4 + Consensus[protocol.ConsensusFuture] = vFuture } diff --git a/daemon/algod/api/server/v2/dryrun_test.go b/daemon/algod/api/server/v2/dryrun_test.go index 0a08cc0d03..e1becc775e 100644 --- a/daemon/algod/api/server/v2/dryrun_test.go +++ b/daemon/algod/api/server/v2/dryrun_test.go @@ -346,8 +346,9 @@ func init() { // legder requires proto string and proto params set var proto config.ConsensusParams - proto.LogicSigVersion = 2 - proto.LogicSigMaxCost = 1000 + proto.LogicSigVersion = 4 + proto.LogicSigMaxCost = 20000 + proto.MaxAppProgramCost = 700 proto.MaxAppKeyLen = 64 proto.MaxAppBytesValueLen = 64 diff --git a/data/transactions/logic/.gitignore b/data/transactions/logic/.gitignore new file mode 100644 index 0000000000..24f8b4a361 --- /dev/null +++ b/data/transactions/logic/.gitignore @@ -0,0 +1,2 @@ +langspec.json +teal.tmLanguage.json diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md index 263e58ffbd..96331db349 100644 --- a/data/transactions/logic/README.md +++ b/data/transactions/logic/README.md @@ -27,7 +27,7 @@ A program can either authorize some delegated action on a normal private key sig * If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program. * If the SHA512_256 hash of the program (prefixed by "Program") is equal to the transaction Sender address then this is a contract account wholly controlled by the program. No other signature is necessary or possible. The only way to execute a transaction against the contract account is for the program to approve it. 
-The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost estimate and the program cost estimate must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have an estimated cost of 1, but a few slow crypto ops are much higher.
+The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost and the program cost must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have a cost of 1, but a few slow crypto ops are much higher. Prior to v4, program cost was estimated as the static sum of all opcode costs in a program (ignoring conditionals that might skip some code). Beginning with v4, a program's cost is tracked dynamically, while being evaluated. If the program exceeds its budget, it fails.
 
 ## Execution modes
 
@@ -80,11 +80,6 @@ An application transaction must indicate the action to be taken following the ex
 ## Operations
 
 Most operations work with only one type of argument, uint64 or bytes, and panic if the wrong type value is on the stack.
-The instruction set was designed to execute calculator-like expressions.
-What might be a one line expression with various parenthesized clauses should be efficiently representable in TEAL.
-
-Looping is not possible, by design, to ensure predictably fast execution.
-There is a branch instruction (`bnz`, branch if not zero) which allows forward branching only so that some code may be skipped.
 Many programs need only a few dozen instructions. The instruction set has some optimization built in. `intc`, `bytec`, and `arg` take an immediate value byte, making a 2-byte op to load a value onto the stack, but they also have single byte versions for loading the most common constant values. Any program will benefit from having a few common values loaded with a smaller one byte opcode.
Cryptographic hashes and `ed25519verify` are single byte opcodes with powerful libraries behind them. These operations still take more time than other ops (and this is reflected in the cost of each op and the cost limit of a program) but are efficient in compiled code space. @@ -129,6 +124,7 @@ For two-argument ops, `A` is the previous element on the stack and `B` is the la | `~` | bitwise invert value X | | `mulw` | A times B out to 128-bit long result as low (top) and high uint64 values on the stack | | `addw` | A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack | +| `divw` | Pop four uint64 values. The deepest two are interpreted as a uint128 dividend (deepest value is high word), the top two are interpreted as a uint128 divisor. Four uint64 values are pushed to the stack. The deepest two are the quotient (deeper value is the high uint64). The top two are the remainder, low bits on top. | | `getbit` | pop a target A (integer or byte-array), and index B. Push the Bth bit of A. | | `setbit` | pop a target A, index B, and bit C. Set the Bth bit of A to C, and push the result | | `getbyte` | pop a byte-array A and integer B. Extract the Bth byte of A and push it as an integer | @@ -297,6 +293,8 @@ Asset fields include `AssetHolding` and `AssetParam` fields that are used in `as | `swap` | swaps two last values on stack: A, B -> B, A | | `select` | selects one of two values based on top-of-stack: A, B, C -> (if C != 0 then B else A) | | `assert` | immediately fail unless value X is a non-zero number | +| `callsub target` | branch unconditionally to TARGET, saving the next instruction on the call stack | +| `retsub` | pop the top instruction from the call stack and branch to it | ### State Access @@ -384,12 +382,13 @@ A '[proto-buf style variable length unsigned int](https://developers.google.com/ # What TEAL Cannot Do -Current design and implementation limitations to be aware of. 
+Design and implementation limitations to be aware of with various versions of TEAL. * TEAL cannot create or change a transaction, only approve or reject. * Stateless TEAL cannot lookup balances of Algos or other assets. (Standard transaction accounting will apply after TEAL has run and authorized a transaction. A TEAL-approved transaction could still be invalid by other accounting rules just as a standard signed transaction could be invalid. e.g. I can't give away money I don't have.) * TEAL cannot access information in previous blocks. TEAL cannot access most information in other transactions in the current block. (TEAL can access fields of the transaction it is attached to and the transactions in an atomic transaction group.) * TEAL cannot know exactly what round the current transaction will commit in (but it is somewhere in FirstValid through LastValid). * TEAL cannot know exactly what time its transaction is committed. -* TEAL cannot loop. Its branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code. -* TEAL cannot recurse. There is no subroutine jump operation. +* TEAL cannot loop prior to v4. In v3 and prior, the branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code. +* Until v4, TEAL had no notion of subroutines (and therefore no recursion). As of v4, use `callsub` and `retsub`. +* TEAL cannot make indirect jumps. `b`, `bz`, `bnz`, and `callsub` jump to an immediately specified address, and `retsub` jumps to the address currently on the top of the call stack, which is manipulated only by previous calls to `callsub`. 
diff --git a/data/transactions/logic/README_in.md b/data/transactions/logic/README_in.md
index ef43d52308..e8961a8d98 100644
--- a/data/transactions/logic/README_in.md
+++ b/data/transactions/logic/README_in.md
@@ -27,7 +27,7 @@ A program can either authorize some delegated action on a normal private key sig
 * If the account has signed the program (an ed25519 signature on "Program" concatenated with the program bytes) then if the program returns true the transaction is authorized as if the account had signed it. This allows an account to hand out a signed program so that other users can carry out delegated actions which are approved by the program.
 * If the SHA512_256 hash of the program (prefixed by "Program") is equal to the transaction Sender address then this is a contract account wholly controlled by the program. No other signature is necessary or possible. The only way to execute a transaction against the contract account is for the program to approve it.
 
-The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost estimate and the program cost estimate must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have an estimated cost of 1, but a few slow crypto ops are much higher.
+The TEAL bytecode plus the length of any Args must add up to less than 1000 bytes (consensus parameter LogicSigMaxSize). Each TEAL op has an associated cost and the program cost must total less than 20000 (consensus parameter LogicSigMaxCost). Most ops have a cost of 1, but a few slow crypto ops are much higher. Prior to v4, program cost was estimated as the static sum of all opcode costs in a program (ignoring conditionals that might skip some code). Beginning with v4, a program's cost is tracked dynamically, while being evaluated. If the program exceeds its budget, it fails.
## Execution modes @@ -57,11 +57,6 @@ Constants are pushed onto the stack by `intc`, `intc_[0123]`, `bytec`, and `byte ## Operations Most operations work with only one type of argument, uint64 or bytes, and panic if the wrong type value is on the stack. -The instruction set was designed to execute calculator-like expressions. -What might be a one line expression with various parenthesized clauses should be efficiently representable in TEAL. - -Looping is not possible, by design, to ensure predictably fast execution. -There is a branch instruction (`bnz`, branch if not zero) which allows forward branching only so that some code may be skipped. Many programs need only a few dozen instructions. The instruction set has some optimization built in. `intc`, `bytec`, and `arg` take an immediate value byte, making a 2-byte op to load a value onto the stack, but they also have single byte versions for loading the most common constant values. Any program will benefit from having a few common values loaded with a smaller one byte opcode. Cryptographic hashes and `ed25519verify` are single byte opcodes with powerful libraries behind them. These operations still take more time than other ops (and this is reflected in the cost of each op and the cost limit of a program) but are efficient in compiled code space. @@ -183,12 +178,13 @@ A '[proto-buf style variable length unsigned int](https://developers.google.com/ # What TEAL Cannot Do -Current design and implementation limitations to be aware of. +Design and implementation limitations to be aware of with various versions of TEAL. * TEAL cannot create or change a transaction, only approve or reject. * Stateless TEAL cannot lookup balances of Algos or other assets. (Standard transaction accounting will apply after TEAL has run and authorized a transaction. A TEAL-approved transaction could still be invalid by other accounting rules just as a standard signed transaction could be invalid. e.g. I can't give away money I don't have.) 
* TEAL cannot access information in previous blocks. TEAL cannot access most information in other transactions in the current block. (TEAL can access fields of the transaction it is attached to and the transactions in an atomic transaction group.) * TEAL cannot know exactly what round the current transaction will commit in (but it is somewhere in FirstValid through LastValid). * TEAL cannot know exactly what time its transaction is committed. -* TEAL cannot loop. Its branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code. -* TEAL cannot recurse. There is no subroutine jump operation. +* TEAL cannot loop prior to v4. In v3 and prior, the branch instructions `bnz` "branch if not zero", `bz` "branch if zero" and `b` "branch" can only branch forward so as to skip some code. +* Until v4, TEAL had no notion of subroutines (and therefore no recursion). As of v4, use `callsub` and `retsub`. +* TEAL cannot make indirect jumps. `b`, `bz`, `bnz`, and `callsub` jump to an immediately specified address, and `retsub` jumps to the address currently on the top of the call stack, which is manipulated only by previous calls to `callsub`. diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md index 14ef15102b..4768c53e66 100644 --- a/data/transactions/logic/TEAL_opcodes.md +++ b/data/transactions/logic/TEAL_opcodes.md @@ -18,7 +18,7 @@ Ops have a 'cost' of 1 unless otherwise specified. - SHA256 hash of value X, yields [32]byte - **Cost**: - 7 (LogicSigVersion = 1) - - 35 (2 <= LogicSigVersion <= 3) + - 35 (2 <= LogicSigVersion <= 4) ## keccak256 @@ -28,7 +28,7 @@ Ops have a 'cost' of 1 unless otherwise specified. - Keccak256 hash of value X, yields [32]byte - **Cost**: - 26 (LogicSigVersion = 1) - - 130 (2 <= LogicSigVersion <= 3) + - 130 (2 <= LogicSigVersion <= 4) ## sha512_256 @@ -38,7 +38,7 @@ Ops have a 'cost' of 1 unless otherwise specified. 
- SHA512_256 hash of value X, yields [32]byte - **Cost**: - 9 (LogicSigVersion = 1) - - 45 (2 <= LogicSigVersion <= 3) + - 45 (2 <= LogicSigVersion <= 4) ## ed25519verify @@ -74,6 +74,8 @@ Overflow is an error condition which halts execution and fails the transaction. - Pushes: uint64 - A divided by B. Panic if B == 0. +`divw` is available to divide the two-element values produced by `mulw` and `addw`. + ## * - Opcode: 0x0b @@ -219,6 +221,14 @@ Overflow is an error condition which halts execution and fails the transaction. - A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack - LogicSigVersion >= 2 +## divw + +- Opcode: 0x1f +- Pops: *... stack*, {uint64 A}, {uint64 B}, {uint64 C}, {uint64 D} +- Pushes: *... stack*, uint64, uint64, uint64, uint64 +- Pop four uint64 values. The deepest two are interpreted as a uint128 dividend (deepest value is high word), the top two are interpreted as a uint128 divisor. Four uint64 values are pushed to the stack. The deepest two are the quotient (deeper value is the high uint64). The top two are the remainder, low bits on top. +- LogicSigVersion >= 4 + ## intcblock uint ... - Opcode: 0x20 {varuint length} [{varuint value}, ...] @@ -513,18 +523,18 @@ for notes on transaction fields available, see `txn`. If top of stack is _i_, `g ## bnz target -- Opcode: 0x40 {0..0x7fff forward branch offset, big endian} +- Opcode: 0x40 {int16 branch offset, big endian. (negative offsets are illegal before v4)} - Pops: *... stack*, uint64 - Pushes: _None_ - branch to TARGET if value X is not zero -The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. 
Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping. +The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are limited to forward branches only, 0-0x7fff until v4. v4 treats offset as a signed 16 bit integer allowing for backward branches and looping. At LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.) ## bz target -- Opcode: 0x41 {0..0x7fff forward branch offset, big endian} +- Opcode: 0x41 {int16 branch offset, big endian. (negative offsets are illegal before v4)} - Pops: *... stack*, uint64 - Pushes: _None_ - branch to TARGET if value X is zero @@ -534,7 +544,7 @@ See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`. ## b target -- Opcode: 0x42 {0..0x7fff forward branch offset, big endian} +- Opcode: 0x42 {int16 branch offset, big endian. 
(negative offsets are illegal before v4)}
 - Pops: _None_
 - Pushes: _None_
 - branch unconditionally to TARGET
@@ -851,3 +861,23 @@ pushbytes args are not added to the bytecblock during assembly processes
 - LogicSigVersion >= 3
 
 pushint args are not added to the intcblock during assembly processes
+
+## callsub target
+
+- Opcode: 0x88
+- Pops: _None_
+- Pushes: _None_
+- branch unconditionally to TARGET, saving the next instruction on the call stack
+- LogicSigVersion >= 4
+
+The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
+
+## retsub
+
+- Opcode: 0x89
+- Pops: _None_
+- Pushes: _None_
+- pop the top instruction from the call stack and branch to it
+- LogicSigVersion >= 4
+
+The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index 5cf8860ef4..5c4d864d01 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -99,7 +99,7 @@ func (ops *OpStream) createLabel(label string) {
 		ops.labels = make(map[string]int)
 	}
 	if _, ok := ops.labels[label]; ok {
-		ops.errorf("duplicate label %s", label)
+		ops.errorf("duplicate label %#v", label)
 	}
 	ops.labels[label] = ops.pending.Len()
 }
@@ -590,14 +590,14 @@ func assembleTxn(ops *OpStream, spec *OpSpec, args []string) error {
 	}
 	fs, ok := txnFieldSpecByName[args[0]]
 	if !ok {
-		return ops.errorf("txn unknown field: %v", args[0])
+		return ops.errorf("txn unknown field: %#v", args[0])
 	}
 	_, ok = txnaFieldSpecByField[fs.field]
 	if ok {
-		return ops.errorf("found array field %v in txn op", args[0])
+		return ops.errorf("found array field %#v in txn op", args[0])
 	}
 	if fs.version > ops.Version {
-		return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version)
+		return ops.errorf("field %#v available in version %d. 
Missed #pragma version?", args[0], fs.version) } ops.pending.WriteByte(spec.Opcode) ops.pending.WriteByte(uint8(fs.field)) @@ -623,14 +623,14 @@ func assembleTxna(ops *OpStream, spec *OpSpec, args []string) error { } fs, ok := txnFieldSpecByName[args[0]] if !ok { - return ops.errorf("txna unknown field: %v", args[0]) + return ops.errorf("txna unknown field: %#v", args[0]) } _, ok = txnaFieldSpecByField[fs.field] if !ok { - return ops.errorf("txna unknown field: %v", args[0]) + return ops.errorf("txna unknown field: %#v", args[0]) } if fs.version > ops.Version { - return ops.errorf("txna %s available in version %d. Missed #pragma version?", args[0], fs.version) + return ops.errorf("txna %#v available in version %d. Missed #pragma version?", args[0], fs.version) } arrayFieldIdx, err := strconv.ParseUint(args[1], 0, 64) if err != nil { @@ -661,14 +661,14 @@ func assembleGtxn(ops *OpStream, spec *OpSpec, args []string) error { fs, ok := txnFieldSpecByName[args[1]] if !ok { - return ops.errorf("gtxn unknown field: %v", args[1]) + return ops.errorf("gtxn unknown field: %#v", args[1]) } _, ok = txnaFieldSpecByField[fs.field] if ok { - return ops.errorf("found array field %v in gtxn op", args[1]) + return ops.errorf("found array field %#v in gtxn op", args[1]) } if fs.version > ops.Version { - return ops.errorf("field %s available in version %d. Missed #pragma version?", args[1], fs.version) + return ops.errorf("field %#v available in version %d. 
Missed #pragma version?", args[1], fs.version) } ops.pending.WriteByte(spec.Opcode) @@ -703,14 +703,14 @@ func assembleGtxna(ops *OpStream, spec *OpSpec, args []string) error { fs, ok := txnFieldSpecByName[args[1]] if !ok { - return ops.errorf("gtxna unknown field: %v", args[1]) + return ops.errorf("gtxna unknown field: %#v", args[1]) } _, ok = txnaFieldSpecByField[fs.field] if !ok { - return ops.errorf("gtxna unknown field: %v", args[1]) + return ops.errorf("gtxna unknown field: %#v", args[1]) } if fs.version > ops.Version { - return ops.errorf("gtxna %s available in version %d. Missed #pragma version?", args[1], fs.version) + return ops.errorf("gtxna %#v available in version %d. Missed #pragma version?", args[1], fs.version) } arrayFieldIdx, err := strconv.ParseUint(args[2], 0, 64) if err != nil { @@ -738,14 +738,14 @@ func assembleGtxns(ops *OpStream, spec *OpSpec, args []string) error { } fs, ok := txnFieldSpecByName[args[0]] if !ok { - return ops.errorf("gtxns unknown field: %v", args[0]) + return ops.errorf("gtxns unknown field: %#v", args[0]) } _, ok = txnaFieldSpecByField[fs.field] if ok { - return ops.errorf("found array field %v in gtxns op", args[0]) + return ops.errorf("found array field %#v in gtxns op", args[0]) } if fs.version > ops.Version { - return ops.errorf("field %s available in version %d. Missed #pragma version?", args[0], fs.version) + return ops.errorf("field %#v available in version %d. 
Missed #pragma version?", args[0], fs.version) } ops.pending.WriteByte(spec.Opcode) @@ -760,14 +760,14 @@ func assembleGtxnsa(ops *OpStream, spec *OpSpec, args []string) error { } fs, ok := txnFieldSpecByName[args[0]] if !ok { - return ops.errorf("gtxnsa unknown field: %v", args[0]) + return ops.errorf("gtxnsa unknown field: %#v", args[0]) } _, ok = txnaFieldSpecByField[fs.field] if !ok { - return ops.errorf("gtxnsa unknown field: %v", args[0]) + return ops.errorf("gtxnsa unknown field: %#v", args[0]) } if fs.version > ops.Version { - return ops.errorf("gtxnsa %s available in version %d. Missed #pragma version?", args[0], fs.version) + return ops.errorf("gtxnsa %#v available in version %d. Missed #pragma version?", args[0], fs.version) } arrayFieldIdx, err := strconv.ParseUint(args[1], 0, 64) if err != nil { @@ -789,7 +789,7 @@ func assembleGlobal(ops *OpStream, spec *OpSpec, args []string) error { } fs, ok := globalFieldSpecByName[args[0]] if !ok { - return ops.errorf("global unknown field: %v", args[0]) + return ops.errorf("global unknown field: %#v", args[0]) } if fs.version > ops.Version { // no return here. 
we may as well continue to maintain typestack @@ -810,7 +810,7 @@ func assembleAssetHolding(ops *OpStream, spec *OpSpec, args []string) error { } val, ok := assetHoldingFields[args[0]] if !ok { - return ops.errorf("asset_holding_get unknown arg: %v", args[0]) + return ops.errorf("asset_holding_get unknown arg: %#v", args[0]) } ops.pending.WriteByte(spec.Opcode) ops.pending.WriteByte(uint8(val)) @@ -824,7 +824,7 @@ func assembleAssetParams(ops *OpStream, spec *OpSpec, args []string) error { } val, ok := assetParamsFields[args[0]] if !ok { - return ops.errorf("asset_params_get unknown arg: %v", args[0]) + return ops.errorf("asset_params_get unknown arg: %#v", args[0]) } ops.pending.WriteByte(spec.Opcode) ops.pending.WriteByte(uint8(val)) @@ -1033,11 +1033,22 @@ func (ops *OpStream) assemble(fin io.Reader) error { ops.trace("%d: no fields\n", ops.sourceLine) continue } - // we're going to process opcodes, so fix the Version + // we're about to begin processing opcodes, so fix the Version if ops.Version == assemblerNoVersion { ops.Version = AssemblerDefaultVersion } opstring := fields[0] + + if opstring[len(opstring)-1] == ':' { + ops.createLabel(opstring[:len(opstring)-1]) + fields = fields[1:] + if len(fields) == 0 { + // There was a label, not need to ops.trace this + continue + } + opstring = fields[0] + } + spec, ok := OpsByName[ops.Version][opstring] if !ok { spec, ok = keywords[opstring] @@ -1053,10 +1064,6 @@ func (ops *OpStream) assemble(fin io.Reader) error { ops.trace("\n") continue } - if opstring[len(opstring)-1] == ':' { - ops.createLabel(opstring[:len(opstring)-1]) - continue - } // unknown opcode, let's report a good error if version problem spec, ok = OpsByName[AssemblerMaxVersion][opstring] if ok { @@ -1070,7 +1077,7 @@ func (ops *OpStream) assemble(fin io.Reader) error { if ops.Version <= 1 { for label, dest := range ops.labels { if dest == ops.pending.Len() { - ops.errorf("label %v is too far away", label) + ops.errorf("label %#v is too far away", 
label) } } } @@ -1141,20 +1148,20 @@ func (ops *OpStream) resolveLabels() { dest, ok := ops.labels[lr.label] if !ok { if !reported[lr.label] { - ops.errorf("reference to undefined label %v", lr.label) + ops.errorf("reference to undefined label %#v", lr.label) } reported[lr.label] = true continue } // all branch instructions (currently) are opcode byte and 2 offset bytes, and the destination is relative to the next pc as if the branch was a no-op naturalPc := lr.position + 3 - if dest < naturalPc { - ops.errorf("label %v is before reference but only forward jumps are allowed", lr.label) + if ops.Version < backBranchEnabledVersion && dest < naturalPc { + ops.errorf("label %#v is a back reference, back jump support was introduced in TEAL v4", lr.label) continue } jump := dest - naturalPc if jump > 0x7fff { - ops.errorf("label %v is too far away", lr.label) + ops.errorf("label %#v is too far away", lr.label) continue } raw[lr.position+1] = uint8(jump >> 8) @@ -1376,33 +1383,29 @@ func parseIntcblock(program []byte, pc int) (intc []uint64, nextpc int, err erro return } -func checkIntConstBlock(cx *evalContext) int { +func checkIntConstBlock(cx *evalContext) error { pos := cx.pc + 1 numInts, bytesUsed := binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { - cx.err = fmt.Errorf("could not decode int const block size at pc=%d", pos) - return 1 + return fmt.Errorf("could not decode int const block size at pc=%d", pos) } pos += bytesUsed if numInts > uint64(len(cx.program)) { - cx.err = errTooManyIntc - return 0 + return errTooManyIntc } //intc = make([]uint64, numInts) for i := uint64(0); i < numInts; i++ { if pos >= len(cx.program) { - cx.err = errShortIntcblock - return 0 + return errShortIntcblock } _, bytesUsed = binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { - cx.err = fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos) - return 1 + return fmt.Errorf("could not decode int const[%d] at pc=%d", i, pos) } pos += bytesUsed } cx.nextpc = pos - return 1 + 
return nil } var errShortBytecblock = errors.New("bytecblock ran past end of program") @@ -1448,44 +1451,38 @@ func parseBytecBlock(program []byte, pc int) (bytec [][]byte, nextpc int, err er return } -func checkByteConstBlock(cx *evalContext) int { +func checkByteConstBlock(cx *evalContext) error { pos := cx.pc + 1 numItems, bytesUsed := binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { - cx.err = fmt.Errorf("could not decode []byte const block size at pc=%d", pos) - return 1 + return fmt.Errorf("could not decode []byte const block size at pc=%d", pos) } pos += bytesUsed if numItems > uint64(len(cx.program)) { - cx.err = errTooManyItems - return 0 + return errTooManyItems } //bytec = make([][]byte, numItems) for i := uint64(0); i < numItems; i++ { if pos >= len(cx.program) { - cx.err = errShortBytecblock - return 0 + return errShortBytecblock } itemLen, bytesUsed := binary.Uvarint(cx.program[pos:]) if bytesUsed <= 0 { - cx.err = fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos) - return 1 + return fmt.Errorf("could not decode []byte const[%d] at pc=%d", i, pos) } pos += bytesUsed if pos >= len(cx.program) { - cx.err = errShortBytecblock - return 0 + return errShortBytecblock } end := uint64(pos) + itemLen if end > uint64(len(cx.program)) || end < uint64(pos) { - cx.err = errShortBytecblock - return 0 + return errShortBytecblock } //bytec[i] = program[pos : pos+int(itemLen)] pos += int(itemLen) } cx.nextpc = pos - return 1 + return nil } func disIntcblock(dis *disassembleState, spec *OpSpec) (string, error) { @@ -1566,7 +1563,7 @@ func guessByteFormat(bytes []byte) string { return fmt.Sprintf("addr %s", short.String()) } if allPrintableASCII(bytes) { - return fmt.Sprintf("\"%s\"", string(bytes)) + return fmt.Sprintf("%#v", string(bytes)) } return "0x" + hex.EncodeToString(bytes) } @@ -1612,9 +1609,9 @@ func disPushInt(dis *disassembleState, spec *OpSpec) (string, error) { dis.nextpc = pos + bytesUsed return fmt.Sprintf("%s %d", spec.Name, val), 
nil } -func checkPushInt(cx *evalContext) int { +func checkPushInt(cx *evalContext) error { opPushInt(cx) - return 1 + return cx.err } func disPushBytes(dis *disassembleState, spec *OpSpec) (string, error) { @@ -1632,9 +1629,9 @@ func disPushBytes(dis *disassembleState, spec *OpSpec) (string, error) { dis.nextpc = int(end) return fmt.Sprintf("%s 0x%s", spec.Name, hex.EncodeToString(bytes)), nil } -func checkPushBytes(cx *evalContext) int { +func checkPushBytes(cx *evalContext) error { opPushBytes(cx) - return 1 + return cx.err } // This is also used to disassemble gtxns @@ -1723,9 +1720,12 @@ func disBranch(dis *disassembleState, spec *OpSpec) (string, error) { dis.nextpc = dis.pc + 3 offset := (uint(dis.program[dis.pc+1]) << 8) | uint(dis.program[dis.pc+2]) target := int(offset) + dis.pc + 3 + if target > 0xffff { + target -= 0x10000 + } var label string if dis.numericTargets { - label = fmt.Sprintf("+%d", offset+3) // +3 so it's easy to calculate destination from current + label = fmt.Sprintf("%d", target) } else { if known, ok := dis.pendingLabels[target]; ok { label = known diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go index c2d521b79d..1fca9c8494 100644 --- a/data/transactions/logic/assembler_test.go +++ b/data/transactions/logic/assembler_test.go @@ -244,6 +244,36 @@ pushint 1000 pushbytes "john" ` +// Keep in mind, only use existing int and byte constants, or else use +// push* instead. The idea is to not cause the *cblocks to change. 
+const v4Nonsense = ` +int 1 +pushint 2000 +int 0 +int 2 +divw +callsub stuff +b next +stuff: +retsub +next: +int 1 +` + +var nonsense = map[uint64]string{ + 1: v1Nonsense, + 2: v1Nonsense + v2Nonsense, + 3: v1Nonsense + v2Nonsense + v3Nonsense, + 4: v1Nonsense + v2Nonsense + v3Nonsense + v4Nonsense, +} + +var compiled = map[uint64]string{ + 1: "012008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b1716154000032903494", + 2: "022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f", + 3: 
"032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e", + 4: "042008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e210581d00f210721061f880003420001892105", +} + func pseudoOp(opcode string) bool { // We don't test every combination of // intcblock,bytecblock,intc*,bytec*,arg* here. 
Not all of @@ -263,44 +293,33 @@ func TestAssemble(t *testing.T) { // Run test. It should pass. // // This doesn't have to be a sensible program to run, it just has to compile. - for _, spec := range OpSpecs { - // Ensure that we have some basic check of all the ops, except - if !strings.Contains(v1Nonsense+v2Nonsense, spec.Name) && - !pseudoOp(spec.Name) && spec.Version <= 2 { - t.Errorf("v2 nonsense test should contain op %v", spec.Name) - } - } - // First, we test v2, not AssemblerMaxVersion. A higher version is - // allowed to differ (and must, in the first byte). - ops := testProg(t, v1Nonsense+v2Nonsense, 2) - // check that compilation is stable over time and we assemble to the same bytes this month that we did last month. - expectedBytes, _ := hex.DecodeString("022008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f") - if bytes.Compare(expectedBytes, ops.Program) != 0 { - // this print is for convenience if the program has been changed. the hex string can be copy pasted back in as a new expected result. 
- t.Log(hex.EncodeToString(ops.Program)) - } - require.Equal(t, expectedBytes, ops.Program) - - // We test v3 here, and compare to AssemblerMaxVersion, with - // the intention that the test breaks the next time - // AssemblerMaxVersion is increased. At that point, we would - // add a new test for v4, and leave behind this test for v3. - - for _, spec := range OpSpecs { - // Ensure that we have some basic check of all the ops, except - if !strings.Contains(v1Nonsense+v2Nonsense+v3Nonsense, spec.Name) && - !pseudoOp(spec.Name) && spec.Version <= 3 { - t.Errorf("v3 nonsense test should contain op %v", spec.Name) - } - } - ops = testProg(t, v1Nonsense+v2Nonsense+v3Nonsense, AssemblerMaxVersion) - // check that compilation is stable over time and we assemble to the same bytes this month that we did last month. - expectedBytes, _ = hex.DecodeString("032008b7a60cf8acd19181cf959a12f8acd19181cf951af8acd19181cf15f8acd191810f01020026050212340c68656c6c6f20776f726c6421208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d024242047465737400320032013202320328292929292a0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e0102222324252104082209240a220b230c240d250e230f23102311231223132314181b1c2b171615400003290349483403350222231d4a484848482a50512a63222352410003420000432105602105612105270463484821052b62482b642b65484821052b2106662b21056721072b682b692107210570004848210771004848361c0037001a0031183119311b311d311e311f3120210721051e312131223123312431253126312731283129312a312b312c312d312e312f4478222105531421055427042106552105082106564c4d4b02210538212106391c0081e80780046a6f686e") - if bytes.Compare(expectedBytes, ops.Program) != 0 { - // this print is for convenience if the program has been changed. the hex string can be copy pasted back in as a new expected result. 
- t.Log(hex.EncodeToString(ops.Program)) + + t.Parallel() + for v := uint64(2); v <= AssemblerMaxVersion; v++ { + t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { + for _, spec := range OpSpecs { + // Make sure our nonsense covers the ops + if !strings.Contains(nonsense[v], spec.Name) && + !pseudoOp(spec.Name) && spec.Version <= v { + t.Errorf("v%d nonsense test should contain op %v", v, spec.Name) + } + } + + ops := testProg(t, nonsense[v], v) + // check that compilation is stable over + // time. we must assemble to the same bytes + // this month that we did last month. + expectedBytes, _ := hex.DecodeString(compiled[v]) + if bytes.Compare(expectedBytes, ops.Program) != 0 { + // this print is for convenience if + // the program has been changed. the + // hex string can be copy pasted back + // in as a new expected result. + t.Log(hex.EncodeToString(ops.Program)) + } + require.Equal(t, expectedBytes, ops.Program) + }) } - require.Equal(t, expectedBytes, ops.Program) } func TestAssembleAlias(t *testing.T) { @@ -395,11 +414,11 @@ func testLine(t *testing.T, line string, ver uint64, expected string) { func TestAssembleTxna(t *testing.T) { testLine(t, "txna Accounts 256", AssemblerMaxVersion, "txna array index beyond 255: 256") testLine(t, "txna ApplicationArgs 256", AssemblerMaxVersion, "txna array index beyond 255: 256") - testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: Sender") + testLine(t, "txna Sender 256", AssemblerMaxVersion, "txna unknown field: \"Sender\"") testLine(t, "gtxna 0 Accounts 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256") testLine(t, "gtxna 0 ApplicationArgs 256", AssemblerMaxVersion, "gtxna array index beyond 255: 256") testLine(t, "gtxna 256 Accounts 0", AssemblerMaxVersion, "gtxna group index beyond 255: 256") - testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: Sender") + testLine(t, "gtxna 0 Sender 256", AssemblerMaxVersion, "gtxna unknown field: \"Sender\"") testLine(t, 
"txn Accounts 0", 1, "txn expects one argument") testLine(t, "txn Accounts 0 1", 2, "txn expects one or two arguments") testLine(t, "txna Accounts 0 1", AssemblerMaxVersion, "txna expects two arguments") @@ -409,20 +428,20 @@ func TestAssembleTxna(t *testing.T) { testLine(t, "gtxna 0 Accounts 1 2", AssemblerMaxVersion, "gtxna expects three arguments") testLine(t, "gtxna a Accounts 0", AssemblerMaxVersion, "strconv.ParseUint...") testLine(t, "gtxna 0 Accounts a", AssemblerMaxVersion, "strconv.ParseUint...") - testLine(t, "txn ABC", 2, "txn unknown field: ABC") - testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: ABC") + testLine(t, "txn ABC", 2, "txn unknown field: \"ABC\"") + testLine(t, "gtxn 0 ABC", 2, "gtxn unknown field: \"ABC\"") testLine(t, "gtxn a ABC", 2, "strconv.ParseUint...") - testLine(t, "txn Accounts", AssemblerMaxVersion, "found array field Accounts in txn op") - testLine(t, "txn Accounts", 1, "found array field Accounts in txn op") + testLine(t, "txn Accounts", AssemblerMaxVersion, "found array field \"Accounts\" in txn op") + testLine(t, "txn Accounts", 1, "found array field \"Accounts\" in txn op") testLine(t, "txn Accounts 0", AssemblerMaxVersion, "") - testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "found array field Accounts in gtxn op") - testLine(t, "gtxn 0 Accounts", 1, "found array field Accounts in gtxn op") + testLine(t, "gtxn 0 Accounts", AssemblerMaxVersion, "found array field \"Accounts\" in gtxn op") + testLine(t, "gtxn 0 Accounts", 1, "found array field \"Accounts\" in gtxn op") testLine(t, "gtxn 0 Accounts 1", AssemblerMaxVersion, "") } func TestAssembleGlobal(t *testing.T) { testLine(t, "global", AssemblerMaxVersion, "global expects one argument") - testLine(t, "global a", AssemblerMaxVersion, "global unknown field: a") + testLine(t, "global a", AssemblerMaxVersion, "global unknown field: \"a\"") } func TestAssembleDefault(t *testing.T) { @@ -758,9 +777,14 @@ func TestAssembleRejectNegJump(t *testing.T) { int 1 bnz wat int 2` - 
for v := uint64(1); v <= AssemblerMaxVersion; v++ { + for v := uint64(1); v < backBranchEnabledVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { - testProg(t, source, v, expect{3, "label wat is before reference but only forward jumps are allowed"}) + testProg(t, source, v, expect{3, "label \"wat\" is a back reference..."}) + }) + } + for v := uint64(backBranchEnabledVersion); v <= AssemblerMaxVersion; v++ { + t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { + testProg(t, source, v) }) } } @@ -796,7 +820,7 @@ bnz nowhere int 2` for v := uint64(1); v <= AssemblerMaxVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { - testProg(t, source, v, expect{2, "reference to undefined label nowhere"}) + testProg(t, source, v, expect{2, "reference to undefined label \"nowhere\""}) }) } } @@ -826,8 +850,8 @@ int 2` for v := uint64(1); v <= AssemblerMaxVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { testProg(t, source, v, - expect{2, "reference to undefined label nowhere"}, - expect{4, "txn unknown field: XYZ"}) + expect{2, "reference to undefined label \"nowhere\""}, + expect{4, "txn unknown field: \"XYZ\""}) }) } } @@ -1147,13 +1171,13 @@ func TestAssembleAsset(t *testing.T) { testProg(t, "int 1; int 1; asset_holding_get ABC 1", v, expect{3, "asset_holding_get expects one argument"}) testProg(t, "int 1; int 1; asset_holding_get ABC", v, - expect{3, "asset_holding_get unknown arg: ABC"}) + expect{3, "asset_holding_get unknown arg: \"ABC\""}) testProg(t, "byte 0x1234; asset_params_get ABC 1", v, expect{2, "asset_params_get arg 0 wanted type uint64..."}) testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects one argument") - testLine(t, "asset_params_get ABC", v, "asset_params_get unknown arg: ABC") + testLine(t, "asset_params_get ABC", v, "asset_params_get unknown arg: \"ABC\"") } } @@ -1628,8 +1652,8 @@ func TestErrShortBytecblock(t *testing.T) { var cx evalContext cx.program = ops.Program - checkIntConstBlock(&cx) - 
require.Equal(t, cx.err, errShortIntcblock) + err = checkIntConstBlock(&cx) + require.Equal(t, err, errShortIntcblock) } func TestBranchAssemblyTypeCheck(t *testing.T) { diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go index 2b3c1cb568..5c6ada5633 100644 --- a/data/transactions/logic/backwardCompat_test.go +++ b/data/transactions/logic/backwardCompat_test.go @@ -267,9 +267,9 @@ func TestBackwardCompatTEALv1(t *testing.T) { require.NoError(t, err) require.Equal(t, program, ops.Program) // ensure the old program is the same as a new one except TEAL version byte - ops, err = AssembleStringWithVersion(sourceTEALv1, AssemblerMaxVersion) + opsV2, err := AssembleStringWithVersion(sourceTEALv1, 2) require.NoError(t, err) - require.Equal(t, program[1:], ops.Program[1:]) + require.Equal(t, program[1:], opsV2.Program[1:]) sig := c.Sign(Msg{ ProgramHash: crypto.HashObj(Program(program)), @@ -285,14 +285,22 @@ func TestBackwardCompatTEALv1(t *testing.T) { txn.Txn.RekeyTo = basics.Address{} // RekeyTo not allowed in TEAL v1 sb := strings.Builder{} - ep := defaultEvalParams(&sb, &txn) + ep := defaultEvalParamsWithVersion(&sb, &txn, 1) ep.TxnGroup = txgroup // ensure v1 program runs well on latest TEAL evaluator require.Equal(t, uint8(1), program[0]) - cost, err := Check(program, ep) + + // Cost should stay exactly 2140 + ep.Proto.LogicSigMaxCost = 2139 + err = Check(program, ep) + require.Error(t, err) + require.Contains(t, err.Error(), "static cost") + + ep.Proto.LogicSigMaxCost = 2140 + err = Check(program, ep) require.NoError(t, err) - require.Equal(t, 2140, cost) + pass, err := Eval(program, ep) if err != nil || !pass { t.Log(hex.EncodeToString(program)) @@ -301,12 +309,19 @@ func TestBackwardCompatTEALv1(t *testing.T) { require.NoError(t, err) require.True(t, pass) - cost2, err := Check(ops.Program, ep) + // Costs for v2 should be higher because of hash opcode cost changes + ep2 := defaultEvalParamsWithVersion(&sb, 
&txn, 2) + ep2.TxnGroup = txgroup + ep2.Proto.LogicSigMaxCost = 2307 + err = Check(opsV2.Program, ep2) + require.Error(t, err) + require.Contains(t, err.Error(), "static cost") + + ep2.Proto.LogicSigMaxCost = 2308 + err = Check(opsV2.Program, ep2) require.NoError(t, err) - // Costs for v2 should be higher because of hash opcode cost changes - require.Equal(t, 2308, cost2) - pass, err = Eval(ops.Program, ep) + pass, err = Eval(opsV2.Program, ep2) if err != nil || !pass { t.Log(hex.EncodeToString(ops.Program)) t.Log(sb.String()) @@ -315,6 +330,8 @@ func TestBackwardCompatTEALv1(t *testing.T) { require.True(t, pass) // ensure v0 program runs well on latest TEAL evaluator + ep = defaultEvalParams(&sb, &txn) + ep.TxnGroup = txgroup program[0] = 0 sig = c.Sign(Msg{ ProgramHash: crypto.HashObj(Program(program)), @@ -322,12 +339,35 @@ func TestBackwardCompatTEALv1(t *testing.T) { }) txn.Lsig.Logic = program txn.Lsig.Args = [][]byte{data[:], sig[:], pk[:], txn.Txn.Sender[:], txn.Txn.Note} - cost, err = Check(program, ep) + + // Cost remains the same, because v0 does not get dynamic treatment + ep.Proto.LogicSigMaxCost = 2139 + err = Check(program, ep) + require.Error(t, err) + + ep.Proto.LogicSigMaxCost = 2140 + err = Check(program, ep) require.NoError(t, err) - require.Equal(t, 2140, cost) pass, err = Eval(program, ep) require.NoError(t, err) require.True(t, pass) + + // But in v4, cost is now dynamic and exactly 1 less than v2/v3, + // because bnz skips "err". 
It's caught during Eval + program[0] = 4 + ep.Proto.LogicSigMaxCost = 2306 + err = Check(program, ep) + require.NoError(t, err) + _, err = Eval(program, ep) + require.Error(t, err) + + ep.Proto.LogicSigMaxCost = 2307 + err = Check(program, ep) + require.NoError(t, err) + pass, err = Eval(program, ep) + require.NoError(t, err) + require.True(t, pass) + } // ensure v2 fields error on pre TEAL v2 logicsig version @@ -418,7 +458,7 @@ func TestBackwardCompatTxnFields(t *testing.T) { if _, ok := txnaFieldSpecByField[fs.field]; ok { parts := strings.Split(text, " ") op := parts[0] - asmError = fmt.Sprintf("found array field %s in %s op", field, op) + asmError = fmt.Sprintf("found array field %#v in %s op", field, op) } // check assembler fails if version before introduction testLine(t, text, assemblerNoVersion, asmError) @@ -481,15 +521,15 @@ bnz done done:` t.Run("v=default", func(t *testing.T) { - testProg(t, source, assemblerNoVersion, expect{4, "label done is too far away"}) + testProg(t, source, assemblerNoVersion, expect{4, "label \"done\" is too far away"}) }) t.Run("v=default", func(t *testing.T) { - testProg(t, source, 0, expect{4, "label done is too far away"}) + testProg(t, source, 0, expect{4, "label \"done\" is too far away"}) }) t.Run("v=default", func(t *testing.T) { - testProg(t, source, 1, expect{4, "label done is too far away"}) + testProg(t, source, 1, expect{4, "label \"done\" is too far away"}) }) for v := uint64(2); v <= AssemblerMaxVersion; v++ { diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index 22127adc4b..a64f92511a 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -52,6 +52,7 @@ var opDocByName = map[string]string{ "~": "bitwise invert value X", "mulw": "A times B out to 128-bit long result as low (top) and high uint64 values on the stack", "addw": "A plus B out to 128-bit long result as sum (top) and carry-bit uint64 values on the stack", + "divw": "Pop four uint64 values. 
The deepest two are interpreted as a uint128 dividend (deepest value is high word), the top two are interpreted as a uint128 divisor. Four uint64 values are pushed to the stack. The deepest two are the quotient (deeper value is the high uint64). The top two are the remainder, low bits on top.", "intcblock": "prepare block of uint64 constants for use by intc", "intc": "push Ith constant from intcblock to stack", "intc_0": "push constant 0 from intcblock to stack", @@ -111,6 +112,8 @@ var opDocByName = map[string]string{ "asset_holding_get": "read from account specified by Txn.Accounts[A] and asset B holding field X (imm arg) => {0 or 1 (top), value}", "asset_params_get": "read from asset Txn.ForeignAssets[A] params field X (imm arg) => {0 or 1 (top), value}", "assert": "immediately fail unless value X is a non-zero number", + "callsub": "branch unconditionally to TARGET, saving the next instruction on the call stack", + "retsub": "pop the top instruction from the call stack and branch to it", } // OpDoc returns a description of the op @@ -133,9 +136,9 @@ var opcodeImmediateNotes = map[string]string{ "gtxna": "{uint8 transaction group index} {uint8 transaction field index} {uint8 transaction field array index}", "gtxnsa": "{uint8 transaction field index} {uint8 transaction field array index}", "global": "{uint8 global field index}", - "bnz": "{0..0x7fff forward branch offset, big endian}", - "bz": "{0..0x7fff forward branch offset, big endian}", - "b": "{0..0x7fff forward branch offset, big endian}", + "bnz": "{int16 branch offset, big endian. (negative offsets are illegal before v4)}", + "bz": "{int16 branch offset, big endian. (negative offsets are illegal before v4)}", + "b": "{int16 branch offset, big endian. 
(negative offsets are illegal before v4)}", "load": "{uint8 position in scratch space to load from}", "store": "{uint8 position in scratch space to store to}", "substring": "{uint8 start position} {uint8 end position}", @@ -152,13 +155,16 @@ func OpImmediateNote(opName string) string { // further documentation on the function of the opcode var opDocExtras = map[string]string{ "ed25519verify": "The 32 byte public key is the last element on the stack, preceded by the 64 byte signature at the second-to-last element on the stack, preceded by the data which was signed at the third-to-last element on the stack.", - "bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are currently limited to forward branches only, 0-0x7fff. A future expansion might make this a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)", + "bnz": "The `bnz` instruction opcode 0x40 is followed by two immediate data bytes which are a high byte first and low byte second which together form a 16 bit offset which the instruction may branch to. 
For a bnz instruction at `pc`, if the last element of the stack is not zero then branch to instruction at `pc + 3 + N`, else proceed to next instruction at `pc + 3`. Branch targets must be well aligned instructions. (e.g. Branching to the second byte of a 2 byte op will be rejected.) Branch offsets are limited to forward branches only, 0-0x7fff until v4. v4 treats offset as a signed 16 bit integer allowing for backward branches and looping.\n\nAt LogicSigVersion 2 it became allowed to branch to the end of the program exactly after the last instruction: bnz to byte N (with 0-indexing) was illegal for a TEAL program with N bytes before LogicSigVersion 2, and is legal after it. This change eliminates the need for a last instruction of no-op as a branch target at the end. (Branching beyond the end--in other words, to a byte larger than N--is still illegal and will cause the program to fail.)", "bz": "See `bnz` for details on how branches work. `bz` inverts the behavior of `bnz`.", "b": "See `bnz` for details on how branches work. `b` always jumps to the offset.", + "callsub": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.`", + "retsub": "The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.`", "intcblock": "`intcblock` loads following program bytes into an array of integer constants in the evaluator. These integer constants can be referred to by `intc` and `intc_*` which will push the value onto the stack. Subsequent calls to `intcblock` reset and replace the integer constants available to the script.", "bytecblock": "`bytecblock` loads the following program bytes into an array of byte-array constants in the evaluator. These constants can be referred to by `bytec` and `bytec_*` which will push the value onto the stack. 
Subsequent calls to `bytecblock` reset and replace the bytes constants available to the script.", "*": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `mulw`.", "+": "Overflow is an error condition which halts execution and fails the transaction. Full precision is available from `addw`.", + "/": "`divw` is available to divide the two-element values produced by `mulw` and `addw`.", "txn": "FirstValidTime causes the program to fail. The field is reserved for future use.", "gtxn": "for notes on transaction fields available, see `txn`. If this transaction is _i_ in the group, `gtxn i field` is equivalent to `txn field`.", "gtxns": "for notes on transaction fields available, see `txn`. If top of stack is _i_, `gtxns field` is equivalent to `gtxn _i_ field`. gtxns exists so that _i_ can be calculated, often based on the index of the current transaction.", @@ -194,9 +200,9 @@ type OpGroup struct { // OpGroupList is groupings of ops for documentation purposes. 
var OpGroupList = []OpGroup{ - {"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "getbit", "setbit", "getbyte", "setbyte", "concat", "substring", "substring3"}}, + {"Arithmetic", []string{"sha256", "keccak256", "sha512_256", "ed25519verify", "+", "-", "/", "*", "<", ">", "<=", ">=", "&&", "||", "==", "!=", "!", "len", "itob", "btoi", "%", "|", "&", "^", "~", "mulw", "addw", "divw", "getbit", "setbit", "getbyte", "setbyte", "concat", "substring", "substring3"}}, {"Loading Values", []string{"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "txn", "gtxn", "txna", "gtxna", "gtxns", "gtxnsa", "global", "load", "store"}}, - {"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "swap", "select", "assert"}}, + {"Flow Control", []string{"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "swap", "select", "assert", "callsub", "retsub"}}, {"State Access", []string{"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get"}}, } diff --git a/data/transactions/logic/doc_test.go b/data/transactions/logic/doc_test.go index adb136188c..c0aeb45742 100644 --- a/data/transactions/logic/doc_test.go +++ b/data/transactions/logic/doc_test.go @@ -36,7 +36,7 @@ func TestOpDocs(t *testing.T) { } for op, seen := range opsSeen { if !seen { - t.Errorf("error: doc for op %#v missing", op) + t.Errorf("error: doc for op %#v missing from opDocByName", op) } } } diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index 96e1fd5d4d..91557d3bc0 100644 --- 
a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -28,7 +28,6 @@ import ( "math" "math/big" "runtime" - "sort" "strings" "golang.org/x/crypto/sha3" @@ -176,7 +175,7 @@ type EvalParams struct { } type opEvalFunc func(cx *evalContext) -type opCheckFunc func(cx *evalContext) int +type opCheckFunc func(cx *evalContext) error type runMode uint64 @@ -208,6 +207,13 @@ func (r runMode) String() string { return "Unknown" } +func (ep EvalParams) budget() int { + if ep.runModeFlags == runModeSignature { + return int(ep.Proto.LogicSigMaxCost) + } + return ep.Proto.MaxAppProgramCost +} + func (ep EvalParams) log() logging.Logger { if ep.Logger != nil { return ep.Logger @@ -218,22 +224,28 @@ func (ep EvalParams) log() logging.Logger { type evalContext struct { EvalParams - stack []stackValue - program []byte // txn.Lsig.Logic ? - pc int - nextpc int - err error - intc []uint64 - bytec [][]byte - version uint64 - scratch [256]stackValue - - stepCount int - cost int - - // Ordered set of pc values that a branch could go to. - // If Check pc skips a target, the source branch was invalid! - branchTargets []int + stack []stackValue + callstack []int + program []byte // txn.Lsig.Logic ? + pc int + nextpc int + err error + intc []uint64 + bytec [][]byte + version uint64 + scratch [256]stackValue + + cost int // cost incurred so far + + // Set of PC values that branches we've seen so far might + // go. So, if checkStep() skips one, that branch is trying to + // jump into the middle of a multibyte instruction + branchTargets map[int]bool + + // Set of PC values that we have begun a checkStep() with. So + // if a back jump is going to a value that isn't here, it's + // jumping into the middle of multibyte instruction. 
+ instructionStarts map[int]bool programHashCached crypto.Digest txidCache map[int]transactions.Txid @@ -297,11 +309,7 @@ func EvalStateful(program []byte, params EvalParams) (pass bool, err error) { var cx evalContext cx.EvalParams = params cx.runModeFlags = runModeApplication - - // Evaluate the program - pass, err = eval(program, &cx) - - return pass, err + return eval(program, &cx) } // Eval checks to see if a transaction passes logic @@ -376,8 +384,8 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) { minVersion = *cx.EvalParams.MinTealVersion } if version < minVersion { - err = fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version) - return + cx.err = fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version) + return false, cx.err } cx.version = version @@ -400,10 +408,6 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) { } cx.step() - cx.stepCount++ - if cx.stepCount > len(cx.program) { - return false, errLoopDetected - } } if cx.err != nil { if cx.Trace != nil { @@ -427,26 +431,29 @@ func eval(program []byte, cx *evalContext) (pass bool, err error) { return cx.stack[0].Uint != 0, nil } -// CheckStateful should be faster than EvalStateful. -// Returns 'cost' which is an estimate of relative execution time. -func CheckStateful(program []byte, params EvalParams) (cost int, err error) { +// CheckStateful should be faster than EvalStateful. It can perform +// static checks and reject programs that are invalid. Prior to v4, +// these static checks include a cost estimate that must be low enough +// (controlled by params.Proto). +func CheckStateful(program []byte, params EvalParams) error { params.runModeFlags = runModeApplication return check(program, params) } -// Check should be faster than Eval. -// Returns 'cost' which is an estimate of relative execution time. 
-func Check(program []byte, params EvalParams) (cost int, err error) { +// Check should be faster than Eval. It can perform static checks and +// reject programs that are invalid. Prior to v4, these static checks +// include a cost estimate that must be low enough (controlled by +// params.Proto). +func Check(program []byte, params EvalParams) error { params.runModeFlags = runModeSignature return check(program, params) } -func check(program []byte, params EvalParams) (cost int, err error) { +func check(program []byte, params EvalParams) (err error) { defer func() { if x := recover(); x != nil { buf := make([]byte, 16*1024) stlen := runtime.Stack(buf, false) - cost = 0 errstr := string(buf[:stlen]) if params.Trace != nil { if sb, ok := params.Trace.(*strings.Builder); ok { @@ -458,22 +465,17 @@ func check(program []byte, params EvalParams) (cost int, err error) { } }() if (params.Proto == nil) || (params.Proto.LogicSigVersion == 0) { - err = errLogicSigNotSupported - return + return errLogicSigNotSupported } - var cx evalContext version, vlen := binary.Uvarint(program) if vlen <= 0 { - cx.err = errors.New("invalid version") - return 0, cx.err + return errors.New("invalid version") } if version > EvalMaxVersion { - err = fmt.Errorf("program version %d greater than max supported version %d", version, EvalMaxVersion) - return + return fmt.Errorf("program version %d greater than max supported version %d", version, EvalMaxVersion) } if version > params.Proto.LogicSigVersion { - err = fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion) - return + return fmt.Errorf("program version %d greater than protocol supported version %d", version, params.Proto.LogicSigVersion) } var minVersion uint64 @@ -483,31 +485,41 @@ func check(program []byte, params EvalParams) (cost int, err error) { minVersion = *params.MinTealVersion } if version < minVersion { - err = fmt.Errorf("program version must be >= %d for this 
transaction group, but have version %d", minVersion, version) - return + return fmt.Errorf("program version must be >= %d for this transaction group, but have version %d", minVersion, version) } + var cx evalContext cx.version = version cx.pc = vlen cx.EvalParams = params cx.program = program + cx.branchTargets = make(map[int]bool) + cx.instructionStarts = make(map[int]bool) + maxCost := params.budget() + if version >= backBranchEnabledVersion { + maxCost = math.MaxInt32 + } + staticCost := 0 for cx.pc < len(cx.program) { prevpc := cx.pc - cost += cx.checkStep() - if cx.err != nil { - break + stepCost, err := cx.checkStep() + if err != nil { + return fmt.Errorf("pc=%3d %w", cx.pc, err) + } + staticCost += stepCost + if staticCost > maxCost { + return fmt.Errorf("pc=%3d static cost budget of %d exceeded", cx.pc, maxCost) } if cx.pc <= prevpc { - err = fmt.Errorf("pc did not advance, stuck at %d", cx.pc) - return + // Recall, this is advancing through opcodes + // without evaluation. It always goes forward, + // even if we're in v4 and the jump would go + // back. 
+ return fmt.Errorf("pc did not advance, stuck at %d", cx.pc) } } - if cx.err != nil { - err = fmt.Errorf("%3d %s", cx.pc, cx.err) - return - } - return + return nil } func opCompat(expected, got StackType) bool { @@ -568,6 +580,10 @@ func (cx *evalContext) step() { return } cx.cost += deets.Cost + if cx.cost > cx.budget() { + cx.err = fmt.Errorf("pc=%3d dynamic cost budget of %d exceeded, executing %s", cx.pc, cx.budget(), spec.Name) + return + } spec.op(cx) if cx.Trace != nil { // This code used to do a little disassembly on its @@ -631,25 +647,26 @@ func (cx *evalContext) step() { } } -func (cx *evalContext) checkStep() (cost int) { +func (cx *evalContext) checkStep() (int, error) { + cx.instructionStarts[cx.pc] = true opcode := cx.program[cx.pc] spec := &opsByOpcode[cx.version][opcode] if spec.op == nil { - cx.err = fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode) - return 1 + return 0, fmt.Errorf("%3d illegal opcode 0x%02x", cx.pc, opcode) } if (cx.runModeFlags & spec.Modes) == 0 { - cx.err = fmt.Errorf("%s not allowed in current mode", spec.Name) - return + return 0, fmt.Errorf("%s not allowed in current mode", spec.Name) } deets := spec.Details if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) { - cx.err = fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name) - return 1 + return 0, fmt.Errorf("%3d %s program ends short of immediate values", cx.pc, spec.Name) } prevpc := cx.pc if deets.checkFunc != nil { - cost = deets.checkFunc(cx) + err := deets.checkFunc(cx) + if err != nil { + return 0, err + } if cx.nextpc != 0 { cx.pc = cx.nextpc cx.nextpc = 0 @@ -657,26 +674,19 @@ func (cx *evalContext) checkStep() (cost int) { cx.pc += deets.Size } } else { - cost = deets.Cost cx.pc += deets.Size } if cx.Trace != nil { fmt.Fprintf(cx.Trace, "%3d %s\n", prevpc, spec.Name) } - if cx.err != nil { - return 1 - } - if len(cx.branchTargets) > 0 { - if cx.branchTargets[0] < cx.pc { - cx.err = fmt.Errorf("branch target at %d not an 
aligned instruction", cx.branchTargets[0]) - return 1 - } - for len(cx.branchTargets) > 0 && cx.branchTargets[0] == cx.pc { - // checks okay - cx.branchTargets = cx.branchTargets[1:] + if cx.err == nil { + for pc := prevpc + 1; pc < cx.pc; pc++ { + if _, ok := cx.branchTargets[pc]; ok { + return 0, fmt.Errorf("branch target %d is not an aligned instruction", pc) + } } } - return + return deets.Cost, nil } func opErr(cx *evalContext) { @@ -774,6 +784,41 @@ func opAddw(cx *evalContext) { cx.stack[last].Uint = sum } +func uint128(hi uint64, lo uint64) *big.Int { + whole := new(big.Int).SetUint64(hi) + whole.Lsh(whole, 64) + whole.Add(whole, new(big.Int).SetUint64(lo)) + return whole +} + +func opDivwImpl(hiNum, loNum, hiDen, loDen uint64) (hiQuo uint64, loQuo uint64, hiRem uint64, loRem uint64) { + dividend := uint128(hiNum, loNum) + divisor := uint128(hiDen, loDen) + + quo, rem := new(big.Int).QuoRem(dividend, divisor, new(big.Int)) + return new(big.Int).Rsh(quo, 64).Uint64(), + quo.Uint64(), + new(big.Int).Rsh(rem, 64).Uint64(), + rem.Uint64() +} + +func opDivw(cx *evalContext) { + loDen := len(cx.stack) - 1 + hiDen := loDen - 1 + if cx.stack[loDen].Uint == 0 && cx.stack[hiDen].Uint == 0 { + cx.err = errors.New("/ 0") + return + } + loNum := loDen - 2 + hiNum := loDen - 3 + hiQuo, loQuo, hiRem, loRem := + opDivwImpl(cx.stack[hiNum].Uint, cx.stack[loNum].Uint, cx.stack[hiDen].Uint, cx.stack[loDen].Uint) + cx.stack[hiNum].Uint = hiQuo + cx.stack[loNum].Uint = loQuo + cx.stack[hiDen].Uint = hiRem + cx.stack[loDen].Uint = loRem +} + func opMinus(cx *evalContext) { last := len(cx.stack) - 1 prev := last - 1 @@ -1151,29 +1196,41 @@ func opArg3(cx *evalContext) { opArgN(cx, 3) } -// checks any branch that is {op} {int16 be offset} -func checkBranch(cx *evalContext) int { - offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2]) - if offset > 0x7fff { - cx.err = fmt.Errorf("branch offset %x too large", offset) - return 1 +func branchTarget(cx *evalContext) 
(int, error) { + offset := int16(uint16(cx.program[cx.pc+1])<<8 | uint16(cx.program[cx.pc+2])) + if offset < 0 && cx.version < backBranchEnabledVersion { + return 0, fmt.Errorf("negative branch offset %x", offset) } - cx.nextpc = cx.pc + 3 - target := cx.nextpc + int(offset) + target := cx.pc + 3 + int(offset) var branchTooFar bool if cx.version >= 2 { // branching to exactly the end of the program (target == len(cx.program)), the next pc after the last instruction, is okay and ends normally - branchTooFar = target > len(cx.program) + branchTooFar = target > len(cx.program) || target < 0 } else { - branchTooFar = target >= len(cx.program) + branchTooFar = target >= len(cx.program) || target < 0 } if branchTooFar { - cx.err = errors.New("branch target beyond end of program") - return 1 + return 0, errors.New("branch target beyond end of program") } - cx.branchTargets = append(cx.branchTargets, target) - sort.Ints(cx.branchTargets) - return 1 + + return target, nil +} + +// checks any branch that is {op} {int16 be offset} +func checkBranch(cx *evalContext) error { + cx.nextpc = cx.pc + 3 + target, err := branchTarget(cx) + if err != nil { + return err + } + if target < cx.nextpc { + // If a branch goes backwards, we should have already noted that an instruction began at that location. 
+ if _, ok := cx.instructionStarts[target]; !ok { + return fmt.Errorf("back branch target %d is not an aligned instruction", target) + } + } + cx.branchTargets[target] = true + return nil } func opBnz(cx *evalContext) { last := len(cx.stack) - 1 @@ -1181,12 +1238,12 @@ func opBnz(cx *evalContext) { isNonZero := cx.stack[last].Uint != 0 cx.stack = cx.stack[:last] // pop if isNonZero { - offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2]) - if offset > 0x7fff { - cx.err = fmt.Errorf("bnz offset %x too large", offset) + target, err := branchTarget(cx) + if err != nil { + cx.err = err return } - cx.nextpc += int(offset) + cx.nextpc = target } } @@ -1196,22 +1253,38 @@ func opBz(cx *evalContext) { isZero := cx.stack[last].Uint == 0 cx.stack = cx.stack[:last] // pop if isZero { - offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2]) - if offset > 0x7fff { - cx.err = fmt.Errorf("bz offset %x too large", offset) + target, err := branchTarget(cx) + if err != nil { + cx.err = err return } - cx.nextpc += int(offset) + cx.nextpc = target } } func opB(cx *evalContext) { - offset := (uint(cx.program[cx.pc+1]) << 8) | uint(cx.program[cx.pc+2]) - if offset > 0x7fff { - cx.err = fmt.Errorf("b offset %x too large", offset) + target, err := branchTarget(cx) + if err != nil { + cx.err = err + return + } + cx.nextpc = target +} + +func opCallSub(cx *evalContext) { + cx.callstack = append(cx.callstack, cx.pc+3) + opB(cx) +} + +func opRetSub(cx *evalContext) { + top := len(cx.callstack) - 1 + if top < 0 { + cx.err = errors.New("retsub with empty callstack") return } - cx.nextpc = cx.pc + 3 + int(offset) + target := cx.callstack[top] + cx.callstack = cx.callstack[:top] + cx.nextpc = target } func opPop(cx *evalContext) { diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go index d5f262b3ac..98c791a1e6 100644 --- a/data/transactions/logic/evalStateful_test.go +++ 
b/data/transactions/logic/evalStateful_test.go @@ -542,13 +542,13 @@ pop type desc struct { source string eval func([]byte, EvalParams) (bool, error) - check func([]byte, EvalParams) (int, error) + check func([]byte, EvalParams) error } tests := map[runMode]desc{ runModeSignature: { source: opcodesRunModeAny + opcodesRunModeSignature, eval: func(program []byte, ep EvalParams) (bool, error) { return Eval(program, ep) }, - check: func(program []byte, ep EvalParams) (int, error) { return Check(program, ep) }, + check: func(program []byte, ep EvalParams) error { return Check(program, ep) }, }, runModeApplication: { source: opcodesRunModeAny + opcodesRunModeApplication, @@ -556,7 +556,7 @@ pop pass, err := EvalStateful(program, ep) return pass, err }, - check: func(program []byte, ep EvalParams) (int, error) { return CheckStateful(program, ep) }, + check: func(program []byte, ep EvalParams) error { return CheckStateful(program, ep) }, }, } @@ -600,7 +600,7 @@ pop ep.TxnGroup = txgroup ep.Ledger = ledger ep.Txn.Txn.ApplicationID = 100 - _, err := test.check(ops.Program, ep) + err := test.check(ops.Program, ep) require.NoError(t, err) _, err = test.eval(ops.Program, ep) if err != nil { @@ -618,7 +618,7 @@ pop ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) ep := defaultEvalParams(nil, nil) - _, err = test.check(ops.Program, ep) + err = test.check(ops.Program, ep) require.NoError(t, err) _, err = test.eval(ops.Program, ep) require.Error(t, err) @@ -640,7 +640,7 @@ pop ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) ep := defaultEvalParams(nil, nil) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.Error(t, err) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) @@ -666,7 +666,7 @@ pop for _, source := range statefulOpcodeCalls { ops := testProg(t, source, AssemblerMaxVersion) ep := defaultEvalParams(nil, nil) - _, err := Check(ops.Program, 
ep) + err := Check(ops.Program, ep) require.Error(t, err) _, err = Eval(ops.Program, ep) require.Error(t, err) @@ -713,9 +713,8 @@ int 177 ==` ops, err = AssembleStringWithVersion(text, AssemblerMaxVersion) require.NoError(t, err) - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -743,9 +742,8 @@ int 13 txn.Txn.Sender: 13, }, ) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -755,9 +753,8 @@ func testApp(t *testing.T, program string, ep EvalParams, problems ...string) ba ops := testProg(t, program, AssemblerMaxVersion) sb := &strings.Builder{} ep.Trace = sb - cost, err := CheckStateful(ops.Program, ep) + err := CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) // we only use this to test stateful apps. 
While, I suppose // it's *legal* to have an app with no stateful ops, this @@ -1025,9 +1022,8 @@ byte 0x414c474f ep := defaultEvalParams(nil, nil) ep.Txn = &txn ep.TxnGroup = txgroup - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), "ledger not available") @@ -1054,9 +1050,8 @@ byte 0x414c474f ledger.applications[100].GlobalState[string(protocol.PaymentTx)] = basics.TealValue{Type: basics.TealBytesType, Bytes: "ALGO"} - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -1242,9 +1237,8 @@ func TestAssets(t *testing.T) { txn := makeSampleTxn() ep := defaultEvalParams(nil, nil) ep.Txn = &txn - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), "ledger not available") @@ -1263,9 +1257,8 @@ func TestAssets(t *testing.T) { ops, err := AssembleStringWithVersion(assetsTestProgram, AssemblerMaxVersion) require.NoError(t, err) - cost, err := CheckStateful(ops.Program, defaultEvalParams(nil, nil)) + err = CheckStateful(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) txn := makeSampleTxn() sb := strings.Builder{} @@ -1291,9 +1284,8 @@ func TestAssets(t *testing.T) { ep := defaultEvalParams(&sb, &txn) ep.Ledger = ledger - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := EvalStateful(ops.Program, ep) if !pass { t.Log(hex.EncodeToString(ops.Program)) @@ -1319,9 +1311,8 @@ int 1 ops, err = 
AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) ledger.setHolding(txn.Txn.Sender, 55, 123, false) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -1419,9 +1410,8 @@ int 1 require.NoError(t, err) params.URL = "" ledger.newAsset(txn.Txn.Sender, 55, params) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), "cannot compare ([]byte == uint64)") @@ -1487,9 +1477,8 @@ int 100 ep := defaultEvalParams(nil, nil) ep.Txn = &txn ep.Txn.Txn.ApplicationID = 100 - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), "ledger not available") @@ -1586,9 +1575,8 @@ int 0x77 ` ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -1630,9 +1618,8 @@ int 0x77 ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -1793,9 +1780,8 @@ int 1 ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = 
CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -1974,9 +1960,8 @@ int 0x77 ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -2091,9 +2076,8 @@ byte 0x414c474f require.NoError(t, err) sb := strings.Builder{} ep.Trace = &sb - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) if !pass { t.Log(hex.EncodeToString(ops.Program)) @@ -2372,9 +2356,8 @@ int 1 ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err := CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := EvalStateful(ops.Program, ep) if !pass { t.Log(hex.EncodeToString(ops.Program)) @@ -2409,9 +2392,8 @@ app_local_get_ex ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -2446,9 +2428,8 @@ app_local_put ` ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -2479,9 +2460,8 @@ int 1 ` ops, err = AssembleStringWithVersion(source, 
AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -2515,9 +2495,8 @@ int 1 ` ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -2551,9 +2530,8 @@ int 1 ` ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - cost, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = EvalStateful(ops.Program, ep) require.NoError(t, err) require.True(t, pass) @@ -2793,7 +2771,7 @@ int 1 require.NoError(t, err) ep := defaultEvalParams(nil, nil) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) @@ -2822,7 +2800,7 @@ int 1 require.NoError(t, err) ep := defaultEvalParams(nil, nil) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) @@ -2852,7 +2830,7 @@ int 42 require.NoError(t, err) ep := defaultEvalParams(nil, nil) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) _, err = EvalStateful(ops.Program, ep) require.Error(t, err) diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index def21c908b..74dcc251fc 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -51,6 +51,7 @@ func defaultEvalProtoWithVersion(version 
uint64) config.ConsensusParams { return config.ConsensusParams{ LogicSigVersion: version, LogicSigMaxCost: 20000, + MaxAppProgramCost: 700, MaxAppKeyLen: 64, MaxAppBytesValueLen: 64, // These must be identical to keep an old backward compat test working @@ -128,7 +129,7 @@ func TestMinTealVersionParamEvalCheck(t *testing.T) { // set the teal program version to 1 binary.PutUvarint(program, 1) - _, err := Check(program, params) + err := Check(program, params) require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", appsEnabledVersion)) // If the param is read correctly, the eval should fail @@ -197,7 +198,7 @@ func TestWrongProtoVersion(t *testing.T) { proto.LogicSigVersion = 0 ep := defaultEvalParams(&sb, &txn) ep.Proto = &proto - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), "LogicSig not supported") pass, err := Eval(ops.Program, ep) @@ -232,9 +233,8 @@ byte base64 5rZMNsevs5sULO+54aN+OvU6lQ503z2X+SSYUABIx7E= txn.Lsig.Args = [][]byte{[]byte("=0\x97S\x85H\xe9\x91B\xfd\xdb;1\xf5Z\xaec?\xae\xf2I\x93\x08\x12\x94\xaa~\x06\x08\x849b")} sb := strings.Builder{} ep := defaultEvalParams(&sb, &txn) - cost, err := Check(ops.Program, ep) + err = Check(ops.Program, ep) require.NoError(t, err) - require.True(t, cost < 1000) pass, err := Eval(ops.Program, ep) require.True(t, pass) require.NoError(t, err) @@ -294,13 +294,12 @@ func TestTLHC(t *testing.T) { sb := strings.Builder{} block := bookkeeping.Block{} ep := defaultEvalParams(&sb, &txn) - cost, err := Check(ops.Program, ep) + err = Check(ops.Program, ep) if err != nil { t.Log(hex.EncodeToString(ops.Program)) t.Log(sb.String()) } require.NoError(t, err) - require.True(t, cost < 1000) pass, err := Eval(ops.Program, ep) if pass { t.Log(hex.EncodeToString(ops.Program)) @@ -536,34 +535,65 @@ int 1 // ret 1 `, 2) } +func TestUint128(t *testing.T) { + x := uint128(0, 3) + require.Equal(t, x.String(), "3") + x = uint128(0, 0) + 
require.Equal(t, x.String(), "0") + x = uint128(1, 3) + require.Equal(t, x.String(), "18446744073709551619") + x = uint128(1, 5) + require.Equal(t, x.String(), "18446744073709551621") + x = uint128(^uint64(0), ^uint64(0)) // maximum uint128 = 2^64-1 + require.Equal(t, x.String(), "340282366920938463463374607431768211455") +} + +func TestDivw(t *testing.T) { + t.Parallel() + // 2:0 / 1:0 == 2r0 == 0:2,0:0 + testAccepts(t, `int 2; int 0; int 1; int 0; divw; + int 0; ==; assert; + int 0; ==; assert; + int 2; ==; assert; + int 0; ==; assert; int 1`, 4) + + // 2:0 / 0:1 == 2:0r0 == 2:0,0:0 + testAccepts(t, `int 2; int 0; int 0; int 1; divw; + int 0; ==; assert; + int 0; ==; assert; + int 0; ==; assert; + int 2; ==; assert; int 1`, 4) + + // 0:0 / 0:7 == 0r0 + testAccepts(t, `int 0; int 0; int 0; int 7; divw; + int 0; ==; assert; + int 0; ==; assert; + int 0; ==; assert; + int 0; ==; assert; int 1`, 4) + + // maxu64:maxu64 / maxu64:maxu64 == 1r0 + testAccepts(t, `int 18446744073709551615; int 18446744073709551615; int 18446744073709551615; int 18446744073709551615; + divw; + int 0; ==; assert; + int 0; ==; assert; + int 1; ==; assert; + int 0; ==; assert; int 1`, 4) + + // 0:7777 / 1:0 == 0:0r7777 == 0:0,0:7777 + testAccepts(t, `int 0; int 7777; int 1; int 0; divw; + int 7777; ==; assert; + int 0; ==; assert; + int 0; ==; assert; + int 0; ==; assert; int 1`, 4) + + // 10:0 / 0:0 ==> panic + testPanics(t, `int 10; int 0; int 0; int 0; divw; + pop; pop; pop; pop; int 1`, 4) +} + func TestDivZero(t *testing.T) { t.Parallel() - for v := uint64(1); v <= AssemblerMaxVersion; v++ { - t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { - ops, err := AssembleStringWithVersion(`int 0x111111111 -int 0 -/ -pop -int 1`, v) - require.NoError(t, err) - sb := strings.Builder{} - cost, err := Check(ops.Program, defaultEvalParams(&sb, nil)) - if err != nil { - t.Log(hex.EncodeToString(ops.Program)) - t.Log(sb.String()) - } - require.NoError(t, err) - require.True(t, cost < 1000) - pass, 
err := Eval(ops.Program, defaultEvalParams(&sb, nil)) - if pass { - t.Log(hex.EncodeToString(ops.Program)) - t.Log(sb.String()) - } - require.False(t, pass) - require.Error(t, err) - isNotPanic(t, err) - }) - } + testPanics(t, "int 0x11; int 0; /; pop; int 1", 1) } func TestModZero(t *testing.T) { @@ -576,9 +606,8 @@ int 0 pop int 1`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -599,9 +628,8 @@ func TestErr(t *testing.T) { ops, err := AssembleStringWithVersion(`err int 1`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -629,9 +657,8 @@ int 2 int 4 ==`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -653,9 +680,8 @@ int 0 pop`, v) require.NoError(t, err) sb := strings.Builder{} - cost, err := Check(ops.Program, defaultEvalParams(&sb, nil)) + err = Check(ops.Program, defaultEvalParams(&sb, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb = strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -676,9 +702,8 @@ func TestStackLeftover(t *testing.T) { int 1`, v) require.NoError(t, err) sb := strings.Builder{} - cost, err := Check(ops.Program, defaultEvalParams(&sb, nil)) + err = Check(ops.Program, defaultEvalParams(&sb, nil)) require.NoError(t, err) - require.True(t, cost < 
1000) sb = strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -699,9 +724,8 @@ func TestStackBytesLeftover(t *testing.T) { ops, err := AssembleStringWithVersion(`byte 0x10101010`, v) require.NoError(t, err) sb := strings.Builder{} - cost, err := Check(ops.Program, defaultEvalParams(&sb, nil)) + err = Check(ops.Program, defaultEvalParams(&sb, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb = strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -724,9 +748,8 @@ int 1 pop pop`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -747,9 +770,8 @@ func TestArgTooFar(t *testing.T) { ops, err := AssembleStringWithVersion(`arg_1 btoi`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} var txn transactions.SignedTxn txn.Lsig.Logic = ops.Program @@ -772,9 +794,8 @@ func TestIntcTooFar(t *testing.T) { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops, err := AssembleStringWithVersion(`intc_1`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} var txn transactions.SignedTxn txn.Lsig.Logic = ops.Program @@ -798,9 +819,8 @@ func TestBytecTooFar(t *testing.T) { ops, err := AssembleStringWithVersion(`bytec_1 btoi`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, 
nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} var txn transactions.SignedTxn txn.Lsig.Logic = ops.Program @@ -820,9 +840,8 @@ btoi`, v) func TestTxnBadField(t *testing.T) { t.Parallel() program := []byte{0x01, 0x31, 0x7f} - cost, err := Check(program, defaultEvalParams(nil, nil)) + err := Check(program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} var txn transactions.SignedTxn txn.Lsig.Logic = program @@ -857,9 +876,8 @@ func TestTxnBadField(t *testing.T) { func TestGtxnBadIndex(t *testing.T) { t.Parallel() program := []byte{0x01, 0x33, 0x1, 0x01} - cost, err := Check(program, defaultEvalParams(nil, nil)) + err := Check(program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} var txn transactions.SignedTxn txn.Lsig.Logic = program @@ -881,9 +899,8 @@ func TestGtxnBadIndex(t *testing.T) { func TestGtxnBadField(t *testing.T) { t.Parallel() program := []byte{0x01, 0x33, 0x0, 0x7f} - cost, err := Check(program, defaultEvalParams(nil, nil)) + err := Check(program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} var txn transactions.SignedTxn txn.Lsig.Logic = program @@ -922,8 +939,7 @@ func TestGtxnBadField(t *testing.T) { func TestGlobalBadField(t *testing.T) { t.Parallel() program := []byte{0x01, 0x32, 0x7f} - cost, err := Check(program, defaultEvalParams(nil, nil)) - require.True(t, cost < 1000) + err := Check(program, defaultEvalParams(nil, nil)) require.NoError(t, err) // Check does not validates opcode args sb := strings.Builder{} var txn transactions.SignedTxn @@ -956,9 +972,8 @@ int 9 
< &&`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) var txn transactions.SignedTxn txn.Lsig.Logic = ops.Program txn.Lsig.Args = [][]byte{ @@ -1029,13 +1044,17 @@ addr ` + testAddr + ` && ` +const globalV4TestProgram = globalV3TestProgram + ` +// No new globals in v4 +` + func TestGlobal(t *testing.T) { t.Parallel() type desc struct { lastField GlobalField program string eval func([]byte, EvalParams) (bool, error) - check func([]byte, EvalParams) (int, error) + check func([]byte, EvalParams) error } tests := map[uint64]desc{ 0: {GroupSize, globalV1TestProgram, Eval, Check}, @@ -1048,6 +1067,10 @@ func TestGlobal(t *testing.T) { CreatorAddress, globalV3TestProgram, EvalStateful, CheckStateful, }, + 4: { + CreatorAddress, globalV4TestProgram, + EvalStateful, CheckStateful, + }, } ledger := makeTestLedger(nil) ledger.appID = 42 @@ -1066,9 +1089,8 @@ func TestGlobal(t *testing.T) { } } ops := testProg(t, testProgram, v) - cost, err := check(ops.Program, defaultEvalParams(nil, nil)) + err := check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) var txn transactions.SignedTxn txn.Lsig.Logic = ops.Program txgroup := make([]transactions.SignedTxn, 1) @@ -1078,11 +1100,12 @@ func TestGlobal(t *testing.T) { block.BlockHeader.Round = 999999 block.BlockHeader.TimeStamp = 2069 proto := config.ConsensusParams{ - MinTxnFee: 123, - MinBalance: 1000000, - MaxTxnLife: 999, - LogicSigVersion: LogicVersion, - LogicSigMaxCost: 20000, + MinTxnFee: 123, + MinBalance: 1000000, + MaxTxnLife: 999, + LogicSigVersion: LogicVersion, + LogicSigMaxCost: 20000, + MaxAppProgramCost: 700, } ep := defaultEvalParams(&sb, &txn) ep.TxnGroup = txgroup @@ -1134,9 +1157,8 @@ int %s &&`, symbol, string(tt)) ops, err := AssembleStringWithVersion(text, v) require.NoError(t, err) - cost, err := 
Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) var txn transactions.SignedTxn txn.Txn.Type = tt sb := strings.Builder{} @@ -1540,9 +1562,8 @@ func TestTxn(t *testing.T) { for v, source := range tests { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops := testProg(t, source, v) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) txn := makeSampleTxn() txn.Txn.ApprovalProgram = ops.Program txn.Txn.ClearStateProgram = clearOps.Program @@ -1625,13 +1646,12 @@ return ops, err := AssembleStringWithVersion(cachedTxnProg, 2) require.NoError(t, err) sb := strings.Builder{} - cost, err := Check(ops.Program, defaultEvalParams(&sb, nil)) + err = Check(ops.Program, defaultEvalParams(&sb, nil)) if err != nil { t.Log(hex.EncodeToString(ops.Program)) t.Log(sb.String()) } require.NoError(t, err) - require.True(t, cost < 1000) txn := makeSampleTxn() txgroup := makeSampleTxnGroup(txn) txn.Lsig.Logic = ops.Program @@ -1767,13 +1787,12 @@ func testLogic(t *testing.T, program string, v uint64, ep EvalParams, problems . 
sb := &strings.Builder{} ep.Trace = sb ep.Txn.Lsig.Logic = ops.Program - cost, err := Check(ops.Program, ep) + err := Check(ops.Program, ep) if err != nil { t.Log(hex.EncodeToString(ops.Program)) t.Log(sb.String()) } require.NoError(t, err) - require.True(t, cost < 1000) pass, err := Eval(ops.Program, ep) if len(problems) == 0 { @@ -1980,9 +1999,8 @@ int 0x300 int 0x310 ==`, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -2024,9 +2042,8 @@ len int 0 == &&`, 2) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -2077,9 +2094,8 @@ concat dup concat len`, 2) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2104,9 +2120,8 @@ int 4 int 2 substring3 len`, 2) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2123,9 +2138,8 @@ int 4 int 0xFFFFFFFFFFFFFFFE substring3 len`, 2) - cost, err = Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) pass, err = Eval(ops.Program, defaultEvalParams(nil, nil)) require.False(t, pass) require.Error(t, err) @@ -2137,9 +2151,8 @@ 
func TestSubstringRange(t *testing.T) { ops := testProg(t, `byte 0xf000000000000000 substring 2 99 len`, 2) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2170,9 +2183,8 @@ load 0 load 1 + &&`, v) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -2201,9 +2213,8 @@ int 5 for v := uint64(1); v <= AssemblerMaxVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops := testProg(t, progText, v) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -2282,9 +2293,8 @@ func TestCompares(t *testing.T) { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops, err := AssembleStringWithVersion(testCompareProgramText, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(nil, nil)) if !pass { @@ -2313,9 +2323,8 @@ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567 t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops, err := AssembleStringWithVersion(progText, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := 
strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(nil, nil)) if !pass { @@ -2348,9 +2357,8 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { ops, err := AssembleStringWithVersion(progText, v) require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if !pass { @@ -2363,6 +2371,53 @@ byte 0x98D2C31612EA500279B6753E5F6E780CA63EBA8274049664DAD66A2565ED1D2A } } +func TestSlowLogic(t *testing.T) { + t.Parallel() + fragment := `byte 0x666E6F7264; keccak256 + byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567; ==;` + + // Sanity check. Running a short sequence of these fragments passes in all versions. + source := fragment + strings.Repeat(fragment+"&&;", 5) + testAccepts(t, source, 1) + + // in v1, each repeat costs 30 + v1overspend := fragment + strings.Repeat(fragment+"&&;", 20000/30) + // in v2,v3 each repeat costs 134 + v2overspend := fragment + strings.Repeat(fragment+"&&;", 20000/134) + + // v1overspend fails (on v1) + ops := testProg(t, v1overspend, 1) + err := Check(ops.Program, defaultEvalParamsWithVersion(nil, nil, 1)) + require.Error(t, err) + require.Contains(t, err.Error(), "static cost") + // v2overspend passes Check, even on v2 proto, because cost is "grandfathered" + ops = testProg(t, v2overspend, 1) + err = Check(ops.Program, defaultEvalParamsWithVersion(nil, nil, 2)) + require.NoError(t, err) + + // even the shorter, v2overspend, fails when compiled as v2 code + ops = testProg(t, v2overspend, 2) + err = Check(ops.Program, defaultEvalParamsWithVersion(nil, nil, 2)) + require.Error(t, err) + require.Contains(t, err.Error(), "static cost") + + // in v4 cost is still 134, but only matters in Eval, not Check, so both fail 
there + ep4 := defaultEvalParamsWithVersion(nil, nil, 4) + ops = testProg(t, v1overspend, 4) + err = Check(ops.Program, ep4) + require.NoError(t, err) + _, err = Eval(ops.Program, ep4) + require.Error(t, err) + require.Contains(t, err.Error(), "dynamic cost") + + ops = testProg(t, v2overspend, 4) + err = Check(ops.Program, ep4) + require.NoError(t, err) + _, err = Eval(ops.Program, ep4) + require.Error(t, err) + require.Contains(t, err.Error(), "dynamic cost") +} + func isNotPanic(t *testing.T, err error) { if err == nil { return @@ -2379,9 +2434,8 @@ func TestStackUnderflow(t *testing.T) { ops, err := AssembleStringWithVersion(`int 1`, v) ops.Program = append(ops.Program, 0x08) // + require.NoError(t, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2401,9 +2455,8 @@ func TestWrongStackTypeRuntime(t *testing.T) { ops, err := AssembleStringWithVersion(`int 1`, v) require.NoError(t, err) ops.Program = append(ops.Program, 0x01, 0x15) // sha256, len - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2424,9 +2477,8 @@ func TestEqMismatch(t *testing.T) { int 1`, v) require.NoError(t, err) ops.Program = append(ops.Program, 0x12) // == - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2447,9 +2499,8 @@ func TestNeqMismatch(t *testing.T) { int 1`, v) 
require.NoError(t, err) ops.Program = append(ops.Program, 0x13) // != - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) // TODO: Check should know the type stack was wrong - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2470,9 +2521,8 @@ func TestWrongStackTypeRuntime2(t *testing.T) { int 1`, v) require.NoError(t, err) ops.Program = append(ops.Program, 0x08) // + - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, _ := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2497,9 +2547,8 @@ func TestIllegalOp(t *testing.T) { break } } - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2524,9 +2573,8 @@ int 1 require.NoError(t, err) // cut two last bytes - intc_1 and last byte of bnz ops.Program = ops.Program[:len(ops.Program)-2] - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) if pass { @@ -2546,9 +2594,8 @@ intc 0 intc 0 bnz done done:`, 2) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) + err := Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(ops.Program, defaultEvalParams(&sb, nil)) require.NoError(t, err) @@ -2564,10 +2611,9 @@ func TestShortBytecblock(t *testing.T) { for i := 2; i < len(fullops.Program); 
i++ { program := fullops.Program[:i] t.Run(hex.EncodeToString(program), func(t *testing.T) { - cost, err := Check(program, defaultEvalParams(nil, nil)) + err := Check(program, defaultEvalParams(nil, nil)) require.Error(t, err) isNotPanic(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(program, defaultEvalParams(&sb, nil)) if pass { @@ -2594,10 +2640,9 @@ func TestShortBytecblock2(t *testing.T) { t.Run(src, func(t *testing.T) { program, err := hex.DecodeString(src) require.NoError(t, err) - cost, err := Check(program, defaultEvalParams(nil, nil)) + err = Check(program, defaultEvalParams(nil, nil)) require.Error(t, err) isNotPanic(t, err) - require.True(t, cost < 1000) sb := strings.Builder{} pass, err := Eval(program, defaultEvalParams(&sb, nil)) if pass { @@ -2615,7 +2660,7 @@ const panicString = "out of memory, buffer overrun, stack overflow, divide by ze func opPanic(cx *evalContext) { panic(panicString) } -func checkPanic(cx *evalContext) int { +func checkPanic(cx *evalContext) error { panic(panicString) } @@ -2641,7 +2686,7 @@ func TestPanic(t *testing.T) { sb := strings.Builder{} params := defaultEvalParams(&sb, nil) params.Logger = log - _, err = Check(ops.Program, params) + err = Check(ops.Program, params) require.Error(t, err) if pe, ok := err.(PanicError); ok { require.Equal(t, panicString, pe.PanicValue) @@ -2677,7 +2722,7 @@ func TestProgramTooNew(t *testing.T) { t.Parallel() var program [12]byte vlen := binary.PutUvarint(program[:], EvalMaxVersion+1) - _, err := Check(program[:vlen], defaultEvalParams(nil, nil)) + err := Check(program[:vlen], defaultEvalParams(nil, nil)) require.Error(t, err) isNotPanic(t, err) pass, err := Eval(program[:vlen], defaultEvalParams(nil, nil)) @@ -2690,7 +2735,7 @@ func TestInvalidVersion(t *testing.T) { t.Parallel() program, err := hex.DecodeString("ffffffffffffffffffffffff") require.NoError(t, err) - _, err = Check(program, defaultEvalParams(nil, nil)) + err = Check(program, 
defaultEvalParams(nil, nil)) require.Error(t, err) isNotPanic(t, err) pass, err := Eval(program, defaultEvalParams(nil, nil)) @@ -2708,7 +2753,7 @@ func TestProgramProtoForbidden(t *testing.T) { } ep := EvalParams{} ep.Proto = &proto - _, err := Check(program[:vlen], ep) + err := Check(program[:vlen], ep) require.Error(t, err) ep.Txn = &transactions.SignedTxn{} pass, err := Eval(program[:vlen], ep) @@ -2733,13 +2778,29 @@ int 1`, v) require.NoError(t, err) require.Equal(t, ops.Program, canonicalProgramBytes) ops.Program[7] = 3 // clobber the branch offset to be in the middle of the bytecblock - _, err = Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) - require.True(t, strings.Contains(err.Error(), "aligned")) + require.Contains(t, err.Error(), "aligned") pass, err := Eval(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) require.False(t, pass) isNotPanic(t, err) + + // back branches are checked differently, so test misaligned back branch + ops.Program[6] = 0xff // Clobber the two bytes of offset with 0xff 0xff = -1 + ops.Program[7] = 0xff // That jumps into the offset itself (pc + 3 -1) + err = Check(ops.Program, defaultEvalParams(nil, nil)) + require.Error(t, err) + if v < backBranchEnabledVersion { + require.Contains(t, err.Error(), "negative branch") + } else { + require.Contains(t, err.Error(), "back branch") + require.Contains(t, err.Error(), "aligned") + } + pass, err = Eval(ops.Program, defaultEvalParams(nil, nil)) + require.Error(t, err) + require.False(t, pass) + isNotPanic(t, err) }) } } @@ -2760,7 +2821,7 @@ int 1`, v) require.NoError(t, err) require.Equal(t, ops.Program, canonicalProgramBytes) ops.Program[7] = 200 // clobber the branch offset to be beyond the end of the program - _, err = Check(ops.Program, defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) require.True(t, strings.Contains(err.Error(), 
"beyond end of program")) pass, err := Eval(ops.Program, defaultEvalParams(nil, nil)) @@ -2782,17 +2843,18 @@ done: int 1`, v) require.NoError(t, err) //t.Log(hex.EncodeToString(ops.Program)) + // (br)anch byte, (hi)gh byte of offset, (lo)w byte: brhilo canonicalProgramString := mutateProgVersion(v, "01200101224000112603040123457604ababcdcd04f000baad22") canonicalProgramBytes, err := hex.DecodeString(canonicalProgramString) require.NoError(t, err) require.Equal(t, ops.Program, canonicalProgramBytes) - ops.Program[6] = 0xff // clobber the branch offset - _, err = Check(ops.Program, defaultEvalParams(nil, nil)) + ops.Program[6] = 0x70 // clobber hi byte of branch offset + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) - require.Contains(t, err.Error(), "too large") + require.Contains(t, err.Error(), "beyond") pass, err := Eval(ops.Program, defaultEvalParams(nil, nil)) require.Error(t, err) - require.Contains(t, err.Error(), "too large") + require.Contains(t, err.Error(), "beyond") require.False(t, pass) isNotPanic(t, err) }) @@ -2812,14 +2874,14 @@ int 1 source := fmt.Sprintf(template, line) ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(t, err) - ops.Program[7] = 0xff // clobber the branch offset + ops.Program[7] = 0xf0 // clobber the branch offset - highly negative ops.Program[8] = 0xff // clobber the branch offset - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.Error(t, err) - require.Contains(t, err.Error(), "too large") + require.Contains(t, err.Error(), "beyond") pass, err := Eval(ops.Program, ep) require.Error(t, err) - require.Contains(t, err.Error(), "too large") + require.Contains(t, err.Error(), "beyond") require.False(t, pass) }) } @@ -3104,9 +3166,8 @@ int 142791994204213819 func benchmarkBasicProgram(b *testing.B, source string) { ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(b, err) - cost, err := Check(ops.Program, 
defaultEvalParams(nil, nil)) + err = Check(ops.Program, defaultEvalParams(nil, nil)) require.NoError(b, err) - require.True(b, cost < 2000) //b.Logf("%d bytes of program", len(ops.Program)) //b.Log(hex.EncodeToString(ops.Program)) b.ResetTimer() @@ -3129,9 +3190,9 @@ func benchmarkBasicProgram(b *testing.B, source string) { func benchmarkExpensiveProgram(b *testing.B, source string) { ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion) require.NoError(b, err) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) - require.NoError(b, err) - require.True(b, cost > 1000) + ep := defaultEvalParams(nil, nil) + err = Check(ops.Program, ep) + require.Error(b, err) // excessive cost //b.Logf("%d bytes of program", len(ops.Program)) //b.Log(hex.EncodeToString(ops.Program)) b.ResetTimer() @@ -3330,7 +3391,7 @@ func BenchmarkCheckx5(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { for _, program := range programs { - _, err = Check(program.Program, defaultEvalParams(nil, nil)) + err = Check(program.Program, defaultEvalParams(nil, nil)) if err != nil { require.NoError(b, err) } @@ -3536,7 +3597,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) { ops, err := AssembleStringWithVersion(source, v) require.NoError(t, err) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", appsEnabledVersion)) @@ -3548,7 +3609,7 @@ func TestApplicationsDisallowOldTeal(t *testing.T) { ops, err := AssembleStringWithVersion(source, appsEnabledVersion) require.NoError(t, err) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) _, err = EvalStateful(ops.Program, ep) @@ -3608,7 +3669,7 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) { ops, err := AssembleStringWithVersion(source, v) require.NoError(t, err) - _, err = CheckStateful(ops.Program, ep) + err = 
CheckStateful(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), expected) @@ -3616,7 +3677,7 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), expected) - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), expected) @@ -3630,13 +3691,13 @@ func TestAnyRekeyToOrApplicationRaisesMinTealVersion(t *testing.T) { ops, err := AssembleStringWithVersion(source, v) require.NoError(t, err) - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.NoError(t, err) _, err = EvalStateful(ops.Program, ep) require.NoError(t, err) - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.NoError(t, err) _, err = Eval(ops.Program, ep) @@ -3693,7 +3754,7 @@ func TestAllowedOpcodesV2(t *testing.T) { require.Contains(t, source, spec.Name) ops := testProg(t, source, AssemblerMaxVersion) // all opcodes allowed in stateful mode so use CheckStateful/EvalStateful - _, err := CheckStateful(ops.Program, ep) + err := CheckStateful(ops.Program, ep) require.NoError(t, err, source) _, err = EvalStateful(ops.Program, ep) if spec.Name != "return" { @@ -3704,10 +3765,10 @@ func TestAllowedOpcodesV2(t *testing.T) { for v := byte(0); v <= 1; v++ { ops.Program[0] = v - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.Error(t, err, source) require.Contains(t, err.Error(), "illegal opcode") - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.Error(t, err, source) require.Contains(t, err.Error(), "illegal opcode") _, err = Eval(ops.Program, ep) @@ -3756,7 +3817,7 @@ func TestAllowedOpcodesV3(t *testing.T) { require.Contains(t, source, spec.Name) ops := testProg(t, source, AssemblerMaxVersion) // all opcodes allowed in stateful mode so use CheckStateful/EvalStateful - _, err := CheckStateful(ops.Program, ep) + err := 
CheckStateful(ops.Program, ep) require.NoError(t, err, source) _, err = EvalStateful(ops.Program, ep) require.Error(t, err, source) @@ -3764,10 +3825,10 @@ func TestAllowedOpcodesV3(t *testing.T) { for v := byte(0); v <= 1; v++ { ops.Program[0] = v - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.Error(t, err, source) require.Contains(t, err.Error(), "illegal opcode") - _, err = CheckStateful(ops.Program, ep) + err = CheckStateful(ops.Program, ep) require.Error(t, err, source) require.Contains(t, err.Error(), "illegal opcode") _, err = Eval(ops.Program, ep) @@ -3797,7 +3858,7 @@ func TestRekeyFailsOnOldVersion(t *testing.T) { ep := defaultEvalParams(&sb, &txn) ep.TxnGroup = []transactions.SignedTxn{txn} ep.Proto = &proto - _, err = Check(ops.Program, ep) + err = Check(ops.Program, ep) require.Error(t, err) require.Contains(t, err.Error(), fmt.Sprintf("program version must be >= %d", rekeyingEnabledVersion)) pass, err := Eval(ops.Program, ep) @@ -3825,24 +3886,36 @@ func testEvaluation(t *testing.T, program string, introduced uint64, tester eval for v := uint64(1); v <= AssemblerMaxVersion; v++ { t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { if v < introduced { - testProg(t, obfuscate(program), v, expect{0, "...opcode was introduced..."}) + testProg(t, obfuscate(program), v, expect{0, "...was introduced..."}) return } ops := testProg(t, program, v) - cost, err := Check(ops.Program, defaultEvalParams(nil, nil)) - require.NoError(t, err) - require.True(t, cost < 1000) - var txn transactions.SignedTxn - txn.Lsig.Logic = ops.Program - sb := strings.Builder{} - pass, err := Eval(ops.Program, defaultEvalParams(&sb, &txn)) - ok := tester(pass, err) - if !ok { - t.Log(hex.EncodeToString(ops.Program)) - t.Log(sb.String()) + // Programs created with a previous assembler + // should still operate properly with future + // EvalParams, so try all forward versions. 
+ for lv := v; lv <= AssemblerMaxVersion; lv++ { + t.Run(fmt.Sprintf("lv=%d", lv), func(t *testing.T) { + sb := strings.Builder{} + err := Check(ops.Program, defaultEvalParamsWithVersion(&sb, nil, lv)) + if err != nil { + t.Log(hex.EncodeToString(ops.Program)) + t.Log(sb.String()) + } + require.NoError(t, err) + var txn transactions.SignedTxn + txn.Lsig.Logic = ops.Program + sb = strings.Builder{} + pass, err := Eval(ops.Program, defaultEvalParamsWithVersion(&sb, &txn, lv)) + ok := tester(pass, err) + if !ok { + t.Log(hex.EncodeToString(ops.Program)) + t.Log(sb.String()) + t.Log(err) + } + require.True(t, ok) + isNotPanic(t, err) // Never want a Go level panic. + }) } - require.True(t, ok) - isNotPanic(t, err) // Never want a Go level panic. }) } } @@ -3978,3 +4051,97 @@ func TestPush(t *testing.T) { ops2 = testProg(t, "int 2; int 3; int 5; int 6; pushint 1", 3) require.Less(t, len(ops2.Program), len(ops1.Program)) } + +func TestLoop(t *testing.T) { + t.Parallel() + // Double until > 10. Should be 16 + testAccepts(t, "int 1; loop: int 2; *; dup; int 10; <; bnz loop; int 16; ==", 4) + + // Why does this label on line with instruction cause trouble? + testAccepts(t, "int 1; loop: int 2; *; dup; int 10; <; bnz loop; int 16; ==", 4) + + // Infinite loop because multiply by one instead of two + testPanics(t, "int 1; loop:; int 1; *; dup; int 10; <; bnz loop; int 16; ==", 4) +} + +func TestSubroutine(t *testing.T) { + t.Parallel() + testAccepts(t, "int 1; callsub double; int 2; ==; return; double: dup; +; retsub;", 4) + testAccepts(t, ` +b main; +fact: + dup + int 2 + < + bz recur + retsub +recur: + dup + int 1 + - + callsub fact + * + retsub + +main: + int 5 + callsub fact + int 120 + == +`, 4) + + // Mutually recursive odd/even. Each is intentionally done in a slightly different way. + testAccepts(t, ` +b main +odd: // If 0, return false, else return !even + dup + bz retfalse + callsub even + ! 
+ retsub + +retfalse: + pop + int 0 + retsub + + +even: // If 0, return true, else decrement and return even + dup + bz rettrue + int 1 + - + callsub odd + retsub + +rettrue: + pop + int 1 + retsub + + +main: + int 1 + callsub odd + assert + + int 0 + callsub even + assert + + int 10 + callsub even + assert + + int 10 + callsub odd + ! + assert + + int 1 +`, 4) + + testPanics(t, "int 1; retsub", 4) + + testPanics(t, "int 1; recur: callsub recur; int 1", 4) +} diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 030bca9874..c9822a2026 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -21,7 +21,7 @@ import ( ) // LogicVersion defines default assembler and max eval versions -const LogicVersion = 3 +const LogicVersion = 4 // rekeyingEnabledVersion is the version of TEAL where RekeyTo functionality // was enabled. This is important to remember so that old TEAL accounts cannot @@ -33,6 +33,10 @@ const rekeyingEnabledVersion = 2 // from being used with applications. Do not edit! const appsEnabledVersion = 2 +// backBranchEabledVersion is the version of TEAL where branches could +// go back (and cost accounting was done during execution) +const backBranchEnabledVersion = 4 + // opDetails records details such as non-standard costs, immediate // arguments, or dynamic layout controlled by a check function. 
type opDetails struct { @@ -151,6 +155,7 @@ var OpSpecs = []OpSpec{ {0x1c, "~", opBitNot, asmDefault, disDefault, oneInt, oneInt, 1, modeAny, opDefault}, {0x1d, "mulw", opMulw, asmDefault, disDefault, twoInts, twoInts, 1, modeAny, opDefault}, {0x1e, "addw", opAddw, asmDefault, disDefault, twoInts, twoInts, 2, modeAny, opDefault}, + {0x1f, "divw", opDivw, asmDefault, disDefault, twoInts.plus(twoInts), twoInts.plus(twoInts), 4, modeAny, opDefault}, {0x20, "intcblock", opIntConstBlock, assembleIntCBlock, disIntcblock, nil, nil, 1, modeAny, varies(checkIntConstBlock, "uint ...", immInts)}, {0x21, "intc", opIntConstLoad, assembleIntC, disIntc, nil, oneInt, 1, modeAny, immediates("i")}, @@ -226,6 +231,16 @@ var OpSpecs = []OpSpec{ // Immediate bytes and ints. Smaller code size for single use of constant. {0x80, "pushbytes", opPushBytes, asmPushBytes, disPushBytes, nil, oneBytes, 3, modeAny, varies(checkPushBytes, "bytes", immBytes)}, {0x81, "pushint", opPushInt, asmPushInt, disPushInt, nil, oneInt, 3, modeAny, varies(checkPushInt, "uint", immInt)}, + + // "Function oriented" + {0x88, "callsub", opCallSub, assembleBranch, disBranch, nil, nil, 4, modeAny, opBranch}, + {0x89, "retsub", opRetSub, asmDefault, disDefault, nil, nil, 4, modeAny, opDefault}, + // Leave a little room for indirect function calls, or similar + + // More math + // shl, shr + // divw, modw convenience + // expmod } type sortByOpcode []OpSpec diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go index 450f9ee632..ed9ccffa87 100644 --- a/data/transactions/verify/txn.go +++ b/data/transactions/verify/txn.go @@ -214,13 +214,10 @@ func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx * GroupIndex: groupIndex, MinTealVersion: &groupCtx.minTealVersion, } - cost, err := logic.Check(lsig.Logic, ep) + err := logic.Check(lsig.Logic, ep) if err != nil { return err } - if cost > int(groupCtx.consensusParams.LogicSigMaxCost) { - return fmt.Errorf("LogicSig.Logic too 
slow, %d > %d", cost, groupCtx.consensusParams.LogicSigMaxCost) - } hasMsig := false numSigs := 0 diff --git a/ledger/apply/application.go b/ledger/apply/application.go index e3665e1a7e..261cb379af 100644 --- a/ledger/apply/application.go +++ b/ledger/apply/application.go @@ -274,25 +274,17 @@ func closeOutApplication(balances Balances, sender basics.Address, appIdx basics return nil } -func checkPrograms(ac *transactions.ApplicationCallTxnFields, evalParams *logic.EvalParams, maxCost int) error { - cost, err := logic.CheckStateful(ac.ApprovalProgram, *evalParams) +func checkPrograms(ac *transactions.ApplicationCallTxnFields, evalParams *logic.EvalParams) error { + err := logic.CheckStateful(ac.ApprovalProgram, *evalParams) if err != nil { return fmt.Errorf("check failed on ApprovalProgram: %v", err) } - if cost > maxCost { - return fmt.Errorf("ApprovalProgram too resource intensive. Cost is %d, max %d", cost, maxCost) - } - - cost, err = logic.CheckStateful(ac.ClearStateProgram, *evalParams) + err = logic.CheckStateful(ac.ClearStateProgram, *evalParams) if err != nil { return fmt.Errorf("check failed on ClearStateProgram: %v", err) } - if cost > maxCost { - return fmt.Errorf("ClearStateProgram too resource intensive. 
Cost is %d, max %d", cost, maxCost) - } - return nil } @@ -344,8 +336,7 @@ func ApplicationCall(ac transactions.ApplicationCallTxnFields, header transactio // If this txn is going to set new programs (either for creation or // update), check that the programs are valid and not too expensive if ac.ApplicationID == 0 || ac.OnCompletion == transactions.UpdateApplicationOC { - maxCost := balances.ConsensusParams().MaxAppProgramCost - err = checkPrograms(&ac, evalParams, maxCost) + err = checkPrograms(&ac, evalParams) if err != nil { return err } diff --git a/ledger/apply/application_test.go b/ledger/apply/application_test.go index 23f1a49090..5ce8b07eda 100644 --- a/ledger/apply/application_test.go +++ b/ledger/apply/application_test.go @@ -349,32 +349,36 @@ func TestAppCallCheckPrograms(t *testing.T) { var ac transactions.ApplicationCallTxnFields var ep logic.EvalParams - proto := config.Consensus[protocol.ConsensusFuture] + // This check is for static costs. v26 is last with static cost checking + proto := config.Consensus[protocol.ConsensusV26] ep.Proto = &proto - err := checkPrograms(&ac, &ep, 1) + proto.MaxAppProgramCost = 1 + err := checkPrograms(&ac, &ep) a.Error(err) a.Contains(err.Error(), "check failed on ApprovalProgram") program := []byte{2, 0x20, 1, 1, 0x22} // version, intcb, int 1 ac.ApprovalProgram = program - err = checkPrograms(&ac, &ep, 1) - a.Error(err) - a.Contains(err.Error(), "ApprovalProgram too resource intensive") + ac.ClearStateProgram = program - err = checkPrograms(&ac, &ep, 10) + err = checkPrograms(&ac, &ep) a.Error(err) - a.Contains(err.Error(), "check failed on ClearStateProgram") + a.Contains(err.Error(), "check failed on ApprovalProgram") + + proto.MaxAppProgramCost = 10 + err = checkPrograms(&ac, &ep) + a.NoError(err) ac.ClearStateProgram = append(ac.ClearStateProgram, program...) ac.ClearStateProgram = append(ac.ClearStateProgram, program...) ac.ClearStateProgram = append(ac.ClearStateProgram, program...) 
- err = checkPrograms(&ac, &ep, 10) + err = checkPrograms(&ac, &ep) a.Error(err) - a.Contains(err.Error(), "ClearStateProgram too resource intensive") + a.Contains(err.Error(), "check failed on ClearStateProgram") ac.ClearStateProgram = program - err = checkPrograms(&ac, &ep, 10) + err = checkPrograms(&ac, &ep) a.NoError(err) } diff --git a/test/e2e-go/cli/goal/expect/tealConsensusTest.exp b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp index 1f26e78654..c8f7e84a27 100755 --- a/test/e2e-go/cli/goal/expect/tealConsensusTest.exp +++ b/test/e2e-go/cli/goal/expect/tealConsensusTest.exp @@ -35,7 +35,7 @@ if { [catch { set TEST_ROOT_DIR $TEST_ALGO_DIR/root set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/ set NETWORK_NAME test_net_expect_$TIME_STAMP - set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50Each.json" + set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json" # Create network ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR @@ -80,18 +80,19 @@ if { [catch { exec goal clerk send -F "$TEST_ROOT_DIR/small-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/small-sig.tx spawn goal clerk dryrun -t $TEST_ROOT_DIR/small-sig.tx expect { - " - pass -" { puts "smallsig dryrun pass" } + " - pass -" { puts "small-sig dryrun pass" } "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) } - "too large" { ::AlgorandGoal::Abort $expect_out(buffer) } + "static cost budget" { ::AlgorandGoal::Abort $expect_out(buffer) } } teal "$TEST_ROOT_DIR/slow-sig.teal" 2 1 20001 exec goal clerk compile "$TEST_ROOT_DIR/slow-sig.teal" exec goal clerk send -F "$TEST_ROOT_DIR/slow-sig.teal" -t GXBNLU4AXQABPLHXJDMTG2YXSDT4EWUZACT7KTPFXDQW52XPTIUS5OZ5HQ -a 100 -d $TEST_PRIMARY_NODE_DIR -o $TEST_ROOT_DIR/slow-sig.tx - spawn goal clerk dryrun -t $TEST_ROOT_DIR/slow-sig.tx + spawn goal clerk dryrun -P future -t $TEST_ROOT_DIR/slow-sig.tx # Should succeed 
Check, fail Eval expect { - "program cost too large" {puts "slowsig pass"} - "\n" { ::AlgorandGoal::Abort $expect_out(buffer) } + "dynamic cost budget" { puts "slow-sig dryrun pass" } + " - pass -" { ::AlgorandGoal::Abort $expect_out(buffer) } + "REJECT" { ::AlgorandGoal::Abort $expect_out(buffer) } } # Shutdown the network diff --git a/test/scripts/e2e_subs/v26/teal-v3-only.sh b/test/scripts/e2e_subs/v26/teal-v3-only.sh new file mode 100755 index 0000000000..ee3fe7d865 --- /dev/null +++ b/test/scripts/e2e_subs/v26/teal-v3-only.sh @@ -0,0 +1,99 @@ +#!/bin/bash + +date '+teal-v3-only start %Y%m%d_%H%M%S' + +set -e +set -x +set -o pipefail +export SHELLOPTS + +WALLET=$1 + +gcmd="goal -w ${WALLET}" + +ACCOUNT=$(${gcmd} account list|awk '{ print $3 }') + +# prints: +# Created new account with address UCTHHNBEAUWHDQWQI5DGQCTB7AR4CSVNU5YNPROAYQIT3Y3LKVDFAA5M6Q +ACCOUNTB=$(${gcmd} account new|awk '{ print $6 }') + +cat >${TEMPDIR}/true.teal<&1 | grep "back jump support" +set -o pipefail + +# Although we are in an earlier version, v4 can be compiled, it just can't be used. +cat >${TEMPDIR}/true4.teal<&1 | grep "LogicSig.Logic version too new" +set -o pipefail + + +# Now, ensure it still fails, even if using the v3 program, if the +# retsub opcode is added. (That is, failure based on opcode choice, +# not just on the version marker.) 
+ +${gcmd} clerk compile ${TEMPDIR}/true.teal -o ${TEMPDIR}/true.lsig +# append "retsub" opcode to the true program (won't execute the opcode, but presence should cause fail) +# we can't assemble this, because it would be rejected +(cat ${TEMPDIR}/true.lsig; printf '\x89') > ${TEMPDIR}/retsub.lsig +# compute the escrow account for the retsub program +ACCOUNT_TRUE=$(python -c 'import algosdk, sys; print(algosdk.logic.address(sys.stdin.buffer.read()))' < ${TEMPDIR}/retsub.lsig) +# fund that escrow account +${gcmd} clerk send --amount 1000000 --from ${ACCOUNT} --to ${ACCOUNT_TRUE} +# try, and fail, to lsig with the retsub program +set +o pipefail +${gcmd} clerk send --amount 10 --from-program-bytes ${TEMPDIR}/retsub.lsig --to ${ACCOUNTB} 2>&1 | grep "illegal opcode" +set -o pipefail + + + +date '+teal-v3-only OK %Y%m%d_%H%M%S' diff --git a/test/testdata/nettemplates/TwoNodes50EachV26.json b/test/testdata/nettemplates/TwoNodes50EachV26.json new file mode 100644 index 0000000000..848765da15 --- /dev/null +++ b/test/testdata/nettemplates/TwoNodes50EachV26.json @@ -0,0 +1,29 @@ +{ + "Genesis": { + "NetworkName": "tbd", + "ConsensusProtocol": "https://github.com/algorandfoundation/specs/tree/ac2255d586c4474d4ebcf3809acccb59b7ef34ff", + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 50, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 50, + "Online": true + } + ] + }, + "Nodes": [ + { + "Name": "Primary", + "IsRelay": true, + "Wallets": [{ "Name": "Wallet1", "ParticipationOnly": false }] + }, + { + "Name": "Node", + "Wallets": [{ "Name": "Wallet2", "ParticipationOnly": false }] + } + ] +} From 258ffd96e4a2ba25d471a212b7405f58576e98f6 Mon Sep 17 00:00:00 2001 From: Tsachi Herman Date: Mon, 17 May 2021 18:06:53 -0400 Subject: [PATCH 210/215] add support for more strict key registration transaction verification (#2137) The existing key registration transaction verification allows quite much flexibility on what is written to the account data. 
This PR adds a few more constraints and would allow us to make certain assumptions regarding future
key registration transactions.
---
 config/consensus.go                   |  11 +
 data/transactions/transaction.go      |  43 +-
 data/transactions/transaction_test.go | 645 ++++++++++++++++++++++++++
 ledger/apply/keyreg.go                |  15 +-
 ledger/apply/keyreg_test.go           |  54 ++-
 ledger/eval.go                        |   2 +-
 ledger/ledger_test.go                 |  14 +-
 7 files changed, 770 insertions(+), 14 deletions(-)

diff --git a/config/consensus.go b/config/consensus.go
index b09acf4d0e..544c736337 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -351,6 +351,15 @@ type ConsensusParams struct {
 
 	// NoEmptyLocalDeltas updates how ApplyDelta.EvalDelta.LocalDeltas are stored
 	NoEmptyLocalDeltas bool
+
+	// EnableKeyregCoherencyCheck enables the following extra checks on key registration transactions:
+	// 1. checking that [VotePK/SelectionPK/VoteKeyDilution] are all set or all clear.
+	// 2. checking that the VoteFirst is less or equal to VoteLast.
+	// 3. checking that in the case of going offline, both the VoteFirst and VoteLast are clear.
+	// 4. checking that in the case of going online the VoteLast is non-zero and greater than the current network round.
+	// 5. checking that in the case of going online the VoteFirst is less or equal to the LastValid+1.
+	// 6. checking that in the case of going online the VoteFirst is less or equal to the next network round.
+	EnableKeyregCoherencyCheck bool
 }
 
 // PaysetCommitType enumerates possible ways for the block header to commit to
@@ -905,6 +914,8 @@ func initConsensusProtocols() {
 	vFuture.CompactCertWeightThreshold = (1 << 32) * 30 / 100
 	vFuture.CompactCertSecKQ = 128
 
+	vFuture.EnableKeyregCoherencyCheck = true
+
 	// enable the InitialRewardsRateCalculation fix
 	vFuture.InitialRewardsRateCalculation = true
 	// Enable transaction Merkle tree.
diff --git a/data/transactions/transaction.go b/data/transactions/transaction.go index 4c30d1c3bc..34f52eed67 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -17,6 +17,7 @@ package transactions import ( + "errors" "fmt" "github.com/algorand/go-algorand/config" @@ -242,6 +243,14 @@ func (tx Transaction) MatchAddress(addr basics.Address, spec SpecialAddresses) b return false } +var errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound = errors.New("transaction first voting round need to be less than its last voting round") +var errKeyregTxnNonCoherentVotingKeys = errors.New("the following transaction fields need to be clear/set together : votekey, selkey, votekd") +var errKeyregTxnOfflineTransactionHasVotingRounds = errors.New("on going offline key registration transaction, the vote first and vote last fields should not be set") +var errKeyregTxnUnsupportedSwitchToNonParticipating = errors.New("transaction tries to mark an account as nonparticipating, but that transaction is not supported") +var errKeyregTxnGoingOnlineWithNonParticipating = errors.New("transaction tries to register keys to go online, but nonparticipatory flag is set") +var errKeyregTxnGoingOnlineWithZeroVoteLast = errors.New("transaction tries to register keys to go online, but vote last is set to zero") +var errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid = errors.New("transaction tries to register keys to go online, but first voting round is beyond the round after last valid round") + // WellFormed checks that the transaction looks reasonable on its own (but not necessarily valid against the actual ledger). It does not check signatures. 
func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusParams) error { switch tx.Type { @@ -253,18 +262,48 @@ func (tx Transaction) WellFormed(spec SpecialAddresses, proto config.ConsensusPa } case protocol.KeyRegistrationTx: + if proto.EnableKeyregCoherencyCheck { + // ensure that the VoteLast is greater or equal to the VoteFirst + if tx.KeyregTxnFields.VoteFirst > tx.KeyregTxnFields.VoteLast { + return errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound + } + + // The trio of [VotePK, SelectionPK, VoteKeyDilution] needs to be all zeros or all non-zero for the transaction to be valid. + if !((tx.KeyregTxnFields.VotePK == crypto.OneTimeSignatureVerifier{} && tx.KeyregTxnFields.SelectionPK == crypto.VRFVerifier{} && tx.KeyregTxnFields.VoteKeyDilution == 0) || + (tx.KeyregTxnFields.VotePK != crypto.OneTimeSignatureVerifier{} && tx.KeyregTxnFields.SelectionPK != crypto.VRFVerifier{} && tx.KeyregTxnFields.VoteKeyDilution != 0)) { + return errKeyregTxnNonCoherentVotingKeys + } + + // if it's a going offline transaction + if tx.KeyregTxnFields.VoteKeyDilution == 0 { + // check that we don't have any VoteFirst/VoteLast fields. + if tx.KeyregTxnFields.VoteFirst != 0 || tx.KeyregTxnFields.VoteLast != 0 { + return errKeyregTxnOfflineTransactionHasVotingRounds + } + } else { + // going online + if tx.KeyregTxnFields.VoteLast == 0 { + return errKeyregTxnGoingOnlineWithZeroVoteLast + } + if tx.KeyregTxnFields.VoteFirst > tx.LastValid+1 { + return errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid + } + } + } + // check that, if this tx is marking an account nonparticipating, // it supplies no key (as though it were trying to go offline) if tx.KeyregTxnFields.Nonparticipation { if !proto.SupportBecomeNonParticipatingTransactions { // if the transaction has the Nonparticipation flag high, but the protocol does not support // that type of transaction, it is invalid. 
- return fmt.Errorf("transaction tries to mark an account as nonparticipating, but that transaction is not supported") + return errKeyregTxnUnsupportedSwitchToNonParticipating } suppliesNullKeys := tx.KeyregTxnFields.VotePK == crypto.OneTimeSignatureVerifier{} || tx.KeyregTxnFields.SelectionPK == crypto.VRFVerifier{} if !suppliesNullKeys { - return fmt.Errorf("transaction tries to register keys to go online, but nonparticipatory flag is set") + return errKeyregTxnGoingOnlineWithNonParticipating } + } case protocol.AssetConfigTx: diff --git a/data/transactions/transaction_test.go b/data/transactions/transaction_test.go index cb98535422..6ccd835971 100644 --- a/data/transactions/transaction_test.go +++ b/data/transactions/transaction_test.go @@ -17,6 +17,7 @@ package transactions import ( + "flag" "fmt" "testing" @@ -164,3 +165,647 @@ func TestWellFormedErrors(t *testing.T) { require.Equal(t, usecase.expectedError, err) } } + +var generateFlag = flag.Bool("generate", false, "") + +// running test with -generate would generate the matrix used in the test ( without the "correct" errors ) +func TestWellFormedKeyRegistrationTx(t *testing.T) { + flag.Parse() + + // addr has no significance here other than being a normal valid address + addr, err := basics.UnmarshalChecksumAddress("NDQCJNNY5WWWFLP4GFZ7MEF2QJSMZYK6OWIV2AQ7OMAVLEFCGGRHFPKJJA") + require.NoError(t, err) + + tx := generateDummyGoNonparticpatingTransaction(addr) + curProto := config.Consensus[protocol.ConsensusCurrentVersion] + feeSink := basics.Address{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} + spec := SpecialAddresses{FeeSink: feeSink} + if !curProto.SupportBecomeNonParticipatingTransactions { + t.Skipf("Skipping rest of test because current protocol version %v does not support become-nonparticipating transactions", protocol.ConsensusCurrentVersion) + } + + // 
this tx is well-formed + err = tx.WellFormed(spec, curProto) + require.NoError(t, err) + + type keyRegTestCase struct { + votePK crypto.OneTimeSignatureVerifier + selectionPK crypto.VRFVerifier + voteFirst basics.Round + voteLast basics.Round + lastValid basics.Round + voteKeyDilution uint64 + nonParticipation bool + supportBecomeNonParticipatingTransactions bool + enableKeyregCoherencyCheck bool + err error + } + votePKValue := crypto.OneTimeSignatureVerifier{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} + selectionPKValue := crypto.VRFVerifier{0x7, 0xda, 0xcb, 0x4b, 0x6d, 0x9e, 0xd1, 0x41, 0xb1, 0x75, 0x76, 0xbd, 0x45, 0x9a, 0xe6, 0x42, 0x1d, 0x48, 0x6d, 0xa3, 0xd4, 0xef, 0x22, 0x47, 0xc4, 0x9, 0xa3, 0x96, 0xb8, 0x2e, 0xa2, 0x21} + + runTestCase := func(testCase keyRegTestCase) error { + tx.KeyregTxnFields.VotePK = testCase.votePK + tx.KeyregTxnFields.SelectionPK = testCase.selectionPK + tx.KeyregTxnFields.VoteFirst = testCase.voteFirst + tx.KeyregTxnFields.VoteLast = testCase.voteLast + tx.KeyregTxnFields.VoteKeyDilution = testCase.voteKeyDilution + tx.KeyregTxnFields.Nonparticipation = testCase.nonParticipation + tx.LastValid = testCase.lastValid + + curProto.SupportBecomeNonParticipatingTransactions = testCase.supportBecomeNonParticipatingTransactions + curProto.EnableKeyregCoherencyCheck = testCase.enableKeyregCoherencyCheck + return tx.WellFormed(spec, curProto) + } + + if *generateFlag == true { + fmt.Printf("keyRegTestCases := []keyRegTestCase{\n") + idx := 0 + for _, votePK := range []crypto.OneTimeSignatureVerifier{crypto.OneTimeSignatureVerifier{}, votePKValue} { + for _, selectionPK := range []crypto.VRFVerifier{crypto.VRFVerifier{}, selectionPKValue} { + for _, voteFirst := range []basics.Round{basics.Round(0), basics.Round(5)} { + for _, voteLast := range []basics.Round{basics.Round(0), basics.Round(10)} { + 
for _, lastValid := range []basics.Round{basics.Round(4), basics.Round(3)} { + for _, voteKeyDilution := range []uint64{0, 10000} { + for _, nonParticipation := range []bool{false, true} { + for _, supportBecomeNonParticipatingTransactions := range []bool{false, true} { + for _, enableKeyregCoherencyCheck := range []bool{false, true} { + outcome := runTestCase(keyRegTestCase{ + votePK, + selectionPK, + voteFirst, + voteLast, + lastValid, + voteKeyDilution, + nonParticipation, + supportBecomeNonParticipatingTransactions, + enableKeyregCoherencyCheck, + nil}) + errStr := "nil" + switch outcome { + case errKeyregTxnUnsupportedSwitchToNonParticipating: + errStr = "errKeyregTxnUnsupportedSwitchToNonParticipating" + case errKeyregTxnGoingOnlineWithNonParticipating: + errStr = "errKeyregTxnGoingOnlineWithNonParticipating" + case errKeyregTxnNonCoherentVotingKeys: + errStr = "errKeyregTxnNonCoherentVotingKeys" + case errKeyregTxnOfflineTransactionHasVotingRounds: + errStr = "errKeyregTxnOfflineTransactionHasVotingRounds" + case errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound: + errStr = "errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound" + case errKeyregTxnGoingOnlineWithZeroVoteLast: + errStr = "errKeyregTxnGoingOnlineWithZeroVoteLast" + case errKeyregTxnGoingOnlineWithNonParticipating: + errStr = "errKeyregTxnGoingOnlineWithNonParticipating" + case errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid: + errStr = "errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid" + default: + require.Nil(t, outcome) + + } + s := "/* %3d */ keyRegTestCase{votePK:" + if votePK == votePKValue { + s += "votePKValue" + } else { + s += "crypto.OneTimeSignatureVerifier{}" + } + s += ", selectionPK:" + if selectionPK == selectionPKValue { + s += "selectionPKValue" + } else { + s += "crypto.VRFVerifier{}" + } + s = fmt.Sprintf("%s, voteFirst:basics.Round(%2d), voteLast:basics.Round(%2d), lastValid:basics.Round(%2d), voteKeyDilution: %5d, nonParticipation: 
%v,supportBecomeNonParticipatingTransactions:%v, enableKeyregCoherencyCheck:%v, err:%s},\n", + s, voteFirst, voteLast, lastValid, voteKeyDilution, nonParticipation, supportBecomeNonParticipatingTransactions, enableKeyregCoherencyCheck, errStr) + fmt.Printf(s, idx) + idx++ + } + } + } + } + } + } + } + } + } + fmt.Printf("}\n") + return + } + keyRegTestCases := []keyRegTestCase{ + /* 0 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 1 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil}, + /* 2 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 3 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 4 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, 
err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 5 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 6 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 7 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 8 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 9 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 10 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, 
supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 11 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 12 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 13 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 14 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 15 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 16 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: 
basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 17 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil}, + /* 18 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 19 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 20 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 21 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 22 */ keyRegTestCase{votePK: 
crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 23 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 24 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 25 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 26 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 27 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: 
errKeyregTxnNonCoherentVotingKeys}, + /* 28 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 29 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 30 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 31 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 32 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 33 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, 
nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 34 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 35 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 36 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 37 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 38 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 39 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, 
selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 40 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 41 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 42 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 43 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 44 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, 
enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 45 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 46 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 47 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 48 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 49 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 50 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: 
basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 51 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 52 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 53 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 54 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 55 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 56 
*/ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 57 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 58 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 59 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 60 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 61 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 62 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 63 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 64 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 65 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 66 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 67 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: 
basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 68 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 69 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 70 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 71 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 72 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: 
false, err: nil}, + /* 73 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 74 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 75 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 76 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 77 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 78 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), 
voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 79 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 80 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 81 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 82 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 83 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 84 */ 
keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 85 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 86 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 87 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 88 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 89 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: 
false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 90 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 91 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 92 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 93 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 94 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 95 */ keyRegTestCase{votePK: 
crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 96 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 97 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 98 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 99 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 100 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 101 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 102 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 103 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 104 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 105 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 106 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: 
crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 107 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 108 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 109 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 110 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 111 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: 
true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 112 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 113 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 114 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 115 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 116 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 117 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: 
basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 118 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 119 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnOfflineTransactionHasVotingRounds}, + /* 120 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 121 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 122 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 123 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, 
selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 124 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 125 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 126 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 127 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 128 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: 
false, enableKeyregCoherencyCheck: false, err: nil}, + /* 129 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 130 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 131 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 132 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 133 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 134 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), 
voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 135 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 136 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 137 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 138 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 139 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 140 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), 
voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 141 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 142 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 143 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 144 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 145 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 146 */ keyRegTestCase{votePK: 
crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 147 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 148 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 149 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 150 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 151 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, 
err: errKeyregTxnNonCoherentVotingKeys}, + /* 152 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 153 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 154 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 155 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 156 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 157 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, 
nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 158 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 159 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 160 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 161 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 162 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 163 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), 
lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 164 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 165 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 166 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 167 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 168 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 169 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, 
selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 170 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 171 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 172 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 173 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 174 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, 
enableKeyregCoherencyCheck: false, err: nil}, + /* 175 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 176 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 177 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 178 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 179 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 180 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 181 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 182 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 183 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 184 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 185 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 186 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: 
basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 187 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 188 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 189 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 190 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 191 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 192 */ keyRegTestCase{votePK: 
crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 193 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 194 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 195 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 196 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 197 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 198 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 199 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 200 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 201 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 202 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 203 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: 
basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 204 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 205 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 206 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 207 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 208 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, 
enableKeyregCoherencyCheck: false, err: nil}, + /* 209 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 210 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 211 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 212 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 213 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 214 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: 
basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 215 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 216 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 217 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 218 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 219 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 220 */ 
keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 221 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 222 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 223 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 224 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 225 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 226 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 227 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 228 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 229 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 230 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 231 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: 
basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 232 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 233 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 234 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 235 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 236 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 237 */ keyRegTestCase{votePK: 
crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 238 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 239 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 240 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 241 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 242 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: 
false, err: nil}, + /* 243 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 244 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 245 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 246 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 247 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 248 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: 
false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 249 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 250 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 251 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 252 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 253 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 254 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: 
basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 255 */ keyRegTestCase{votePK: crypto.OneTimeSignatureVerifier{}, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 256 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 257 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 258 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 259 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 260 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), 
lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 261 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 262 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 263 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 264 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 265 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 266 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: 
basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 267 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 268 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 269 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 270 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 271 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 272 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: 
basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 273 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 274 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 275 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 276 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 277 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 278 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), 
voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 279 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 280 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 281 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 282 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 283 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 284 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 285 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 286 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 287 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 288 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 289 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 290 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, 
supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 291 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 292 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 293 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 294 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 295 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 296 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 297 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 298 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 299 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 300 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 301 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 302 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 303 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 304 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 305 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 306 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 307 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 308 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, 
enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 309 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 310 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 311 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 312 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 313 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 314 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, 
enableKeyregCoherencyCheck: false, err: nil}, + /* 315 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 316 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 317 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 318 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 319 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 320 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, 
enableKeyregCoherencyCheck: false, err: nil}, + /* 321 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 322 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 323 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 324 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 325 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 326 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 327 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 328 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 329 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 330 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 331 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 332 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: 
true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 333 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 334 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 335 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 336 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 337 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 338 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: 
basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 339 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 340 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 341 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 342 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 343 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 344 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: 
basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 345 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 346 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 347 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 348 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 349 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 350 */ keyRegTestCase{votePK: votePKValue, selectionPK: 
crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 351 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 352 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 353 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 354 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 355 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 356 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: 
basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 357 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 358 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 359 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 360 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 361 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 362 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), 
voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 363 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 364 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 365 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 366 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 367 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 368 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), 
voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 369 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 370 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 371 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 372 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 373 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 374 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: 
basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 375 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 376 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 377 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 378 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 379 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 380 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), 
voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 381 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 382 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 383 */ keyRegTestCase{votePK: votePKValue, selectionPK: crypto.VRFVerifier{}, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 384 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 385 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 386 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, 
nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 387 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 388 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 389 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 390 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 391 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 392 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, 
nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 393 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 394 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 395 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 396 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 397 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 398 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, 
nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 399 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 400 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 401 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 402 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 403 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 404 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 405 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 406 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 407 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 408 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 409 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 410 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: 
false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 411 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 412 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 413 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 414 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 415 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithZeroVoteLast}, + /* 416 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 
0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 417 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 418 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 419 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 420 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 421 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 422 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, 
supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 423 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 424 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 425 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil}, + /* 426 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 427 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 428 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: 
errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 429 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 430 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 431 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 432 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 433 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 434 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, 
enableKeyregCoherencyCheck: false, err: nil}, + /* 435 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 436 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 437 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 438 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 439 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 440 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, 
enableKeyregCoherencyCheck: false, err: nil}, + /* 441 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil}, + /* 442 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 443 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 444 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 445 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 446 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: 
errKeyregTxnGoingOnlineWithNonParticipating}, + /* 447 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(0), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 448 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 449 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 450 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 451 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 452 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: 
errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 453 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 454 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 455 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 456 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 457 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 458 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, 
supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 459 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 460 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 461 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 462 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 463 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 464 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), 
lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 465 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 466 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 467 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 468 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 469 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 470 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: 
basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 471 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 472 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 473 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 474 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 475 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 476 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: 
basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 477 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 478 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 479 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(0), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnFirstVotingRoundGreaterThanLastVotingRound}, + /* 480 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 481 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 482 */ keyRegTestCase{votePK: 
votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 483 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 484 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 485 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 486 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 487 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 488 */ keyRegTestCase{votePK: votePKValue, 
selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 489 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: nil}, + /* 490 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 491 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: nil}, + /* 492 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 493 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 494 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: 
basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 495 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(4), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 496 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 497 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 498 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 499 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 500 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: 
basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 501 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 502 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 503 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 0, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnNonCoherentVotingKeys}, + /* 504 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: nil}, + /* 505 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid}, + /* 506 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: 
basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: nil}, + /* 507 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: false, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid}, + /* 508 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: false, err: errKeyregTxnUnsupportedSwitchToNonParticipating}, + /* 509 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: false, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid}, + /* 510 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: false, err: errKeyregTxnGoingOnlineWithNonParticipating}, + /* 511 */ keyRegTestCase{votePK: votePKValue, selectionPK: selectionPKValue, voteFirst: basics.Round(5), voteLast: basics.Round(10), lastValid: basics.Round(3), voteKeyDilution: 10000, nonParticipation: true, supportBecomeNonParticipatingTransactions: true, enableKeyregCoherencyCheck: true, err: errKeyregTxnGoingOnlineWithFirstVoteAfterLastValid}, + } + for testcaseIdx, testCase := range 
keyRegTestCases { + err := runTestCase(testCase) + require.Equalf(t, testCase.err, err, "index: %d\ntest case: %#v", testcaseIdx, testCase) + } +} diff --git a/ledger/apply/keyreg.go b/ledger/apply/keyreg.go index 3926f1ad8d..532bfac517 100644 --- a/ledger/apply/keyreg.go +++ b/ledger/apply/keyreg.go @@ -17,6 +17,7 @@ package apply import ( + "errors" "fmt" "github.com/algorand/go-algorand/crypto" @@ -24,8 +25,11 @@ import ( "github.com/algorand/go-algorand/data/transactions" ) +var errKeyregGoingOnlineExpiredParticipationKey = errors.New("transaction tries to mark an account as online with last voting round in the past") +var errKeyregGoingOnlineFirstVotingInFuture = errors.New("transaction tries to mark an account as online with first voting round beyond the next voting round") + // Keyreg applies a KeyRegistration transaction using the Balances interface. -func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, balances Balances, spec transactions.SpecialAddresses, ad *transactions.ApplyData) error { +func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, balances Balances, spec transactions.SpecialAddresses, ad *transactions.ApplyData, round basics.Round) error { if header.Sender == spec.FeeSink { return fmt.Errorf("cannot register participation key for fee sink's address %v ", header.Sender) } @@ -59,6 +63,15 @@ func Keyreg(keyreg transactions.KeyregTxnFields, header transactions.Header, bal record.VoteLastValid = 0 record.VoteKeyDilution = 0 } else { + + if balances.ConsensusParams().EnableKeyregCoherencyCheck { + if keyreg.VoteLast <= round { + return errKeyregGoingOnlineExpiredParticipationKey + } + if keyreg.VoteFirst > round+1 { + return errKeyregGoingOnlineFirstVotingInFuture + } + } record.Status = basics.Online record.VoteFirstValid = keyreg.VoteFirst record.VoteLastValid = keyreg.VoteLast diff --git a/ledger/apply/keyreg_test.go b/ledger/apply/keyreg_test.go index 044d587a5f..2be6a6b372 100644 --- 
a/ledger/apply/keyreg_test.go +++ b/ledger/apply/keyreg_test.go @@ -44,7 +44,8 @@ func (balances keyregTestBalances) GetCreator(cidx basics.CreatableIndex, ctype return basics.Address{}, true, nil } -func (balances keyregTestBalances) Put(basics.Address, basics.AccountData) error { +func (balances keyregTestBalances) Put(addr basics.Address, ad basics.AccountData) error { + balances.addrs[addr] = ad return nil } @@ -95,11 +96,11 @@ func TestKeyregApply(t *testing.T) { SelectionPK: vrfSecrets.PK, }, } - err := Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil) + err := Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0)) require.NoError(t, err) tx.Sender = feeSink - err = Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil) + err = Keyreg(tx.KeyregTxnFields, tx.Header, makeMockBalances(protocol.ConsensusCurrentVersion), transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0)) require.Error(t, err) tx.Sender = src @@ -108,19 +109,60 @@ func TestKeyregApply(t *testing.T) { // Going from offline to online should be okay mockBal.addrs[src] = basics.AccountData{Status: basics.Offline} - err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil) + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0)) require.NoError(t, err) // Going from online to nonparticipatory should be okay, if the protocol supports that if mockBal.ConsensusParams().SupportBecomeNonParticipatingTransactions { tx.KeyregTxnFields = transactions.KeyregTxnFields{} tx.KeyregTxnFields.Nonparticipation = true - err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, 
transactions.SpecialAddresses{FeeSink: feeSink}, nil) + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0)) require.NoError(t, err) // Nonparticipatory accounts should not be able to change status mockBal.addrs[src] = basics.AccountData{Status: basics.NotParticipating} - err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil) + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(0)) require.Error(t, err) } + + mockBal.version = "future" + if mockBal.ConsensusParams().EnableKeyregCoherencyCheck { + tx = transactions.Transaction{ + Type: protocol.KeyRegistrationTx, + Header: transactions.Header{ + Sender: src, + Fee: basics.MicroAlgos{Raw: 1}, + FirstValid: basics.Round(1000), + LastValid: basics.Round(1200), + }, + KeyregTxnFields: transactions.KeyregTxnFields{ + VotePK: crypto.OneTimeSignatureVerifier(secretParticipation.SignatureVerifier), + SelectionPK: vrfSecrets.PK, + VoteKeyDilution: 1000, + VoteFirst: 500, + VoteLast: 1000, + }, + } + mockBal.addrs[src] = basics.AccountData{Status: basics.Offline} + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(999)) + require.NoError(t, err) + + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1000)) + require.Equal(t, errKeyregGoingOnlineExpiredParticipationKey, err) + + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1001)) + require.Equal(t, errKeyregGoingOnlineExpiredParticipationKey, err) + + tx.KeyregTxnFields.VoteFirst = basics.Round(1100) + tx.KeyregTxnFields.VoteLast = basics.Round(1200) + + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1098)) 
+ require.Equal(t, errKeyregGoingOnlineFirstVotingInFuture, err) + + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1099)) + require.NoError(t, err) + + err = Keyreg(tx.KeyregTxnFields, tx.Header, mockBal, transactions.SpecialAddresses{FeeSink: feeSink}, nil, basics.Round(1100)) + require.NoError(t, err) + } } diff --git a/ledger/eval.go b/ledger/eval.go index 48e73ac08a..42208995f4 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -916,7 +916,7 @@ func (eval *BlockEvaluator) applyTransaction(tx transactions.Transaction, balanc err = apply.Payment(tx.PaymentTxnFields, tx.Header, balances, spec, &ad) case protocol.KeyRegistrationTx: - err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, balances, spec, &ad) + err = apply.Keyreg(tx.KeyregTxnFields, tx.Header, balances, spec, &ad, balances.round()) case protocol.AssetConfigTx: err = apply.AssetConfig(tx.AssetConfigTxnFields, tx.Header, balances, spec, &ad, ctr) diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go index c232ecaefa..d8745b7cfa 100644 --- a/ledger/ledger_test.go +++ b/ledger/ledger_test.go @@ -457,8 +457,11 @@ func TestLedgerSingleTx(t *testing.T) { votePK[0] = 1 selPK[0] = 2 correctKeyregFields := transactions.KeyregTxnFields{ - VotePK: votePK, - SelectionPK: selPK, + VotePK: votePK, + SelectionPK: selPK, + VoteKeyDilution: proto.DefaultKeyDilution, + VoteFirst: 0, + VoteLast: 10000, } correctKeyreg := transactions.Transaction{ @@ -1125,8 +1128,11 @@ func testLedgerSingleTxApplyData(t *testing.T, version protocol.ConsensusVersion votePK[0] = 1 selPK[0] = 2 correctKeyregFields := transactions.KeyregTxnFields{ - VotePK: votePK, - SelectionPK: selPK, + VotePK: votePK, + SelectionPK: selPK, + VoteKeyDilution: proto.DefaultKeyDilution, + VoteFirst: 0, + VoteLast: 10000, } correctKeyreg := transactions.Transaction{ From 8c7311cd967b5dee1432ec7bbb970d16c80f949e Mon Sep 17 00:00:00 2001 From: Will Winder Date: Mon, 17 May 2021 
18:27:54 -0400 Subject: [PATCH 211/215] Return an empty string instead of null when making a proof for transaction with no siblings. (#2152) A nil array is serialized to null which looks strange in the API response. Initialize to an empty array. --- daemon/algod/api/server/v2/handlers.go | 2 +- test/README.md | 6 +- test/scripts/e2e.sh | 3 + .../e2e_subs/rest-applications-endpoint.sh | 35 ++++++ test/scripts/e2e_subs/rest-assets-endpoint.sh | 27 +++++ .../scripts/e2e_subs/rest-genesis-endpoint.sh | 15 +++ test/scripts/e2e_subs/rest-pprof.sh | 32 ++++++ test/scripts/e2e_subs/rest.sh | 107 ++---------------- .../e2e_subs/serial/rest-proof-endpoint.sh | 25 ++++ 9 files changed, 154 insertions(+), 98 deletions(-) create mode 100755 test/scripts/e2e_subs/rest-applications-endpoint.sh create mode 100755 test/scripts/e2e_subs/rest-assets-endpoint.sh create mode 100755 test/scripts/e2e_subs/rest-genesis-endpoint.sh create mode 100755 test/scripts/e2e_subs/rest-pprof.sh create mode 100755 test/scripts/e2e_subs/serial/rest-proof-endpoint.sh diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go index e3b3edba49..7a74c6b2a7 100644 --- a/daemon/algod/api/server/v2/handlers.go +++ b/daemon/algod/api/server/v2/handlers.go @@ -220,7 +220,7 @@ func (v2 *Handlers) GetProof(ctx echo.Context, round uint64, txid string, params return internalError(ctx, err, "generating proof", v2.Log) } - var proofconcat []byte + proofconcat := make([]byte, 0) for _, proofelem := range proof { proofconcat = append(proofconcat, proofelem[:]...) } diff --git a/test/README.md b/test/README.md index 0061fb0f1d..59e5760f6c 100644 --- a/test/README.md +++ b/test/README.md @@ -29,7 +29,7 @@ Must run from the root project directory, `./test/scripts/e2e.sh` ## scripts/e2e_client_runner.py and scripts/e2e_subs/ -These tests are shell scripts which all run in series against a single private network. 
+These tests are shell scripts which all run in parallel against a single private network. Each script is provided with a wallet which contains a large supply of algos to use during the test. ``` @@ -48,5 +48,7 @@ optional arguments: To run a specific test: ``` -~$ ./e2e_client_runner.py full/path/to/test_script.sh +~$ ./e2e_client_runner.py /full/path/to/e2e_subs/test_script.sh ``` + +Tests in the `e2e_subs/serial` directory are executed serially instead of in parallel. This should only be used when absolutely necessary. diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh index e1b176b66f..e12ab620d7 100755 --- a/test/scripts/e2e.sh +++ b/test/scripts/e2e.sh @@ -103,6 +103,9 @@ python3 -m venv "${TEMPDIR}/ve" for vdir in "$SRCROOT"/test/scripts/e2e_subs/v??; do "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} --version "$(basename "$vdir")" "$vdir"/*.sh done +for script in "$SRCROOT"/test/scripts/e2e_subs/serial/*; do + "${TEMPDIR}/ve/bin/python3" e2e_client_runner.py ${RUN_KMD_WITH_UNSAFE_SCRYPT} $script +done deactivate # Export our root temp folder as 'TESTDIR' for tests to use as their root test folder diff --git a/test/scripts/e2e_subs/rest-applications-endpoint.sh b/test/scripts/e2e_subs/rest-applications-endpoint.sh new file mode 100755 index 0000000000..6d66e57983 --- /dev/null +++ b/test/scripts/e2e_subs/rest-applications-endpoint.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# TIMEOUT=300 + +my_dir="$(dirname "$0")" +#"$my_dir/rest.sh" "$@" +source "$my_dir/rest.sh" "$@" + +date "+$0 start %Y%m%d_%H%M%S" + +# Create an application +printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal" +APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 2 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }') + +# Good request, non-existant app id +call_and_verify "Should not find app." 
"/v2/applications/987654321" 404 'application does not exist' +# Good request +call_and_verify "Should contain app data." "/v2/applications/$APPID" 200 '"global-state-schema":{"num-byte-slice":0,"num-uint":2}' +# Good request, pretty response +call_and_verify "Should contain app data." "/v2/applications/$APPID?pretty" 200 ' + "global-state-schema": { + "num-byte-slice": 0, + "num-uint": 2 + }, + "local-state-schema": { + "num-byte-slice": 0, + "num-uint": 0 + } + ' +# Some invalid path parameters +call_and_verify "App parameter parsing error 1." "/v2/applications/-2" 400 "Invalid format for parameter application-id" +call_and_verify "App parameter parsing error 2." "/v2/applications/not-a-number" 400 "Invalid format for parameter application-id" + +# Good request, but invalid query parameters +call_and_verify "App invalid parameter" "/v2/applications/$APPID?this-should-fail=200" 400 'Unknown parameter detected: this-should-fail' + diff --git a/test/scripts/e2e_subs/rest-assets-endpoint.sh b/test/scripts/e2e_subs/rest-assets-endpoint.sh new file mode 100755 index 0000000000..fc6ca6e801 --- /dev/null +++ b/test/scripts/e2e_subs/rest-assets-endpoint.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env bash +# TIMEOUT=300 + +my_dir="$(dirname "$0")" +#"$my_dir/rest.sh" "$@" +source "$my_dir/rest.sh" "$@" + +date "+$0 start %Y%m%d_%H%M%S" + +ASSET_ID=$(${gcmd} asset create --creator "${ACCOUNT}" --total 10000 --decimals 19 --name "spanish coin" --unitname "doubloon" | grep "Created asset with asset index" | rev | cut -d ' ' -f 1 | rev) + +# Good request, non-existant asset id +call_and_verify "Should not find asset." "/v2/assets/987654321" 404 'asset does not exist' +# Good request +call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID" 200 '","decimals":19,"default-frozen":false,"freeze":"' +# Good request, pretty response +call_and_verify "Should contain asset data." 
"/v2/assets/$ASSET_ID?pretty" 200 ' + "decimals": 19, + "default-frozen": false, + "freeze": "' +# Some invalid path parameters +call_and_verify "Asset parameter parsing error 1." "/v2/assets/-2" 400 "Invalid format for parameter asset-id" +call_and_verify "Asset parameter parsing error 2." "/v2/assets/not-a-number" 400 "Invalid format for parameter asset-id" + +# Good request, but invalid query parameters +call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-fail=200" 400 'parameter detected: this-should-fail' + diff --git a/test/scripts/e2e_subs/rest-genesis-endpoint.sh b/test/scripts/e2e_subs/rest-genesis-endpoint.sh new file mode 100755 index 0000000000..e87d172bff --- /dev/null +++ b/test/scripts/e2e_subs/rest-genesis-endpoint.sh @@ -0,0 +1,15 @@ +#!/usr/bin/env bash +# TIMEOUT=300 + +my_dir="$(dirname "$0")" +#"$my_dir/rest.sh" "$@" +source "$my_dir/rest.sh" "$@" + +date "+$0 start %Y%m%d_%H%M%S" + +call_and_verify "There should be a genesis endpoint." "/genesis" 200 ' + "id": "v1", + "network": "tbd", + "proto": "future", + "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU" +}' diff --git a/test/scripts/e2e_subs/rest-pprof.sh b/test/scripts/e2e_subs/rest-pprof.sh new file mode 100755 index 0000000000..2fe976191e --- /dev/null +++ b/test/scripts/e2e_subs/rest-pprof.sh @@ -0,0 +1,32 @@ +#!/usr/bin/env bash +# TIMEOUT=300 + +my_dir="$(dirname "$0")" +#"$my_dir/rest.sh" "$@" +source "$my_dir/rest.sh" "$@" + +date "+$0 start %Y%m%d_%H%M%S" + +# URL Auth - valid +CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/$PRIMARY_ADMIN_TOKEN/debug/pprof/block") +if [[ "$CODE" != "200" ]]; then + fail_and_exit "Call pprof with valid token" "/urlAuth/:token/debug/pprof" "Invalid exit code expected 200 (actual $CODE)" +fi + +# URL Auth - invalid +CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/invalid_token/debug/pprof/block") +if [[ "$CODE" != "401" ]]; then + 
fail_and_exit "Call pprof with invalid token" "/urlAuth/invalid_token/debug/pprof" "Invalid exit code expected 401 (actual $CODE)" +fi + +# Header Auth - valid +CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer $PRIMARY_ADMIN_TOKEN") +if [[ "$CODE" != "200" ]]; then + fail_and_exit "Call pprof with valid token" "/debug/pprof" "Invalid exit code expected 200 (actual $CODE)" +fi + +# Header Auth - invalid +CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer invalid_token") +if [[ "$CODE" != "401" ]]; then + fail_and_exit "Call pprof with invalid token" "/debug/pprof" "Invalid exit code expected 401 (actual $CODE)" +fi diff --git a/test/scripts/e2e_subs/rest.sh b/test/scripts/e2e_subs/rest.sh index 3f915406b0..84b9ed570d 100755 --- a/test/scripts/e2e_subs/rest.sh +++ b/test/scripts/e2e_subs/rest.sh @@ -1,7 +1,17 @@ #!/usr/bin/env bash -# TIMEOUT=300 +# TIMEOUT=50 -date '+rest.sh start %Y%m%d_%H%M%S' +# Helpers for REST API tests. +# Use the following boilerplate code at the top of new REST tests: + +# #!/usr/bin/env bash +# # TIMEOUT=300 +# +# my_dir="$(dirname "$0")" +# #"$my_dir/rest.sh" "$@" +# source "$my_dir/rest.sh" "$@" +# +# date "+$0 start %Y%m%d_%H%M%S" set -ex set -o pipefail @@ -63,96 +73,3 @@ function call_and_verify { fail_and_exit "$1" "$2" "unexpected response. should contain '$4', actual: $RES" fi } - - -function test_applications_endpoint { - # Create an application - printf '#pragma version 2\nint 1' > "${TEMPDIR}/simple.teal" - APPID=$(${gcmd} app create --creator "${ACCOUNT}" --approval-prog "${TEMPDIR}/simple.teal" --clear-prog "${TEMPDIR}/simple.teal" --global-byteslices 0 --global-ints 2 --local-byteslices 0 --local-ints 0 | grep Created | awk '{ print $6 }') - - # Good request, non-existant app id - call_and_verify "Should not find app." 
"/v2/applications/987654321" 404 'application does not exist' - # Good request - call_and_verify "Should contain app data." "/v2/applications/$APPID" 200 '"global-state-schema":{"num-byte-slice":0,"num-uint":2}' - # Good request, pretty response - call_and_verify "Should contain app data." "/v2/applications/$APPID?pretty" 200 ' - "global-state-schema": { - "num-byte-slice": 0, - "num-uint": 2 - }, - "local-state-schema": { - "num-byte-slice": 0, - "num-uint": 0 - } -' - # Some invalid path parameters - call_and_verify "App parameter parsing error 1." "/v2/applications/-2" 400 "Invalid format for parameter application-id" - call_and_verify "App parameter parsing error 2." "/v2/applications/not-a-number" 400 "Invalid format for parameter application-id" - - # Good request, but invalid query parameters - call_and_verify "App invalid parameter" "/v2/applications/$APPID?this-should-fail=200" 400 'Unknown parameter detected: this-should-fail' -} - - -function test_assets_endpoint { - local ASSET_ID - ASSET_ID=$(${gcmd} asset create --creator "${ACCOUNT}" --total 10000 --decimals 19 --name "spanish coin" --unitname "doubloon" | grep "Created asset with asset index" | rev | cut -d ' ' -f 1 | rev) - - # Good request, non-existant asset id - call_and_verify "Should not find asset." "/v2/assets/987654321" 404 'asset does not exist' - # Good request - call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID" 200 '","decimals":19,"default-frozen":false,"freeze":"' - # Good request, pretty response - call_and_verify "Should contain asset data." "/v2/assets/$ASSET_ID?pretty" 200 ' - "decimals": 19, - "default-frozen": false, - "freeze": "' - # Some invalid path parameters - call_and_verify "Asset parameter parsing error 1." "/v2/assets/-2" 400 "Invalid format for parameter asset-id" - call_and_verify "Asset parameter parsing error 2." 
"/v2/assets/not-a-number" 400 "Invalid format for parameter asset-id" - - # Good request, but invalid query parameters - call_and_verify "Asset invalid parameter" "/v2/assets/$ASSET_ID?this-should-fail=200" 400 'parameter detected: this-should-fail' -} - -function pprof_test { - # URL Auth - valid - CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/$PRIMARY_ADMIN_TOKEN/debug/pprof/block") - if [[ "$CODE" != "200" ]]; then - fail_and_exit "Call pprof with valid token" "/urlAuth/:token/debug/pprof" "Invalid exit code expected 200 (actual $CODE)" - fi - - # URL Auth - invalid - CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/urlAuth/invalid_token/debug/pprof/block") - if [[ "$CODE" != "401" ]]; then - fail_and_exit "Call pprof with invalid token" "/urlAuth/invalid_token/debug/pprof" "Invalid exit code expected 401 (actual $CODE)" - fi - - # Header Auth - valid - CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer $PRIMARY_ADMIN_TOKEN") - if [[ "$CODE" != "200" ]]; then - fail_and_exit "Call pprof with valid token" "/debug/pprof" "Invalid exit code expected 200 (actual $CODE)" - fi - - # Header Auth - invalid - CODE=$(curl -o "${TEMPDIR}/curl_out.txt" -w "%{http_code}" -q -s "$PRIMARY_NET/debug/pprof/block" -H "Authorization: Bearer invalid_token") - if [[ "$CODE" != "401" ]]; then - fail_and_exit "Call pprof with invalid token" "/debug/pprof" "Invalid exit code expected 401 (actual $CODE)" - fi -} - -function test_genesis_endpoint { - call_and_verify "There should be a genesis endpoint." "/genesis" 200 ' - "id": "v1", - "network": "tbd", - "proto": "future", - "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU" -}' -} - - -# Run the tests. 
-test_applications_endpoint -test_assets_endpoint -pprof_test -test_genesis_endpoint diff --git a/test/scripts/e2e_subs/serial/rest-proof-endpoint.sh b/test/scripts/e2e_subs/serial/rest-proof-endpoint.sh new file mode 100755 index 0000000000..ba87561efc --- /dev/null +++ b/test/scripts/e2e_subs/serial/rest-proof-endpoint.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# TIMEOUT=60 + +my_dir="$(dirname "$0")" +#"$my_dir/rest.sh" "$@" +source "$my_dir/../rest.sh" "$@" + +date "+$0 start %Y%m%d_%H%M%S" + +NUM_TRANSACTIONS=0 + +# Create a transaction with no siblings +while [[ "${NUM_TRANSACTIONS}" != "1" ]]; do + SEND_OUTPUT=$(${gcmd} clerk send -a 0 -f "${ACCOUNT}" -t "${ACCOUNT}") + TXID=$(echo "$SEND_OUTPUT" | head -n 1 | sed 's/.*transaction ID: \([^.]*\).*/\1/') + ROUND=$(echo "$SEND_OUTPUT" | tail -n 1 | sed 's/.*committed in round \([[:digit:]]*\).*/\1/') + + # check if the transaction was all alone in the round + call_and_verify "Checking block" "/v2/blocks/${ROUND}" 200 'txns' + #TODO: The check with jq can be re-enabled after fixing JSONStrictHandle. + #NUM_TRANSACTIONS=$(cat "${TEMPDIR}/curl_out.txt" | jq '.block.txns | length') + NUM_TRANSACTIONS=$(cat "${TEMPDIR}/curl_out.txt" | grep type | wc -l | tr -d ' ') +done + +call_and_verify "The proof should not be null." 
"/v2/blocks/${ROUND}/transactions/${TXID}/proof" 200 '"proof":""' From 9f926d87b6f9e3524d5d55571516a2ac6f697f9d Mon Sep 17 00:00:00 2001 From: Tolik Zinovyev Date: Tue, 18 May 2021 12:32:15 -0400 Subject: [PATCH 212/215] ledger: disable minimum balance check when not validating or generating (#2149) ledger: disable minimum balance check when not validating or generating --- ledger/eval.go | 82 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 49 insertions(+), 33 deletions(-) diff --git a/ledger/eval.go b/ledger/eval.go index 42208995f4..100d17df0b 100644 --- a/ledger/eval.go +++ b/ledger/eval.go @@ -768,6 +768,49 @@ func (eval *BlockEvaluator) transactionGroup(txgroup []transactions.SignedTxnWit return nil } +// Check the minimum balance requirement for the modified accounts in `cow`. +func (eval *BlockEvaluator) checkMinBalance(cow *roundCowState) error { + rewardlvl := cow.rewardsLevel() + for _, addr := range cow.modifiedAccounts() { + // Skip FeeSink, RewardsPool, and CompactCertSender MinBalance checks here. + // There's only a few accounts, so space isn't an issue, and we don't + // expect them to have low balances, but if they do, it may cause + // surprises. + if addr == eval.block.FeeSink || addr == eval.block.RewardsPool || + addr == transactions.CompactCertSender { + continue + } + + data, err := cow.lookup(addr) + if err != nil { + return err + } + + // It's always OK to have the account move to an empty state, + // because the accounts DB can delete it. Otherwise, we will + // enforce MinBalance. 
+ if data.IsZero() { + continue + } + + dataNew := data.WithUpdatedRewards(eval.proto, rewardlvl) + effectiveMinBalance := dataNew.MinBalance(&eval.proto) + if dataNew.MicroAlgos.Raw < effectiveMinBalance.Raw { + return fmt.Errorf("account %v balance %d below min %d (%d assets)", + addr, dataNew.MicroAlgos.Raw, effectiveMinBalance.Raw, len(dataNew.Assets)) + } + + // Check if we have exceeded the maximum minimum balance + if eval.proto.MaximumMinimumBalance != 0 { + if effectiveMinBalance.Raw > eval.proto.MaximumMinimumBalance { + return fmt.Errorf("account %v would use too much space after this transaction. Minimum balance requirements would be %d (greater than max %d)", addr, effectiveMinBalance.Raw, eval.proto.MaximumMinimumBalance) + } + } + } + + return nil +} + // transaction tentatively executes a new transaction as part of this block evaluation. // If the transaction cannot be added to the block without violating some constraints, // an error is returned and the block evaluator state is unchanged. @@ -837,40 +880,13 @@ func (eval *BlockEvaluator) transaction(txn transactions.SignedTxn, evalParams * // Check if any affected accounts dipped below MinBalance (unless they are // completely zero, which means the account will be deleted.) - rewardlvl := cow.rewardsLevel() - for _, addr := range cow.modifiedAccounts() { - // Skip FeeSink, RewardsPool, and CompactCertSender MinBalance checks here. - // There's only a few accounts, so space isn't an issue, and we don't - // expect them to have low balances, but if they do, it may cause - // surprises. - if addr == spec.FeeSink || addr == spec.RewardsPool || addr == transactions.CompactCertSender { - continue - } - - data, err := cow.lookup(addr) + // Only do those checks if we are validating or generating. It is useful to skip them + // if we cannot provide account data that contains enough information to + // compute the correct minimum balance (the case with indexer which does not store it). 
+ if eval.validate || eval.generate { + err := eval.checkMinBalance(cow) if err != nil { - return err - } - - // It's always OK to have the account move to an empty state, - // because the accounts DB can delete it. Otherwise, we will - // enforce MinBalance. - if data.IsZero() { - continue - } - - dataNew := data.WithUpdatedRewards(eval.proto, rewardlvl) - effectiveMinBalance := dataNew.MinBalance(&eval.proto) - if dataNew.MicroAlgos.Raw < effectiveMinBalance.Raw { - return fmt.Errorf("transaction %v: account %v balance %d below min %d (%d assets)", - txid, addr, dataNew.MicroAlgos.Raw, effectiveMinBalance.Raw, len(dataNew.Assets)) - } - - // Check if we have exceeded the maximum minimum balance - if eval.proto.MaximumMinimumBalance != 0 { - if effectiveMinBalance.Raw > eval.proto.MaximumMinimumBalance { - return fmt.Errorf("transaction %v: account %v would use too much space after this transaction. Minimum balance requirements would be %d (greater than max %d)", txid, addr, effectiveMinBalance.Raw, eval.proto.MaximumMinimumBalance) - } + return fmt.Errorf("transaction %v: %w", txid, err) } } From 1703bdcb0db504229546924af78cda896291cd08 Mon Sep 17 00:00:00 2001 From: algobarb <78746954+algobarb@users.noreply.github.com> Date: Wed, 19 May 2021 13:25:46 -0400 Subject: [PATCH 213/215] Fix updater script not restarting service or backing up binaries (#2144) A change in 2.5.4 broke the upgrade behavior. When a node is upgraded with the new scripts, then algod -v updates, but the node doesn't update properly when you check curl the /versions endpoint. 
--- cmd/updater/systemd-setup.sh | 2 +- cmd/updater/update.sh | 60 +++++++++++++----------------------- 2 files changed, 22 insertions(+), 40 deletions(-) diff --git a/cmd/updater/systemd-setup.sh b/cmd/updater/systemd-setup.sh index e0ff788623..bad8745137 100755 --- a/cmd/updater/systemd-setup.sh +++ b/cmd/updater/systemd-setup.sh @@ -17,7 +17,7 @@ setup_root() { systemctl daemon-reload } -if [ "$#" != 2 ]; then +if [ "$#" != 2 ] && [ "$#" != 3 ]; then echo "Usage: $0 username group [bindir]" exit 1 fi diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh index 23693f4a73..9bca96b873 100755 --- a/cmd/updater/update.sh +++ b/cmd/updater/update.sh @@ -367,17 +367,23 @@ function run_systemd_action() { local data_dir=$2 local process_owner + # If the service is system-level, check if it's root or sudo if check_service system "$data_dir"; then process_owner=$(awk '{ print $1 }' <(ps aux | grep "[a]lgod -d ${data_dir}")) - if $IS_ROOT || grep sudo <(groups "$process_owner" &> /dev/null); then + if $IS_ROOT; then if systemctl "$action" "algorand@$(systemd-escape "$data_dir")"; then echo "systemd system service: $action" return 0 fi + elif grep sudo <(groups "$process_owner" &> /dev/null); then + if sudo -n systemctl "$action" "algorand@$(systemd-escape "$data_dir")"; then + echo "sudo -n systemd system service: $action" + return 0 + fi fi - fi - if check_service user "$data_dir"; then + # If the service is user-level then run systemctl --user + elif check_service user "$data_dir"; then if systemctl --user "$action" "algorand@$(systemd-escape "${data_dir}")"; then echo "systemd user service: $action" return 0 @@ -387,37 +393,13 @@ function run_systemd_action() { return 1 } -function shutdown_node() { - echo Stopping node... 
- if [ "$(pgrep -x algod)" != "" ] || [ "$(pgrep -x kmd)" != "" ] ; then - if [ -f "${BINDIR}/goal" ]; then - for DD in "${DATADIRS[@]}"; do - if [ -f "${DD}/algod.pid" ] || [ -f "${DD}"/**/kmd.pid ] ; then - echo "Stopping node with data directory ${DD} and waiting..." - run_systemd_action stop "${DD}" - "${BINDIR}/goal" node stop -d "${DD}" - sleep 5 - else - echo "Node is running but not in ${DD} - not stopping" - # Clean up zombie (algod|kmd).net files - rm -f "${DD}/algod.net" "${DD}"/**/kmd.net - fi - done - fi - else - echo ... node not running - fi - - RESTART_NODE=1 -} - function backup_binaries() { echo Backing up current binary files... mkdir -p "${BINDIR}/backup" BACKUPFILES="algod kmd carpenter doberman goal update.sh updater diagcfg" # add node_exporter to the files list we're going to backup, but only we if had it previously deployed. [ -f "${BINDIR}/node_exporter" ] && BACKUPFILES="${BACKUPFILES} node_exporter" - tar -zcf "${BINDIR}/backup/bin-v${CURRENTVER}.tar.gz" -C "${BINDIR}" "${BACKUPFILES}" >/dev/null 2>&1 + tar -zcf "${BINDIR}/backup/bin-v${CURRENTVER}.tar.gz" -C "${BINDIR}" ${BACKUPFILES} >/dev/null 2>&1 } function backup_data() { @@ -427,12 +409,12 @@ function backup_data() { echo "Backing up current data files from ${CURDATADIR}..." mkdir -p "${BACKUPDIR}" BACKUPFILES="genesis.json wallet-genesis.id" - tar --no-recursion --exclude='*.log' --exclude='*.log.archive' --exclude='*.tar.gz' -zcf "${BACKUPDIR}/data-v${CURRENTVER}.tar.gz" -C "${CURDATADIR}" "${BACKUPFILES}" >/dev/null 2>&1 + tar --no-recursion --exclude='*.log' --exclude='*.log.archive' --exclude='*.tar.gz' -zcf "${BACKUPDIR}/data-v${CURRENTVER}.tar.gz" -C "${CURDATADIR}" ${BACKUPFILES} >/dev/null 2>&1 } function backup_current_version() { backup_binaries - for DD in "${DATADIRS[@]}"; do + for DD in ${DATADIRS[@]}; do backup_data "${DD}" done } @@ -562,16 +544,16 @@ function startup_node() { fi CURDATADIR=$1 - echo Starting node in ${CURDATADIR}... 
+ echo Restarting node in ${CURDATADIR}... check_install_valid if [ $? -ne 0 ]; then fail_and_exit "Installation does not appear to be valid" fi - if ! run_systemd_action start "${CURDATADIR}"; then - echo "No systemd services, starting node with goal." - ${BINDIR}/goal node start -d "${CURDATADIR}" ${HOSTEDFLAG} + if ! run_systemd_action restart "${CURDATADIR}"; then + echo "No systemd services, restarting node with goal." + ${BINDIR}/goal node restart -d "${CURDATADIR}" ${HOSTEDFLAG} fi } @@ -611,7 +593,7 @@ function apply_fixups() { # Delete obsolete algorand binary - renamed to 'goal' rm "${BINDIR}/algorand" >/dev/null 2>&1 - for DD in "${DATADIRS[@]}"; do + for DD in ${DATADIRS[@]}; do clean_legacy_logs "${DD}" # Purge obsolete cadaver files (now agreement.cdv[.archive]) @@ -679,8 +661,8 @@ else determine_current_version fi -# Shutdown node before backing up so data is consistent and files aren't locked / in-use. -shutdown_node +# Any fail_and_exit beyond this point will run a restart +RESTART_NODE=1 if ! $DRYRUN; then if [ ${SKIP_UPDATE} -eq 0 ]; then @@ -695,7 +677,7 @@ if ! $DRYRUN; then fail_and_exit "Error installing new files" fi - for DD in "${DATADIRS[@]}"; do + for DD in ${DATADIRS[@]}; do if ! install_new_data "${DD}"; then fail_and_exit "Error installing data files into ${DD}" fi @@ -703,7 +685,7 @@ if ! $DRYRUN; then copy_genesis_files - for DD in "${DATADIRS[@]}"; do + for DD in ${DATADIRS[@]}; do if ! 
check_for_new_ledger "${DD}"; then fail_and_exit "Error updating ledger in ${DD}" fi From 1e1474216421da27008726c44ebe0a5ba2fb6a08 Mon Sep 17 00:00:00 2001 From: algobarb <78746954+algobarb@users.noreply.github.com> Date: Wed, 19 May 2021 18:00:19 -0400 Subject: [PATCH 214/215] fix the systemctl conditional so it restarts with systemd (#2165) --- cmd/updater/update.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh index 9bca96b873..136ce67d51 100755 --- a/cmd/updater/update.sh +++ b/cmd/updater/update.sh @@ -375,7 +375,7 @@ function run_systemd_action() { echo "systemd system service: $action" return 0 fi - elif grep sudo <(groups "$process_owner" &> /dev/null); then + elif grep sudo <(groups "$process_owner") &> /dev/null; then if sudo -n systemctl "$action" "algorand@$(systemd-escape "$data_dir")"; then echo "sudo -n systemd system service: $action" return 0 From 63a895fb6688413066f0cc92e2439a98365f8b34 Mon Sep 17 00:00:00 2001 From: DevOps Service Date: Wed, 19 May 2021 22:07:10 +0000 Subject: [PATCH 215/215] Update the Version, BuildNumber, genesistimestamp.data --- buildnumber.dat | 1 + config/version.go | 2 +- genesistimestamp.dat | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 buildnumber.dat create mode 100644 genesistimestamp.dat diff --git a/buildnumber.dat b/buildnumber.dat new file mode 100644 index 0000000000..573541ac97 --- /dev/null +++ b/buildnumber.dat @@ -0,0 +1 @@ +0 diff --git a/config/version.go b/config/version.go index 57209d55a2..85478a9281 100644 --- a/config/version.go +++ b/config/version.go @@ -33,7 +33,7 @@ const VersionMajor = 2 // VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced. // Not enforced until after initial public release (x > 0). -const VersionMinor = 5 +const VersionMinor = 6 // Version is the type holding our full version information. 
type Version struct { diff --git a/genesistimestamp.dat b/genesistimestamp.dat new file mode 100644 index 0000000000..c72c6a7795 --- /dev/null +++ b/genesistimestamp.dat @@ -0,0 +1 @@ +1558657885