From 3a92a1fafb50b00445aab5a04f5f23b501111db4 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Sun, 2 May 2021 12:48:26 +0300 Subject: [PATCH] Delete blockchain/v2 until it stabilizes --- blockchain/doc.go | 1 + blockchain/v2/io.go | 141 -- blockchain/v2/metrics.go | 125 -- blockchain/v2/processor.go | 192 --- blockchain/v2/processor_context.go | 100 -- blockchain/v2/processor_test.go | 306 ---- blockchain/v2/reactor.go | 579 ------- blockchain/v2/reactor_test.go | 571 ------- blockchain/v2/routine.go | 166 -- blockchain/v2/routine_test.go | 163 -- blockchain/v2/scheduler.go | 712 --------- blockchain/v2/scheduler_test.go | 2254 ---------------------------- blockchain/v2/types.go | 65 - config/config.go | 6 +- config/config_test.go | 2 +- config/toml.go | 1 - node/node.go | 9 +- test/e2e/pkg/manifest.go | 2 +- test/e2e/pkg/testnet.go | 2 +- 19 files changed, 11 insertions(+), 5386 deletions(-) delete mode 100644 blockchain/v2/io.go delete mode 100644 blockchain/v2/metrics.go delete mode 100644 blockchain/v2/processor.go delete mode 100644 blockchain/v2/processor_context.go delete mode 100644 blockchain/v2/processor_test.go delete mode 100644 blockchain/v2/reactor.go delete mode 100644 blockchain/v2/reactor_test.go delete mode 100644 blockchain/v2/routine.go delete mode 100644 blockchain/v2/routine_test.go delete mode 100644 blockchain/v2/scheduler.go delete mode 100644 blockchain/v2/scheduler_test.go delete mode 100644 blockchain/v2/types.go diff --git a/blockchain/doc.go b/blockchain/doc.go index b5d9858d0a..7eac046957 100644 --- a/blockchain/doc.go +++ b/blockchain/doc.go @@ -4,6 +4,7 @@ Package blockchain provides two implementations of the fast-sync protocol. - v0 was the very first implementation. it's battle tested, but does not have a lot of test coverage. - v2 is the newest implementation, with a focus on testability and readability. +Note: we deleted v2 in LL until it stabilizes / gets simplified. Check out ADR-40 for the formal model and requirements. 
diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go deleted file mode 100644 index 2251ca0a3d..0000000000 --- a/blockchain/v2/io.go +++ /dev/null @@ -1,141 +0,0 @@ -package v2 - -import ( - "errors" - - bc "github.com/lazyledger/lazyledger-core/blockchain" - "github.com/lazyledger/lazyledger-core/p2p" - bcproto "github.com/lazyledger/lazyledger-core/proto/tendermint/blockchain" - "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/types" -) - -var ( - errPeerQueueFull = errors.New("peer queue full") -) - -type iIO interface { - sendBlockRequest(peer p2p.Peer, height int64) error - sendBlockToPeer(block *types.Block, peer p2p.Peer) error - sendBlockNotFound(height int64, peer p2p.Peer) error - sendStatusResponse(base, height int64, peer p2p.Peer) error - - sendStatusRequest(peer p2p.Peer) error - broadcastStatusRequest() error - - trySwitchToConsensus(state state.State, skipWAL bool) bool -} - -type switchIO struct { - sw *p2p.Switch -} - -func newSwitchIo(sw *p2p.Switch) *switchIO { - return &switchIO{ - sw: sw, - } -} - -const ( - // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) - BlockchainChannel = byte(0x40) -) - -type consensusReactor interface { - // for when we switch from blockchain reactor and fast sync to - // the consensus machine - SwitchToConsensus(state state.State, skipWAL bool) -} - -func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error { - msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height}) - if err != nil { - return err - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { - return errPeerQueueFull - } - return nil -} - -func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base}) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { - if block == nil { - panic("trying to send nil block") - } - - bpb, err := block.ToProto() - if err != nil { - return err - } - - msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb}) - if err != nil { - return err - } - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error { - msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height}) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool { - conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) - if ok { - conR.SwitchToConsensus(state, skipWAL) - } - return ok -} - -func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) broadcastStatusRequest() error { - msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{}) - if err != nil { - return err - } - - // XXX: maybe we should use an io specific peer list here - sio.sw.Broadcast(BlockchainChannel, msgBytes) - - return nil -} diff 
--git a/blockchain/v2/metrics.go b/blockchain/v2/metrics.go deleted file mode 100644 index c68ec64476..0000000000 --- a/blockchain/v2/metrics.go +++ /dev/null @@ -1,125 +0,0 @@ -package v2 - -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -const ( - // MetricsSubsystem is a subsystem shared by all metrics exposed by this - // package. - MetricsSubsystem = "blockchain" -) - -// Metrics contains metrics exposed by this package. -type Metrics struct { - // events_in - EventsIn metrics.Counter - // events_in - EventsHandled metrics.Counter - // events_out - EventsOut metrics.Counter - // errors_in - ErrorsIn metrics.Counter - // errors_handled - ErrorsHandled metrics.Counter - // errors_out - ErrorsOut metrics.Counter - // events_shed - EventsShed metrics.Counter - // events_sent - EventsSent metrics.Counter - // errors_sent - ErrorsSent metrics.Counter - // errors_shed - ErrorsShed metrics.Counter -} - -// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines. -// Can we burn in the routine name here? -func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_in", - Help: "Events read from the channel.", - }, labels).With(labelsAndValues...), - EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_handled", - Help: "Events handled", - }, labels).With(labelsAndValues...), - EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_out", - Help: "Events output from routine.", - }, labels).With(labelsAndValues...), - ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_in", - Help: "Errors read from the channel.", - }, labels).With(labelsAndValues...), - ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_handled", - Help: "Errors handled.", - }, labels).With(labelsAndValues...), - ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_out", - Help: "Errors output from routine.", - }, labels).With(labelsAndValues...), - ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_sent", - Help: "Errors sent to routine.", - }, labels).With(labelsAndValues...), - ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_shed", - Help: "Errors dropped from sending.", - }, labels).With(labelsAndValues...), - EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_sent", - Help: "Events sent to routine.", - }, labels).With(labelsAndValues...), - EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: 
"events_shed", - Help: "Events dropped from sending.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - EventsIn: discard.NewCounter(), - EventsHandled: discard.NewCounter(), - EventsOut: discard.NewCounter(), - ErrorsIn: discard.NewCounter(), - ErrorsHandled: discard.NewCounter(), - ErrorsOut: discard.NewCounter(), - EventsShed: discard.NewCounter(), - EventsSent: discard.NewCounter(), - ErrorsSent: discard.NewCounter(), - ErrorsShed: discard.NewCounter(), - } -} diff --git a/blockchain/v2/processor.go b/blockchain/v2/processor.go deleted file mode 100644 index 0e45c70dbd..0000000000 --- a/blockchain/v2/processor.go +++ /dev/null @@ -1,192 +0,0 @@ -package v2 - -import ( - "fmt" - - "github.com/lazyledger/lazyledger-core/p2p" - tmState "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/types" -) - -// Events generated by the processor: -// block execution failure, event will indicate the peer(s) that caused the error -type pcBlockVerificationFailure struct { - priorityNormal - height int64 - firstPeerID p2p.ID - secondPeerID p2p.ID -} - -func (e pcBlockVerificationFailure) String() string { - return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", - e.height, e.firstPeerID, e.secondPeerID) -} - -// successful block execution -type pcBlockProcessed struct { - priorityNormal - height int64 - peerID p2p.ID -} - -func (e pcBlockProcessed) String() string { - return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) -} - -// processor has finished -type pcFinished struct { - priorityNormal - blocksSynced int - tmState tmState.State -} - -func (p pcFinished) Error() string { - return "finished" -} - -type queueItem struct { - block *types.Block - peerID p2p.ID -} - -type blockQueue map[int64]queueItem - -type pcState struct { - // blocks waiting to be processed - queue blockQueue - - // draining indicates that the next rProcessBlock event with a queue miss constitutes completion - draining bool - - // the number of blocks successfully synced by the processor - blocksSynced int - - // the processorContext which contains the processor dependencies - context processorContext -} - -func (state *pcState) String() string { - return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d", - state.height(), len(state.queue), state.draining, state.blocksSynced) -} - -// newPcState returns a pcState initialized with the last verified block enqueued -func newPcState(context processorContext) *pcState { - return &pcState{ - queue: blockQueue{}, - draining: false, - blocksSynced: 0, - context: context, - } -} - -// nextTwo returns the next two unverified blocks -func (state *pcState) nextTwo() (queueItem, queueItem, error) { - if first, ok := state.queue[state.height()+1]; ok { - if second, ok := state.queue[state.height()+2]; ok { - return first, second, nil - } - } - return queueItem{}, queueItem{}, fmt.Errorf("not found") -} - -// synced returns true when at most the last verified block remains in the queue -func (state *pcState) synced() bool { - return len(state.queue) <= 1 -} - -func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) { - if item, ok := state.queue[height]; ok { - panic(fmt.Sprintf( - "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", - height, block.Hash(), peerID, item.block.Hash(), item.peerID)) - } - - state.queue[height] = 
queueItem{block: block, peerID: peerID} -} - -func (state *pcState) height() int64 { - return state.context.tmState().LastBlockHeight -} - -// purgePeer moves all unprocessed blocks from the queue -func (state *pcState) purgePeer(peerID p2p.ID) { - // what if height is less than state.height? - for height, item := range state.queue { - if item.peerID == peerID { - delete(state.queue, height) - } - } -} - -// handle processes FSM events -func (state *pcState) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - state.context.setState(event.state) - return noOp, nil - - case scFinishedEv: - if state.synced() { - return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil - } - state.draining = true - return noOp, nil - - case scPeerError: - state.purgePeer(event.peerID) - return noOp, nil - - case scBlockReceived: - if event.block == nil { - return noOp, nil - } - - // enqueue block if height is higher than state height, else ignore it - if event.block.Height > state.height() { - state.enqueue(event.peerID, event.block, event.block.Height) - } - return noOp, nil - - case rProcessBlock: - tmState := state.context.tmState() - firstItem, secondItem, err := state.nextTwo() - if err != nil { - if state.draining { - return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil - } - return noOp, nil - } - - var ( - first, second = firstItem.block, secondItem.block - firstParts = first.MakePartSet(types.BlockPartSizeBytes) - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} - ) - - // verify if +second+ last commit "confirms" +first+ block - err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit) - if err != nil { - state.purgePeer(firstItem.peerID) - if firstItem.peerID != secondItem.peerID { - state.purgePeer(secondItem.peerID) - } - return pcBlockVerificationFailure{ - height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, - nil - } - - state.context.saveBlock(first, firstParts, second.LastCommit) - - if err := state.context.applyBlock(firstID, first); err != nil { - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } - - delete(state.queue, first.Height) - state.blocksSynced++ - - return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } - - return noOp, nil -} diff --git a/blockchain/v2/processor_context.go b/blockchain/v2/processor_context.go deleted file mode 100644 index 6ddef455ec..0000000000 --- a/blockchain/v2/processor_context.go +++ /dev/null @@ -1,100 +0,0 @@ -package v2 - -import ( - "fmt" - - "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/types" -) - -type processorContext interface { - applyBlock(blockID types.BlockID, block *types.Block) error - verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error - saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) - tmState() state.State - setState(state.State) -} - -type pContext struct { - store blockStore - applier blockApplier - state state.State -} - -func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext { - return &pContext{ - store: st, - applier: ex, - state: s, - } -} - -func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block) - pc.state = 
newState - return err -} - -func (pc pContext) tmState() state.State { - return pc.state -} - -func (pc *pContext) setState(state state.State) { - pc.state = state -} - -func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit) -} - -func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - pc.store.SaveBlock(block, blockParts, seenCommit) -} - -type mockPContext struct { - applicationBL []int64 - verificationBL []int64 - state state.State -} - -func newMockProcessorContext( - state state.State, - verificationBlackList []int64, - applicationBlackList []int64) *mockPContext { - return &mockPContext{ - applicationBL: applicationBlackList, - verificationBL: verificationBlackList, - state: state, - } -} - -func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error { - for _, h := range mpc.applicationBL { - if h == block.Height { - return fmt.Errorf("generic application error") - } - } - mpc.state.LastBlockHeight = block.Height - return nil -} - -func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - for _, h := range mpc.verificationBL { - if h == height { - return fmt.Errorf("generic verification error") - } - } - return nil -} - -func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - -} - -func (mpc *mockPContext) setState(state state.State) { - mpc.state = state -} - -func (mpc *mockPContext) tmState() state.State { - return mpc.state -} diff --git a/blockchain/v2/processor_test.go b/blockchain/v2/processor_test.go deleted file mode 100644 index c7687cbccd..0000000000 --- a/blockchain/v2/processor_test.go +++ /dev/null @@ -1,306 +0,0 @@ -package v2 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/lazyledger/lazyledger-core/p2p" - tmState "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/types" -) - -// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability. -type pcBlock struct { - pid string - height int64 -} - -// params is a test structure used to create processor state. -type params struct { - height int64 - items []pcBlock - blocksSynced int - verBL []int64 - appBL []int64 - draining bool -} - -// makePcBlock makes an empty block. -func makePcBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// makeState takes test parameters and creates a specific processor state. 
-func makeState(p *params) *pcState { - var ( - tmState = tmState.State{LastBlockHeight: p.height} - context = newMockProcessorContext(tmState, p.verBL, p.appBL) - ) - state := newPcState(context) - - for _, item := range p.items { - state.enqueue(p2p.ID(item.pid), makePcBlock(item.height), item.height) - } - - state.blocksSynced = p.blocksSynced - state.draining = p.draining - return state -} - -func mBlockResponse(peerID p2p.ID, height int64) scBlockReceived { - return scBlockReceived{ - peerID: peerID, - block: makePcBlock(height), - } -} - -type pcFsmMakeStateValues struct { - currentState *params - event Event - wantState *params - wantNextEvent Event - wantErr error - wantPanic bool -} - -type testFields struct { - name string - steps []pcFsmMakeStateValues -} - -func executeProcessorTests(t *testing.T, tests []testFields) { - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var state *pcState - for _, step := range tt.steps { - defer func() { - r := recover() - if (r != nil) != step.wantPanic { - t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic) - } - }() - - // First step must always initialise the currentState as state. - if step.currentState != nil { - state = makeState(step.currentState) - } - if state == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := state.handle(step.event) - t.Log(state) - assert.Equal(t, step.wantErr, err) - assert.Equal(t, makeState(step.wantState), state) - assert.Equal(t, step.wantNextEvent, nextEvent) - // Next step may use the wantedState as their currentState. - state = makeState(step.wantState) - } - }) - } -} - -func TestRProcessPeerError(t *testing.T) { - tests := []testFields{ - { - name: "error for existing peer", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P2"}, - wantState: &params{items: []pcBlock{{"P1", 1}}}, - wantNextEvent: noOp, - }, - }, - }, - { - name: "error for unknown peer", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P3"}, - wantState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestPcBlockResponse(t *testing.T) { - tests := []testFields{ - { - name: "add one block", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{}, event: mBlockResponse("P1", 1), - wantState: &params{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp, - }, - }, - }, - - { - name: "add two blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{}, event: mBlockResponse("P1", 3), - wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp, - }, - { // use previous wantState as currentState, - event: mBlockResponse("P1", 4), - wantState: &params{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockSuccess(t *testing.T) { - tests := []testFields{ - { - name: "noop - no blocks over current height", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{}, event: rProcessBlock{}, - wantState: &params{}, wantNextEvent: noOp, - }, - }, - }, - { - name: "noop - high new blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{}, - wantState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp, - }, - }, - }, - { - name: "blocks H+1 and H+2 present", - steps:
[]pcFsmMakeStateValues{ - { - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{}, - wantState: &params{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present after draining", - steps: []pcFsmMakeStateValues{ - { // some contiguous blocks - on stop check draining is set - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}}, - event: scFinishedEv{}, - wantState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true}, - wantNextEvent: noOp, - }, - { - event: rProcessBlock{}, - wantState: &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - { // finish when H+1 or/and H+2 are missing - event: rProcessBlock{}, - wantState: &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1}, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockFailures(t *testing.T) { - tests := []testFields{ - { - name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{}, - wantState: &params{items: []pcBlock{}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{}, - wantState: &params{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}}, - verBL: []int64{1}}, event: rProcessBlock{}, - wantState: &params{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}}, - event: rProcessBlock{}, - wantState: &params{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestScFinishedEv(t *testing.T) { - tests := []testFields{ - { - name: "no blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: &params{height: 100, items: []pcBlock{}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - name: "maxHeight+1 block present", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{height: 100, items: []pcBlock{ - {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: &params{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - name: "more blocks
present", - steps: []pcFsmMakeStateValues{ - { - currentState: &params{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: &params{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true}, - wantNextEvent: noOp, - wantErr: nil, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go deleted file mode 100644 index 9b5115d419..0000000000 --- a/blockchain/v2/reactor.go +++ /dev/null @@ -1,579 +0,0 @@ -package v2 - -import ( - "errors" - "fmt" - "time" - - "github.com/lazyledger/lazyledger-core/behaviour" - bc "github.com/lazyledger/lazyledger-core/blockchain" - "github.com/lazyledger/lazyledger-core/libs/log" - tmsync "github.com/lazyledger/lazyledger-core/libs/sync" - "github.com/lazyledger/lazyledger-core/p2p" - bcproto "github.com/lazyledger/lazyledger-core/proto/tendermint/blockchain" - "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/types" -) - -const ( - // chBufferSize is the buffer size of all event channels. - chBufferSize int = 1000 -) - -type blockStore interface { - LoadBlock(height int64) *types.Block - SaveBlock(*types.Block, *types.PartSet, *types.Commit) - Base() int64 - Height() int64 -} - -// BlockchainReactor handles fast sync protocol. -type BlockchainReactor struct { - p2p.BaseReactor - - fastSync bool // if true, enable fast sync on start - stateSynced bool // set to true when SwitchToFastSync is called by state sync - scheduler *Routine - processor *Routine - logger log.Logger - - mtx tmsync.RWMutex - maxPeerHeight int64 - syncHeight int64 - events chan Event // non-nil during a fast sync - - reporter behaviour.Reporter - io iIO - store blockStore -} - -type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error) -} - -// XXX: unify naming in this package around tmState -func newReactor(state state.State, store blockStore, reporter behaviour.Reporter, - blockApplier blockApplier, fastSync bool) *BlockchainReactor { - initHeight := state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = state.InitialHeight - } - scheduler := newScheduler(initHeight, time.Now()) - pContext := newProcessorContext(store, blockApplier, state) - // TODO: Fix naming to just newProcesssor - // newPcState requires a processorContext - processor := newPcState(pContext) - - return &BlockchainReactor{ - scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize), - processor: newRoutine("processor", processor.handle, chBufferSize), - store: store, - reporter: reporter, - logger: log.NewNopLogger(), - fastSync: fastSync, - } -} - -// NewBlockchainReactor creates a new reactor instance. -func NewBlockchainReactor( - state state.State, - blockApplier blockApplier, - store blockStore, - fastSync bool) *BlockchainReactor { - reporter := behaviour.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, fastSync) -} - -// SetSwitch implements Reactor interface.
-func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) { - r.Switch = sw - if sw != nil { - r.io = newSwitchIo(sw) - } else { - r.io = nil - } -} - -func (r *BlockchainReactor) setMaxPeerHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - if height > r.maxPeerHeight { - r.maxPeerHeight = height - } -} - -func (r *BlockchainReactor) setSyncHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.syncHeight = height -} - -// SyncHeight returns the height to which the BlockchainReactor has synced. -func (r *BlockchainReactor) SyncHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.syncHeight -} - -// SetLogger sets the logger of the reactor. -func (r *BlockchainReactor) SetLogger(logger log.Logger) { - r.logger = logger - r.scheduler.setLogger(logger) - r.processor.setLogger(logger) -} - -// Start implements cmn.Service interface -func (r *BlockchainReactor) Start() error { - r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch) - if r.fastSync { - err := r.startSync(nil) - if err != nil { - return fmt.Errorf("failed to start fast sync: %w", err) - } - } - return nil -} - -// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil, -// the scheduler and processor is updated with this state on startup. -func (r *BlockchainReactor) startSync(state *state.State) error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - return errors.New("fast sync already in progress") - } - r.events = make(chan Event, chBufferSize) - go r.scheduler.start() - go r.processor.start() - if state != nil { - <-r.scheduler.ready() - <-r.processor.ready() - r.scheduler.send(bcResetState{state: *state}) - r.processor.send(bcResetState{state: *state}) - } - go r.demux(r.events) - return nil -} - -// endSync ends a fast sync -func (r *BlockchainReactor) endSync() { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - close(r.events) - } - r.events = nil - r.scheduler.stop() - r.processor.stop() -} - -// SwitchToFastSync is called by the state sync reactor when switching to fast sync. 
-func (r *BlockchainReactor) SwitchToFastSync(state state.State) error { - r.stateSynced = true - state = state.Copy() - return r.startSync(&state) -} - -// reactor generated ticker events: -// ticker for cleaning peers -type rTryPrunePeer struct { - priorityHigh - time time.Time -} - -func (e rTryPrunePeer) String() string { - return fmt.Sprintf("rTryPrunePeer{%v}", e.time) -} - -// ticker event for scheduling block requests -type rTrySchedule struct { - priorityHigh - time time.Time -} - -func (e rTrySchedule) String() string { - return fmt.Sprintf("rTrySchedule{%v}", e.time) -} - -// ticker for block processing -type rProcessBlock struct { - priorityNormal -} - -func (e rProcessBlock) String() string { - return "rProcessBlock" -} - -// reactor generated events based on blockchain related messages from peers: -// blockResponse message received from a peer -type bcBlockResponse struct { - priorityNormal - time time.Time - peerID p2p.ID - size int64 - block *types.Block -} - -func (resp bcBlockResponse) String() string { - return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", - resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) -} - -// blockNoResponse message received from a peer -type bcNoBlockResponse struct { - priorityNormal - time time.Time - peerID p2p.ID - height int64 -} - -func (resp bcNoBlockResponse) String() string { - return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", - resp.peerID, resp.height, resp.time) -} - -// statusResponse message received from a peer -type bcStatusResponse struct { - priorityNormal - time time.Time - peerID p2p.ID - base int64 - height int64 -} - -func (resp bcStatusResponse) String() string { - return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", - resp.peerID, resp.height, resp.base, resp.time) -} - -// new peer is connected -type bcAddNewPeer struct { - priorityNormal - peerID p2p.ID -} - -func (resp bcAddNewPeer) String() string { - return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) -} - -// existing peer is removed -type bcRemovePeer struct { - priorityHigh - peerID p2p.ID - reason interface{} -} - -func (resp bcRemovePeer) String() string { - return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) -} - -// resets the scheduler and processor state, e.g. following a switch from state syncing -type bcResetState struct { - priorityHigh - state state.State -} - -func (e bcResetState) String() string { - return fmt.Sprintf("bcResetState{%v}", e.state) -} - -// Takes the channel as a parameter to avoid race conditions on r.events. 
-func (r *BlockchainReactor) demux(events <-chan Event) { - var lastRate = 0.0 - var lastHundred = time.Now() - - var ( - processBlockFreq = 20 * time.Millisecond - doProcessBlockCh = make(chan struct{}, 1) - doProcessBlockTk = time.NewTicker(processBlockFreq) - ) - defer doProcessBlockTk.Stop() - - var ( - prunePeerFreq = 1 * time.Second - doPrunePeerCh = make(chan struct{}, 1) - doPrunePeerTk = time.NewTicker(prunePeerFreq) - ) - defer doPrunePeerTk.Stop() - - var ( - scheduleFreq = 20 * time.Millisecond - doScheduleCh = make(chan struct{}, 1) - doScheduleTk = time.NewTicker(scheduleFreq) - ) - defer doScheduleTk.Stop() - - var ( - statusFreq = 10 * time.Second - doStatusCh = make(chan struct{}, 1) - doStatusTk = time.NewTicker(statusFreq) - ) - defer doStatusTk.Stop() - doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers - - // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq. - var scSchedulerFailErr error - - // XXX: Extract timers to make testing atemporal - for { - select { - // Pacers: send at most per frequency but don't saturate - case <-doProcessBlockTk.C: - select { - case doProcessBlockCh <- struct{}{}: - default: - } - case <-doPrunePeerTk.C: - select { - case doPrunePeerCh <- struct{}{}: - default: - } - case <-doScheduleTk.C: - select { - case doScheduleCh <- struct{}{}: - default: - } - case <-doStatusTk.C: - select { - case doStatusCh <- struct{}{}: - default: - } - - // Tickers: perform tasks periodically - case <-doScheduleCh: - r.scheduler.send(rTrySchedule{time: time.Now()}) - case <-doPrunePeerCh: - r.scheduler.send(rTryPrunePeer{time: time.Now()}) - case <-doProcessBlockCh: - r.processor.send(rProcessBlock{}) - case <-doStatusCh: - if err := r.io.broadcastStatusRequest(); err != nil { - r.logger.Error("Error broadcasting status request", "err", err) - } - - // Events from peers. Closing the channel signals event loop termination. - case event, ok := <-events: - if !ok { - r.logger.Info("Stopping event processing") - return - } - switch event := event.(type) { - case bcStatusResponse: - r.setMaxPeerHeight(event.height) - r.scheduler.send(event) - case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: - r.scheduler.send(event) - default: - r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from scheduler - case event := <-r.scheduler.next(): - switch event := event.(type) { - case scBlockReceived: - r.processor.send(event) - case scPeerError: - r.processor.send(event) - if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil { - r.logger.Error("Error reporting peer", "err", err) - } - case scBlockRequest: - peer := r.Switch.Peers().Get(event.peerID) - if peer == nil { - r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID) - continue - } - if err := r.io.sendBlockRequest(peer, event.height); err != nil { - r.logger.Error("Error sending block request", "err", err) - } - case scFinishedEv: - r.processor.send(event) - r.scheduler.stop() - case scSchedulerFail: - if scSchedulerFailErr != event.reason { - r.logger.Error("Scheduler failure", "err", event.reason.Error()) - scSchedulerFailErr = event.reason - } - case scPeersPruned: - // Remove peers from the processor. 
- for _, peerID := range event.peers { - r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) - } - r.logger.Debug("Pruned peers", "count", len(event.peers)) - case noOpEvent: - default: - r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from processor - case event := <-r.processor.next(): - switch event := event.(type) { - case pcBlockProcessed: - r.setSyncHeight(event.height) - if r.syncHeight%100 == 0 { - lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - r.logger.Info("Fast Sync Rate", "height", r.syncHeight, - "max_peer_height", r.maxPeerHeight, "blocks/s", lastRate) - lastHundred = time.Now() - } - r.scheduler.send(event) - case pcBlockVerificationFailure: - r.scheduler.send(event) - case pcFinished: - r.logger.Info("Fast sync complete, switching to consensus") - if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { - r.logger.Error("Failed to switch to consensus reactor") - } - r.endSync() - return - case noOpEvent: - default: - r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event)) - } - - // Terminal event from scheduler - case err := <-r.scheduler.final(): - switch err { - case nil: - r.logger.Info("Scheduler stopped") - default: - r.logger.Error("Scheduler aborted with error", "err", err) - } - - // Terminal event from processor - case err := <-r.processor.final(): - switch err { - case nil: - r.logger.Info("Processor stopped") - default: - r.logger.Error("Processor aborted with error", "err", err) - } - } - } -} - -// Stop implements cmn.Service interface. -func (r *BlockchainReactor) Stop() error { - r.logger.Info("reactor stopping") - r.endSync() - r.logger.Info("reactor stopped") - return nil -} - -// Receive implements Reactor by handling different message types. -// XXX: do not call any methods that can block or incur heavy processing. 
-// https://github.com/tendermint/tendermint/issues/2888 -func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - logger := r.logger.With("src", src.ID(), "chID", chID) - - msg, err := bc.DecodeMsg(msgBytes) - if err != nil { - logger.Error("error decoding message", "err", err) - _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) - return - } - - if err = bc.ValidateMsg(msg); err != nil { - logger.Error("peer sent us invalid msg", "msg", msg, "err", err) - _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) - return - } - - r.logger.Debug("Receive", "msg", msg) - - switch msg := msg.(type) { - case *bcproto.StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil { - logger.Error("Could not send status message to src peer") - } - - case *bcproto.BlockRequest: - block := r.store.LoadBlock(msg.Height) - if block != nil { - if err = r.io.sendBlockToPeer(block, src); err != nil { - logger.Error("Could not send block message to src peer", "err", err) - } - } else { - logger.Info("peer asking for a block we don't have", "height", msg.Height) - if err = r.io.sendBlockNotFound(msg.Height, src); err != nil { - logger.Error("Couldn't send block not found msg", "err", err) - } - } - - case *bcproto.StatusResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height} - } - r.mtx.RUnlock() - - case *bcproto.BlockResponse: - r.mtx.RLock() - bi, err := types.BlockFromProto(msg.Block) - if err != nil { - logger.Error("error transitioning block from protobuf", "err", err) - _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error())) - return - } - if r.events != nil { - r.events <- bcBlockResponse{ - peerID: src.ID(), - block: bi, - size: int64(len(msgBytes)), - time: time.Now(), - } - } - r.mtx.RUnlock() - - case *bcproto.NoBlockResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()} - } - r.mtx.RUnlock() - } -} - -// AddPeer implements Reactor interface -func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer) - if err != nil { - r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err) - } - - err = r.io.sendStatusRequest(peer) - if err != nil { - r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcAddNewPeer{peerID: peer.ID()} - } -} - -// RemovePeer implements Reactor interface. 
-func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcRemovePeer{ - peerID: peer.ID(), - reason: reason, - } - } -} - -// GetChannels implements Reactor -func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlockchainChannel, - Priority: 10, - SendQueueCapacity: 2000, - RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: bc.MaxMsgSize, - }, - } -} diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go deleted file mode 100644 index 187b38b9d4..0000000000 --- a/blockchain/v2/reactor_test.go +++ /dev/null @@ -1,571 +0,0 @@ -package v2 - -import ( - "crypto/sha256" - "fmt" - "net" - "os" - "sort" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/lazyledger/lazyledger-core/abci/types" - "github.com/lazyledger/lazyledger-core/behaviour" - bc "github.com/lazyledger/lazyledger-core/blockchain" - cfg "github.com/lazyledger/lazyledger-core/config" - "github.com/lazyledger/lazyledger-core/libs/db/memdb" - "github.com/lazyledger/lazyledger-core/libs/log" - "github.com/lazyledger/lazyledger-core/libs/service" - "github.com/lazyledger/lazyledger-core/mempool/mock" - "github.com/lazyledger/lazyledger-core/p2p" - "github.com/lazyledger/lazyledger-core/p2p/conn" - bcproto "github.com/lazyledger/lazyledger-core/proto/tendermint/blockchain" - "github.com/lazyledger/lazyledger-core/proxy" - sm "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/store" - "github.com/lazyledger/lazyledger-core/types" - tmtime "github.com/lazyledger/lazyledger-core/types/time" -) - -type mockPeer struct { - service.Service - id p2p.ID -} - -func (mp mockPeer) FlushStop() {} -func (mp mockPeer) ID() p2p.ID { return mp.id } -func (mp mockPeer) RemoteIP() net.IP { return net.IP{} } -func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} } - -func (mp mockPeer) IsOutbound() bool { return true } -func (mp mockPeer) IsPersistent() bool { return true } -func (mp mockPeer) CloseConn() error { return nil } - -func (mp mockPeer) NodeInfo() p2p.NodeInfo { - return p2p.DefaultNodeInfo{ - DefaultNodeID: "", - ListenAddr: "", - } -} -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} } - -func (mp mockPeer) Send(byte, []byte) bool { return true } -func (mp mockPeer) TrySend(byte, []byte) bool { return true } - -func (mp mockPeer) Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return struct{}{} } - -//nolint:unused -type mockBlockStore struct { - blocks map[int64]*types.Block -} - -//nolint:unused -func (ml *mockBlockStore) Height() int64 { - return int64(len(ml.blocks)) -} - -//nolint:unused -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { - return ml.blocks[height] -} - -//nolint:unused -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { - ml.blocks[block.Height] = block -} - -type mockBlockApplier struct { -} - -// XXX: Add whitelist/blacklist? 
-func (mba *mockBlockApplier) ApplyBlock( - state sm.State, blockID types.BlockID, block *types.Block, -) (sm.State, int64, error) { - state.LastBlockHeight++ - return state, 0, nil -} - -type mockSwitchIo struct { - mtx sync.Mutex - switchedToConsensus bool - numStatusResponse int - numBlockResponse int - numNoBlockResponse int - numStatusRequest int -} - -var _ iIO = (*mockSwitchIo)(nil) - -func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error { - return nil -} - -func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numNoBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.switchedToConsensus = true - return true -} - -func (sio *mockSwitchIo) broadcastStatusRequest() error { - return nil -} - -func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusRequest++ - return nil -} - -type testReactorParams struct { - logger log.Logger - genDoc *types.GenesisDoc - privVals []types.PrivValidator - startHeight int64 - mockA bool -} - -func newTestReactor(p testReactorParams) *BlockchainReactor { - store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight) - reporter := behaviour.NewMockReporter() - - var appl blockApplier - - if p.mockA { - appl = &mockBlockApplier{} - } else { - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - db := memdb.NewDB() - stateStore := sm.NewStore(db) - appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}) - if err = stateStore.Save(state); err != nil { - panic(err) - } - } - - r := newReactor(state, store, reporter, appl, true) - logger := log.TestingLogger() - r.SetLogger(logger.With("module", "blockchain")) - - return r -} - -// This test is left here and not deleted to retain the termination cases for -// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482). 
-// func TestReactorTerminationScenarios(t *testing.T) { - -// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") -// defer os.RemoveAll(config.RootDir) -// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) -// refStore, _, _ := newReactorStore(genDoc, privVals, 20) - -// params := testReactorParams{ -// logger: log.TestingLogger(), -// genDoc: genDoc, -// privVals: privVals, -// startHeight: 10, -// bufferSize: 100, -// mockA: true, -// } - -// type testEvent struct { -// evType string -// peer string -// height int64 -// } - -// tests := []struct { -// name string -// params testReactorParams -// msgs []testEvent -// }{ -// { -// name: "simple termination on max peer height - one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 12}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "simple termination on max peer height - two peers", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, noBlock error", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveNB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, remove one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "RemovePeer", peer: "P1"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: 
"BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// } - -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// reactor := newTestReactor(params) -// reactor.Start() -// reactor.reporter = behaviour.NewMockReporter() -// mockSwitch := &mockSwitchIo{switchedToConsensus: false} -// reactor.io = mockSwitch -// // time for go routines to start -// time.Sleep(time.Millisecond) - -// for _, step := range tt.msgs { -// switch step.evType { -// case "AddPeer": -// reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) -// case "RemovePeer": -// reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) -// case "ReceiveS": -// reactor.scheduler.send(bcStatusResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "ReceiveB": -// reactor.scheduler.send(bcBlockResponse{ -// peerID: p2p.ID(step.peer), -// block: refStore.LoadBlock(step.height), -// size: 10, -// time: time.Now(), -// }) -// case "ReceiveNB": -// reactor.scheduler.send(bcNoBlockResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "BlockReq": -// reactor.scheduler.send(rTrySchedule{time: time.Now()}) -// case "Process": -// reactor.processor.send(rProcessBlock{}) -// } -// // give time for messages to propagate between routines -// time.Sleep(time.Millisecond) -// } - -// // time for processor to finish and reactor to switch to consensus -// time.Sleep(20 * time.Millisecond) -// assert.True(t, mockSwitch.hasSwitchedToConsensus()) -// reactor.Stop() -// }) -// } -// } - -func TestReactorHelperMode(t *testing.T) { - var ( - channelID = byte(0x40) - ) - - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) - - params := testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - startHeight: 20, - mockA: true, - } - - type testEvent struct { - peer string - event interface{} - } - - tests := []struct { - name string - params testReactorParams - msgs []testEvent - }{ - { - name: "status request", - params: params, - msgs: []testEvent{ - {"P1", bcproto.StatusRequest{}}, - {"P1", bcproto.BlockRequest{Height: 13}}, - {"P1", bcproto.BlockRequest{Height: 20}}, - {"P1", bcproto.BlockRequest{Height: 22}}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - reactor := newTestReactor(params) - mockSwitch := &mockSwitchIo{switchedToConsensus: false} - reactor.io = mockSwitch - err := reactor.Start() - require.NoError(t, err) - - for i := 0; i < len(tt.msgs); i++ { - step := tt.msgs[i] - switch ev := step.event.(type) { - case bcproto.StatusRequest: - old := mockSwitch.numStatusResponse - msg, err := bc.EncodeMsg(&ev) - assert.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) - assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcproto.BlockRequest: - if ev.Height > params.startHeight { - old := mockSwitch.numNoBlockResponse - msg, err := bc.EncodeMsg(&ev) - assert.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) - assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) - } else { - old := mockSwitch.numBlockResponse - msg, err := bc.EncodeMsg(&ev) - assert.NoError(t, err) - assert.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg) - assert.Equal(t, old+1, 
mockSwitch.numBlockResponse) - } - } - } - err = reactor.Stop() - require.NoError(t, err) - }) - } -} - -func TestReactorSetSwitchNil(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) - - reactor := newTestReactor(testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - }) - reactor.SetSwitch(nil) - - assert.Nil(t, reactor.Switch) - assert.Nil(t, reactor.io) -} - -//---------------------------------------------- -// utility funcs - -func makeTxs(height int64) (txs []types.Tx) { - for i := 0; i < 10; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) - } - return txs -} - -func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(height), nil, - nil, types.Messages{}, lastCommit, state.Validators.GetProposer().Address) - return block -} - -type testApp struct { - abci.BaseApplication -} - -func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) ( - *types.GenesisDoc, []types.PrivValidator) { - validators := make([]types.GenesisValidator, numValidators) - privValidators := make([]types.PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privVal := types.RandValidator(randPower, minPower) - validators[i] = types.GenesisValidator{ - PubKey: val.PubKey, - Power: val.VotingPower, - } - privValidators[i] = privVal - } - sort.Sort(types.PrivValidatorsByAddress(privValidators)) - - return &types.GenesisDoc{ - GenesisTime: tmtime.Now(), - ChainID: chainID, - Validators: validators, - }, privValidators -} - -// Why are we importing the entire blockExecutor dependency graph here -// when we have the facilities to -func newReactorStore( - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, - maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) { - if len(privVals) != 1 { - panic("only support one validator") - } - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - - stateDB := memdb.NewDB() - blockStore := store.NewBlockStore(memdb.NewDB()) - stateStore := sm.NewStore(stateDB) - state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc) - if err != nil { - panic(fmt.Errorf("error constructing state from genesis file: %w", err)) - } - - db := memdb.NewDB() - stateStore = sm.NewStore(db) - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.EmptyEvidencePool{}) - if err = stateStore.Save(state); err != nil { - panic(err) - } - - var hash = make([]byte, 32) - hh := sha256.Sum256([]byte("Headerhash")) - copy(hash, hh[:]) - - // add blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote, err := types.MakeVote( - lastBlock.Header.Height, - lastBlockMeta.BlockID, - state.Validators, - privVals[0], - lastBlock.Header.ChainID, - time.Now(), - ) - if err != nil { - panic(err) - } - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) - } - - thisBlock := makeBlock(blockHeight, 
state, lastCommit) - - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) - if err != nil { - panic(fmt.Errorf("error apply block: %w", err)) - } - - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) - } - return blockStore, state, blockExec -} diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go deleted file mode 100644 index b783ff6718..0000000000 --- a/blockchain/v2/routine.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "fmt" - "strings" - "sync/atomic" - - "github.com/Workiva/go-datastructures/queue" - - "github.com/lazyledger/lazyledger-core/libs/log" -) - -type handleFunc = func(event Event) (Event, error) - -const historySize = 25 - -// Routine is a structure that models a finite state machine as serialized -// stream of events processed by a handle function. This Routine structure -// handles the concurrency and messaging guarantees. Events are sent via -// `send` are handled by the `handle` function to produce an iterator -// `next()`. Calling `stop()` on a routine will conclude processing of all -// sent events and produce `final()` event representing the terminal state. -type Routine struct { - name string - handle handleFunc - queue *queue.PriorityQueue - history []Event - out chan Event - fin chan error - rdy chan struct{} - running *uint32 - logger log.Logger - metrics *Metrics -} - -func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { - return &Routine{ - name: name, - handle: handleFunc, - queue: queue.NewPriorityQueue(bufferSize, true), - history: make([]Event, 0, historySize), - out: make(chan Event, bufferSize), - rdy: make(chan struct{}, 1), - fin: make(chan error, 1), - running: new(uint32), - logger: log.NewNopLogger(), - metrics: NopMetrics(), - } -} - -func (rt *Routine) setLogger(logger log.Logger) { - rt.logger = logger -} - -// nolint:unused -func (rt *Routine) setMetrics(metrics *Metrics) { - rt.metrics = metrics -} - -func (rt *Routine) start() { - rt.logger.Info(fmt.Sprintf("%s: run", rt.name)) - running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) - if !running { - panic(fmt.Sprintf("%s is already running", rt.name)) - } - close(rt.rdy) - defer func() { - if r := recover(); r != nil { - var ( - b strings.Builder - j int - ) - for i := len(rt.history) - 1; i >= 0; i-- { - fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i]) - j++ - } - panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String())) - } - stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0)) - if !stopped { - panic(fmt.Sprintf("%s is failed to stop", rt.name)) - } - }() - - for { - events, err := rt.queue.Get(1) - if err == queue.ErrDisposed { - rt.terminate(nil) - return - } else if err != nil { - rt.terminate(err) - return - } - oEvent, err := rt.handle(events[0].(Event)) - rt.metrics.EventsHandled.With("routine", rt.name).Add(1) - if err != nil { - rt.terminate(err) - return - } - rt.metrics.EventsOut.With("routine", rt.name).Add(1) - rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent)) - - // Skip rTrySchedule and rProcessBlock events as they clutter the history - // due to their frequency. 
- switch events[0].(type) { - case rTrySchedule: - case rProcessBlock: - default: - rt.history = append(rt.history, events[0].(Event)) - if len(rt.history) > historySize { - rt.history = rt.history[1:] - } - } - - rt.out <- oEvent - } -} - -// XXX: look into returning OpError in the net package -func (rt *Routine) send(event Event) bool { - rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event)) - if !rt.isRunning() { - return false - } - err := rt.queue.Put(event) - if err != nil { - rt.metrics.EventsShed.With("routine", rt.name).Add(1) - rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name)) - return false - } - - rt.metrics.EventsSent.With("routine", rt.name).Add(1) - return true -} - -func (rt *Routine) isRunning() bool { - return atomic.LoadUint32(rt.running) == 1 -} - -func (rt *Routine) next() chan Event { - return rt.out -} - -func (rt *Routine) ready() chan struct{} { - return rt.rdy -} - -func (rt *Routine) stop() { - if !rt.isRunning() { // XXX: this should check rt.queue.Disposed() - return - } - - rt.logger.Info(fmt.Sprintf("%s: stop", rt.name)) - rt.queue.Dispose() // this should block until all queue items are free? -} - -func (rt *Routine) final() chan error { - return rt.fin -} - -// XXX: Maybe get rid of this -func (rt *Routine) terminate(reason error) { - // We don't close the rt.out channel here, to avoid spinning on the closed channel - // in the event loop. - rt.fin <- reason -} diff --git a/blockchain/v2/routine_test.go b/blockchain/v2/routine_test.go deleted file mode 100644 index 8f92bee3ee..0000000000 --- a/blockchain/v2/routine_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package v2 - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type eventA struct { - priorityNormal -} - -var errDone = fmt.Errorf("done") - -func simpleHandler(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - return noOp, errDone - } - return noOp, nil -} - -func TestRoutineFinal(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.isRunning(), - "expected an initialized routine to not be running") - go routine.start() - <-routine.ready() - assert.True(t, routine.isRunning(), - "expected an started routine") - - assert.True(t, routine.send(eventA{}), - "expected sending to a ready routine to succeed") - - assert.Equal(t, errDone, <-routine.final(), - "expected the final event to be done") - - assert.False(t, routine.isRunning(), - "expected an completed routine to no longer be running") -} - -func TestRoutineStop(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.send(eventA{}), - "expected sending to an unstarted routine to fail") - - go routine.start() - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a running routine to succeed") - - routine.stop() - - assert.False(t, routine.send(eventA{}), - "expected sending to a stopped routine to fail") -} - -type finalCount struct { - count int -} - -func (f finalCount) Error() string { - return "end" -} - -func genStatefulHandler(maxCount int) handleFunc { - counter := 0 - return func(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - counter++ - if counter >= maxCount { - return noOp, finalCount{counter} - } - - return eventA{}, nil - } - return noOp, nil - } -} - -func feedback(r *Routine) { - for event := range r.next() { - 
r.send(event) - } -} - -func TestStatefulRoutine(t *testing.T) { - var ( - count = 10 - handler = genStatefulHandler(count) - bufferSize = 20 - routine = newRoutine("statefulRoutine", handler, bufferSize) - ) - - go routine.start() - go feedback(routine) - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a started routine to succeed") - - final := <-routine.final() - if fnl, ok := final.(finalCount); ok { - assert.Equal(t, count, fnl.count, - "expected the routine to count to 10") - } else { - t.Fail() - } -} - -type lowPriorityEvent struct { - priorityLow -} - -type highPriorityEvent struct { - priorityHigh -} - -func handleWithPriority(event Event) (Event, error) { - switch event.(type) { - case lowPriorityEvent: - return noOp, nil - case highPriorityEvent: - return noOp, errDone - } - return noOp, nil -} - -func TestPriority(t *testing.T) { - var ( - bufferSize = 20 - routine = newRoutine("priorityRoutine", handleWithPriority, bufferSize) - ) - - go routine.start() - <-routine.ready() - go func() { - for { - routine.send(lowPriorityEvent{}) - time.Sleep(1 * time.Millisecond) - } - }() - time.Sleep(10 * time.Millisecond) - - assert.True(t, routine.isRunning(), - "expected an started routine") - assert.True(t, routine.send(highPriorityEvent{}), - "expected send to succeed even when saturated") - - assert.Equal(t, errDone, <-routine.final()) - assert.False(t, routine.isRunning(), - "expected an started routine") -} diff --git a/blockchain/v2/scheduler.go b/blockchain/v2/scheduler.go deleted file mode 100644 index 5986004fb5..0000000000 --- a/blockchain/v2/scheduler.go +++ /dev/null @@ -1,712 +0,0 @@ -package v2 - -import ( - "bytes" - "errors" - "fmt" - "math" - "sort" - "time" - - "github.com/lazyledger/lazyledger-core/p2p" - "github.com/lazyledger/lazyledger-core/types" -) - -// Events generated by the scheduler: -// all blocks have been processed -type scFinishedEv struct { - priorityNormal - reason string -} - -func (e scFinishedEv) String() string { - return fmt.Sprintf("scFinishedEv{%v}", e.reason) -} - -// send a blockRequest message -type scBlockRequest struct { - priorityNormal - peerID p2p.ID - height int64 -} - -func (e scBlockRequest) String() string { - return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID) -} - -// a block has been received and validated by the scheduler -type scBlockReceived struct { - priorityNormal - peerID p2p.ID - block *types.Block -} - -func (e scBlockReceived) String() string { - return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID) -} - -// scheduler detected a peer error -type scPeerError struct { - priorityHigh - peerID p2p.ID - reason error -} - -func (e scPeerError) String() string { - return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason) -} - -// scheduler removed a set of peers (timed out or slow peer) -type scPeersPruned struct { - priorityHigh - peers []p2p.ID -} - -func (e scPeersPruned) String() string { - return fmt.Sprintf("scPeersPruned{%v}", e.peers) -} - -// XXX: make this fatal? 
-// scheduler encountered a fatal error -type scSchedulerFail struct { - priorityHigh - reason error -} - -func (e scSchedulerFail) String() string { - return fmt.Sprintf("scSchedulerFail{%v}", e.reason) -} - -type blockState int - -const ( - blockStateUnknown blockState = iota + 1 // no known peer has this block - blockStateNew // indicates that a peer has reported having this block - blockStatePending // indicates that this block has been requested from a peer - blockStateReceived // indicates that this block has been received by a peer - blockStateProcessed // indicates that this block has been applied -) - -func (e blockState) String() string { - switch e { - case blockStateUnknown: - return "Unknown" - case blockStateNew: - return "New" - case blockStatePending: - return "Pending" - case blockStateReceived: - return "Received" - case blockStateProcessed: - return "Processed" - default: - return fmt.Sprintf("invalid blockState: %d", e) - } -} - -type peerState int - -const ( - peerStateNew = iota + 1 - peerStateReady - peerStateRemoved -) - -func (e peerState) String() string { - switch e { - case peerStateNew: - return "New" - case peerStateReady: - return "Ready" - case peerStateRemoved: - return "Removed" - default: - panic(fmt.Sprintf("unknown peerState: %d", e)) - } -} - -type scPeer struct { - peerID p2p.ID - - // initialized as New when peer is added, updated to Ready when statusUpdate is received, - // updated to Removed when peer is removed - state peerState - - base int64 // updated when statusResponse is received - height int64 // updated when statusResponse is received - lastTouched time.Time - lastRate int64 // last receive rate in bytes -} - -func (p scPeer) String() string { - return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}", - p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) -} - -func newScPeer(peerID p2p.ID) *scPeer { - return &scPeer{ - peerID: peerID, - state: peerStateNew, - base: -1, - height: -1, - lastTouched: time.Time{}, - } -} - -// The scheduler keep track of the state of each block and each peer. The -// scheduler will attempt to schedule new block requests with `trySchedule` -// events and remove slow peers with `tryPrune` events. -type scheduler struct { - initHeight int64 - - // next block that needs to be processed. All blocks with smaller height are - // in Processed state. - height int64 - - // lastAdvance tracks the last time a block execution happened. - // syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing. - // This covers the cases where there are no peers or all peers have a lower height. - lastAdvance time.Time - syncTimeout time.Duration - - // a map of peerID to scheduler specific peer struct `scPeer` used to keep - // track of peer specific state - peers map[p2p.ID]*scPeer - peerTimeout time.Duration // maximum response time from a peer otherwise prune - minRecvRate int64 // minimum receive rate from peer otherwise prune - - // the maximum number of blocks that should be New, Received or Pending at any point - // in time. This is used to enforce a limit on the blockStates map. - targetPending int - // a list of blocks to be scheduled (New), Pending or Received. Its length should be - // smaller than targetPending. 
- blockStates map[int64]blockState - - // a map of heights to the peer we are waiting a response from - pendingBlocks map[int64]p2p.ID - - // the time at which a block was put in blockStatePending - pendingTime map[int64]time.Time - - // a map of heights to the peers that put the block in blockStateReceived - receivedBlocks map[int64]p2p.ID -} - -func (sc scheduler) String() string { - return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v", - sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks) -} - -func newScheduler(initHeight int64, startTime time.Time) *scheduler { - sc := scheduler{ - initHeight: initHeight, - lastAdvance: startTime, - syncTimeout: 60 * time.Second, - height: initHeight, - blockStates: make(map[int64]blockState), - peers: make(map[p2p.ID]*scPeer), - pendingBlocks: make(map[int64]p2p.ID), - pendingTime: make(map[int64]time.Time), - receivedBlocks: make(map[int64]p2p.ID), - targetPending: 10, // TODO - pass as param - peerTimeout: 15 * time.Second, // TODO - pass as param - minRecvRate: 0, // int64(7680), TODO - pass as param - } - - return &sc -} - -func (sc *scheduler) ensurePeer(peerID p2p.ID) *scPeer { - if _, ok := sc.peers[peerID]; !ok { - sc.peers[peerID] = newScPeer(peerID) - } - return sc.peers[peerID] -} - -func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("couldn't find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state) - } - - peer.lastTouched = time - - return nil -} - -func (sc *scheduler) removePeer(peerID p2p.ID) { - peer, ok := sc.peers[peerID] - if !ok { - return - } - if peer.state == peerStateRemoved { - return - } - - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.pendingTime, height) - delete(sc.pendingBlocks, height) - } - } - - for height, rcvPeerID := range sc.receivedBlocks { - if rcvPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.receivedBlocks, height) - } - } - - // remove the blocks from blockStates if the peer removal causes the max peer height to be lower. - peer.state = peerStateRemoved - maxPeerHeight := int64(0) - for _, otherPeer := range sc.peers { - if otherPeer.state != peerStateReady { - continue - } - if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight { - maxPeerHeight = otherPeer.height - } - } - for h := range sc.blockStates { - if h > maxPeerHeight { - delete(sc.blockStates, h) - } - } -} - -// check if the blockPool is running low and add new blocks in New state to be requested. -// This function is called when there is an increase in the maximum peer height or when -// blocks are processed. -func (sc *scheduler) addNewBlocks() { - if len(sc.blockStates) >= sc.targetPending { - return - } - - for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ { - if i > sc.maxHeight() { - break - } - if sc.getStateAtHeight(i) == blockStateUnknown { - sc.setStateAtHeight(i, blockStateNew) - } - } -} - -func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error { - peer := sc.ensurePeer(peerID) - - if peer.state == peerStateRemoved { - return nil // noop - } - - if height < peer.height { - sc.removePeer(peerID) - return fmt.Errorf("cannot move peer height lower. 
from %d to %d", peer.height, height) - } - - if base > height { - sc.removePeer(peerID) - return fmt.Errorf("cannot set peer base higher than its height") - } - - peer.base = base - peer.height = height - peer.state = peerStateReady - - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) getStateAtHeight(height int64) blockState { - if height < sc.height { - return blockStateProcessed - } else if state, ok := sc.blockStates[height]; ok { - return state - } else { - return blockStateUnknown - } -} - -func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID { - peers := make([]p2p.ID, 0) - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if peer.base <= height && peer.height >= height { - peers = append(peers, peer.peerID) - } - } - return peers -} - -func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.ID { - prunable := make([]p2p.ID, 0) - for peerID, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if now.Sub(peer.lastTouched) > peerTimout || peer.lastRate < minRecvRate { - prunable = append(prunable, peerID) - } - } - // Tests for handleTryPrunePeer() may fail without sort due to range non-determinism - sort.Sort(PeerByID(prunable)) - return prunable -} - -func (sc *scheduler) setStateAtHeight(height int64, state blockState) { - sc.blockStates[height] = state -} - -// CONTRACT: peer exists and in Ready state. -func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error { - peer := sc.peers[peerID] - - if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { - return fmt.Errorf("received block %d from peer %s without being requested", height, peerID) - } - - pendingTime, ok := sc.pendingTime[height] - if !ok || now.Sub(pendingTime) <= 0 { - return fmt.Errorf("clock error: block %d received at %s but requested at %s", - height, pendingTime, now) - } - - peer.lastRate = size / now.Sub(pendingTime).Nanoseconds() - - sc.setStateAtHeight(height, blockStateReceived) - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - - sc.receivedBlocks[height] = peerID - - return nil -} - -func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) error { - state := sc.getStateAtHeight(height) - if state != blockStateNew { - return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state) - } - - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("cannot find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state) - } - - if height > peer.height { - return fmt.Errorf("cannot request height %d from peer %s that is at height %d", - height, peerID, peer.height) - } - - if height < peer.base { - return fmt.Errorf("cannot request height %d for peer %s with base %d", - height, peerID, peer.base) - } - - sc.setStateAtHeight(height, blockStatePending) - sc.pendingBlocks[height] = peerID - sc.pendingTime[height] = time - - return nil -} - -func (sc *scheduler) markProcessed(height int64) error { - // It is possible that a peer error or timeout is handled after the processor - // has processed the block but before the scheduler received this event, so - // when pcBlockProcessed event is received, the block had been requested - // again => don't check the block state. 
- sc.lastAdvance = time.Now() - sc.height = height + 1 - delete(sc.pendingBlocks, height) - delete(sc.pendingTime, height) - delete(sc.receivedBlocks, height) - delete(sc.blockStates, height) - sc.addNewBlocks() - return nil -} - -func (sc *scheduler) allBlocksProcessed() bool { - if len(sc.peers) == 0 { - return false - } - return sc.height >= sc.maxHeight() -} - -// returns max peer height or the last processed block, i.e. sc.height -func (sc *scheduler) maxHeight() int64 { - max := sc.height - 1 - for _, peer := range sc.peers { - if peer.state != peerStateReady { - continue - } - if max < peer.height { - max = peer.height - } - } - return max -} - -// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks -func (sc *scheduler) nextHeightToSchedule() int64 { - var min int64 = math.MaxInt64 - for height, state := range sc.blockStates { - if state == blockStateNew && height < min { - min = height - } - } - if min == math.MaxInt64 { - min = -1 - } - return min -} - -func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 { - var heights []int64 - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - heights = append(heights, height) - } - } - return heights -} - -func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) { - peers := sc.getPeersWithHeight(height) - if len(peers) == 0 { - return "", fmt.Errorf("cannot find peer for height %d", height) - } - - // create a map from number of pending requests to a list - // of peers having that number of pending requests. - pendingFrom := make(map[int][]p2p.ID) - for _, peerID := range peers { - numPending := len(sc.pendingFrom(peerID)) - pendingFrom[numPending] = append(pendingFrom[numPending], peerID) - } - - // find the set of peers with minimum number of pending requests. - var minPending int64 = math.MaxInt64 - for mp := range pendingFrom { - if int64(mp) < minPending { - minPending = int64(mp) - } - } - - sort.Sort(PeerByID(pendingFrom[int(minPending)])) - return pendingFrom[int(minPending)][0], nil -} - -// PeerByID is a list of peers sorted by peerID. -type PeerByID []p2p.ID - -func (peers PeerByID) Len() int { - return len(peers) -} -func (peers PeerByID) Less(i, j int) bool { - return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1 -} - -func (peers PeerByID) Swap(i, j int) { - peers[i], peers[j] = peers[j], peers[i] -} - -// Handlers - -// This handler gets the block, performs some validation and then passes it on to the processor. -func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) { - err := sc.touchPeer(event.peerID, event.time) - if err != nil { - // peer does not exist OR not ready - return noOp, nil - } - - err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time) - if err != nil { - sc.removePeer(event.peerID) - return scPeerError{peerID: event.peerID, reason: err}, nil - } - - return scBlockReceived{peerID: event.peerID, block: event.block}, nil -} - -func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) { - // No such peer or peer was removed. - peer, ok := sc.peers[event.peerID] - if !ok || peer.state == peerStateRemoved { - return noOp, nil - } - - // The peer may have been just removed due to errors, low speed or timeouts. 
- sc.removePeer(event.peerID) - - return scPeerError{peerID: event.peerID, - reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", - event.peerID, peer.base, peer.height, event.height)}, nil -} - -func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { - if event.height != sc.height { - panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height)) - } - - err := sc.markProcessed(event.height) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "processed all blocks"}, nil - } - - return noOp, nil -} - -// Handles an error from the processor. The processor had already cleaned the blocks from -// the peers included in this event. Just attempt to remove the peers. -func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) { - // The peers may have been just removed due to errors, low speed or timeouts. - sc.removePeer(event.firstPeerID) - if event.firstPeerID != event.secondPeerID { - sc.removePeer(event.secondPeerID) - } - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "error on last block"}, nil - } - - return noOp, nil -} - -func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) { - sc.ensurePeer(event.peerID) - return noOp, nil -} - -func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) { - sc.removePeer(event.peerID) - - if sc.allBlocksProcessed() { - return scFinishedEv{reason: "removed peer"}, nil - } - - // Return scPeerError so the peer (and all associated blocks) is removed from - // the processor. - return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil -} - -func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) { - // Check behavior of peer responsible to deliver block at sc.height. - timeHeightAsked, ok := sc.pendingTime[sc.height] - if ok && time.Since(timeHeightAsked) > sc.peerTimeout { - // A request was sent to a peer for block at sc.height but a response was not received - // from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer - // will be timed out even if it sends blocks at higher heights but prevents progress by - // not sending the block at current height. - sc.removePeer(sc.pendingBlocks[sc.height]) - } - - prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time) - if len(prunablePeers) == 0 { - return noOp, nil - } - for _, peerID := range prunablePeers { - sc.removePeer(peerID) - } - - // If all blocks are processed we should finish. 
- if sc.allBlocksProcessed() { - return scFinishedEv{reason: "after try prune"}, nil - } - - return scPeersPruned{peers: prunablePeers}, nil -} - -func (sc *scheduler) handleResetState(event bcResetState) (Event, error) { - initHeight := event.state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = event.state.InitialHeight - } - sc.initHeight = initHeight - sc.height = initHeight - sc.lastAdvance = time.Now() - sc.addNewBlocks() - return noOp, nil -} - -func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { - if time.Since(sc.lastAdvance) > sc.syncTimeout { - return scFinishedEv{reason: "timeout, no advance"}, nil - } - - nextHeight := sc.nextHeightToSchedule() - if nextHeight == -1 { - return noOp, nil - } - - bestPeerID, err := sc.selectPeer(nextHeight) - if err != nil { - return scSchedulerFail{reason: err}, nil - } - if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil { - return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate - } - return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil - -} - -func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) { - err := sc.setPeerRange(event.peerID, event.base, event.height) - if err != nil { - return scPeerError{peerID: event.peerID, reason: err}, nil - } - return noOp, nil -} - -func (sc *scheduler) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - nextEvent, err := sc.handleResetState(event) - return nextEvent, err - case bcStatusResponse: - nextEvent, err := sc.handleStatusResponse(event) - return nextEvent, err - case bcBlockResponse: - nextEvent, err := sc.handleBlockResponse(event) - return nextEvent, err - case bcNoBlockResponse: - nextEvent, err := sc.handleNoBlockResponse(event) - return nextEvent, err - case rTrySchedule: - nextEvent, err := sc.handleTrySchedule(event) - return nextEvent, err - case bcAddNewPeer: - nextEvent, err := sc.handleAddNewPeer(event) - return nextEvent, err - case bcRemovePeer: - nextEvent, err := sc.handleRemovePeer(event) - return nextEvent, err - case rTryPrunePeer: - nextEvent, err := sc.handleTryPrunePeer(event) - return nextEvent, err - case pcBlockProcessed: - nextEvent, err := sc.handleBlockProcessed(event) - return nextEvent, err - case pcBlockVerificationFailure: - nextEvent, err := sc.handleBlockProcessError(event) - return nextEvent, err - default: - return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil - } -} diff --git a/blockchain/v2/scheduler_test.go b/blockchain/v2/scheduler_test.go deleted file mode 100644 index bf4b320f56..0000000000 --- a/blockchain/v2/scheduler_test.go +++ /dev/null @@ -1,2254 +0,0 @@ -package v2 - -import ( - "fmt" - "math" - "sort" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/lazyledger/lazyledger-core/p2p" - "github.com/lazyledger/lazyledger-core/state" - "github.com/lazyledger/lazyledger-core/types" -) - -type scTestParams struct { - peers map[string]*scPeer - initHeight int64 - height int64 - allB []int64 - pending map[int64]p2p.ID - pendingTime map[int64]time.Time - received map[int64]p2p.ID - peerTimeout time.Duration - minRecvRate int64 - targetPending int - startTime time.Time - syncTimeout time.Duration -} - -func verifyScheduler(sc *scheduler) { - missing := 0 - if sc.maxHeight() >= sc.height { - missing = int(math.Min(float64(sc.targetPending), float64(sc.maxHeight()-sc.height+1))) - } - if 
len(sc.blockStates) != missing { - panic(fmt.Sprintf("scheduler block length %d different than target %d", len(sc.blockStates), missing)) - } -} - -func newTestScheduler(params scTestParams) *scheduler { - peers := make(map[p2p.ID]*scPeer) - var maxHeight int64 - - initHeight := params.initHeight - if initHeight == 0 { - initHeight = 1 - } - sc := newScheduler(initHeight, params.startTime) - if params.height != 0 { - sc.height = params.height - } - - for id, peer := range params.peers { - peer.peerID = p2p.ID(id) - peers[p2p.ID(id)] = peer - if maxHeight < peer.height { - maxHeight = peer.height - } - } - for _, h := range params.allB { - sc.blockStates[h] = blockStateNew - } - for h, pid := range params.pending { - sc.blockStates[h] = blockStatePending - sc.pendingBlocks[h] = pid - } - for h, tm := range params.pendingTime { - sc.pendingTime[h] = tm - } - for h, pid := range params.received { - sc.blockStates[h] = blockStateReceived - sc.receivedBlocks[h] = pid - } - - sc.peers = peers - sc.peerTimeout = params.peerTimeout - if params.syncTimeout == 0 { - sc.syncTimeout = 10 * time.Second - } else { - sc.syncTimeout = params.syncTimeout - } - - if params.targetPending == 0 { - sc.targetPending = 10 - } else { - sc.targetPending = params.targetPending - } - - sc.minRecvRate = params.minRecvRate - - verifyScheduler(sc) - - return sc -} - -func TestScInit(t *testing.T) { - var ( - initHeight int64 = 5 - sc = newScheduler(initHeight, time.Now()) - ) - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight)) - assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1)) -} - -func TestScMaxHeights(t *testing.T) { - - tests := []struct { - name string - sc scheduler - wantMax int64 - }{ - { - name: "no peers", - sc: scheduler{height: 11}, - wantMax: 10, - }, - { - name: "one ready peer", - sc: scheduler{ - height: 3, - peers: map[p2p.ID]*scPeer{"P1": {height: 6, state: peerStateReady}}, - }, - wantMax: 6, - }, - { - name: "ready and removed peers", - sc: scheduler{ - height: 1, - peers: map[p2p.ID]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 4, - }, - { - name: "removed peers", - sc: scheduler{ - height: 1, - peers: map[p2p.ID]*scPeer{ - "P1": {height: 4, state: peerStateRemoved}, - "P2": {height: 10, state: peerStateRemoved}}, - }, - wantMax: 0, - }, - { - name: "new peers", - sc: scheduler{ - height: 1, - peers: map[p2p.ID]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}, - }, - wantMax: 0, - }, - { - name: "mixed peers", - sc: scheduler{ - height: 1, - peers: map[p2p.ID]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 20, state: peerStateRemoved}, - "P4": {height: 22, state: peerStateReady}, - }, - }, - wantMax: 22, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - // maxHeight() should not mutate the scheduler - wantSc := tt.sc - - resMax := tt.sc.maxHeight() - assert.Equal(t, tt.wantMax, resMax) - assert.Equal(t, wantSc, tt.sc) - }) - } -} - -func TestScEnsurePeer(t *testing.T) { - - type args struct { - peerID p2p.ID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - }{ - { - name: "add first peer", - fields: scTestParams{}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: 
map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add duplicate peer is fine", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "add duplicate peer with existing peer in Ready state is noop", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.ensurePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScTouchPeer(t *testing.T) { - now := time.Now() - - type args struct { - peerID p2p.ID - time time.Time - } - - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt to touch non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - wantErr: true, - }, - { - name: "attempt to touch peer in state New", - fields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - wantErr: true, - }, - { - name: "attempt to touch peer in state Removed", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - wantErr: true, - }, - { - name: "touch peer in state Ready", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, lastTouched: now}}}, - args: args{peerID: "P1", time: now.Add(3 * time.Second)}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {state: peerStateReady, lastTouched: now.Add(3 * time.Second)}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.touchPeer(tt.args.peerID, tt.args.time); (err != nil) != tt.wantErr { - t.Errorf("touchPeer() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScPrunablePeers(t *testing.T) { - now := time.Now() - - type args struct { - threshold time.Duration - time time.Time - minSpeed int64 - } - - tests := []struct { - name string - fields scTestParams - args args - wantResult []p2p.ID - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - 
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []p2p.ID{}, - }, - { - name: "mixed peers", - fields: scTestParams{peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90}, - }}, - args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []p2p.ID{"P4", "P5", "P6"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // peersSlowerThan should not mutate the scheduler - wantSc := sc - res := sc.prunablePeers(tt.args.threshold, tt.args.minSpeed, tt.args.time) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScRemovePeer(t *testing.T) { - - type args struct { - peerID p2p.ID - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "remove non existing peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "remove single New peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}}}, - }, - { - name: "remove one of two New peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}, "P2": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}, "P2": {height: -1}}}, - }, - { - name: "remove one Ready peer, all peers removed", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 5, state: peerStateRemoved}}, - }, - }, - { - name: "attempt to remove already removed peer", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: 10, state: peerStateRemoved}, - "P2": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}}, - }, - { - name: "remove Ready peer with blocks requested", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]p2p.ID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - 
allB: []int64{}, - pending: map[int64]p2p.ID{}, - }, - }, - { - name: "remove Ready peer with blocks received", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - received: map[int64]p2p.ID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - received: map[int64]p2p.ID{}, - }, - }, - { - name: "remove Ready peer with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{1: "P1", 3: "P1"}, - received: map[int64]p2p.ID{2: "P1", 4: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]p2p.ID{}, - received: map[int64]p2p.ID{}, - }, - }, - { - name: "remove Ready peer from multiple peers set, with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateReady}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]p2p.ID{1: "P1", 3: "P2", 6: "P1"}, - received: map[int64]p2p.ID{2: "P1", 4: "P2", 5: "P2"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateRemoved}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]p2p.ID{3: "P2"}, - received: map[int64]p2p.ID{4: "P2", 5: "P2"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.removePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScSetPeerRange(t *testing.T) { - - type args struct { - peerID p2p.ID - base int64 - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 2, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "increase height of removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - }, - { - name: "decrease height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 2}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}}, - wantErr: true, - }, - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, 
state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "add peer with huge height 10**10 ", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: -1, state: peerStateNew}}, - targetPending: 4, - }, - args: args{peerID: "P2", height: 10000000000}, - wantFields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "add peer with base > height should error", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", base: 6, height: 5}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "add peer with base == height is fine", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateNew}}, - targetPending: 4, - }, - args: args{peerID: "P1", base: 6, height: 6}, - wantFields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height) - if (err != nil) != tt.wantErr { - t.Errorf("setPeerHeight() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScGetPeersWithHeight(t *testing.T) { - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantResult []p2p.ID - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - args: args{height: 10}, - wantResult: []p2p.ID{}, - }, - { - name: "only new peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{height: 10}, - wantResult: []p2p.ID{}, - }, - { - name: "only Removed peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{height: 2}, - wantResult: []p2p.ID{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 5}, - wantResult: []p2p.ID{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []p2p.ID{"P1"}, - }, - { - name: "one Ready higher peer", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []p2p.ID{"P1"}, - }, - { - name: "one Ready higher peer at base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: 
args{height: 4}, - wantResult: []p2p.ID{"P1"}, - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []p2p.ID{}, - }, - { - name: "multiple mixed peers", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 5, state: peerStateReady}, - "P4": {height: 20, state: peerStateRemoved}, - "P5": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{height: 8}, - wantResult: []p2p.ID{"P2", "P5"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // getPeersWithHeight should not mutate the scheduler - wantSc := sc - res := sc.getPeersWithHeight(tt.args.height) - sort.Sort(PeerByID(res)) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkPending(t *testing.T) { - now := time.Now() - - type args struct { - peerID p2p.ID - height int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt mark pending an unknown block above height", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "attempt mark pending an unknown block below base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - wantErr: true, - }, - { - name: "attempt mark pending from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "mark pending from Removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "mark pending from New peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", height: 2, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending from short peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 2, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", 
height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 2, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending all good", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{1: "P1"}, - pendingTime: map[int64]time.Time{1: now}, - }, - args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.markPending(tt.args.peerID, tt.args.height, tt.args.tm); (err != nil) != tt.wantErr { - t.Errorf("markPending() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkReceived(t *testing.T) { - now := time.Now() - - type args struct { - peerID p2p.ID - height int64 - size int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "received from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "received from removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "received from unsolicited peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, - }, - wantErr: true, - }, - { - name: "received but blockRequest not sent", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{}, - }, - wantErr: true, - }, - { - name: "received with bad timestamp", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now}, - 
wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, - }, - wantErr: true, - }, - { - name: "received all good", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now}, - }, - args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{1: "P1"}, - pendingTime: map[int64]time.Time{1: now}, - received: map[int64]p2p.ID{2: "P1"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.markReceived( - tt.args.peerID, - tt.args.height, - tt.args.size, - now.Add(time.Second)); (err != nil) != tt.wantErr { - t.Errorf("markReceived() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkProcessed(t *testing.T) { - now := time.Now() - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "processed an unreceived block", - fields: scTestParams{ - height: 2, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - targetPending: 1, - }, - args: args{height: 2}, - wantFields: scTestParams{ - height: 3, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{3}, - targetPending: 1, - }, - }, - { - name: "mark processed success", - fields: scTestParams{ - height: 1, - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - received: map[int64]p2p.ID{1: "P1"}}, - args: args{height: 1}, - wantFields: scTestParams{ - height: 2, - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{2}, - pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - oldBlockState := sc.getStateAtHeight(tt.args.height) - if err := sc.markProcessed(tt.args.height); (err != nil) != tt.wantErr { - t.Errorf("markProcessed() wantErr %v, error = %v", tt.wantErr, err) - } - if tt.wantErr { - assert.Equal(t, oldBlockState, sc.getStateAtHeight(tt.args.height)) - } else { - assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(tt.args.height)) - } - wantSc := newTestScheduler(tt.wantFields) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScResetState(t *testing.T) { - tests := []struct { - name string - fields scTestParams - state state.State - wantFields scTestParams - }{ - { - name: "updates height and initHeight", - fields: scTestParams{ - height: 0, - initHeight: 0, - }, - state: state.State{LastBlockHeight: 7}, - wantFields: scTestParams{ - height: 8, - initHeight: 8, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := 
newTestScheduler(tt.fields) - e, err := sc.handleResetState(bcResetState{state: tt.state}) - require.NoError(t, err) - assert.Equal(t, e, noOp) - wantSc := newTestScheduler(tt.wantFields) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScAllBlocksProcessed(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - fields scTestParams - wantResult bool - }{ - { - name: "no blocks, no peers", - fields: scTestParams{}, - wantResult: false, - }, - { - name: "only New blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - wantResult: false, - }, - { - name: "only Pending blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, - }, - wantResult: false, - }, - { - name: "only Received blocks", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - }, - wantResult: false, - }, - { - name: "only Processed blocks plus highest is received", - fields: scTestParams{ - height: 4, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}}, - allB: []int64{4}, - received: map[int64]p2p.ID{4: "P1"}, - }, - wantResult: true, - }, - { - name: "mixed block states", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{2: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{2: now, 4: now}, - }, - wantResult: false, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // allBlocksProcessed() should not mutate the scheduler - wantSc := sc - res := sc.allBlocksProcessed() - assert.Equal(t, tt.wantResult, res) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScNextHeightToSchedule(t *testing.T) { - now := time.Now() - - tests := []struct { - name string - fields scTestParams - wantHeight int64 - }{ - { - name: "no blocks", - fields: scTestParams{initHeight: 11, height: 11}, - wantHeight: -1, - }, - { - name: "only New blocks", - fields: scTestParams{ - initHeight: 3, - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{3, 4, 5, 6}, - }, - wantHeight: 3, - }, - { - name: "only Pending blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, - }, - wantHeight: -1, - }, - { - name: "only Received blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, - }, - wantHeight: -1, - }, - { - name: "only Processed blocks", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - wantHeight: 1, - }, - { - name: "mixed block states", - fields: scTestParams{ - initHeight: 1, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - 
pending: map[int64]p2p.ID{2: "P1"}, - pendingTime: map[int64]time.Time{2: now}, - }, - wantHeight: 1, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // nextHeightToSchedule() should not mutate the scheduler - wantSc := sc - - resMin := sc.nextHeightToSchedule() - assert.Equal(t, tt.wantHeight, resMin) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -func TestScSelectPeer(t *testing.T) { - - type args struct { - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantResult p2p.ID - wantError bool - }{ - { - name: "no peers", - fields: scTestParams{peers: map[string]*scPeer{}}, - args: args{height: 10}, - wantResult: "", - wantError: true, - }, - { - name: "only new peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{height: 10}, - wantResult: "", - wantError: true, - }, - { - name: "only Removed peers", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{height: 2}, - wantResult: "", - wantError: true, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 5}, - wantResult: "", - wantError: true, - }, - { - name: "one Ready equal peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: "P1", - }, - { - name: "one Ready higher peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}, - }, - args: args{height: 4}, - wantResult: "P1", - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}, - }, - args: args{height: 3}, - wantResult: "", - wantError: true, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - height: 4, - peers: map[string]*scPeer{ - "P1": {height: 8, state: peerStateReady}, - "P2": {height: 9, state: peerStateReady}}, - allB: []int64{4, 5, 6, 7, 8, 9}, - pending: map[int64]p2p.ID{ - 4: "P1", 6: "P1", - 5: "P2", - }, - }, - args: args{height: 4}, - wantResult: "P2", - }, - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P2": {height: 20, state: peerStateReady}, - "P1": {height: 15, state: peerStateReady}, - "P3": {height: 15, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - pending: map[int64]p2p.ID{ - 1: "P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: "P2", - }, - }, - args: args{height: 7}, - wantResult: "P1", - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // selectPeer should not mutate the scheduler - wantSc := sc - res, err := sc.selectPeer(tt.args.height) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, tt.wantError, err != nil) - checkSameScheduler(t, wantSc, sc) - }) - } -} - -// makeScBlock makes an empty block. 
-func makeScBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// used in place of assert.Equal(t, want, actual) to avoid failures due to -// scheduler.lastAdvanced timestamp inequalities. -func checkSameScheduler(t *testing.T, want *scheduler, actual *scheduler) { - assert.Equal(t, want.initHeight, actual.initHeight) - assert.Equal(t, want.height, actual.height) - assert.Equal(t, want.peers, actual.peers) - assert.Equal(t, want.blockStates, actual.blockStates) - assert.Equal(t, want.pendingBlocks, actual.pendingBlocks) - assert.Equal(t, want.pendingTime, actual.pendingTime) - assert.Equal(t, want.blockStates, actual.blockStates) - assert.Equal(t, want.receivedBlocks, actual.receivedBlocks) - assert.Equal(t, want.blockStates, actual.blockStates) -} - -// checkScResults checks scheduler handler test results -func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, event Event) { - if (err != nil) != wantErr { - t.Errorf("error = %v, wantErr %v", err, wantErr) - return - } - if !assert.IsType(t, wantEvent, event) { - t.Log(fmt.Sprintf("Wrong type received, got: %v", event)) - } - switch wantEvent := wantEvent.(type) { - case scPeerError: - assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID) - assert.Equal(t, wantEvent.reason != nil, event.(scPeerError).reason != nil) - case scBlockReceived: - assert.Equal(t, wantEvent.peerID, event.(scBlockReceived).peerID) - assert.Equal(t, wantEvent.block, event.(scBlockReceived).block) - case scSchedulerFail: - assert.Equal(t, wantEvent.reason != nil, event.(scSchedulerFail).reason != nil) - } -} - -func TestScHandleBlockResponse(t *testing.T) { - now := time.Now() - block6FromP1 := bcBlockResponse{ - time: now.Add(time.Millisecond), - peerID: p2p.ID("P1"), - size: 100, - block: makeScBlock(6), - } - - type args struct { - event bcBlockResponse - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - { - name: "block from wrong peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "block with bad timestamp", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now.Add(time.Second)}, - }, - args: args{event: block6FromP1}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - { - name: "good block, accept", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{6: "P1"}, - 
pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleNoBlockResponse(t *testing.T) { - now := time.Now() - noBlock6FromP1 := bcNoBlockResponse{ - time: now.Add(time.Millisecond), - peerID: p2p.ID("P1"), - height: 6, - } - - tests := []struct { - name string - fields scTestParams - wantEvent Event - wantFields scTestParams - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{}, - }, - { - name: "noBlock from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "for block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "noBlock from peer we don't have", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: noOpEvent{}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - }, - { - name: "noBlock from existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleNoBlockResponse(noBlock6FromP1) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScHandleBlockProcessed(t *testing.T) { - now := time.Now() - processed6FromP1 := pcBlockProcessed{ - peerID: p2p.ID("P1"), - height: 6, - } - - type args struct { - event pcBlockProcessed - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{height: 6}, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "processed block we don't have", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]p2p.ID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: 
"processed block ok, we processed all blocks", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, - allB: []int64{6, 7}, - received: map[int64]p2p.ID{6: "P1", 7: "P1"}, - }, - args: args{event: processed6FromP1}, - wantEvent: scFinishedEv{}, - }, - { - name: "processed block ok, we still have blocks to process", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]p2p.ID{7: "P1", 8: "P1"}, - received: map[int64]p2p.ID{6: "P1"}, - }, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessed(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleBlockVerificationFailure(t *testing.T) { - now := time.Now() - - type args struct { - event pcBlockVerificationFailure - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block we don't have, single peer is still removed", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]p2p.ID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: scFinishedEv{}, - }, - { - name: "failed block we don't have, one of two peers are removed", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - pending: map[int64]p2p.ID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block, all blocks are processed after removal", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, - allB: []int64{6, 7}, - received: map[int64]p2p.ID{6: "P1", 7: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: scFinishedEv{}, - }, - { - name: "failed block, we still have blocks to process", - fields: scTestParams{ - initHeight: 5, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]p2p.ID{7: "P1", 8: "P1"}, - received: map[int64]p2p.ID{5: "P1", 6: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - }, - { - name: "failed block, H+1 and H+2 delivered by different peers, we still have blocks to process", - fields: scTestParams{ - initHeight: 5, - peers: map[string]*scPeer{ - "P1": {height: 8, state: peerStateReady}, - "P2": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}, - }, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]p2p.ID{7: "P1", 8: "P1"}, - received: map[int64]p2p.ID{5: "P1", 6: 
"P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessError(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleAddNewPeer(t *testing.T) { - addP1 := bcAddNewPeer{ - peerID: p2p.ID("P1"), - } - type args struct { - event bcAddNewPeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "add P1 to empty scheduler", - fields: scTestParams{}, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add duplicate peer", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add P1 to non empty scheduler", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleAddNewPeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTryPrunePeer(t *testing.T) { - now := time.Now() - - pruneEv := rTryPrunePeer{ - time: now.Add(time.Second + time.Millisecond), - } - type args struct { - event rTryPrunePeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{}, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "no prunable peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}}, - peerTimeout: time.Second, - }, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "mixed peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{1, 2, 3, 4, 5, 6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scPeersPruned{peers: []p2p.ID{"P4", "P5", "P6"}}, - }, - { - name: "mixed peers, finish after pruning", - fields: scTestParams{ - minRecvRate: 100, - height: 6, 
- peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scFinishedEv{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTryPrunePeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTrySchedule(t *testing.T) { - now := time.Now() - tryEv := rTrySchedule{ - time: now.Add(time.Second + time.Millisecond), - } - - type args struct { - event rTrySchedule - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only new peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only Removed peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - startTime: now, - height: 6, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - pending: map[int64]p2p.ID{ - 1: "P1", 2: "P1", - 3: "P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P2", height: 4}, - }, - - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P2": {height: 8, state: peerStateReady}, - "P1": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]p2p.ID{ - 1: "P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: "P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 7}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := 
sc.handleTrySchedule(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleStatusResponse(t *testing.T) { - now := time.Now() - statusRespP1Ev := bcStatusResponse{ - time: now.Add(time.Second + time.Millisecond), - peerID: "P1", - height: 6, - } - - type args struct { - event bcStatusResponse - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "increase height of removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "decrease height of single peer", - fields: scTestParams{ - height: 5, - peers: map[string]*scPeer{"P1": {height: 10, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8, 9, 10}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleStatusResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandle(t *testing.T) { - now := time.Now() - - type unknownEv struct { - priorityNormal - } - - block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3) - - t0 := time.Now() - tick := make([]time.Time, 100) - for i := range tick { - tick[i] = t0.Add(time.Duration(i) * time.Millisecond) - } - - type args struct { - event Event - } - type scStep struct { - currentSc *scTestParams - args args - wantEvent Event - wantErr bool - wantSc *scTestParams - } - tests := []struct { - name string - steps []scStep - }{ - { - name: "unknown event", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{}, - args: args{event: unknownEv{}}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, - wantSc: &scTestParams{}, - }, - }, - }, - { - name: "single peer, sync 3 blocks", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{startTime: now, peers: map[string]*scPeer{}, height: 1}, - args: args{event: bcAddNewPeer{peerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}}, height: 1}, - }, - { // set height of P1 - args: args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - height: 1, - }, - }, - { // schedule block 1 - args: args{event: rTrySchedule{time: tick[1]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - wantSc: &scTestParams{ - startTime: 
now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]p2p.ID{1: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1]}, - height: 1, - }, - }, - { // schedule block 2 - args: args{event: rTrySchedule{time: tick[2]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]}, - height: 1, - }, - }, - { // schedule block 3 - args: args{event: rTrySchedule{time: tick[3]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]}, - height: 1, - }, - }, - { // block response 1 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}}, - wantEvent: scBlockReceived{peerID: "P1", block: block1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]p2p.ID{2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]}, - received: map[int64]p2p.ID{1: "P1"}, - height: 1, - }, - }, - { // block response 2 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}}, - wantEvent: scBlockReceived{peerID: "P1", block: block2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]p2p.ID{3: "P1"}, - pendingTime: map[int64]time.Time{3: tick[3]}, - received: map[int64]p2p.ID{1: "P1", 2: "P1"}, - height: 1, - }, - }, - { // block response 3 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}}, - wantEvent: scBlockReceived{peerID: "P1", block: block3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - }, - { // processed block 1 - args: args{event: pcBlockProcessed{peerID: p2p.ID("P1"), height: 1}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{2, 3}, - received: map[int64]p2p.ID{2: "P1", 3: "P1"}, - height: 2, - }, - }, - { // processed block 2 - args: args{event: pcBlockProcessed{peerID: p2p.ID("P1"), height: 2}}, - wantEvent: scFinishedEv{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{3}, - received: map[int64]p2p.ID{3: "P1"}, - height: 3, - }, - }, - }, - }, - { - name: "block verification failure", - steps: []scStep{ - { // failure processing block 1 - currentSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"}, - height: 
1, - }, - args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]p2p.ID{}, - height: 1, - }, - }, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var sc *scheduler - for i, step := range tt.steps { - // First step must always initialise the currentState as state. - if step.currentSc != nil { - sc = newTestScheduler(*step.currentSc) - } - if sc == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := sc.handle(step.args.event) - wantSc := newTestScheduler(*step.wantSc) - - t.Logf("step %d(%v): %s", i, step.args.event, sc) - checkSameScheduler(t, wantSc, sc) - - checkScResults(t, step.wantErr, err, step.wantEvent, nextEvent) - - // Next step may use the wantedState as their currentState. - sc = newTestScheduler(*step.wantSc) - } - }) - } -} diff --git a/blockchain/v2/types.go b/blockchain/v2/types.go deleted file mode 100644 index 7a73728e46..0000000000 --- a/blockchain/v2/types.go +++ /dev/null @@ -1,65 +0,0 @@ -package v2 - -import ( - "github.com/Workiva/go-datastructures/queue" -) - -// Event is the type that can be added to the priority queue. -type Event queue.Item - -type priority interface { - Compare(other queue.Item) int - Priority() int -} - -type priorityLow struct{} -type priorityNormal struct{} -type priorityHigh struct{} - -func (p priorityLow) Priority() int { - return 1 -} - -func (p priorityNormal) Priority() int { - return 2 -} - -func (p priorityHigh) Priority() int { - return 3 -} - -func (p priorityLow) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -func (p priorityNormal) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -func (p priorityHigh) Compare(other queue.Item) int { - op := other.(priority) - if p.Priority() > op.Priority() { - return 1 - } else if p.Priority() == op.Priority() { - return 0 - } - return -1 -} - -type noOpEvent struct { - priorityLow -} - -var noOp = noOpEvent{} diff --git a/config/config.go b/config/config.go index ab1af804ad..5f8551b436 100644 --- a/config/config.go +++ b/config/config.go @@ -755,7 +755,7 @@ type FastSyncConfig struct { // DefaultFastSyncConfig returns a default configuration for the fast sync service func DefaultFastSyncConfig() *FastSyncConfig { return &FastSyncConfig{ - Version: "v2", + Version: "v0", } } @@ -769,8 +769,8 @@ func (cfg *FastSyncConfig) ValidateBasic() error { switch cfg.Version { case "v0": return nil - case "v2": - return nil + // case "v2": + // return nil default: return fmt.Errorf("unknown fastsync version %s", cfg.Version) } diff --git a/config/config_test.go b/config/config_test.go index 78657d848d..93cdf622b5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -133,7 +133,7 @@ func TestFastSyncConfigValidateBasic(t *testing.T) { assert.NoError(t, cfg.ValidateBasic()) // tamper with version - cfg.Version = "v2" + cfg.Version = "v0" assert.NoError(t, cfg.ValidateBasic()) cfg.Version = "invalid" diff --git a/config/toml.go b/config/toml.go index 
c39cdcaffb..13a338f295 100644 --- a/config/toml.go +++ b/config/toml.go @@ -353,7 +353,6 @@ temp-dir = "{{ .StateSync.TempDir }}" # Fast Sync version to use: # 1) "v0" (default) - the legacy fast sync implementation -# 2) "v2" - complete redesign of v0, optimized for testability & readability version = "{{ .FastSync.Version }}" ####################################################### diff --git a/node/node.go b/node/node.go index 1e3b88f7fb..8af8c37e0e 100644 --- a/node/node.go +++ b/node/node.go @@ -24,7 +24,6 @@ import ( abci "github.com/lazyledger/lazyledger-core/abci/types" bcv0 "github.com/lazyledger/lazyledger-core/blockchain/v0" - bcv2 "github.com/lazyledger/lazyledger-core/blockchain/v2" cfg "github.com/lazyledger/lazyledger-core/config" cs "github.com/lazyledger/lazyledger-core/consensus" "github.com/lazyledger/lazyledger-core/crypto" @@ -392,8 +391,8 @@ func createBlockchainReactor(config *cfg.Config, switch config.FastSync.Version { case "v0": bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) - case "v2": - bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + // case "v2": + // bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) default: return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } @@ -1308,8 +1307,8 @@ func makeNodeInfo( switch config.FastSync.Version { case "v0": bcChannel = bcv0.BlockchainChannel - case "v2": - bcChannel = bcv2.BlockchainChannel + // case "v2": + // bcChannel = bcv2.BlockchainChannel default: return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 7ca48483e2..33b904f00c 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -84,7 +84,7 @@ type ManifestNode struct { // runner will wait for the network to reach at least this block height. StartAt int64 `toml:"start_at"` - // FastSync specifies the fast sync mode: "" (disable), "v0" or "v2". + // FastSync specifies the fast sync mode: "" (disable), "v0" // Defaults to disabled. FastSync string `toml:"fast_sync"` diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index 9451a70913..dd4a86848d 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -301,7 +301,7 @@ func (n Node) Validate(testnet Testnet) error { } } switch n.FastSync { - case "", "v0", "v2": + case "", "v0": default: return fmt.Errorf("invalid fast sync setting %q", n.FastSync) }
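
Illustrative only, not part of the diff: a minimal Go sketch of what the fast-sync configuration accepts once the v2 reactor is removed, based on the DefaultFastSyncConfig and ValidateBasic definitions shown in the config/config.go hunk above. The import path and the error text mirror that hunk; treat this as a sketch under those assumptions, not a verbatim excerpt from the repository.

package main

import (
	"fmt"

	cfg "github.com/lazyledger/lazyledger-core/config"
)

func main() {
	// With this patch applied, the default fast-sync version is "v0".
	fsCfg := cfg.DefaultFastSyncConfig()
	fmt.Println(fsCfg.Version)         // "v0"
	fmt.Println(fsCfg.ValidateBasic()) // <nil> (accepted)

	// "v2" is rejected until the reactor is reintroduced.
	fsCfg.Version = "v2"
	fmt.Println(fsCfg.ValidateBasic()) // unknown fastsync version v2
}

The same constraint shows up in the e2e manifest/testnet hunks above: a node's fast_sync field may only be "" or "v0" after this change.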