chore(end-to-end): refactor TestSystemRPC
- Add empty skipped tests for missing cases
- Split each case into an individual subtest
- Keep retrying until the main context is canceled
- Fix the `networkState` test case
- Assert more fields
- Fix #2161 and #807
qdm12 committed Jun 6, 2022
1 parent 79bc022 commit 97fb63f
Showing 2 changed files with 196 additions and 104 deletions.
258 changes: 154 additions & 104 deletions tests/rpc/rpc_01-system_test.go
@@ -5,7 +5,6 @@ package rpc
 
 import (
 	"context"
-	"reflect"
 	"testing"
 	"time"
 
@@ -14,137 +13,188 @@ import (
 	"github.com/ChainSafe/gossamer/tests/utils"
 	"github.com/ChainSafe/gossamer/tests/utils/config"
 	"github.com/ChainSafe/gossamer/tests/utils/node"
+	"github.com/ChainSafe/gossamer/tests/utils/retry"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
+const peerIDRegex = `^[a-zA-Z0-9]{52}$`
+
 func TestSystemRPC(t *testing.T) {
 	if utils.MODE != rpcSuite {
 		t.Log("Going to skip RPC suite tests")
 		return
 	}
 
-	testCases := []*testCase{
-		{ //TODO
-			description: "test system_name",
-			method:      "system_name",
-			skip:        true,
-		},
-		{ //TODO
-			description: "test system_version",
-			method:      "system_version",
-			skip:        true,
-		},
-		{ //TODO
-			description: "test system_chain",
-			method:      "system_chain",
-			skip:        true,
-		},
-		{ //TODO
-			description: "test system_properties",
-			method:      "system_properties",
-			skip:        true,
-		},
-		{
-			description: "test system_health",
-			method:      "system_health",
-			expected: modules.SystemHealthResponse{
-				Peers:           2,
-				IsSyncing:       true,
-				ShouldHavePeers: true,
-			},
-			params: "{}",
-		},
-		{
-			description: "test system_peers",
-			method:      "system_peers",
-			expected:    modules.SystemPeersResponse{},
-			params:      "{}",
-		},
-		{
-			description: "test system_network_state",
-			method:      "system_networkState",
-			expected: modules.SystemNetworkStateResponse{
-				NetworkState: modules.NetworkStateString{
-					PeerID: "",
-				},
-			},
-			params: "{}",
-		},
-		{ //TODO
-			description: "test system_addReservedPeer",
-			method:      "system_addReservedPeer",
-			skip:        true,
-		},
-		{ //TODO
-			description: "test system_removeReservedPeer",
-			method:      "system_removeReservedPeer",
-			skip:        true,
-		},
-		{ //TODO
-			description: "test system_nodeRoles",
-			method:      "system_nodeRoles",
-			skip:        true,
-		},
-		{ //TODO
-			description: "test system_accountNextIndex",
-			method:      "system_accountNextIndex",
-			skip:        true,
-		},
-	}
+	const testTimeout = 8 * time.Minute
+	ctx, cancel := context.WithTimeout(context.Background(), testTimeout)
+
+	const numberOfNodes = 3
 
 	genesisPath := libutils.GetGssmrGenesisRawPathTest(t)
 	config := config.CreateDefault(t)
-	nodes := node.MakeNodes(t, 3, node.SetGenesis(genesisPath), node.SetConfig(config))
+	nodes := node.MakeNodes(t, numberOfNodes, node.SetGenesis(genesisPath), node.SetConfig(config))
 
-	ctx, cancel := context.WithCancel(context.Background())
 	nodes.InitAndStartTest(ctx, t, cancel)
 
-	time.Sleep(time.Second) // give server a second to start
-
-	for _, test := range testCases {
-		t.Run(test.description, func(t *testing.T) {
-			if test.skip {
-				t.SkipNow()
-			}
-
-			getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second)
-			defer getResponseCancel()
-			target := reflect.New(reflect.TypeOf(test.expected)).Interface()
-			err := getResponse(getResponseCtx, test.method, test.params, target)
-			require.NoError(t, err)
-
-			switch v := target.(type) {
-			case *modules.SystemHealthResponse:
-				t.Log("Will assert SystemHealthResponse", "target", target)
-
-				require.Equal(t, test.expected.(modules.SystemHealthResponse).IsSyncing, v.IsSyncing)
-				require.Equal(t, test.expected.(modules.SystemHealthResponse).ShouldHavePeers, v.ShouldHavePeers)
-				require.GreaterOrEqual(t, v.Peers, test.expected.(modules.SystemHealthResponse).Peers)
-
-			case *modules.SystemNetworkStateResponse:
-				t.Log("Will assert SystemNetworkStateResponse", "target", target)
-
-				require.NotNil(t, v.NetworkState)
-				require.NotNil(t, v.NetworkState.PeerID)
-
-			case *modules.SystemPeersResponse:
-				t.Log("Will assert SystemPeersResponse", "target", target)
-
-				require.NotNil(t, v)
-
-				//TODO: #807
-				//this assertion requires more time on init to be enabled
-				//require.GreaterOrEqual(t, len(v.Peers), 2)
-
-				for _, vv := range *v {
-					require.NotNil(t, vv.PeerID)
-					require.NotNil(t, vv.Roles)
-					require.NotNil(t, vv.BestHash)
-					require.NotNil(t, vv.BestNumber)
-				}
-
-			}
-		})
-	}
-}
+	t.Run("system_health", func(t *testing.T) {
+		t.Parallel()
+
+		const method = "system_health"
+		const params = "{}"
+
+		expected := modules.SystemHealthResponse{
+			Peers:           numberOfNodes - 1,
+			ShouldHavePeers: true,
+		}
+
+		var response modules.SystemHealthResponse
+		err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) {
+			getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second)
+			err = getResponse(getResponseCtx, method, params, &response)
+			getResponseCancel()
+			if err != nil {
+				return false, err
+			}
+			return response.Peers == expected.Peers, nil
+		})
+		require.NoError(t, err)
+
+		// IsSyncing can be true or false
+		response.IsSyncing = false
+
+		assert.Equal(t, expected, response)
+	})
+
+	t.Run("system_peers", func(t *testing.T) {
+		t.Parallel()
+
+		// Wait for N-1 peers connected and no syncing
+		err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) {
+			getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second)
+			const method = "system_health"
+			const params = "{}"
+			var healthResponse modules.SystemHealthResponse
+			err = getResponse(getResponseCtx, method, params, &healthResponse)
+			getResponseCancel()
+			if err != nil {
+				return false, err // error and stop retrying
+			}
+
+			ok = healthResponse.Peers == numberOfNodes-1 && !healthResponse.IsSyncing
+			return ok, nil
+		})
+		require.NoError(t, err)
+
+		var response modules.SystemPeersResponse
+		// Wait for N-1 peers with peer IDs set
+		err = retry.UntilOK(ctx, time.Second, func() (ok bool, err error) {
+			getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second)
+			const method = "system_peers"
+			const params = "{}"
+			err = getResponse(getResponseCtx, method, params, &response)
+			getResponseCancel()
+			if err != nil {
+				return false, err // error and stop retrying
+			}
+
+			if len(response) != numberOfNodes-1 {
+				return false, nil // retry
+			}
+
+			bestBlockNumber := response[0].BestNumber
+			for _, peer := range response {
+				// wait for all peers to have the same best block number
+				sameBestBlockNumber := bestBlockNumber == peer.BestNumber
+				if peer.PeerID == "" || peer.BestHash.IsEmpty() || !sameBestBlockNumber {
+					return false, nil // retry
+				}
+			}
+
+			return true, nil // success, stop retrying
+		})
+		require.NoError(t, err)
+
+		bestBlockNumber := response[0].BestNumber
+		bestBlockHash := response[0].BestHash
+		expectedResponse := modules.SystemPeersResponse{
+			// Assert they all have the same best block number and hash
+			{Roles: 4, PeerID: "", BestNumber: bestBlockNumber, BestHash: bestBlockHash},
+			{Roles: 4, PeerID: "", BestNumber: bestBlockNumber, BestHash: bestBlockHash},
+		}
+		for i := range response {
+			// Check randomly generated peer IDs and clear them
+			assert.Regexp(t, peerIDRegex, response[i].PeerID)
+			response[i].PeerID = ""
+		}
+
+		assert.Equal(t, expectedResponse, response)
+	})
+
+	t.Run("system_networkState", func(t *testing.T) {
+		t.Parallel()
+
+		const method = "system_networkState"
+		const params = "{}"
+
+		getResponseCtx, getResponseCancel := context.WithTimeout(ctx, time.Second)
+		defer getResponseCancel()
+		var response modules.SystemNetworkStateResponse
+		err := getResponse(getResponseCtx, method, params, &response)
+		require.NoError(t, err)
+
+		assert.Regexp(t, peerIDRegex, response.NetworkState.PeerID)
+		response.NetworkState.PeerID = ""
+
+		assert.NotEmpty(t, response.NetworkState.Multiaddrs)
+		for _, addr := range response.NetworkState.Multiaddrs {
+			assert.Regexp(t, "^/ip[4|6]/.+/tcp/[0-9]{1,5}/p2p/[a-zA-Z0-9]{52}$", addr)
+		}
+		response.NetworkState.Multiaddrs = nil
+
+		// Ensure we don't need to assert other fields
+		expectedResponse := modules.SystemNetworkStateResponse{}
+		assert.Equal(t, expectedResponse, response)
+	})
+
+	t.Run("system_name", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_version", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_chain", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_properties", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_addReservedPeer", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_removeReservedPeer", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_nodeRoles", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+
+	t.Run("system_accountNextIndex", func(t *testing.T) {
+		t.Parallel()
+		t.Skip("test not implemented")
+	})
+}
42 changes: 42 additions & 0 deletions tests/utils/retry/untilok.go
@@ -0,0 +1,42 @@
// Copyright 2022 ChainSafe Systems (ON)
// SPDX-License-Identifier: LGPL-3.0-only

package retry

import (
"context"
"fmt"
"time"
)

// UntilOK retries the function `f` until it returns a true
// value for `ok` or a non-nil error.
// It waits `retryWait` after each failed call to `f`.
// If the context `ctx` is canceled, the function immediately
// returns an error stating the number of failed tries,
// how long it retried for, and the context error.
func UntilOK(ctx context.Context, retryWait time.Duration,
f func() (ok bool, err error)) (err error) {
failedTries := 0
for ctx.Err() == nil {
ok, err := f()
if ok {
return nil
} else if err != nil {
return fmt.Errorf("stop retrying function: %w", err)
}

failedTries++
waitCtx, waitCancel := context.WithTimeout(ctx, retryWait)
<-waitCtx.Done()
waitCancel()
}

totalRetryTime := time.Duration(failedTries) * retryWait
tryWord := "try"
if failedTries > 1 {
tryWord = "tries"
}
return fmt.Errorf("failed after %d %s during %s (%w)",
failedTries, tryWord, totalRetryTime, ctx.Err())
}
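
As a usage sketch (not part of the commit), retry.UntilOK can be driven like this; the condition function below is a hypothetical stand-in for a real check such as polling an RPC endpoint, and the timings are arbitrary:

package main

import (
	"context"
	"log"
	"time"

	"github.com/ChainSafe/gossamer/tests/utils/retry"
)

func main() {
	// Give up retrying once the main context expires.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Hypothetical condition: becomes true after three seconds.
	start := time.Now()
	err := retry.UntilOK(ctx, time.Second, func() (ok bool, err error) {
		// Returning (false, nil) means "not ready, retry";
		// returning a non-nil error stops retrying immediately.
		return time.Since(start) >= 3*time.Second, nil
	})
	if err != nil {
		log.Fatal(err) // context canceled before the condition became true
	}
	log.Println("condition met")
}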
