Merge branch 'main' into docker_pectra

somnathb1 committed Nov 11, 2024
2 parents f095b64 + 2a4f612 commit 7290579
Showing 132 changed files with 3,174 additions and 3,731 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/qa-sync-from-scratch.yml
@@ -12,7 +12,7 @@ jobs:
strategy:
fail-fast: false
matrix:
chain: [ sepolia, holesky, amoy ] # Chain name as specified on the erigon command line
chain: [ sepolia, holesky, amoy, chiado ] # Chain name as specified on the erigon command line
env:
ERIGON_DATA_DIR: ${{ github.workspace }}/erigon_data
ERIGON_QA_PATH: /home/qarunner/erigon-qa
6 changes: 5 additions & 1 deletion .github/workflows/scripts/run_rpc_tests.sh
@@ -35,6 +35,8 @@ disabled_tests=(
debug_traceBlockByNumber/test_10.tar
debug_traceBlockByNumber/test_11.tar
debug_traceBlockByNumber/test_12.tar
# modified expected in case of empty rsp
debug_storageRangeAt/test_11.json
# remove this line after https://github.com/erigontech/rpc-tests/pull/281
parity_getBlockReceipts
parity_listStorageKeys/test_12.json
@@ -66,7 +68,9 @@ disabled_tests=(
net_peerCount/test_1.json
net_version/test_1.json
txpool_status/test_1.json
web3_clientVersion/test_1.json)
web3_clientVersion/test_1.json
# broken by https://github.com/erigontech/erigon/pull/12642 (@AskAlexSharov will fix)
debug_storageRangeAt/test_11.json)

# Transform the array into a comma-separated string
disabled_test_list=$(IFS=,; echo "${disabled_tests[*]}")
2 changes: 1 addition & 1 deletion DEV_CHAIN.md
@@ -82,7 +82,7 @@ You might face a conflict with ports if you run it on the same machine. To speci

To check if the nodes are connected, you can go to the log of both nodes and look for the line

``` [p2p] GoodPeers eth66=1 ```
``` [p2p] GoodPeers eth68=1 ```

Note: this might take a while; it is not instantaneous. If you see a 1 on either of the two nodes, the connection is fine.

3 changes: 3 additions & 0 deletions cl/antiquary/state_antiquary.go
@@ -132,6 +132,9 @@ func FillStaticValidatorsTableIfNeeded(ctx context.Context, logger log.Logger, s
if err := stateSn.OpenFolder(); err != nil {
return false, err
}
if stateSn.BlocksAvailable() == 0 {
return false, nil
}
blocksAvaiable := stateSn.BlocksAvailable()
stateSnRoTx := stateSn.View()
defer stateSnRoTx.Close()
37 changes: 19 additions & 18 deletions cl/beacon/handler/pool.go
@@ -355,28 +355,29 @@ func (a *ApiHandler) PostEthV1BeaconPoolSyncCommittees(w http.ResponseWriter, r
}

for _, subnet := range publishingSubnets {
if err = a.syncCommitteeMessagesService.ProcessMessage(r.Context(), &subnet, v); err != nil && !errors.Is(err, services.ErrIgnore) {

var syncCommitteeMessageWithGossipData cltypes.SyncCommitteeMessageWithGossipData
syncCommitteeMessageWithGossipData.SyncCommitteeMessage = v
syncCommitteeMessageWithGossipData.ImmediateVerification = true

encodedSSZ, err := syncCommitteeMessageWithGossipData.SyncCommitteeMessage.EncodeSSZ(nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}

subnetId := subnet
syncCommitteeMessageWithGossipData.GossipData = &sentinel.GossipData{
Data: encodedSSZ,
Name: gossip.TopicNamePrefixSyncCommittee,
SubnetId: &subnetId,
}

if err = a.syncCommitteeMessagesService.ProcessMessage(r.Context(), &subnet, &syncCommitteeMessageWithGossipData); err != nil && !errors.Is(err, services.ErrIgnore) {
log.Warn("[Beacon REST] failed to process attestation in syncCommittee service", "err", err)
failures = append(failures, poolingFailure{Index: idx, Message: err.Error()})
break
}
// Broadcast to gossip
if a.sentinel != nil {
encodedSSZ, err := v.EncodeSSZ(nil)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
subnetId := subnet // this effectively makes a copy
if _, err := a.sentinel.PublishGossip(r.Context(), &sentinel.GossipData{
Data: encodedSSZ,
Name: gossip.TopicNamePrefixSyncCommittee,
SubnetId: &subnetId,
}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
}
}
if len(failures) > 0 {
4 changes: 2 additions & 2 deletions cl/beacon/handler/utils_test.go
@@ -119,8 +119,8 @@ func setupTestingHandler(t *testing.T, v clparams.StateVersion, logger log.Logge
mockValidatorMonitor := mockMonitor.NewMockValidatorMonitor(ctrl)

// ctx context.Context, subnetID *uint64, msg *cltypes.SyncCommitteeMessage) error
syncCommitteeMessagesService.EXPECT().ProcessMessage(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, subnetID *uint64, msg *cltypes.SyncCommitteeMessage) error {
return h.syncMessagePool.AddSyncCommitteeMessage(postState, *subnetID, msg)
syncCommitteeMessagesService.EXPECT().ProcessMessage(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, subnetID *uint64, msg *cltypes.SyncCommitteeMessageWithGossipData) error {
return h.syncMessagePool.AddSyncCommitteeMessage(postState, *subnetID, msg.SyncCommitteeMessage)
}).AnyTimes()

syncContributionService.EXPECT().ProcessMessage(gomock.Any(), gomock.Any(), gomock.Any()).DoAndReturn(func(ctx context.Context, subnetID *uint64, msg *cltypes.SignedContributionAndProof) error {
7 changes: 7 additions & 0 deletions cl/cltypes/contribution.go
@@ -20,6 +20,7 @@ import (
libcommon "github.com/erigontech/erigon-lib/common"
"github.com/erigontech/erigon-lib/common/hexutility"
"github.com/erigontech/erigon-lib/common/length"
sentinel "github.com/erigontech/erigon-lib/gointerfaces/sentinelproto"
"github.com/erigontech/erigon-lib/types/clonable"
"github.com/erigontech/erigon/cl/merkle_tree"
ssz2 "github.com/erigontech/erigon/cl/ssz"
@@ -179,6 +180,12 @@ func (agg *SyncContribution) HashSSZ() ([32]byte, error) {

}

type SyncCommitteeMessageWithGossipData struct {
SyncCommitteeMessage *SyncCommitteeMessage
GossipData *sentinel.GossipData
ImmediateVerification bool
}

type SyncCommitteeMessage struct {
Slot uint64 `json:"slot,string"`
BeaconBlockRoot libcommon.Hash `json:"beacon_block_root"`
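The new wrapper type above bundles a sync committee message with its SSZ-encoded gossip payload and a verification-mode flag. For orientation, here is a minimal sketch of how a caller builds one, mirroring the construction in cl/beacon/handler/pool.go earlier in this diff; the helper name and package placement are illustrative, and the cl/gossip import path is an assumption.

```go
package handler // placement and helper name are illustrative

import (
	sentinel "github.com/erigontech/erigon-lib/gointerfaces/sentinelproto"
	"github.com/erigontech/erigon/cl/cltypes"
	"github.com/erigontech/erigon/cl/gossip" // path assumed for TopicNamePrefixSyncCommittee
)

// wrapForGossip packages a SyncCommitteeMessage the way the REST handler
// above does: encode to SSZ, copy the subnet so the pointer stays stable,
// and request immediate (synchronous) verification.
func wrapForGossip(v *cltypes.SyncCommitteeMessage, subnet uint64) (*cltypes.SyncCommitteeMessageWithGossipData, error) {
	encodedSSZ, err := v.EncodeSSZ(nil)
	if err != nil {
		return nil, err
	}
	subnetId := subnet // copy, as in pool.go
	return &cltypes.SyncCommitteeMessageWithGossipData{
		SyncCommitteeMessage:  v,
		ImmediateVerification: true, // REST callers want an inline verdict
		GossipData: &sentinel.GossipData{
			Data:     encodedSSZ,
			Name:     gossip.TopicNamePrefixSyncCommittee,
			SubnetId: &subnetId,
		},
	}, nil
}
```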
4 changes: 2 additions & 2 deletions cl/phase1/network/gossip_manager.go
@@ -252,8 +252,8 @@ func (g *GossipManager) routeAndProcess(ctx context.Context, data *sentinel.Goss
// The background checks above are enough for now.
return g.blobService.ProcessMessage(ctx, data.SubnetId, blobSideCar)
case gossip.IsTopicSyncCommittee(data.Name):
msg := &cltypes.SyncCommitteeMessage{}
if err := msg.DecodeSSZ(common.CopyBytes(data.Data), int(version)); err != nil {
msg := &cltypes.SyncCommitteeMessageWithGossipData{SyncCommitteeMessage: &cltypes.SyncCommitteeMessage{}}
if err := msg.SyncCommitteeMessage.DecodeSSZ(common.CopyBytes(data.Data), int(version)); err != nil {
return err
}
return g.syncCommitteeMessagesService.ProcessMessage(ctx, data.SubnetId, msg)
7 changes: 7 additions & 0 deletions cl/phase1/network/services/batch_signature_verification.go
@@ -26,6 +26,7 @@ type BatchSignatureVerifier struct {
attVerifyAndExecute chan *AggregateVerificationData
aggregateProofVerify chan *AggregateVerificationData
blsToExecutionChangeVerify chan *AggregateVerificationData
syncCommitteeMessage chan *AggregateVerificationData
voluntaryExitVerify chan *AggregateVerificationData
ctx context.Context
}
@@ -50,6 +51,7 @@ func NewBatchSignatureVerifier(ctx context.Context, sentinel sentinel.SentinelCl
attVerifyAndExecute: make(chan *AggregateVerificationData, 1024),
aggregateProofVerify: make(chan *AggregateVerificationData, 1024),
blsToExecutionChangeVerify: make(chan *AggregateVerificationData, 1024),
syncCommitteeMessage: make(chan *AggregateVerificationData, 1024),
voluntaryExitVerify: make(chan *AggregateVerificationData, 1024),
}
}
@@ -67,6 +69,10 @@ func (b *BatchSignatureVerifier) AsyncVerifyBlsToExecutionChange(data *Aggregate
b.blsToExecutionChangeVerify <- data
}

func (b *BatchSignatureVerifier) AsyncVerifySyncCommitteeMessage(data *AggregateVerificationData) {
b.syncCommitteeMessage <- data
}

func (b *BatchSignatureVerifier) AsyncVerifyVoluntaryExit(data *AggregateVerificationData) {
b.voluntaryExitVerify <- data
}
@@ -80,6 +86,7 @@ func (b *BatchSignatureVerifier) Start() {
go b.start(b.attVerifyAndExecute)
go b.start(b.aggregateProofVerify)
go b.start(b.blsToExecutionChangeVerify)
go b.start(b.syncCommitteeMessage)
go b.start(b.voluntaryExitVerify)
}

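BatchSignatureVerifier keeps one buffered channel per message type, and Start launches one consumer goroutine per channel, so a slow category cannot starve the others. The start loop itself is not in this hunk; a minimal sketch of the assumed pattern:

```go
// Sketch of the per-channel worker pattern; start()'s real body is not
// shown in this diff, so the consumer loop below is an assumption.
func (b *BatchSignatureVerifier) startSketch(ch chan *AggregateVerificationData) {
	for {
		select {
		case <-b.ctx.Done():
			return
		case data := <-ch:
			// Batch-verify data.Signatures against data.SignRoots and
			// data.Pks, then run data.F() and publish data.GossipData on
			// success (behavior inferred from the service code below).
			_ = data
		}
	}
}
```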
2 changes: 1 addition & 1 deletion cl/phase1/network/services/interface.go
@@ -35,7 +35,7 @@ type BlockService Service[*cltypes.SignedBeaconBlock]
type BlobSidecarsService Service[*cltypes.BlobSidecar]

//go:generate mockgen -typed=true -destination=./mock_services/sync_committee_messages_service_mock.go -package=mock_services . SyncCommitteeMessagesService
type SyncCommitteeMessagesService Service[*cltypes.SyncCommitteeMessage]
type SyncCommitteeMessagesService Service[*cltypes.SyncCommitteeMessageWithGossipData]

//go:generate mockgen -typed=true -destination=./mock_services/sync_contribution_service_mock.go -package=mock_services . SyncContributionService
type SyncContributionService Service[*cltypes.SignedContributionAndProof]
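The Service generic itself is outside this hunk; judging from the ProcessMessage call sites throughout this commit, its shape is presumably along these lines:

```go
// Assumed shape of the Service generic (not shown in this diff), inferred
// from the ProcessMessage signatures used across this commit.
type Service[T any] interface {
	ProcessMessage(ctx context.Context, subnetID *uint64, msg T) error
}
```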

Some generated files (regenerated mocks) are not rendered by default.

65 changes: 40 additions & 25 deletions cl/phase1/network/services/sync_committee_messages_service.go
@@ -18,13 +18,10 @@ package services

import (
"context"
"errors"
"fmt"
"slices"
"sync"

"github.com/Giulio2002/bls"

"github.com/erigontech/erigon/cl/beacon/synced_data"
"github.com/erigontech/erigon/cl/clparams"
"github.com/erigontech/erigon/cl/cltypes"
@@ -47,6 +44,7 @@ type syncCommitteeMessagesService struct {
beaconChainCfg *clparams.BeaconChainConfig
syncContributionPool sync_contribution_pool.SyncContributionPool
ethClock eth_clock.EthereumClock
batchSignatureVerifier *BatchSignatureVerifier
test bool

mu sync.Mutex
@@ -58,6 +56,7 @@ func NewSyncCommitteeMessagesService(
ethClock eth_clock.EthereumClock,
syncedDataManager *synced_data.SyncedDataManager,
syncContributionPool sync_contribution_pool.SyncContributionPool,
batchSignatureVerifier *BatchSignatureVerifier,
test bool,
) SyncCommitteeMessagesService {
return &syncCommitteeMessagesService{
@@ -66,30 +65,31 @@ ethClock eth_clock.EthereumClock,
syncedDataManager: syncedDataManager,
beaconChainCfg: beaconChainCfg,
syncContributionPool: syncContributionPool,
batchSignatureVerifier: batchSignatureVerifier,
test: test,
}
}

// ProcessMessage processes a sync committee message
func (s *syncCommitteeMessagesService) ProcessMessage(ctx context.Context, subnet *uint64, msg *cltypes.SyncCommitteeMessage) error {
func (s *syncCommitteeMessagesService) ProcessMessage(ctx context.Context, subnet *uint64, msg *cltypes.SyncCommitteeMessageWithGossipData) error {
s.mu.Lock()
defer s.mu.Unlock()

return s.syncedDataManager.ViewHeadState(func(headState *state.CachingBeaconState) error {
// [IGNORE] The message's slot is for the current slot (with a MAXIMUM_GOSSIP_CLOCK_DISPARITY allowance), i.e. sync_committee_message.slot == current_slot.
if !s.ethClock.IsSlotCurrentSlotWithMaximumClockDisparity(msg.Slot) {
if !s.ethClock.IsSlotCurrentSlotWithMaximumClockDisparity(msg.SyncCommitteeMessage.Slot) {
return ErrIgnore
}
// [REJECT] The subnet_id is valid for the given validator, i.e. subnet_id in compute_subnets_for_sync_committee(state, sync_committee_message.validator_index).
// Note this validation implies the validator is part of the broader current sync committee along with the correct subcommittee.
subnets, err := subnets.ComputeSubnetsForSyncCommittee(headState, msg.ValidatorIndex)
subnets, err := subnets.ComputeSubnetsForSyncCommittee(headState, msg.SyncCommitteeMessage.ValidatorIndex)
if err != nil {
return err
}
seenSyncCommitteeMessageIdentifier := seenSyncCommitteeMessage{
subnet: *subnet,
slot: msg.Slot,
validatorIndex: msg.ValidatorIndex,
slot: msg.SyncCommitteeMessage.Slot,
validatorIndex: msg.SyncCommitteeMessage.ValidatorIndex,
}

if !slices.Contains(subnets, *subnet) {
@@ -100,13 +100,35 @@ func (s *syncCommitteeMessagesService) ProcessMessage(ctx context.Context, subne
return ErrIgnore
}
// [REJECT] The signature is valid for the message beacon_block_root for the validator referenced by validator_index
if err := verifySyncCommitteeMessageSignature(headState, msg); !s.test && err != nil {
signature, signingRoot, pubKey, err := verifySyncCommitteeMessageSignature(headState, msg.SyncCommitteeMessage)
if !s.test && err != nil {
return err
}
s.seenSyncCommitteeMessages[seenSyncCommitteeMessageIdentifier] = struct{}{}
s.cleanupOldSyncCommitteeMessages() // cleanup old messages
// Aggregate the message
return s.syncContributionPool.AddSyncCommitteeMessage(headState, *subnet, msg)
aggregateVerificationData := &AggregateVerificationData{
Signatures: [][]byte{signature},
SignRoots: [][]byte{signingRoot},
Pks: [][]byte{pubKey},
GossipData: msg.GossipData,
F: func() {
s.seenSyncCommitteeMessages[seenSyncCommitteeMessageIdentifier] = struct{}{}
s.cleanupOldSyncCommitteeMessages() // cleanup old messages
// Aggregate the message
s.syncContributionPool.AddSyncCommitteeMessage(headState, *subnet, msg.SyncCommitteeMessage)
},
}

if msg.ImmediateVerification {
return s.batchSignatureVerifier.ImmediateVerification(aggregateVerificationData)
}

// push the signatures to verify asynchronously and run final functions after that.
s.batchSignatureVerifier.AsyncVerifySyncCommitteeMessage(aggregateVerificationData)

// As the logic goes, if we return ErrIgnore there will be no peer banning and further publishing
// gossip data into the network by the gossip manager. That's what we want because we will be doing that ourselves
// in BatchSignatureVerifier service. After validating signatures, if they are valid we will publish the
// gossip ourselves or ban the peer which sent that particular invalid signature.
return ErrIgnore
})
}

@@ -121,26 +143,19 @@ func (s *syncCommitteeMessagesService) cleanupOldSyncCommitteeMessages() {
}

// verifySyncCommitteeMessageSignature verifies the signature of a sync committee message
func verifySyncCommitteeMessageSignature(s *state.CachingBeaconState, msg *cltypes.SyncCommitteeMessage) error {
func verifySyncCommitteeMessageSignature(s *state.CachingBeaconState, msg *cltypes.SyncCommitteeMessage) ([]byte, []byte, []byte, error) {
publicKey, err := s.ValidatorPublicKey(int(msg.ValidatorIndex))
if err != nil {
return err
return nil, nil, nil, err
}
cfg := s.BeaconConfig()
domain, err := s.GetDomain(cfg.DomainSyncCommittee, state.Epoch(s))
if err != nil {
return err
return nil, nil, nil, err
}
signingRoot, err := utils.Sha256(msg.BeaconBlockRoot[:], domain), nil
if err != nil {
return err
}
valid, err := bls.Verify(msg.Signature[:], signingRoot[:], publicKey[:])
if err != nil {
return errors.New("invalid signature")
}
if !valid {
return errors.New("invalid signature")
return nil, nil, nil, err
}
return nil
return msg.Signature[:], signingRoot[:], publicKey[:], nil
}
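The ErrIgnore comment above captures the new contract: the service defers signature checking, so it must also defer peer banning and gossip republication to the BatchSignatureVerifier. A sketch of how the consuming side plausibly resolves one AggregateVerificationData entry; ImmediateVerification's body is not in this diff, and the sentinel field name and ban handling here are assumptions.

```go
// Sketch only: resolves one batch entry. bls.Verify and PublishGossip are
// used with the signatures seen elsewhere in this diff; everything else
// (field names, ban handling) is assumed.
func resolveEntry(b *BatchSignatureVerifier, data *AggregateVerificationData) error {
	for i := range data.Signatures {
		valid, err := bls.Verify(data.Signatures[i], data.SignRoots[i], data.Pks[i])
		if err != nil || !valid {
			// an invalid signature would trigger a peer ban here
			return errors.New("invalid signature")
		}
	}
	data.F() // deferred bookkeeping: seen-cache insert, pool aggregation
	if data.GossipData != nil && b.sentinel != nil {
		// republish the now-validated message ourselves
		if _, err := b.sentinel.PublishGossip(b.ctx, data.GossipData); err != nil {
			return err
		}
	}
	return nil
}
```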