diff --git a/CHANGELOG.md b/CHANGELOG.md index 0464827b45..e29ce53513 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,9 @@ See [RELEASE](./RELEASE.md) for workflow instructions. * [#6387](https://github.com/spacemeshos/go-spacemesh/pull/6387) Fix an issue were in rare cases invalid proofs for malicious identities were created. +* [#6393](https://github.com/spacemeshos/go-spacemesh/pull/6393) Further improved proposal building process to avoid + late proposals in 1:N setups and during cyclegap. + ## v1.7.4 ### Improvements diff --git a/activation/activation_test.go b/activation/activation_test.go index 4f3550d7dd..192de85da4 100644 --- a/activation/activation_test.go +++ b/activation/activation_test.go @@ -32,7 +32,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/atxs" "github.com/spacemeshos/go-spacemesh/sql/localsql" "github.com/spacemeshos/go-spacemesh/sql/localsql/nipost" - sqlmocks "github.com/spacemeshos/go-spacemesh/sql/mocks" "github.com/spacemeshos/go-spacemesh/sql/statesql" ) @@ -1396,7 +1395,7 @@ func TestGetPositioningAtx(t *testing.T) { t.Parallel() tab := newTestBuilder(t, 1) - db := sqlmocks.NewMockExecutor(gomock.NewController(t)) + db := sql.NewMockExecutor(gomock.NewController(t)) tab.Builder.db = db expected := errors.New("db error") db.EXPECT().Exec(gomock.Any(), gomock.Any(), gomock.Any()).Return(0, expected) diff --git a/activation/e2e/checkpoint_test.go b/activation/e2e/checkpoint_test.go index a9018c8fcb..080e678d05 100644 --- a/activation/e2e/checkpoint_test.go +++ b/activation/e2e/checkpoint_test.go @@ -46,7 +46,7 @@ func TestCheckpoint_PublishingSoloATXs(t *testing.T) { require.NoError(t, err) cfg := testPostConfig() - db := statesql.InMemory() + db := statesql.InMemoryTest(t) cdb := datastore.NewCachedDB(db, logger) opts := testPostSetupOpts(t) @@ -86,7 +86,7 @@ func TestCheckpoint_PublishingSoloATXs(t *testing.T) { require.NoError(t, err) t.Cleanup(clock.Close) - localDB := localsql.InMemory() + localDB := localsql.InMemoryTest(t) nb, err := activation.NewNIPostBuilder( localDB, svc, @@ -190,7 +190,7 @@ func TestCheckpoint_PublishingSoloATXs(t *testing.T) { poetDb, err = activation.NewPoetDb(newDB, logger.Named("poetDb")) require.NoError(t, err) cdb = datastore.NewCachedDB(newDB, logger) - atxdata, err = atxsdata.Warm(newDB, 1, logger) + atxdata, err = atxsdata.Warm(newDB, 1, logger, sig) poetService = activation.NewPoetServiceWithClient(poetDb, client, poetCfg, logger) validator = activation.NewValidator(newDB, poetDb, cfg, opts.Scrypt, verifier) require.NoError(t, err) diff --git a/atxsdata/data.go b/atxsdata/data.go index 8096239f07..542cdbe74f 100644 --- a/atxsdata/data.go +++ b/atxsdata/data.go @@ -8,6 +8,7 @@ import ( "golang.org/x/exp/maps" "github.com/spacemeshos/go-spacemesh/common/types" + "github.com/spacemeshos/go-spacemesh/signing" ) // SAFETY: all exported fields are read-only and are safe to read concurrently. 
@@ -23,7 +24,10 @@ type ATX struct { func New() *Data { return &Data{ malicious: map[types.NodeID]struct{}{}, - epochs: map[types.EpochID]epochCache{}, + epochs: map[types.EpochID]map[types.ATXID]*ATX{}, + + signers: map[types.NodeID]struct{}{}, + managed: map[types.EpochID]map[types.NodeID]types.ATXID{}, } } @@ -32,11 +36,26 @@ type Data struct { mu sync.RWMutex malicious map[types.NodeID]struct{} - epochs map[types.EpochID]epochCache + epochs map[types.EpochID]map[types.ATXID]*ATX + + signers map[types.NodeID]struct{} + managed map[types.EpochID]map[types.NodeID]types.ATXID } -type epochCache struct { - index map[types.ATXID]*ATX +func (d *Data) Register(sig *signing.EdSigner) { + if _, exists := d.signers[sig.NodeID()]; exists { + return + } + d.signers[sig.NodeID()] = struct{}{} + + // update quick access for newly registered signer + for epoch, ecache := range d.epochs { + for id, atx := range ecache { + if atx.Node == sig.NodeID() { + d.managed[epoch][sig.NodeID()] = id + } + } + } } func (d *Data) Evicted() types.EpochID { @@ -58,6 +77,11 @@ func (d *Data) EvictEpoch(evict types.EpochID) { if d.evicted.Load() < evict.Uint32() { d.evicted.Store(evict.Uint32()) } + for epoch := range d.managed { + if epoch <= evict { + delete(d.managed, epoch) + } + } for epoch := range d.epochs { if epoch <= evict { delete(d.epochs, epoch) @@ -92,19 +116,23 @@ func (d *Data) AddAtx(target types.EpochID, id types.ATXID, atx *ATX) bool { } ecache, exists := d.epochs[target] if !exists { - ecache = epochCache{ - index: map[types.ATXID]*ATX{}, - } - d.epochs[target] = ecache + d.epochs[target] = map[types.ATXID]*ATX{} + ecache = d.epochs[target] } - - if _, exists = ecache.index[id]; exists { + if _, exists = ecache[id]; exists { return false } atxsCounter.WithLabelValues(target.String()).Inc() - - ecache.index[id] = atx + ecache[id] = atx + if _, exists = d.signers[atx.Node]; exists { + managedCache, exists := d.managed[target] + if !exists { + d.managed[target] = map[types.NodeID]types.ATXID{} + managedCache = d.managed[target] + } + managedCache[atx.Node] = id + } return true } @@ -164,13 +192,40 @@ func (d *Data) Get(epoch types.EpochID, atx types.ATXID) *ATX { if !exists { return nil } - data, exists := ecache.index[atx] + data, exists := ecache[atx] if !exists { return nil } return data } +// GetByEpochAndNodeID returns atx data by epoch and node id. This query will be slow for nodeIDs that +// are not managed by the node. 
+func (d *Data) GetByEpochAndNodeID(epoch types.EpochID, node types.NodeID) (types.ATXID, *ATX) { + d.mu.RLock() + defer d.mu.RUnlock() + atxcache, exists := d.managed[epoch] + if exists { + if atxid, exists := atxcache[node]; exists { + atx, exists := d.epochs[epoch][atxid] + if exists { + return atxid, atx + } + } + } + + ecache, exists := d.epochs[epoch] + if !exists { + return types.EmptyATXID, nil + } + for id, atx := range ecache { + if atx.Node == node { + return id, atx + } + } + return types.EmptyATXID, nil +} + func (d *Data) Size(target types.EpochID) int { d.mu.RLock() defer d.mu.RUnlock() @@ -178,7 +233,7 @@ func (d *Data) Size(target types.EpochID) int { if !exists { return 0 } - return len(ecache.index) + return len(ecache) } type lockGuard struct{} @@ -203,7 +258,7 @@ func (d *Data) IterateInEpoch(epoch types.EpochID, fn func(types.ATXID, *ATX), f if !exists { return } - for id, atx := range ecache.index { + for id, atx := range ecache { ok := true for _, filter := range filters { ok = ok && filter(d, atx, lockGuard{}) @@ -250,7 +305,7 @@ func (d *Data) MissingInEpoch(epoch types.EpochID, atxs []types.ATXID) []types.A } var missing []types.ATXID for _, id := range atxs { - if _, exists := ecache.index[id]; !exists { + if _, exists := ecache[id]; !exists { missing = append(missing, id) } } @@ -271,7 +326,7 @@ func (d *Data) WeightForSet(epoch types.EpochID, set []types.ATXID) (uint64, []b } var weight uint64 for i, id := range set { - if data, exists := ecache.index[id]; exists { + if data, exists := ecache[id]; exists { weight += data.Weight used[i] = true } diff --git a/atxsdata/data_test.go b/atxsdata/data_test.go index bb7f4ffc1a..eb4f309c74 100644 --- a/atxsdata/data_test.go +++ b/atxsdata/data_test.go @@ -8,10 +8,10 @@ import ( "sync/atomic" "testing" - fuzz "github.com/google/gofuzz" "github.com/stretchr/testify/require" "github.com/spacemeshos/go-spacemesh/common/types" + "github.com/spacemeshos/go-spacemesh/signing" ) func TestData(t *testing.T) { @@ -21,11 +21,8 @@ func TestData(t *testing.T) { ids = 100 ) c := New() - f := fuzz.New() - f.RandSource(rand.NewSource(101)) atxids := [epochs][ids]types.ATXID{} data := [epochs][ids]ATX{} - f.Fuzz(&data) for repeat := 0; repeat < 10; repeat++ { for epoch := 0; epoch < epochs; epoch++ { @@ -50,6 +47,66 @@ func TestData(t *testing.T) { for i := range atxids[epoch] { atx := c.Get(types.EpochID(epoch)+1, atxids[epoch][i]) require.Equal(t, &data[epoch][i], atx) + id, atx := c.GetByEpochAndNodeID(types.EpochID(epoch)+1, data[epoch][i].Node) + require.Equal(t, atxids[epoch][i], id) + require.Equal(t, &data[epoch][i], atx) + require.False(t, c.IsMalicious(atx.Node)) + } + } + } + }) + t.Run("managed", func(t *testing.T) { + const ( + epochs = 10 + ids = 100 + ) + c := New() + atxids := [epochs][ids]types.ATXID{} + data := [epochs][ids]ATX{} + sig, err := signing.NewEdSigner() + require.NoError(t, err) + c.Register(sig) + + for repeat := 0; repeat < 10; repeat++ { + for epoch := 0; epoch < epochs; epoch++ { + atxids[epoch][0] = types.ATXID{byte(epoch), 0} + data[epoch][0].Node = sig.NodeID() + d := data[epoch][0] + c.Add( + types.EpochID(epoch)+1, + d.Node, + d.Coinbase, + atxids[epoch][0], + d.Weight, + d.BaseHeight, + d.Height, + d.Nonce, + false, + ) + for i := 1; i < len(atxids[epoch]); i++ { + atxids[epoch][i] = types.ATXID{byte(epoch), byte(i)} + data[epoch][i].Node = types.NodeID{byte(epoch), byte(i)} + d := data[epoch][i] + c.Add( + types.EpochID(epoch)+1, + d.Node, + d.Coinbase, + atxids[epoch][i], + 
d.Weight, + d.BaseHeight, + d.Height, + d.Nonce, + false, + ) + } + } + for epoch := 0; epoch < epochs; epoch++ { + for i := range atxids[epoch] { + atx := c.Get(types.EpochID(epoch)+1, atxids[epoch][i]) + require.Equal(t, &data[epoch][i], atx) + id, atx := c.GetByEpochAndNodeID(types.EpochID(epoch)+1, data[epoch][i].Node) + require.Equal(t, atxids[epoch][i], id) + require.Equal(t, &data[epoch][i], atx) require.False(t, c.IsMalicious(atx.Node)) } } @@ -210,34 +267,34 @@ func BenchmarkConcurrentReadWrite(b *testing.B) { }) } -func benchmarkkWeightForSet(b *testing.B, size, setSize int) { +func benchmarkCache(b *testing.B, size int, sigs ...*signing.EdSigner) (*Data, types.EpochID, []types.ATXID) { c := New() const epoch = 1 + nodeIDs := make([]types.NodeID, 0, size) atxs := make([]types.ATXID, 0, size) rng := rand.New(rand.NewSource(10101)) for i := range size { var ( - node types.NodeID - atx types.ATXID + nodeID types.NodeID + atxID types.ATXID ) - binary.PutUvarint(node[:], uint64(i+1)) - binary.PutUvarint(atx[:], uint64(i+1)) - atxs = append(atxs, atx) - c.Add(epoch, node, types.Address{}, atx, 500, 100, 0, 0, false) + binary.PutUvarint(nodeID[:], uint64(i+1)) + nodeIDs = append(nodeIDs, nodeID) + binary.PutUvarint(atxID[:], uint64(i+1)) + atxs = append(atxs, atxID) + } + for i, sig := range sigs { + c.Register(sig) + nodeIDs[i] = sig.NodeID() + } + for i := range size { + c.Add(epoch, nodeIDs[i], types.Address{}, atxs[i], 500, 100, 0, 0, false) } rng.Shuffle(size, func(i, j int) { atxs[i], atxs[j] = atxs[j], atxs[i] }) b.ResetTimer() - for range b.N { - weight, used := c.WeightForSet(epoch, atxs[:setSize]) - if weight == 0 { - b.Fatalf("weight can't be zero") - } - if len(used) != setSize { - b.Fatalf("used should be equal to set size") - } - } + return c, epoch, atxs } func BenchmarkWeightForSet(b *testing.B) { @@ -253,7 +310,72 @@ func BenchmarkWeightForSet(b *testing.B) { {1_000_000, 1_000_000}, } { b.Run(fmt.Sprintf("size=%d set_size=%d", bc.size, bc.setSize), func(b *testing.B) { - benchmarkkWeightForSet(b, bc.size, bc.setSize) + c, epoch, atxs := benchmarkCache(b, bc.size) + + b.ResetTimer() + for range b.N { + weight, used := c.WeightForSet(epoch, atxs[:bc.setSize]) + if weight == 0 { + b.Fatalf("weight can't be zero") + } + if len(used) != bc.setSize { + b.Fatalf("used should be equal to set size") + } + } + }) + } +} + +func BenchmarkGetByNodeID(b *testing.B) { + for _, bc := range []struct { + size int + }{ + {100_000}, + {200_000}, + {400_000}, + {1_000_000}, + } { + b.Run(fmt.Sprintf("size=%d", bc.size), func(b *testing.B) { + c, epoch, _ := benchmarkCache(b, bc.size) + var nodeID types.NodeID + binary.PutUvarint(nodeID[:], uint64(10+1)) + + var id types.ATXID + var atx *ATX + b.ResetTimer() + for range b.N { + id, atx = c.GetByEpochAndNodeID(epoch, nodeID) + } + require.NotEqual(b, types.EmptyATXID, id) + require.NotNil(b, atx) + }) + } +} + +func BenchmarkGetByNodeID_Managed(b *testing.B) { + for _, bc := range []struct { + size int + }{ + {100_000}, + {200_000}, + {400_000}, + {1_000_000}, + } { + b.Run(fmt.Sprintf("size=%d", bc.size), func(b *testing.B) { + sig, err := signing.NewEdSigner() + require.NoError(b, err) + + c, epoch, _ := benchmarkCache(b, bc.size, sig) + nodeID := sig.NodeID() + + var id types.ATXID + var atx *ATX + b.ResetTimer() + for range b.N { + id, atx = c.GetByEpochAndNodeID(epoch, nodeID) + } + require.NotEqual(b, types.EmptyATXID, id) + require.NotNil(b, atx) }) } } diff --git a/atxsdata/warmup.go b/atxsdata/warmup.go index e1f0736796..b45e612402 
100644 --- a/atxsdata/warmup.go +++ b/atxsdata/warmup.go @@ -8,6 +8,7 @@ import ( "go.uber.org/zap" "github.com/spacemeshos/go-spacemesh/common/types" + "github.com/spacemeshos/go-spacemesh/signing" "github.com/spacemeshos/go-spacemesh/sql" "github.com/spacemeshos/go-spacemesh/sql/atxs" "github.com/spacemeshos/go-spacemesh/sql/builder" @@ -15,8 +16,11 @@ import ( "github.com/spacemeshos/go-spacemesh/sql/layers" ) -func Warm(db sql.StateDatabase, keep types.EpochID, logger *zap.Logger) (*Data, error) { +func Warm(db sql.StateDatabase, keep types.EpochID, logger *zap.Logger, signers ...*signing.EdSigner) (*Data, error) { cache := New() + for _, sig := range signers { + cache.Register(sig) + } tx, err := db.Tx(context.Background()) if err != nil { return nil, err diff --git a/atxsdata/warmup_test.go b/atxsdata/warmup_test.go index bf14b257cf..947192b430 100644 --- a/atxsdata/warmup_test.go +++ b/atxsdata/warmup_test.go @@ -14,7 +14,6 @@ import ( "github.com/spacemeshos/go-spacemesh/sql" "github.com/spacemeshos/go-spacemesh/sql/atxs" "github.com/spacemeshos/go-spacemesh/sql/layers" - "github.com/spacemeshos/go-spacemesh/sql/mocks" "github.com/spacemeshos/go-spacemesh/sql/statesql" ) @@ -39,7 +38,7 @@ func gatx( func TestWarmup(t *testing.T) { types.SetLayersPerEpoch(3) t.Run("sanity", func(t *testing.T) { - db := statesql.InMemory() + db := statesql.InMemoryTest(t) applied := types.LayerID(10) nonce := types.VRFPostIndex(1) data := []types.ActivationTx{ @@ -62,28 +61,29 @@ func TestWarmup(t *testing.T) { } }) t.Run("no data", func(t *testing.T) { - c, err := Warm(statesql.InMemory(), 1, zaptest.NewLogger(t)) + c, err := Warm(statesql.InMemoryTest(t), 1, zaptest.NewLogger(t)) require.NoError(t, err) require.NotNil(t, c) }) t.Run("closed db", func(t *testing.T) { - db := statesql.InMemory() + db := statesql.InMemoryTest(t) require.NoError(t, db.Close()) c, err := Warm(db, 1, zaptest.NewLogger(t)) require.Error(t, err) require.Nil(t, c) }) t.Run("db failures", func(t *testing.T) { - db := statesql.InMemory() + db := statesql.InMemoryTest(t) nonce := types.VRFPostIndex(1) data := gatx(types.ATXID{1, 1}, 1, types.NodeID{1}, nonce) require.NoError(t, atxs.Add(db, &data, types.AtxBlob{})) - exec := mocks.NewMockExecutor(gomock.NewController(t)) + exec := sql.NewMockExecutor(gomock.NewController(t)) call := 0 fail := 0 tx, err := db.Tx(context.Background()) require.NoError(t, err) + defer tx.Release() exec.EXPECT(). Exec(gomock.Any(), gomock.Any(), gomock.Any()). DoAndReturn(func(q string, enc sql.Encoder, dec sql.Decoder) (int, error) { diff --git a/cmd/genesisgen/main.go b/cmd/genesisgen/main.go index d6944fe62d..b1f0f2e4f9 100644 --- a/cmd/genesisgen/main.go +++ b/cmd/genesisgen/main.go @@ -6,7 +6,7 @@ import ( "flag" "fmt" "os" - stdtime "time" + "time" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/config" @@ -14,9 +14,9 @@ import ( ) var ( - extra = flag.String("extra", "", "genesis extra data. valid size is between [1, 255]") - time = flag.String("time", "", "genesis time. format "+stdtime.RFC3339) - n = flag.Int("n", 10, "number of keys") + extra = flag.String("extra", "", "genesis extra data. valid size is between [1, 255]") + genesisTime = flag.String("time", "", "genesis time. 
format "+time.RFC3339) + n = flag.Int("n", 10, "number of keys") ) type output struct { @@ -28,8 +28,13 @@ type output struct { func main() { flag.Parse() + genesis, err := time.Parse(*genesisTime, time.RFC3339) + if err != nil { + fmt.Printf("invalid genesis time: %s\n", err) + os.Exit(1) + } - conf := config.GenesisConfig{GenesisTime: *time, ExtraData: *extra} + conf := config.GenesisConfig{GenesisTime: config.Genesis(genesis), ExtraData: *extra} if err := conf.Validate(); err != nil { fmt.Printf("invalid config values: %s\n", err) os.Exit(1) diff --git a/cmd/root.go b/cmd/root.go index 47645ef313..142028da6d 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -47,8 +47,12 @@ func AddFlags(flagSet *pflag.FlagSet, cfg *config.Config) (configPath *string) { "poet-servers", "JSON-encoded list of poet servers (address and pubkey)", ) - flagSet.StringVar(&cfg.Genesis.GenesisTime, "genesis-time", - cfg.Genesis.GenesisTime, "Time of the genesis layer in 2019-13-02T17:02:00+00:00 format") + flagSet.AddFlag(&pflag.Flag{ + Name: "genesis-time", + Value: &cfg.Genesis.GenesisTime, + DefValue: cfg.Genesis.GenesisTime.String(), + Usage: "Time of the genesis layer in 2019-13-02T17:02:00+00:00 format", + }) flagSet.StringVar(&cfg.Genesis.ExtraData, "genesis-extra-data", cfg.Genesis.ExtraData, "genesis extra-data will be committed to the genesis id") flagSet.DurationVar(&cfg.LayerDuration, "layer-duration", diff --git a/config/genesis.go b/config/genesis.go index 903762560c..4e73b88f0e 100644 --- a/config/genesis.go +++ b/config/genesis.go @@ -17,9 +17,58 @@ import ( "github.com/spacemeshos/go-spacemesh/signing" ) +type Genesis time.Time + +func (g Genesis) Time() time.Time { + return time.Time(g) +} + +func (g Genesis) MarshalJSON() ([]byte, error) { + return json.Marshal(g.Time()) +} + +func (g *Genesis) UnmarshalJSON(data []byte) error { + var t time.Time + if err := json.Unmarshal(data, &t); err != nil { + return err + } + *g = Genesis(t) + return nil +} + +// UnmarshalText implements encoding.TextUnmarshaler and is needed for viper. +func (g *Genesis) UnmarshalText(text []byte) error { + return g.Set(string(text)) +} + +// Equal is needed for go-cmp. +func (g Genesis) Equal(other Genesis) bool { + return time.Time(g).Equal(time.Time(other)) +} + +// String implements fmt.Stringer. +func (g Genesis) String() string { + return g.Time().String() +} + +// Set implements pflag.Value.Set. +func (g *Genesis) Set(value string) error { + t, err := time.Parse(time.RFC3339, value) + if err != nil { + return err + } + *g = Genesis(t) + return nil +} + +// Type implements pflag.Value.Type. +func (Genesis) Type() string { + return "Genesis" +} + // GenesisConfig contains immutable parameters for the protocol. 
type GenesisConfig struct { - GenesisTime string `mapstructure:"genesis-time"` + GenesisTime Genesis `mapstructure:"genesis-time"` ExtraData string `mapstructure:"genesis-extra-data"` Accounts map[string]uint64 `mapstructure:"accounts"` } @@ -32,11 +81,7 @@ func (g *GenesisConfig) GenesisID() types.Hash20 { func (g *GenesisConfig) GoldenATX() types.Hash32 { hh := hash.GetHasher() defer hash.PutHasher(hh) - parsed, err := time.Parse(time.RFC3339, g.GenesisTime) - if err != nil { - panic("code should have run Validate before this method") - } - hh.Write([]byte(strconv.FormatInt(parsed.Unix(), 10))) + hh.Write([]byte(strconv.FormatInt(g.GenesisTime.Time().Unix(), 10))) hh.Write([]byte(g.ExtraData)) return types.BytesToHash(hh.Sum(nil)) } @@ -49,11 +94,6 @@ func (g *GenesisConfig) Validate() error { if len(g.ExtraData) > 255 { return fmt.Errorf("extra-data is longer than 255 symbols: %s", g.ExtraData) } - _, err := time.Parse(time.RFC3339, g.GenesisTime) - if err != nil { - return fmt.Errorf("can't parse genesis time %s using time.RFC3339(%s) %w", - g.GenesisTime, time.RFC3339, err) - } return nil } @@ -111,7 +151,7 @@ func DefaultGenesisConfig() GenesisConfig { // NOTE(dshulyak) keys in default config are used in some tests return GenesisConfig{ ExtraData: "mainnet", - GenesisTime: time.Now().Format(time.RFC3339), + GenesisTime: Genesis(time.Now()), Accounts: generateGenesisAccounts(), } } @@ -121,7 +161,7 @@ func DefaultTestGenesisConfig() GenesisConfig { // NOTE(dshulyak) keys in default config are used in some tests return GenesisConfig{ ExtraData: "testnet", - GenesisTime: time.Now().Format(time.RFC3339), + GenesisTime: Genesis(time.Now()), Accounts: generateGenesisAccounts(), } } diff --git a/config/genesis_test.go b/config/genesis_test.go index af32e1c710..f8a22841ef 100644 --- a/config/genesis_test.go +++ b/config/genesis_test.go @@ -1,6 +1,7 @@ package config import ( + "encoding/json" "testing" "time" @@ -9,26 +10,65 @@ import ( "github.com/spacemeshos/go-spacemesh/hash" ) +func TestGenesisJsonEncode(t *testing.T) { + tString := "2023-03-15T18:00:00Z" + t1, err := time.Parse(time.RFC3339, tString) + require.NoError(t, err) + + t.Run("marshal", func(t *testing.T) { + g := Genesis(t1) + b, err := g.MarshalJSON() + require.NoError(t, err) + expected, err := json.Marshal(tString) + require.NoError(t, err) + require.Equal(t, expected, b) + }) + t.Run("unmarshal", func(t *testing.T) { + reference, err := json.Marshal(tString) + require.NoError(t, err) + var g Genesis + err = g.UnmarshalJSON([]byte(reference)) + require.NoError(t, err) + require.Equal(t, t1, g.Time()) + }) +} + +func TestGenesisTextUnmarshal(t *testing.T) { + tString := "2023-03-15T18:00:00Z" + t1, err := time.Parse(time.RFC3339, tString) + require.NoError(t, err) + + var g Genesis + err = g.UnmarshalText([]byte(tString)) + require.NoError(t, err) + require.Equal(t, t1, g.Time()) +} + func TestGenesisID(t *testing.T) { + t1, err := time.Parse(time.RFC3339, "2023-03-15T18:00:00Z") + require.NoError(t, err) + t2, err := time.Parse(time.RFC3339, "1989-03-15T18:00:00Z") + require.NoError(t, err) + t.Run("changes based on cfg vars", func(t *testing.T) { - cfg1 := GenesisConfig{ExtraData: "one", GenesisTime: "2023-03-15T18:00:00Z"} - cfg2 := GenesisConfig{ExtraData: "one", GenesisTime: "1989-03-15T18:00:00Z"} + cfg1 := GenesisConfig{ExtraData: "one", GenesisTime: Genesis(t1)} + cfg2 := GenesisConfig{ExtraData: "one", GenesisTime: Genesis(t2)} require.NoError(t, cfg1.Validate()) require.NotEqual(t, cfg1.GenesisID(), 
cfg2.GenesisID()) }) t.Run("require non-empty", func(t *testing.T) { - cfg := GenesisConfig{GenesisTime: "2023-03-15T18:00:00Z"} + cfg := GenesisConfig{GenesisTime: Genesis(t1)} require.ErrorContains(t, cfg.Validate(), "wait until genesis-extra-data is available") }) t.Run("consistent", func(t *testing.T) { - cfg := GenesisConfig{ExtraData: "one", GenesisTime: "2023-03-15T18:00:00Z"} + cfg := GenesisConfig{ExtraData: "one", GenesisTime: Genesis(t1)} require.NoError(t, cfg.Validate()) genID := cfg.GenesisID() require.Equal(t, genID, cfg.GenesisID()) }) t.Run("stable", func(t *testing.T) { - unixtime := time.Unix(10101, 0) - cfg := GenesisConfig{ExtraData: "one", GenesisTime: unixtime.Format(time.RFC3339)} + unixTime := time.Unix(10101, 0) + cfg := GenesisConfig{ExtraData: "one", GenesisTime: Genesis(unixTime)} require.NoError(t, cfg.Validate()) expected := hash.Sum([]byte("10101"), []byte("one")) diff --git a/config/mainnet.go b/config/mainnet.go index 0add25c46b..65312d2877 100644 --- a/config/mainnet.go +++ b/config/mainnet.go @@ -32,6 +32,11 @@ import ( ) func MainnetConfig() Config { + genesisTime, err := time.Parse(time.RFC3339, "2023-07-14T08:00:00Z") + if err != nil { + panic(err) + } + var postPowDifficulty activation.PowDifficulty difficulty := []byte("000dfb23b0979b4b000000000000000000000000000000000000000000000000") if err := postPowDifficulty.UnmarshalText(difficulty); err != nil { @@ -121,7 +126,7 @@ func MainnetConfig() Config { PprofHTTPServerListener: "localhost:6060", }, Genesis: GenesisConfig{ - GenesisTime: "2023-07-14T08:00:00Z", + GenesisTime: Genesis(genesisTime), ExtraData: "00000000000000000001a6bc150307b5c1998045752b3c87eccf3c013036f3cc", Accounts: MainnetAccounts(), }, diff --git a/config/presets/testnet.go b/config/presets/testnet.go index f828bc12cc..bb066c352e 100644 --- a/config/presets/testnet.go +++ b/config/presets/testnet.go @@ -37,6 +37,11 @@ func init() { } func testnet() config.Config { + genesisTime, err := time.Parse(time.RFC3339, "2023-09-13T18:00:00Z") + if err != nil { + panic(err) + } + p2pconfig := p2p.DefaultConfig() smeshing := config.DefaultSmeshingConfig() @@ -87,7 +92,7 @@ func testnet() config.Config { PprofHTTPServerListener: "localhost:6060", }, Genesis: config.GenesisConfig{ - GenesisTime: "2023-09-13T18:00:00Z", + GenesisTime: config.Genesis(genesisTime), ExtraData: "0000000000000000000000c76c58ebac180989673fd6d237b40e66ed5c976ec3", }, Tortoise: tortoise.Config{ diff --git a/miner/active_set_generator.go b/miner/active_set_generator.go index 8dafec3d15..a708868daa 100644 --- a/miner/active_set_generator.go +++ b/miner/active_set_generator.go @@ -13,7 +13,6 @@ import ( "go.uber.org/zap" "golang.org/x/exp/maps" - "github.com/spacemeshos/go-spacemesh/atxsdata" "github.com/spacemeshos/go-spacemesh/common/types" "github.com/spacemeshos/go-spacemesh/sql" "github.com/spacemeshos/go-spacemesh/sql/activesets" @@ -36,7 +35,7 @@ func newActiveSetGenerator( cfg config, log *zap.Logger, db, localdb sql.Executor, - atxsdata *atxsdata.Data, + atxsdata atxsData, clock layerClock, opts ...activesetGenOpt, ) *activeSetGenerator { @@ -61,7 +60,7 @@ type activeSetGenerator struct { log *zap.Logger db, localdb sql.Executor - atxsdata *atxsdata.Data + atxsdata atxsData clock layerClock wallclock clockwork.Clock @@ -254,7 +253,7 @@ func activeSetFromGrades( }, nil } -func getSetWeight(atxsdata *atxsdata.Data, target types.EpochID, set []types.ATXID) (uint64, error) { +func getSetWeight(atxsdata atxsData, target types.EpochID, set []types.ATXID) 
(uint64, error) { var setWeight uint64 for _, id := range set { atx := atxsdata.Get(target, id) diff --git a/miner/active_set_generator_test.go b/miner/active_set_generator_test.go index fe489c7ffc..a5dfba2b1a 100644 --- a/miner/active_set_generator_test.go +++ b/miner/active_set_generator_test.go @@ -168,7 +168,7 @@ func TestActiveSetGenerate(t *testing.T) { gatx(types.ATXID{3}, 2, types.NodeID{3}, 2, genAtxWithReceived(time.Unix(20, 0))), }, malfeasent: []identity{ - gidentity(types.NodeID{3}, time.Unix(29, 0)), + testIdentity(types.NodeID{3}, time.Unix(29, 0)), }, epochStart: unixPtr(30, 0), networkDelay: 2 * time.Second, @@ -184,7 +184,7 @@ func TestActiveSetGenerate(t *testing.T) { gatx(types.ATXID{3}, 2, types.NodeID{3}, 2, genAtxWithReceived(time.Unix(20, 0))), }, malfeasent: []identity{ - gidentity(types.NodeID{3}, time.Unix(31, 0)), + testIdentity(types.NodeID{3}, time.Unix(31, 0)), }, epochStart: unixPtr(30, 0), networkDelay: 2 * time.Second, diff --git a/miner/mocks/mocks.go b/miner/mocks/mocks.go index ccb902af5f..55415cd438 100644 --- a/miner/mocks/mocks.go +++ b/miner/mocks/mocks.go @@ -14,6 +14,7 @@ import ( reflect "reflect" time "time" + atxsdata "github.com/spacemeshos/go-spacemesh/atxsdata" types "github.com/spacemeshos/go-spacemesh/common/types" tortoise "github.com/spacemeshos/go-spacemesh/tortoise" gomock "go.uber.org/mock/gomock" @@ -361,65 +362,103 @@ func (c *MocklayerClockLayerToTimeCall) DoAndReturn(f func(types.LayerID) time.T return c } -// MockatxSearch is a mock of atxSearch interface. -type MockatxSearch struct { +// MockatxsData is a mock of atxsData interface. +type MockatxsData struct { ctrl *gomock.Controller - recorder *MockatxSearchMockRecorder + recorder *MockatxsDataMockRecorder isgomock struct{} } -// MockatxSearchMockRecorder is the mock recorder for MockatxSearch. -type MockatxSearchMockRecorder struct { - mock *MockatxSearch +// MockatxsDataMockRecorder is the mock recorder for MockatxsData. +type MockatxsDataMockRecorder struct { + mock *MockatxsData } -// NewMockatxSearch creates a new mock instance. -func NewMockatxSearch(ctrl *gomock.Controller) *MockatxSearch { - mock := &MockatxSearch{ctrl: ctrl} - mock.recorder = &MockatxSearchMockRecorder{mock} +// NewMockatxsData creates a new mock instance. +func NewMockatxsData(ctrl *gomock.Controller) *MockatxsData { + mock := &MockatxsData{ctrl: ctrl} + mock.recorder = &MockatxsDataMockRecorder{mock} return mock } // EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockatxSearch) EXPECT() *MockatxSearchMockRecorder { +func (m *MockatxsData) EXPECT() *MockatxsDataMockRecorder { return m.recorder } -// GetIDByEpochAndNodeID mocks base method. -func (m *MockatxSearch) GetIDByEpochAndNodeID(ctx context.Context, epoch types.EpochID, nodeID types.NodeID) (types.ATXID, error) { +// Get mocks base method. +func (m *MockatxsData) Get(arg0 types.EpochID, arg1 types.ATXID) *atxsdata.ATX { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "GetIDByEpochAndNodeID", ctx, epoch, nodeID) + ret := m.ctrl.Call(m, "Get", arg0, arg1) + ret0, _ := ret[0].(*atxsdata.ATX) + return ret0 +} + +// Get indicates an expected call of Get. 
+func (mr *MockatxsDataMockRecorder) Get(arg0, arg1 any) *MockatxsDataGetCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockatxsData)(nil).Get), arg0, arg1) + return &MockatxsDataGetCall{Call: call} +} + +// MockatxsDataGetCall wrap *gomock.Call +type MockatxsDataGetCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockatxsDataGetCall) Return(arg0 *atxsdata.ATX) *MockatxsDataGetCall { + c.Call = c.Call.Return(arg0) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockatxsDataGetCall) Do(f func(types.EpochID, types.ATXID) *atxsdata.ATX) *MockatxsDataGetCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockatxsDataGetCall) DoAndReturn(f func(types.EpochID, types.ATXID) *atxsdata.ATX) *MockatxsDataGetCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + +// GetByEpochAndNodeID mocks base method. +func (m *MockatxsData) GetByEpochAndNodeID(arg0 types.EpochID, arg1 types.NodeID) (types.ATXID, *atxsdata.ATX) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetByEpochAndNodeID", arg0, arg1) ret0, _ := ret[0].(types.ATXID) - ret1, _ := ret[1].(error) + ret1, _ := ret[1].(*atxsdata.ATX) return ret0, ret1 } -// GetIDByEpochAndNodeID indicates an expected call of GetIDByEpochAndNodeID. -func (mr *MockatxSearchMockRecorder) GetIDByEpochAndNodeID(ctx, epoch, nodeID any) *MockatxSearchGetIDByEpochAndNodeIDCall { +// GetByEpochAndNodeID indicates an expected call of GetByEpochAndNodeID. +func (mr *MockatxsDataMockRecorder) GetByEpochAndNodeID(arg0, arg1 any) *MockatxsDataGetByEpochAndNodeIDCall { mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetIDByEpochAndNodeID", reflect.TypeOf((*MockatxSearch)(nil).GetIDByEpochAndNodeID), ctx, epoch, nodeID) - return &MockatxSearchGetIDByEpochAndNodeIDCall{Call: call} + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetByEpochAndNodeID", reflect.TypeOf((*MockatxsData)(nil).GetByEpochAndNodeID), arg0, arg1) + return &MockatxsDataGetByEpochAndNodeIDCall{Call: call} } -// MockatxSearchGetIDByEpochAndNodeIDCall wrap *gomock.Call -type MockatxSearchGetIDByEpochAndNodeIDCall struct { +// MockatxsDataGetByEpochAndNodeIDCall wrap *gomock.Call +type MockatxsDataGetByEpochAndNodeIDCall struct { *gomock.Call } // Return rewrite *gomock.Call.Return -func (c *MockatxSearchGetIDByEpochAndNodeIDCall) Return(arg0 types.ATXID, arg1 error) *MockatxSearchGetIDByEpochAndNodeIDCall { +func (c *MockatxsDataGetByEpochAndNodeIDCall) Return(arg0 types.ATXID, arg1 *atxsdata.ATX) *MockatxsDataGetByEpochAndNodeIDCall { c.Call = c.Call.Return(arg0, arg1) return c } // Do rewrite *gomock.Call.Do -func (c *MockatxSearchGetIDByEpochAndNodeIDCall) Do(f func(context.Context, types.EpochID, types.NodeID) (types.ATXID, error)) *MockatxSearchGetIDByEpochAndNodeIDCall { +func (c *MockatxsDataGetByEpochAndNodeIDCall) Do(f func(types.EpochID, types.NodeID) (types.ATXID, *atxsdata.ATX)) *MockatxsDataGetByEpochAndNodeIDCall { c.Call = c.Call.Do(f) return c } // DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockatxSearchGetIDByEpochAndNodeIDCall) DoAndReturn(f func(context.Context, types.EpochID, types.NodeID) (types.ATXID, error)) *MockatxSearchGetIDByEpochAndNodeIDCall { +func (c *MockatxsDataGetByEpochAndNodeIDCall) DoAndReturn(f func(types.EpochID, types.NodeID) (types.ATXID, *atxsdata.ATX)) *MockatxsDataGetByEpochAndNodeIDCall { c.Call = c.Call.DoAndReturn(f) return c } diff --git 
a/miner/proposal_builder.go b/miner/proposal_builder.go index 8caf7355e4..d0c7873f8f 100644 --- a/miner/proposal_builder.go +++ b/miner/proposal_builder.go @@ -27,7 +27,6 @@ import ( "github.com/spacemeshos/go-spacemesh/signing" "github.com/spacemeshos/go-spacemesh/sql" "github.com/spacemeshos/go-spacemesh/sql/activesets" - "github.com/spacemeshos/go-spacemesh/sql/atxs" "github.com/spacemeshos/go-spacemesh/sql/ballots" "github.com/spacemeshos/go-spacemesh/sql/beacons" "github.com/spacemeshos/go-spacemesh/sql/certificates" @@ -56,20 +55,9 @@ type layerClock interface { LayerToTime(types.LayerID) time.Time } -type atxSearch interface { - GetIDByEpochAndNodeID(ctx context.Context, epoch types.EpochID, nodeID types.NodeID) (types.ATXID, error) -} - -type defaultAtxSearch struct { - db sql.Executor -} - -func (p defaultAtxSearch) GetIDByEpochAndNodeID( - _ context.Context, - epoch types.EpochID, - nodeID types.NodeID, -) (types.ATXID, error) { - return atxs.GetIDByEpochAndNodeID(p.db, epoch, nodeID) +type atxsData interface { + Get(types.EpochID, types.ATXID) *atxsdata.ATX + GetByEpochAndNodeID(types.EpochID, types.NodeID) (types.ATXID, *atxsdata.ATX) } // ProposalBuilder builds Proposals for a miner. @@ -79,20 +67,21 @@ type ProposalBuilder struct { db sql.Executor localdb sql.Executor - atxsdata *atxsdata.Data + atxsdata atxsData clock layerClock publisher pubsub.Publisher conState conservativeState tortoise votesEncoder syncer system.SyncStateProvider activeGen *activeSetGenerator - atxs atxSearch signers struct { mu sync.Mutex signers map[types.NodeID]*signerSession } shared sharedSession + + limiter *semaphore.Weighted } type signerSession struct { @@ -278,18 +267,12 @@ func WithActivesetPreparation(prep ActiveSetPreparation) Opt { } } -func withAtxSearch(p atxSearch) Opt { - return func(pb *ProposalBuilder) { - pb.atxs = p - } -} - // New creates a struct of block builder type. 
func New( clock layerClock, db sql.Executor, localdb sql.Executor, - atxsdata *atxsdata.Data, + atxsdata atxsData, publisher pubsub.Publisher, trtl votesEncoder, syncer system.SyncStateProvider, @@ -310,7 +293,6 @@ func New( tortoise: trtl, syncer: syncer, conState: conState, - atxs: defaultAtxSearch{db}, signers: struct { mu sync.Mutex signers map[types.NodeID]*signerSession @@ -321,6 +303,7 @@ func New( for _, opt := range opts { opt(pb) } + pb.limiter = semaphore.NewWeighted(int64(pb.cfg.workersLimit)) pb.activeGen = newActiveSetGenerator(pb.cfg, pb.logger, pb.db, pb.localdb, pb.atxsdata, pb.clock) return pb } @@ -496,23 +479,16 @@ func (pb *ProposalBuilder) initSharedData(current types.LayerID) error { return nil } -func (pb *ProposalBuilder) initSignerData(ctx context.Context, ss *signerSession, lid types.LayerID) error { +func (pb *ProposalBuilder) initSignerData(ss *signerSession, lid types.LayerID) error { if ss.session.epoch != lid.GetEpoch() { ss.session = session{epoch: lid.GetEpoch()} } if ss.session.atx == types.EmptyATXID { - atxid, err := pb.atxs.GetIDByEpochAndNodeID(ctx, ss.session.epoch-1, ss.signer.NodeID()) - switch { - case errors.Is(err, sql.ErrNotFound): + id, atx := pb.atxsdata.GetByEpochAndNodeID(ss.session.epoch, ss.signer.NodeID()) + if id == types.EmptyATXID { return errAtxNotAvailable - case err != nil: - return fmt.Errorf("get atx in epoch %v: %w", ss.session.epoch-1, err) - } - atx := pb.atxsdata.Get(ss.session.epoch, atxid) - if atx == nil { - return fmt.Errorf("missing atx in atxsdata %v", atxid) } - ss.session.atx = atxid + ss.session.atx = id ss.session.atxWeight = atx.Weight ss.session.nonce = atx.Nonce } @@ -622,21 +598,20 @@ func (pb *ProposalBuilder) build(ctx context.Context, lid types.LayerID) error { eligible := make(chan *signerSession, len(signers)) // Stage 1 - // Use a semaphore instead of eg.SetLimit so that the stage 2 starts immediately after + // We use a semaphore instead of eg.SetLimit so that the stage 2 starts immediately after // scheduling all signers in the stage 1. Otherwise, stage 2 would wait for all stage 1 // goroutines to at least start, which is not what we want. We want to start stage 2 as soon as possible. 
- limiter := semaphore.NewWeighted(int64(pb.cfg.workersLimit)) var eg errgroup.Group for _, ss := range signers { eg.Go(func() error { - if err := limiter.Acquire(ctx, 1); err != nil { + if err := pb.limiter.Acquire(ctx, 1); err != nil { return err } - defer limiter.Release(1) + defer pb.limiter.Release(1) start := time.Now() ss.latency.start = buildStartTime - if err := pb.initSignerData(ctx, ss, lid); err != nil { + if err := pb.initSignerData(ss, lid); err != nil { if errors.Is(err, errAtxNotAvailable) { ss.log.Debug("smesher doesn't have atx that targets this epoch", log.ZContext(ctx), @@ -671,76 +646,75 @@ func (pb *ProposalBuilder) build(ctx context.Context, lid types.LayerID) error { }) } - var stage1Err error - go func() { - stage1Err = eg.Wait() - close(eligible) - }() - // Stage 2 - eg2 := errgroup.Group{} - for ss := range eligible { - start := time.Now() - opinion, err := encodeVotesOnce() - if err != nil { - return err - } - ss.latency.tortoise = time.Since(start) - - start = time.Now() - meshHash := calcMeshHashOnce() - ss.latency.hash = time.Since(start) - - eg2.Go(func() error { - // needs to be saved before publishing, as we will query it in handler - if ss.session.ref == types.EmptyBallotID { - start := time.Now() - if err := persistActiveSetOnce(); err != nil { - return err - } - ss.latency.activeSet = time.Since(start) + var eg2 errgroup.Group + eg2.Go(func() error { + for ss := range eligible { + start := time.Now() + opinion, err := encodeVotesOnce() + if err != nil { + return err } - proofs := ss.session.eligibilities.proofs[lid] + ss.latency.tortoise = time.Since(start) start = time.Now() - txs := pb.conState.SelectProposalTXs(lid, len(proofs)) - ss.latency.txs = time.Since(start) - - start = time.Now() - proposal := createProposal( - &ss.session, - pb.shared.beacon, - pb.shared.active.set, - ss.signer, - lid, - txs, - opinion, - proofs, - meshHash, - ) - if err := pb.publisher.Publish(ctx, pubsub.ProposalProtocol, codec.MustEncode(proposal)); err != nil { - ss.log.Error("failed to publish proposal", - log.ZContext(ctx), - zap.Uint32("lid", proposal.Layer.Uint32()), - zap.Stringer("id", proposal.ID()), - zap.Error(err), - ) - } else { - ss.latency.publish = time.Since(start) - ss.latency.end = time.Now() - ss.log.Info("proposal created", - log.ZContext(ctx), - zap.Inline(proposal), - zap.Object("latency", &ss.latency), + meshHash := calcMeshHashOnce() + ss.latency.hash = time.Since(start) + + eg2.Go(func() error { + // needs to be saved before publishing, as we will query it in handler + if ss.session.ref == types.EmptyBallotID { + start := time.Now() + if err := persistActiveSetOnce(); err != nil { + return err + } + ss.latency.activeSet = time.Since(start) + } + proofs := ss.session.eligibilities.proofs[lid] + + start = time.Now() + txs := pb.conState.SelectProposalTXs(lid, len(proofs)) + ss.latency.txs = time.Since(start) + + start = time.Now() + proposal := createProposal( + &ss.session, + pb.shared.beacon, + pb.shared.active.set, + ss.signer, + lid, + txs, + opinion, + proofs, + meshHash, ) - proposalBuild.Observe(ss.latency.total().Seconds()) - events.EmitProposal(ss.signer.NodeID(), lid, proposal.ID()) - events.ReportProposal(events.ProposalCreated, proposal) - } - return nil - }) - } + if err := pb.publisher.Publish(ctx, pubsub.ProposalProtocol, codec.MustEncode(proposal)); err != nil { + ss.log.Error("failed to publish proposal", + log.ZContext(ctx), + zap.Uint32("lid", proposal.Layer.Uint32()), + zap.Stringer("id", proposal.ID()), + zap.Error(err), + ) + } 
else { + ss.latency.publish = time.Since(start) + ss.latency.end = time.Now() + ss.log.Info("proposal created", + log.ZContext(ctx), + zap.Inline(proposal), + zap.Object("latency", &ss.latency), + ) + proposalBuild.Observe(ss.latency.total().Seconds()) + events.EmitProposal(ss.signer.NodeID(), lid, proposal.ID()) + events.ReportProposal(events.ProposalCreated, proposal) + } + return nil + }) + } + return nil + }) + stage1Err := eg.Wait() + close(eligible) return errors.Join(stage1Err, eg2.Wait()) } diff --git a/miner/proposal_builder_test.go b/miner/proposal_builder_test.go index 02657cab5a..7a3e6110e7 100644 --- a/miner/proposal_builder_test.go +++ b/miner/proposal_builder_test.go @@ -3,9 +3,12 @@ package miner import ( "bytes" "context" + "encoding/hex" "errors" + "fmt" "math/rand" "os" + "runtime" "sort" "testing" "time" @@ -194,7 +197,7 @@ func expectProposal( return p } -func gidentity(id types.NodeID, received time.Time) identity { +func testIdentity(id types.NodeID, received time.Time) identity { return identity{ id: id, // kind of proof is irrelevant for this test, we want to avoid validation failing @@ -265,33 +268,39 @@ func TestBuild_BlockedSignerInitDoesntBlockEligible(t *testing.T) { syncer = smocks.NewMockSyncStateProvider(ctrl) db = statesql.InMemoryTest(t) localdb = localsql.InMemoryTest(t) - atxsdata = atxsdata.New() // singer[1] is blocked - atxSearch = mocks.NewMockatxSearch(ctrl) + dataMock = mocks.NewMockatxsData(ctrl) ) opts := []Opt{ WithLayerPerEpoch(types.GetLayersPerEpoch()), WithLayerSize(2), WithLogger(zaptest.NewLogger(t)), WithSigners(signers...), - withAtxSearch(atxSearch), } - builder := New(clock, db, localdb, atxsdata, publisher, trtl, syncer, conState, opts...) + builder := New(clock, db, localdb, dataMock, publisher, trtl, syncer, conState, opts...) lid := types.LayerID(15) // only signer[0] has ATX atx := gatx(types.ATXID{1}, lid.GetEpoch()-1, signers[0].NodeID(), 1, genAtxWithNonce(777)) require.NoError(t, atxs.Add(db, atx, types.AtxBlob{})) - atxsdata.AddFromAtx(atx, false) - atxSearch.EXPECT().GetIDByEpochAndNodeID(gomock.Any(), lid.GetEpoch()-1, signers[0].NodeID()).DoAndReturn( - func(_ context.Context, epoch types.EpochID, nodeID types.NodeID) (types.ATXID, error) { - return atxs.GetIDByEpochAndNodeID(db, epoch, nodeID) + atxdata := &atxsdata.ATX{ + Node: atx.SmesherID, + Coinbase: atx.Coinbase, + Weight: atx.Weight, + BaseHeight: atx.BaseTickHeight, + Height: atx.TickHeight(), + Nonce: atx.VRFNonce, + } + dataMock.EXPECT().GetByEpochAndNodeID(lid.GetEpoch(), signers[0].NodeID()).DoAndReturn( + func(epoch types.EpochID, nodeID types.NodeID) (types.ATXID, *atxsdata.ATX) { + return atx.ID(), atxdata }, ) - atxSearch.EXPECT().GetIDByEpochAndNodeID(gomock.Any(), lid.GetEpoch()-1, signers[1].NodeID()).DoAndReturn( - func(ctx context.Context, epoch types.EpochID, nodeID types.NodeID) (types.ATXID, error) { - <-ctx.Done() - return types.EmptyATXID, ctx.Err() + stop := make(chan struct{}) + dataMock.EXPECT().GetByEpochAndNodeID(lid.GetEpoch(), signers[1].NodeID()).DoAndReturn( + func(epoch types.EpochID, nodeID types.NodeID) (types.ATXID, *atxsdata.ATX) { + <-stop + return types.EmptyATXID, nil }, ) @@ -311,11 +320,10 @@ func TestBuild_BlockedSignerInitDoesntBlockEligible(t *testing.T) { trtl.EXPECT().TallyVotes(lid) trtl.EXPECT().EncodeVotes(gomock.Any(), gomock.Any()).Return(&opinion, nil) trtl.EXPECT().LatestComplete().Return(lid - 1) - ctx, cancel := context.WithCancel(context.Background()) publisher.EXPECT(). 
- Publish(ctx, pubsub.ProposalProtocol, gomock.Any()). + Publish(gomock.Any(), pubsub.ProposalProtocol, gomock.Any()). DoAndReturn(func(_ context.Context, _ string, msg []byte) error { - defer cancel() // unblock the build hang on atx lookup for signer[1] + defer close(stop) // unblock the build hang on atx lookup for signer[1] var proposal types.Proposal codec.MustDecode(msg, &proposal) proposal.MustInitialize() @@ -324,15 +332,15 @@ func TestBuild_BlockedSignerInitDoesntBlockEligible(t *testing.T) { return nil }) - require.ErrorIs(t, builder.build(ctx, lid), context.Canceled) + require.NoError(t, builder.build(context.Background(), lid)) // Try again in the next layer // signer[1] is still NOT initialized (missing ATX) but it won't block this time lid += 1 txs = []types.TransactionID{{17}, {22}} - atxSearch.EXPECT().GetIDByEpochAndNodeID(gomock.Any(), lid.GetEpoch()-1, signers[1].NodeID()).DoAndReturn( - func(_ context.Context, epoch types.EpochID, nodeID types.NodeID) (types.ATXID, error) { - return atxs.GetIDByEpochAndNodeID(db, epoch, nodeID) + dataMock.EXPECT().GetByEpochAndNodeID(lid.GetEpoch(), signers[1].NodeID()).DoAndReturn( + func(epoch types.EpochID, nodeID types.NodeID) (types.ATXID, *atxsdata.ATX) { + return types.EmptyATXID, nil }, ) expectedProposal = expectProposal( @@ -1204,3 +1212,79 @@ func TestGradeAtx(t *testing.T) { }) } } + +func BenchmarkCache(b *testing.B) { + runtime.GC() + cache := make(map[types.ATXID]*atxsdata.ATX, 10_000_000) + + for range 10_000_000 { + nodeID := types.RandomNodeID() + atxID := types.RandomATXID() + cache[atxID] = &atxsdata.ATX{ + Node: nodeID, + } + } + + nodeID := types.RandomNodeID() // random node id, that isn't in the cache (slowest possible case) + var found types.ATXID + b.ResetTimer() + for i := 0; i < b.N; i++ { + for id, data := range cache { + if data.Node == nodeID { + found = id + } + } + } + + var m runtime.MemStats + runtime.ReadMemStats(&m) + fmt.Println("heap:", m.HeapInuse) + fmt.Println("stack:", m.StackInuse) + + require.Equal(b, types.EmptyATXID, found) +} + +func BenchmarkDoubleCache(b *testing.B) { + runtime.GC() + cache := make(map[types.ATXID]*atxsdata.ATX, 10_000_000) + cache2 := make(map[types.NodeID]types.ATXID, 10_000_000) + + for range 10_000_000 { + nodeID := types.RandomNodeID() + atxID := types.RandomATXID() + cache[atxID] = &atxsdata.ATX{ + Node: nodeID, + } + cache2[nodeID] = atxID + } + + nodeID := types.RandomNodeID() // random node id, that isn't in the cache (slowest possible case) + var found types.ATXID + b.ResetTimer() + for i := 0; i < b.N; i++ { + found = cache2[nodeID] + } + + var m runtime.MemStats + runtime.ReadMemStats(&m) + fmt.Println("heap:", m.HeapInuse) + fmt.Println("stack:", m.StackInuse) + + require.Equal(b, types.EmptyATXID, found) +} + +func BenchmarkDB(b *testing.B) { + db, err := statesql.Open("file:state.sql") + require.NoError(b, err) + defer db.Close() + + bytes, err := hex.DecodeString("00003ce28800fadd692c522f7b1db219f675b49108aec7f818e2c4fd935573f6") + require.NoError(b, err) + nodeID := types.BytesToNodeID(bytes) + var found types.ATXID + b.ResetTimer() + for i := 0; i < b.N; i++ { + found, _ = atxs.GetByEpochAndNodeID(db, 30, nodeID) + } + require.NotEqual(b, types.EmptyATXID, found) +} diff --git a/node/node.go b/node/node.go index ca15e527b3..73eff7afc6 100644 --- a/node/node.go +++ b/node/node.go @@ -1314,9 +1314,7 @@ func (app *App) launchStandalone(ctx context.Context) error { cfg.RawRESTListener = parsed.Host cfg.RawRPCListener = parsed.Hostname() + ":0" - if err := 
cfg.Genesis.UnmarshalFlag(app.Config.Genesis.GenesisTime); err != nil { - return err - } + cfg.Genesis = server.Genesis(app.Config.Genesis.GenesisTime) cfg.Round.EpochDuration = app.Config.LayerDuration * time.Duration(app.Config.LayersPerEpoch) cfg.Round.CycleGap = app.Config.POET.CycleGap cfg.Round.PhaseShift = app.Config.POET.PhaseShift @@ -2020,6 +2018,7 @@ func (app *App) setupDBs(ctx context.Context, lg log.Log) error { app.db, app.Config.Tortoise.WindowSizeEpochs(applied), warmupLog, + app.signers..., ) if err != nil { return err @@ -2153,15 +2152,10 @@ func (app *App) startSynchronous(ctx context.Context) (err error) { } /* Initialize all protocol services */ - - gTime, err := time.Parse(time.RFC3339, app.Config.Genesis.GenesisTime) - if err != nil { - return fmt.Errorf("cannot parse genesis time %s: %w", app.Config.Genesis.GenesisTime, err) - } app.clock, err = timesync.NewClock( timesync.WithLayerDuration(app.Config.LayerDuration), timesync.WithTickInterval(1*time.Second), - timesync.WithGenesisTime(gTime), + timesync.WithGenesisTime(app.Config.Genesis.GenesisTime.Time()), timesync.WithLogger(app.addLogger(ClockLogger, logger).Zap()), ) if err != nil { diff --git a/node/node_test.go b/node/node_test.go index d8482ef133..1b27115794 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -217,13 +217,10 @@ func TestSpacemeshApp_GrpcService(t *testing.T) { err := app.NewIdentity() require.NoError(t, err) - gTime, err := time.Parse(time.RFC3339, app.Config.Genesis.GenesisTime) - require.NoError(t, err) - app.clock, err = timesync.NewClock( timesync.WithLayerDuration(cfg.LayerDuration), timesync.WithTickInterval(1*time.Second), - timesync.WithGenesisTime(gTime), + timesync.WithGenesisTime(app.Config.Genesis.GenesisTime.Time()), timesync.WithLogger(zaptest.NewLogger(t))) require.NoError(t, err) @@ -265,13 +262,10 @@ func TestSpacemeshApp_JsonServiceNotRunning(t *testing.T) { err := app.NewIdentity() require.NoError(t, err) - gTime, err := time.Parse(time.RFC3339, app.Config.Genesis.GenesisTime) - require.NoError(t, err) - app.clock, err = timesync.NewClock( timesync.WithLayerDuration(cfg.LayerDuration), timesync.WithTickInterval(1*time.Second), - timesync.WithGenesisTime(gTime), + timesync.WithGenesisTime(app.Config.Genesis.GenesisTime.Time()), timesync.WithLogger(zaptest.NewLogger(t))) require.NoError(t, err) @@ -300,13 +294,11 @@ func TestSpacemeshApp_JsonService(t *testing.T) { cfg.API.PrivateServices = nil app := New(WithConfig(cfg), WithLog(logtest.New(t))) - gTime, err := time.Parse(time.RFC3339, app.Config.Genesis.GenesisTime) - require.NoError(t, err) - + var err error app.clock, err = timesync.NewClock( timesync.WithLayerDuration(cfg.LayerDuration), timesync.WithTickInterval(1*time.Second), - timesync.WithGenesisTime(gTime), + timesync.WithGenesisTime(app.Config.Genesis.GenesisTime.Time()), timesync.WithLogger(zaptest.NewLogger(t))) require.NoError(t, err) @@ -505,7 +497,7 @@ func TestSpacemeshApp_TransactionService(t *testing.T) { app.Config.HARE3.RoundDuration = 100 * time.Millisecond app.Config.Genesis = config.GenesisConfig{ - GenesisTime: time.Now().Add(20 * time.Second).Format(time.RFC3339), + GenesisTime: config.Genesis(time.Now().Add(20 * time.Second)), Accounts: map[string]uint64{ address.String(): 100_000_000, }, @@ -883,10 +875,7 @@ func TestGenesisConfig(t *testing.T) { t.Cleanup(func() { app.Cleanup(context.Background()) }) var existing config.GenesisConfig - require.NoError( - t, - existing.LoadFromFile(filepath.Join(app.Config.DataDir(), genesisFileName)), - ) 
+ require.NoError(t, existing.LoadFromFile(filepath.Join(app.Config.DataDir(), genesisFileName))) require.Empty(t, existing.Diff(&app.Config.Genesis)) }) @@ -914,14 +903,6 @@ func TestGenesisConfig(t *testing.T) { require.ErrorContains(t, err, "genesis config") }) - t.Run("not valid time", func(t *testing.T) { - cfg := getTestDefaultConfig(t) - cfg.Genesis.GenesisTime = time.Now().Format(time.RFC1123) - app := New(WithConfig(cfg)) - - require.ErrorContains(t, app.Initialize(), "time.RFC3339") - }) - t.Run("long extra data", func(t *testing.T) { cfg := getTestDefaultConfig(t) cfg.Genesis.ExtraData = string(make([]byte, 256)) @@ -975,7 +956,7 @@ func TestAdminEvents(t *testing.T) { cfg.SMESHING.Start = true cfg.POSTService.PostServiceCmd = activation.DefaultTestPostServiceConfig().PostServiceCmd - cfg.Genesis.GenesisTime = time.Now().Add(5 * time.Second).Format(time.RFC3339) + cfg.Genesis.GenesisTime = config.Genesis(time.Now().Add(5 * time.Second)) types.SetLayersPerEpoch(cfg.LayersPerEpoch) logger := logtest.New(t, zapcore.DebugLevel) @@ -1052,7 +1033,7 @@ func TestAdminEvents_MultiSmesher(t *testing.T) { cfg.API.PostListener = "0.0.0.0:10094" cfg.POSTService.PostServiceCmd = activation.DefaultTestPostServiceConfig().PostServiceCmd - cfg.Genesis.GenesisTime = time.Now().Add(5 * time.Second).Format(time.RFC3339) + cfg.Genesis.GenesisTime = config.Genesis(time.Now().Add(5 * time.Second)) types.SetLayersPerEpoch(cfg.LayersPerEpoch) logger := zaptest.NewLogger(t) diff --git a/sql/atxs/atxs.go b/sql/atxs/atxs.go index a60707b17e..7b4c549614 100644 --- a/sql/atxs/atxs.go +++ b/sql/atxs/atxs.go @@ -797,7 +797,7 @@ func CountAtxsByOps(db sql.Executor, operations builder.Operations) (count uint3 return } -// IterateForGrading selects every atx from publish epoch and joins identities to load malfeasence proofs if they exist. +// IterateForGrading selects every atx from publish epoch and joins identities to load malfeasance proofs if they exist. func IterateForGrading( db sql.Executor, epoch types.EpochID, diff --git a/sql/database.go b/sql/database.go index 78de23395d..e527ef27a4 100644 --- a/sql/database.go +++ b/sql/database.go @@ -44,13 +44,6 @@ const ( beginImmediate = "BEGIN IMMEDIATE;" ) -//go:generate mockgen -typed -package=mocks -destination=./mocks/mocks.go github.com/spacemeshos/go-spacemesh/sql Executor - -// Executor is an interface for executing raw statement. -type Executor interface { - Exec(string, Encoder, Decoder) (int, error) -} - // Statement is an sqlite statement. type Statement = sqlite.Stmt diff --git a/sql/interface.go b/sql/interface.go index 23859f5202..c9b0ee1441 100644 --- a/sql/interface.go +++ b/sql/interface.go @@ -4,6 +4,11 @@ import "go.uber.org/zap" //go:generate mockgen -typed -package=sql -destination=./mocks.go -source=./interface.go +// Executor is an interface for executing raw statement. +type Executor interface { + Exec(string, Encoder, Decoder) (int, error) +} + // Migration is interface for migrations provider. type Migration interface { Apply(db Executor, logger *zap.Logger) error diff --git a/sql/mocks.go b/sql/mocks.go index c6e9205bec..2be336b646 100644 --- a/sql/mocks.go +++ b/sql/mocks.go @@ -16,6 +16,69 @@ import ( zap "go.uber.org/zap" ) +// MockExecutor is a mock of Executor interface. +type MockExecutor struct { + ctrl *gomock.Controller + recorder *MockExecutorMockRecorder + isgomock struct{} +} + +// MockExecutorMockRecorder is the mock recorder for MockExecutor. 
+type MockExecutorMockRecorder struct { + mock *MockExecutor +} + +// NewMockExecutor creates a new mock instance. +func NewMockExecutor(ctrl *gomock.Controller) *MockExecutor { + mock := &MockExecutor{ctrl: ctrl} + mock.recorder = &MockExecutorMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockExecutor) EXPECT() *MockExecutorMockRecorder { + return m.recorder +} + +// Exec mocks base method. +func (m *MockExecutor) Exec(arg0 string, arg1 Encoder, arg2 Decoder) (int, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "Exec", arg0, arg1, arg2) + ret0, _ := ret[0].(int) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// Exec indicates an expected call of Exec. +func (mr *MockExecutorMockRecorder) Exec(arg0, arg1, arg2 any) *MockExecutorExecCall { + mr.mock.ctrl.T.Helper() + call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockExecutor)(nil).Exec), arg0, arg1, arg2) + return &MockExecutorExecCall{Call: call} +} + +// MockExecutorExecCall wrap *gomock.Call +type MockExecutorExecCall struct { + *gomock.Call +} + +// Return rewrite *gomock.Call.Return +func (c *MockExecutorExecCall) Return(arg0 int, arg1 error) *MockExecutorExecCall { + c.Call = c.Call.Return(arg0, arg1) + return c +} + +// Do rewrite *gomock.Call.Do +func (c *MockExecutorExecCall) Do(f func(string, Encoder, Decoder) (int, error)) *MockExecutorExecCall { + c.Call = c.Call.Do(f) + return c +} + +// DoAndReturn rewrite *gomock.Call.DoAndReturn +func (c *MockExecutorExecCall) DoAndReturn(f func(string, Encoder, Decoder) (int, error)) *MockExecutorExecCall { + c.Call = c.Call.DoAndReturn(f) + return c +} + // MockMigration is a mock of Migration interface. type MockMigration struct { ctrl *gomock.Controller diff --git a/sql/mocks/mocks.go b/sql/mocks/mocks.go deleted file mode 100644 index 8a0a656cb1..0000000000 --- a/sql/mocks/mocks.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/spacemeshos/go-spacemesh/sql (interfaces: Executor) -// -// Generated by this command: -// -// mockgen -typed -package=mocks -destination=./mocks/mocks.go github.com/spacemeshos/go-spacemesh/sql Executor -// - -// Package mocks is a generated GoMock package. -package mocks - -import ( - reflect "reflect" - - sql "github.com/spacemeshos/go-spacemesh/sql" - gomock "go.uber.org/mock/gomock" -) - -// MockExecutor is a mock of Executor interface. -type MockExecutor struct { - ctrl *gomock.Controller - recorder *MockExecutorMockRecorder - isgomock struct{} -} - -// MockExecutorMockRecorder is the mock recorder for MockExecutor. -type MockExecutorMockRecorder struct { - mock *MockExecutor -} - -// NewMockExecutor creates a new mock instance. -func NewMockExecutor(ctrl *gomock.Controller) *MockExecutor { - mock := &MockExecutor{ctrl: ctrl} - mock.recorder = &MockExecutorMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockExecutor) EXPECT() *MockExecutorMockRecorder { - return m.recorder -} - -// Exec mocks base method. -func (m *MockExecutor) Exec(arg0 string, arg1 sql.Encoder, arg2 sql.Decoder) (int, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Exec", arg0, arg1, arg2) - ret0, _ := ret[0].(int) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Exec indicates an expected call of Exec. 
-func (mr *MockExecutorMockRecorder) Exec(arg0, arg1, arg2 any) *MockExecutorExecCall { - mr.mock.ctrl.T.Helper() - call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Exec", reflect.TypeOf((*MockExecutor)(nil).Exec), arg0, arg1, arg2) - return &MockExecutorExecCall{Call: call} -} - -// MockExecutorExecCall wrap *gomock.Call -type MockExecutorExecCall struct { - *gomock.Call -} - -// Return rewrite *gomock.Call.Return -func (c *MockExecutorExecCall) Return(arg0 int, arg1 error) *MockExecutorExecCall { - c.Call = c.Call.Return(arg0, arg1) - return c -} - -// Do rewrite *gomock.Call.Do -func (c *MockExecutorExecCall) Do(f func(string, sql.Encoder, sql.Decoder) (int, error)) *MockExecutorExecCall { - c.Call = c.Call.Do(f) - return c -} - -// DoAndReturn rewrite *gomock.Call.DoAndReturn -func (c *MockExecutorExecCall) DoAndReturn(f func(string, sql.Encoder, sql.Decoder) (int, error)) *MockExecutorExecCall { - c.Call = c.Call.DoAndReturn(f) - return c -} diff --git a/systest/cluster/cluster.go b/systest/cluster/cluster.go index 36e495d77f..def5999891 100644 --- a/systest/cluster/cluster.go +++ b/systest/cluster/cluster.go @@ -964,7 +964,7 @@ func (c *Cluster) NodeConfig(ctx *testcontext.Context) (*config.Config, error) { return nil, err } cfg.Genesis = config.GenesisConfig{ - GenesisTime: c.Genesis().Format(time.RFC3339), + GenesisTime: config.Genesis(c.Genesis()), ExtraData: c.GenesisExtraData(), } cfg.LayersPerEpoch = uint32(testcontext.LayersPerEpoch.Get(ctx.Parameters)) diff --git a/tortoise/replay/replay_test.go b/tortoise/replay/replay_test.go index 0393919fd4..dde5a69362 100644 --- a/tortoise/replay/replay_test.go +++ b/tortoise/replay/replay_test.go @@ -40,12 +40,10 @@ func TestReplayMainnet(t *testing.T) { tortoise.WithConfig(cfg.Tortoise), } - genesis, err := time.Parse(time.RFC3339, cfg.Genesis.GenesisTime) - require.NoError(t, err) clock, err := timesync.NewClock( timesync.WithLayerDuration(cfg.LayerDuration), timesync.WithTickInterval(1*time.Second), - timesync.WithGenesisTime(genesis), + timesync.WithGenesisTime(cfg.Genesis.GenesisTime.Time()), timesync.WithLogger(zap.NewNop()), ) require.NoError(t, err)