diff --git a/.gitignore b/.gitignore index 146e44fc94f5..695dcd8cfb27 100644 --- a/.gitignore +++ b/.gitignore @@ -45,6 +45,8 @@ vagrant # IDE .idea *.iml +*.ipr +*.iws .dir-locals.el .vscode diff --git a/CHANGELOG.md b/CHANGELOG.md index dc1dffdacd0d..abe40f074a08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * [\#10962](https://github.com/cosmos/cosmos-sdk/pull/10962) ADR-040: Add state migration from iavl (v1Store) to smt (v2Store) * (types) [\#10948](https://github.com/cosmos/cosmos-sdk/issues/10948) Add `app-db-backend` to the `app.toml` config to replace the compile-time `types.DBbackend` variable. * (authz)[\#11060](https://github.com/cosmos/cosmos-sdk/pull/11060) Support grant with no expire time. +* [#11691](https://github.com/cosmos/cosmos-sdk/pull/11691) Plugin architecture for ADR-038 ### API Breaking Changes diff --git a/Makefile b/Makefile index 0c598af97cde..07b58037202f 100644 --- a/Makefile +++ b/Makefile @@ -259,6 +259,9 @@ endif .PHONY: run-tests test test-all $(TEST_TARGETS) +# Sim tests with state listening plugins enabled +include sim-state-listening.mk + test-sim-nondeterminism: @echo "Running non-determinism test..." @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminism -Enabled=true \ diff --git a/baseapp/abci.go b/baseapp/abci.go index c9b1a6fad98a..cb628a962ad6 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -7,6 +7,7 @@ import ( "os" "sort" "strings" + "sync" "syscall" "github.com/gogo/protobuf/proto" @@ -190,11 +191,30 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg app.voteInfos = req.LastCommitInfo.GetVotes() // call the hooks with the BeginBlock messages + wg := new(sync.WaitGroup) for _, streamingListener := range app.abciListeners { - if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + app.halt() + } + }() + } else { + go func() { + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + } + }() } } + // wait for all the listener calls to finish + wg.Wait() return res } @@ -215,12 +235,31 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc res.ConsensusParamUpdates = cp } - // call the streaming service hooks with the EndBlock messages + // call the hooks with the BeginBlock messages + wg := new(sync.WaitGroup) for _, streamingListener := range app.abciListeners { - if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go 
routine completes + defer wg.Done() + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + app.halt() + } + }() + } else { + go func() { + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + } + }() } } + // wait for all the listener calls to finish + wg.Wait() return res } @@ -269,11 +308,31 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx var abciRes abci.ResponseDeliverTx defer func() { + // call the hooks with the BeginBlock messages + wg := new(sync.WaitGroup) for _, streamingListener := range app.abciListeners { - if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { - app.logger.Error("DeliverTx listening hook failed", "err", err) + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { + app.logger.Error("DeliverTx listening hook failed", "err", err) + app.halt() + } + }() + } else { + go func() { + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { + app.logger.Error("DeliverTx listening hook failed", "err", err) + } + }() } } + // wait for all the listener calls to finish + wg.Wait() }() ctx := app.getContextForTx(runTxModeDeliver, req.Tx) @@ -289,7 +348,6 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx } return abciRes - } // Commit implements the ABCI interface. It will commit all state that exists in diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index 45c65b8030ee..ba60a614b684 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -4,7 +4,6 @@ import ( "context" "errors" "fmt" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" diff --git a/baseapp/options.go b/baseapp/options.go index 4b24c108da06..e477171e5486 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -2,9 +2,8 @@ package baseapp import ( "fmt" - "io" - dbm "github.com/tendermint/tm-db" + "io" "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/snapshots" diff --git a/baseapp/streaming.go b/baseapp/streaming.go index 39e0f1ca6e9b..940ff43aee15 100644 --- a/baseapp/streaming.go +++ b/baseapp/streaming.go @@ -18,6 +18,13 @@ type ABCIListener interface { ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error // ListenDeliverTx updates the steaming service with the latest DeliverTx messages ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + // HaltAppOnDeliveryError returns true if the application has been configured to halt when + // ListenBeginBlock, ListenEndBlock, ListenDeliverTx fail to process messages and false when + // the application has been configured to send messages to ListenBeginBlock, ListenEndBlock, ListenDeliverTx + // in fire-and-forget fashion. + // + // This behavior is controlled by a corresponding app config setting. 
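+	// (for the streaming service plugins added in this change, this corresponds to the plugin's `halt_app_on_delivery_error` setting in app.toml)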
+ HaltAppOnDeliveryError() bool } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md index b9c209b4aa32..835386f6500b 100644 --- a/docs/architecture/adr-038-state-listening.md +++ b/docs/architecture/adr-038-state-listening.md @@ -30,10 +30,10 @@ In a new file, `store/types/listening.go`, we will create a `WriteListener` inte ```go // WriteListener interface for streaming data out from a listenkv.Store type WriteListener interface { - // if value is nil then it was deleted - // storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores - // delete bool indicates if it was a delete; true: delete, false: set - OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error + // if value is nil then it was deleted + // storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores + // delete bool indicates if it was a delete; true: delete, false: set + OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error } ``` @@ -59,33 +59,33 @@ message StoreKVPair { // StoreKVPairWriteListener is used to configure listening to a KVStore by writing out length-prefixed // protobuf encoded StoreKVPairs to an underlying io.Writer type StoreKVPairWriteListener struct { - writer io.Writer - marshaller codec.BinaryCodec + writer io.Writer + marshaller codec.BinaryCodec } // NewStoreKVPairWriteListener wraps creates a StoreKVPairWriteListener with a provdied io.Writer and codec.BinaryCodec func NewStoreKVPairWriteListener(w io.Writer, m codec.BinaryCodec) *StoreKVPairWriteListener { - return &StoreKVPairWriteListener{ - writer: w, - marshaller: m, - } + return &StoreKVPairWriteListener{ + writer: w, + marshaller: m, + } } // OnWrite satisfies the WriteListener interface by writing length-prefixed protobuf encoded StoreKVPairs func (wl *StoreKVPairWriteListener) OnWrite(storeKey types.StoreKey, key []byte, value []byte, delete bool) error error { - kvPair := new(types.StoreKVPair) - kvPair.StoreKey = storeKey.Name() - kvPair.Delete = Delete - kvPair.Key = key - kvPair.Value = value - by, err := wl.marshaller.MarshalBinaryLengthPrefixed(kvPair) - if err != nil { - return err - } - if _, err := wl.writer.Write(by); err != nil { - return err - } - return nil + kvPair := new(types.StoreKVPair) + kvPair.StoreKey = storeKey.Name() + kvPair.Delete = Delete + kvPair.Key = key + kvPair.Value = value + by, err := wl.marshaller.MarshalBinaryLengthPrefixed(kvPair) + if err != nil { + return err + } + if _, err := wl.writer.Write(by); err != nil { + return err + } + return nil } ``` @@ -99,39 +99,39 @@ We can configure the `Store` with a set of `WriteListener`s which stream the out // Operations are traced on each core KVStore call and written to any of the // underlying listeners with the proper key and operation permissions type Store struct { - parent types.KVStore - listeners []types.WriteListener - parentStoreKey types.StoreKey + parent types.KVStore + listeners []types.WriteListener + parentStoreKey types.StoreKey } // NewStore returns a reference to a new traceKVStore given a parent // KVStore implementation and a buffered writer. 
func NewStore(parent types.KVStore, psk types.StoreKey, listeners []types.WriteListener) *Store { - return &Store{parent: parent, listeners: listeners, parentStoreKey: psk} + return &Store{parent: parent, listeners: listeners, parentStoreKey: psk} } // Set implements the KVStore interface. It traces a write operation and // delegates the Set call to the parent KVStore. func (s *Store) Set(key []byte, value []byte) { - types.AssertValidKey(key) - s.parent.Set(key, value) - s.onWrite(false, key, value) + types.AssertValidKey(key) + s.parent.Set(key, value) + s.onWrite(false, key, value) } // Delete implements the KVStore interface. It traces a write operation and // delegates the Delete call to the parent KVStore. func (s *Store) Delete(key []byte) { - s.parent.Delete(key) - s.onWrite(true, key, nil) + s.parent.Delete(key) + s.onWrite(true, key, nil) } // onWrite writes a KVStore operation to all the WriteListeners func (s *Store) onWrite(delete bool, key, value []byte) { - for _, l := range s.listeners { - if err := l.OnWrite(s.parentStoreKey, key, value, delete); err != nil { - // log error - } - } + for _, l := range s.listeners { + if err := l.OnWrite(s.parentStoreKey, key, value, delete); err != nil { + // log error + } + } } ``` @@ -142,30 +142,30 @@ Additionally, we will update the `CacheWrap` and `CacheWrapper` interfaces to en ```go type MultiStore interface { - ... + ... - // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey - ListeningEnabled(key StoreKey) bool + // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey + ListeningEnabled(key StoreKey) bool - // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey - // It appends the listeners to a current set, if one already exists - AddListeners(key StoreKey, listeners []WriteListener) + // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey + // It appends the listeners to a current set, if one already exists + AddListeners(key StoreKey, listeners []WriteListener) } ``` ```go type CacheWrap interface { - ... + ... - // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap + // CacheWrapWithListeners recursively wraps again with listening enabled + CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap } type CacheWrapper interface { - ... + ... 
- // CacheWrapWithListeners recursively wraps again with listening enabled - CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap + // CacheWrapWithListeners recursively wraps again with listening enabled + CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap } ``` @@ -176,16 +176,16 @@ to wrap the returned `KVStore` with a `listenkv.Store` if listening is turned on ```go func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore { - store := rs.stores[key].(types.KVStore) + store := rs.stores[key].(types.KVStore) - if rs.TracingEnabled() { - store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext) - } - if rs.ListeningEnabled(key) { - store = listenkv.NewStore(key, store, rs.listeners[key]) - } + if rs.TracingEnabled() { + store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext) + } + if rs.ListeningEnabled(key) { + store = listenkv.NewStore(key, store, rs.listeners[key]) + } - return store + return store } ``` @@ -194,11 +194,11 @@ to and enable listening in the cache layer. ```go func (rs *Store) CacheMultiStore() types.CacheMultiStore { - stores := make(map[types.StoreKey]types.CacheWrapper) - for k, v := range rs.stores { - stores[k] = v - } - return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners) + stores := make(map[types.StoreKey]types.CacheWrapper) + for k, v := range rs.stores { + stores[k] = v + } + return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners) } ``` @@ -216,27 +216,31 @@ receipt from the `StreamingService`. ```go // ABCIListener interface used to hook into the ABCI message processing of the BaseApp type ABCIListener interface { - // ListenBeginBlock updates the streaming service with the latest BeginBlock messages - ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error - // ListenEndBlock updates the steaming service with the latest EndBlock messages - ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error - // ListenDeliverTx updates the steaming service with the latest DeliverTx messages - ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error - // ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service - // after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt - ListenSuccess() <-chan bool + // ListenBeginBlock updates the streaming service with the latest BeginBlock messages + ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error + // ListenEndBlock updates the streaming service with the latest EndBlock messages + ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error + // ListenDeliverTx updates the streaming service with the latest DeliverTx messages + ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + // HaltAppOnDeliveryError returns whether or not to halt the application when delivery of messages fails + // in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. When `false`, the app will operate in fire-and-forget mode. + // When `true`, the app will gracefully halt and stop the running node.
Uncommitted blocks will + // be replayed to all listeners when the node restarts and all successful listeners that received data + // prior to the halt will receive duplicate data. Whether or not a listener operates in a fire-and-forget mode + // is determined by the listener's configuration property `halt_app_on_delivery_error = true|false`. + HaltAppOnDeliveryError() bool } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks type StreamingService interface { - // Stream is the streaming service loop, awaits kv pairs and writes them to a destination stream or file - Stream(wg *sync.WaitGroup) error - // Listeners returns the streaming service's listeners for the BaseApp to register - Listeners() map[types.StoreKey][]store.WriteListener - // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp - ABCIListener - // Closer interface - io.Closer + // Stream is the streaming service loop, awaits kv pairs and writes them to a destination stream or file + Stream(wg *sync.WaitGroup) error + // Listeners returns the streaming service's listeners for the BaseApp to register + Listeners() map[types.StoreKey][]store.WriteListener + // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp + ABCIListener + // Closer interface + io.Closer } ``` @@ -257,15 +261,6 @@ func (app *BaseApp) SetStreamingService(s StreamingService) { } ``` -We will add a new method to the `BaseApp` that is used to configure a global wait limit for receiving positive acknowledgement -of message receipt from the integrated `StreamingService`s. - -```go -func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) { - app.globalWaitLimit = t -} -``` - We will also modify the `BeginBlock`, `EndBlock`, and `DeliverTx` methods to pass ABCI requests and responses to any streaming service hooks registered with the `BaseApp`. @@ -274,10 +269,32 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg ... - // Call the streaming service hooks with the BeginBlock messages - for _, listener := range app.abciListeners { - listener.ListenBeginBlock(app.deliverState.ctx, req, res) + // call the hooks with the BeginBlock messages + wg := new(sync.WaitGroup) + for _, streamingListener := range app.abciListeners { + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + app.halt() + } + }() + } else { + // fire and forget semantics + go func() { + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + } + }() + } } + // wait for all the listener calls to finish + wg.Wait() return res } @@ -289,94 +306,72 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc ... 
// Call the streaming service hooks with the EndBlock messages - for _, listener := range app.abciListeners { - listener.ListenEndBlock(app.deliverState.ctx, req, res) - } - - return res -} -``` - -```go -func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - - ... - - gInfo, result, err := app.runTx(runTxModeDeliver, req.Tx) - if err != nil { - resultStr = "failed" - res := sdkerrors.ResponseDeliverTx(err, gInfo.GasWanted, gInfo.GasUsed, app.trace) - // If we throw an error, be sure to still call the streaming service's hook - for _, listener := range app.abciListeners { - listener.ListenDeliverTx(app.deliverState.ctx, req, res) + wg := new(sync.WaitGroup) + for _, streamingListener := range app.abciListeners { + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + app.halt() + } + }() + } else { + // fire and forget semantics + go func() { + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + } + }() } - return res - } - - res := abci.ResponseDeliverTx{ - GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? - GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints? - Log: result.Log, - Data: result.Data, - Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents), - } - - // Call the streaming service hooks with the DeliverTx messages - for _, listener := range app.abciListeners { - listener.ListenDeliverTx(app.deliverState.ctx, req, res) } + // wait for all the listener calls to finish + wg.Wait() return res } ``` -We will also modify the `Commit` method to process `success/failure` signals from the integrated `StreamingService`s using -the `ABCIListener.ListenSuccess()` method. Each `StreamingService` has an internal wait threshold after which it sends -`false` to the `ListenSuccess()` channel, and the BaseApp also imposes a configurable global wait limit. -If the `StreamingService` is operating in a "fire-and-forget" mode, `ListenSuccess()` should immediately return `true` -off the channel despite the success status of the service. - ```go -func (app *BaseApp) Commit() (res abci.ResponseCommit) { +func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - ... 
- - var halt bool - - switch { - case app.haltHeight > 0 && uint64(header.Height) >= app.haltHeight: - halt = true - - case app.haltTime > 0 && header.Time.Unix() >= int64(app.haltTime): - halt = true - } - - // each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel - // but the BaseApp also imposes a global wait limit - maxWait := time.NewTicker(app.globalWaitLimit) - for _, lis := range app.abciListeners { - select { - case success := <- lis.ListenSuccess(): - if success == false { - halt = true - break + var abciRes abci.ResponseDeliverTx + defer func() { + // call the hooks with the BeginBlock messages + wg := new(sync.WaitGroup) + for _, streamingListener := range app.abciListeners { + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { + app.logger.Error("DeliverTx listening hook failed", "err", err) + app.halt() + } + }() + } else { + // fire and forget semantics + go func() { + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { + app.logger.Error("DeliverTx listening hook failed", "err", err) + } + }() } - case <- maxWait.C: - halt = true - break } - } - - if halt { - // Halt the binary and allow Tendermint to receive the ResponseCommit - // response with the commit ID hash. This will allow the node to successfully - // restart and process blocks assuming the halt configuration has been - // reset or moved to a more distant value. - app.halt() - } - + // wait for all the listener calls to finish + wg.Wait() + }() + ... + return res } ``` @@ -421,7 +416,7 @@ type StateStreamingPlugin interface { Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error // Start starts the background streaming process of the plugin streaming service - Start(wg *sync.WaitGroup) + Start(wg *sync.WaitGroup) error // Plugin is the base Plugin interface Plugin @@ -442,34 +437,37 @@ func NewSimApp( ... 
- // this loads the preloaded and any plugins found in `plugins.dir` - pluginLoader, err := loader.NewPluginLoader(appOpts, logger) - if err != nil { - // handle error - } - - // initialize the loaded plugins - if err := pluginLoader.Initialize(); err != nil { - // hanlde error - } - keys := sdk.NewKVStoreKeys( - authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, - minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, - govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, - evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey, + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, + minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, + govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey, ) - // register the plugin(s) with the BaseApp - if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { - // handle error - } + pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) + if cast.ToBool(appOpts.Get(pluginsOnKey)) { + // this loads the preloaded and any plugins found in `plugins.dir` + pluginLoader, err := loader.NewPluginLoader(appOpts, logger) + if err != nil { + // handle error + } - // start the plugin services, optionally use wg to synchronize shutdown using io.Closer - wg := new(sync.WaitGroup) - if err := pluginLoader.Start(wg); err != nil { - // handler error - } + // initialize the loaded plugins + if err := pluginLoader.Initialize(); err != nil { + // handle error + } + + // register the plugin(s) with the BaseApp + if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { + // handle error + } + + // start the plugin services, optionally use wg to synchronize shutdown using io.Closer + wg := new(sync.WaitGroup) + if err := pluginLoader.Start(wg); err != nil { + // handler error + } + } ... @@ -485,39 +483,42 @@ The plugin system will be configured within an app's app.toml file. ```toml [plugins] on = false # turn the plugin system, as a whole, on or off - disabled = ["list", "of", "plugin", "names", "to", "disable"] + enabled = ["list", "of", "plugin", "names", "to", "enable"] dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" ``` -There will be three parameters for configuring the plugin system: `plugins.on`, `plugins.disabled` and `plugins.dir`. +There will be three parameters for configuring the plugin system: `plugins.on`, `plugins.enabled` and `plugins.dir`. `plugins.on` is a bool that turns on or off the plugin system at large, `plugins.dir` directs the system to a directory -to load plugins from, and `plugins.disabled` is a list of names for the plugins we want to disable (useful for disabling preloaded plugins). +to load plugins from, and `plugins.enabled` provides `opt-in` semantics to plugin names to enable (including preloaded plugins). Configuration of a given plugin is ultimately specific to the plugin, but we will introduce some standards here: Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`). + Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`). It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys -(e.g. 
`plugins.streaming.file.keys`) for the stores it listens to and a mode (e.g. `plugins.streaming.file.mode`) -that signifies whether the service operates in a fire-and-forget capacity (`faf`) or the BaseApp should require positive -acknowledgement of message receipt by the service (`ack`). +(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.halt_app_on_delivery_error`) +that signifies whether the service operates in a fire-and-forget capacity or stops the BaseApp when an error occurs in +any of `ListenBeginBlock`, `ListenEndBlock` and `ListenDeliverTx`. e.g. ```toml [plugins] on = false # turn the plugin system, as a whole, on or off - disabled = ["list", "of", "plugin", "names", "to", "disable"] + enabled = ["list", "of", "plugin", "names", "to", "enable"] dir = "the directory to load non-preloaded plugins from; defaults to " [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their plugin name [plugins.streaming.file] # the specific parameters for the file streaming service plugin keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - writeDir = "path to the write directory" + write_dir = "path to the write directory" prefix = "optional prefix to prepend to the generated file names" - mode = "faf" # faf == fire-and-forget; ack == require positive acknowledge of receipt + halt_app_on_delivery_error = false # false == fire-and-forget; true == stop the application [plugins.streaming.kafka] - ... - [plugins.modules] + keys = [] + topic_prefix = "block" # Optional prefix for topic names where data will be stored. + flush_timeout_ms = 5000 # Flush and wait for outstanding messages and requests to complete delivery when calling `StreamingService.Close()`. (milliseconds) + halt_app_on_delivery_error = true # Whether or not to halt the application when plugin fails to deliver message(s). ...
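+    # the full set of Kafka plugin parameters, including the [plugins.streaming.kafka.producer] settings, is listed in plugin/example_config.toml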
``` diff --git a/go.mod b/go.mod index 62e25f28ea8d..5471f3adeac0 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/cockroachdb/apd/v2 v2.0.2 github.com/coinbase/rosetta-sdk-go v0.7.8 github.com/confio/ics23/go v0.7.0 + github.com/confluentinc/confluent-kafka-go v1.8.2 github.com/cosmos/btcutil v1.0.4 github.com/cosmos/cosmos-proto v1.0.0-alpha7 github.com/cosmos/cosmos-sdk/api v0.1.0 diff --git a/go.sum b/go.sum index 0cafef3401a8..a9f06617c414 100644 --- a/go.sum +++ b/go.sum @@ -221,6 +221,8 @@ github.com/coinbase/rosetta-sdk-go v0.7.8 h1:op/O3/ZngTfcrZnp3p/TziRfKGdo7AUZGUm github.com/coinbase/rosetta-sdk-go v0.7.8/go.mod h1:vB6hZ0ZnZmln3ThA4x0mZvOAPDJ5BhfgnjH76hxoy10= github.com/confio/ics23/go v0.7.0 h1:00d2kukk7sPoHWL4zZBZwzxnpA2pec1NPdwbSokJ5w8= github.com/confio/ics23/go v0.7.0/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= +github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E= +github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= diff --git a/plugin/README.md b/plugin/README.md new file mode 100644 index 000000000000..dc5a94bfaeed --- /dev/null +++ b/plugin/README.md @@ -0,0 +1,151 @@ +# Cosmos-SDK Plugins +This package contains an extensible plugin system for the Cosmos-SDK. Included in this top-level package is the base interface +for a Cosmos-SDK plugin, as well as more specific plugin interface definitions that build on top of this base interface. +The [loader](./loader) sub-directory contains the Go package and scripts for loading plugins into the SDK. The [plugins](./plugins) +sub-directory contains the preloaded plugins and a script for building them; this is also the directory that the plugin loader will look +for non-preloaded plugins by default. + +The base plugin interface is defined as: +```go +// Plugin is the base interface for all kinds of cosmos-sdk plugins +// It will be included in interfaces of different Plugins +type Plugin interface { + // Name should return unique name of the plugin + Name() string + + // Version returns current version of the plugin + Version() string + + // Init is called once when the Plugin is being loaded + // The plugin is passed the AppOptions for configuration + // A plugin will not necessarily have a functional Init + Init(env serverTypes.AppOptions) error + + // Closer interface to shutting down the plugin process + io.Closer +} +``` + +Specific plugin types extend this interface, enabling them to work with the loader tooling defined in the [loader sub-directory](./loader). + +The plugin system itself is configured using the `plugins` TOML mapping in the App's `app.toml` file. There are three +parameters for configuring the plugins: `plugins.on`, `plugins.enabled` and `plugins.dir`. `plugins.on` is a bool that
+turns on or off the plugin system at large, `plugins.dir` directs the system to a directory to load plugins from, and +`plugins.enabled` is a list of enabled plugin names.
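+
+For illustration only, a minimal plugin satisfying the base `Plugin` interface above might look like the following sketch. The `examplePlugin` type and its package are hypothetical and are not part of this package; the preloaded plugins under [plugins](./plugins) follow the same pattern.
+
+```go
+package example
+
+import (
+	"github.com/cosmos/cosmos-sdk/plugin"
+	serverTypes "github.com/cosmos/cosmos-sdk/server/types"
+)
+
+// examplePlugin is a hypothetical, minimal plugin used only to illustrate the base interface
+type examplePlugin struct {
+	opts serverTypes.AppOptions
+}
+
+// Name returns the unique name of the plugin
+func (p *examplePlugin) Name() string { return "example" }
+
+// Version returns the current version of the plugin
+func (p *examplePlugin) Version() string { return "0.0.1" }
+
+// Init stores the AppOptions so the plugin can read its configuration later
+func (p *examplePlugin) Init(env serverTypes.AppOptions) error {
+	p.opts = env
+	return nil
+}
+
+// Close satisfies io.Closer; this sketch holds no resources to release
+func (p *examplePlugin) Close() error { return nil }
+
+// Plugins is the exported symbol the loader looks up (via the plugin package's PLUGINS_SYMBOL) when loading a plugin binary
+var Plugins = []plugin.Plugin{
+	&examplePlugin{},
+}
+```
+
+The plugin system configuration in `app.toml` looks like the following: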
+ +```toml +[plugins] + on = false # turn the plugin system, as a whole, on or off + enabled = ["list", "of", "plugin", "names", "to", "enable"] + dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" +``` + +As mentioned above, some plugins can be preloaded. This means they do not need to be loaded from the specified `plugins.dir` and instead +are loaded by default. Note that both preloaded and non-preloaded plugins must appear in the `plugins.enabled` list for the app to send events to them. +This provides node operators with the ability to `opt-in` and enable only plugins of interest. At this time the only preloaded plugins are: +the [file streaming service plugin](./plugins/file), the [trace streaming service plugin](./plugins/trace) and the [kafka streaming service plugin](./plugins/kafka). +Plugins can be added to the preloaded set by adding the plugin to the [plugins dir](../../plugin/plugin.go) and modifying the [preload_list](../../plugin/loader/preload_list). + +In your application, if `plugins.on` is set to `true`, use this to direct the invocation of `NewPluginLoader` and walk through +the steps of plugin loading, initialization, injection, starting, and closure. + +e.g. in `NewSimApp`: + +```go +func NewSimApp( + logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool, + homePath string, invCheckPeriod uint, encodingConfig simappparams.EncodingConfig, + appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp), +) *SimApp { + + ... + + keys := sdk.NewKVStoreKeys( + authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey, + minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey, + govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey, + evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey, + ) + + pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) + if cast.ToBool(appOpts.Get(pluginsOnKey)) { + // this loads the preloaded and any plugins found in `plugins.dir` + // if their names match those in the `plugins.enabled` list. + pluginLoader, err := loader.NewPluginLoader(appOpts, logger) + if err != nil { + // handle error + } + + // initialize the loaded plugins + if err := pluginLoader.Initialize(); err != nil { + // handle error + } + + // register the plugin(s) with the BaseApp + if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { + // handle error + } + + // start the plugin services, optionally use wg to synchronize shutdown using io.Closer + wg := new(sync.WaitGroup) + if err := pluginLoader.Start(wg); err != nil { + // handle error + } + } + + ... + + return app +} +``` + +# State Streaming Plugin +The `BaseApp` package contains the interface for a `StreamingService` used to write state changes out from individual KVStores to a +file or stream, as described in [ADR-038](../docs/architecture/adr-038-state-listening.md). + +Specific `StreamingService` implementations are written and loaded as plugins by extending the above interface with a +`StateStreamingPlugin` interface that adds a `Register` method used to register the plugin's `StreamingService` with the +`BaseApp` and a `Start` method to start the streaming service.
+ +```go +// StateStreamingPlugin interface for plugins that load a streaming.Service onto a baseapp.BaseApp +type StateStreamingPlugin interface { + // Register configures and registers the plugin streaming service with the BaseApp + Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error + + // Start starts the background streaming process of the plugin streaming service + Start(wg *sync.WaitGroup) error + + // Plugin is the base Plugin interface + Plugin +} +``` + +A `StateStreamingPlugin` is configured from within an App using the `AppOptions` loaded from the `app.toml` file. +Every `StateStreamingPlugin` will be configured within the `plugins.streaming` TOML mapping. The exact keys/parameters +present in this mapping will be dependent on the specific `StateStreamingPlugin`, but we will introduce some standards +here using the file `StateStreamingPlugin`: + +Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`). + +Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`). +It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys +(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.halt_app_on_delivery_error`) +that signifies whether the service operates in a fire-and-forget capacity or whether the BaseApp should halt in case of a delivery error by the plugin service. +The file `StreamingService` does not have an individual `halt_app_on_delivery_error` since it operates synchronously with the App. + +e.g. + +```toml +[plugins] + on = false # turn the plugin system, as a whole, on or off + enabled = ["list", "of", "plugin", "names", "to", "enable"] + dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" + [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName + [plugins.streaming.file] # the specific parameters for the file streaming service plugin + keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] + write_dir = "path to the write directory" + prefix = "optional prefix to prepend to the generated file names" + # Whether or not to halt the application when plugin fails to deliver message(s).
+ halt_app_on_delivery_error = false # false = fire-and-forget +``` diff --git a/plugin/Rules.mk b/plugin/Rules.mk new file mode 100644 index 000000000000..1e26d2a3c692 --- /dev/null +++ b/plugin/Rules.mk @@ -0,0 +1,9 @@ +include mk/header.mk + +dir := $(d)/loader +include $(dir)/Rules.mk + +dir := $(d)/plugins +include $(dir)/Rules.mk + +include mk/footer.mk diff --git a/plugin/example_config.toml b/plugin/example_config.toml new file mode 100644 index 000000000000..8d36d8efb76a --- /dev/null +++ b/plugin/example_config.toml @@ -0,0 +1,96 @@ +############################################################################### +### Plugin system configuration ### +############################################################################### + +[plugins] + +# turn the plugin system, as a whole, on or off +on = true + +# List of plugin names to enable from the plugin/plugins/* +enabled = ["kafka"] + +# The directory to load non-preloaded plugins from; defaults to $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins +dir = "" + +# a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName +[plugins.streaming] + +############################################################################### +### File plugin configuration ### +############################################################################### + +# the specific parameters for the file streaming service plugin +[plugins.streaming.file] + +# List of store keys to expose to this streaming service. +# Leaving this blank will include all store keys. +keys = [] + +# Path to the write directory +write_dir = "" + +# Optional prefix to prepend to the generated file names +prefix = "" + +# Whether or not to halt the application when plugin fails to deliver message(s). +halt_app_on_delivery_error = true + +############################################################################### +### Trace Plugin configuration ### +############################################################################### + +# The specific parameters for the trace streaming service plugin +[plugins.streaming.trace] + +# List of store keys we want to expose for this streaming service. +keys = [] + +# In addition to block event info, print the data to stdout as well. +print_data_to_stdout = false + +# Whether or not to halt the application when plugin fails to deliver message(s). +halt_app_on_delivery_error = true + +############################################################################### +### Kafka Plugin configuration ### +############################################################################### + +# The specific parameters for the Kafka streaming service plugin +[plugins.streaming.kafka] + +# List of store keys we want to expose for this streaming service. +keys = [] + +# Optional topic prefix for the topic(s) where data will be stored +topic_prefix = "block" + +# Flush and wait for outstanding messages and requests to complete delivery. (milliseconds) +flush_timeout_ms = 1500 + +# Whether or not to halt the application when plugin fails to deliver message(s). +halt_app_on_delivery_error = true + +# Producer configuration properties. +# The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka.
+# For a full list of producer configuration properties +# see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md +[plugins.streaming.kafka.producer] + +# Initial list of brokers as a comma separated list of broker host or host:port[, host:port[,...]] +bootstrap_servers = "localhost:9092" + +# Client identifier +client_id = "my-app-id" + +# This field indicates the number of acknowledgements the leader +# broker must receive from ISR brokers before responding to the request +acks = "all" + +# When set to true, the producer will ensure that messages +# are successfully produced exactly once and in the original produce order. +# The following configuration properties are adjusted automatically (if not modified by the user) +# when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5), +# retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo. +# Producer instantiation will fail if user-supplied configuration is incompatible. +enable_idempotence = true diff --git a/plugin/loader/Rules.mk b/plugin/loader/Rules.mk new file mode 100644 index 000000000000..dd842bfa1892 --- /dev/null +++ b/plugin/loader/Rules.mk @@ -0,0 +1,13 @@ +include mk/header.mk + +COSMOS_PLUGINS ?= +export COSMOS_PLUGINS + +$(d)/preload.go: d:=$(d) +$(d)/preload.go: $(d)/preload_list $(d)/preload.sh ALWAYS + $(d)/preload.sh > $@ + go fmt $@ >/dev/null + +DEPS_GO += $(d)/preload.go + +include mk/footer.mk diff --git a/plugin/loader/load_nocgo.go b/plugin/loader/load_nocgo.go new file mode 100644 index 000000000000..51d7c4a6e63f --- /dev/null +++ b/plugin/loader/load_nocgo.go @@ -0,0 +1,18 @@ +// +build !cgo,!noplugin +// +build linux darwin freebsd + +package loader + +import ( + "errors" + + cplugin "github.com/cosmos/cosmos-sdk/plugin" +) + +func init() { + loadPluginFunc = nocgoLoadPlugin +} + +func nocgoLoadPlugin(fi string) ([]cplugin.Plugin, error) { + return nil, errors.New("not built with cgo support") +} diff --git a/plugin/loader/load_noplugin.go b/plugin/loader/load_noplugin.go new file mode 100644 index 000000000000..1ababd520f2a --- /dev/null +++ b/plugin/loader/load_noplugin.go @@ -0,0 +1,17 @@ +// +build noplugin + +package loader + +import ( + "errors" + + cplugin "github.com/cosmos/cosmos-sdk/plugin" +) + +func init() { + loadPluginFunc = nopluginLoadPlugin +} + +func nopluginLoadPlugin(string) ([]cplugin.Plugin, error) { + return nil, errors.New("not built with plugin support") +} diff --git a/plugin/loader/load_unix.go b/plugin/loader/load_unix.go new file mode 100644 index 000000000000..9740e9d42a2f --- /dev/null +++ b/plugin/loader/load_unix.go @@ -0,0 +1,33 @@ +// +build cgo,!noplugin +// +build linux darwin freebsd + +package loader + +import ( + "errors" + "plugin" + + cplugin "github.com/cosmos/cosmos-sdk/plugin" +) + +func init() { + loadPluginFunc = unixLoadPlugin +} + +func unixLoadPlugin(fi string) ([]cplugin.Plugin, error) { + pl, err := plugin.Open(fi) + if err != nil { + return nil, err + } + pls, err := pl.Lookup(cplugin.PLUGINS_SYMBOL) + if err != nil { + return nil, err + } + + typePls, ok := pls.(*[]cplugin.Plugin) + if !ok { + return nil, errors.New("field 'Plugins' didn't contain correct type") + } + + return *typePls, nil +} diff --git a/plugin/loader/loader.go b/plugin/loader/loader.go new file mode 100644 index 000000000000..5c24710c9683 --- /dev/null +++ b/plugin/loader/loader.go @@ -0,0 +1,293 @@ +package loader + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + +
"github.com/spf13/cast" + logging "github.com/tendermint/tendermint/libs/log" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/plugin" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + storeTypes "github.com/cosmos/cosmos-sdk/store/types" +) + +var preloadPlugins []plugin.Plugin + +// Preload adds one or more plugins to the preload list. This should _only_ be called during init. +func Preload(plugins ...plugin.Plugin) { + preloadPlugins = append(preloadPlugins, plugins...) +} + +var loadPluginFunc = func(string) ([]plugin.Plugin, error) { + return nil, fmt.Errorf("unsupported platform %s", runtime.GOOS) +} + +type loaderState int + +const ( + loaderLoading loaderState = iota + loaderInitializing + loaderInitialized + loaderInjecting + loaderInjected + loaderStarting + loaderStarted + loaderClosing + loaderClosed + loaderFailed +) + +func (ls loaderState) String() string { + switch ls { + case loaderLoading: + return "Loading" + case loaderInitializing: + return "Initializing" + case loaderInitialized: + return "Initialized" + case loaderInjecting: + return "Injecting" + case loaderInjected: + return "Injected" + case loaderStarting: + return "Starting" + case loaderStarted: + return "Started" + case loaderClosing: + return "Closing" + case loaderClosed: + return "Closed" + case loaderFailed: + return "Failed" + default: + return "Unknown" + } +} + +// PluginLoader keeps track of loaded plugins. +// +// To use: +// 1. Load any desired plugins with Load and LoadDirectory. Preloaded plugins +// will automatically be loaded. +// 2. Call Initialize to run all initialization logic. +// 3. Call Inject to register the plugins. +// 4. Optionally call Start to start plugins. +// 5. Call Close to close all plugins. +type PluginLoader struct { + state loaderState + plugins map[string]plugin.Plugin + started []plugin.Plugin + opts serverTypes.AppOptions + logger logging.Logger + enabled []string +} + +// NewPluginLoader creates new plugin loader +func NewPluginLoader(opts serverTypes.AppOptions, logger logging.Logger) (*PluginLoader, error) { + loader := &PluginLoader{plugins: make(map[string]plugin.Plugin, len(preloadPlugins)), opts: opts, logger: logger} + loader.enabled = cast.ToStringSlice(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ENABLED_TOML_KEY))) + for _, v := range preloadPlugins { + if err := loader.Load(v); err != nil { + return nil, err + } + } + pluginDir := cast.ToString(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DIR_TOML_KEY))) + if pluginDir == "" { + pluginDir = filepath.Join(os.Getenv("GOPATH"), plugin.DEFAULT_PLUGINS_DIRECTORY) + } + if err := loader.LoadDirectory(pluginDir); err != nil { + return nil, err + } + return loader, nil +} + +func (loader *PluginLoader) assertState(state loaderState) error { + if loader.state != state { + return fmt.Errorf("loader state must be %s, was %s", state, loader.state) + } + return nil +} + +func (loader *PluginLoader) transition(from, to loaderState) error { + if err := loader.assertState(from); err != nil { + return err + } + loader.state = to + return nil +} + +// Load loads a plugin into the plugin loader. 
+func (loader *PluginLoader) Load(pl plugin.Plugin) error { + if err := loader.assertState(loaderLoading); err != nil { + return err + } + + name := pl.Name() + if ppl, ok := loader.plugins[name]; ok { + // plugin is already loaded + return fmt.Errorf( + "plugin: %s, is duplicated in version: %s, "+ + "while trying to load dynamically: %s", + name, ppl.Version(), pl.Version()) + } + if sliceContainsStr(loader.enabled, name) { + loader.plugins[name] = pl + loader.logger.Info("loading enabled plugin", "name", name) + return nil + } + return nil +} + +func sliceContainsStr(slice []string, str string) bool { + for _, ele := range slice { + if ele == str { + return true + } + } + return false +} + +// LoadDirectory loads a directory of plugins into the plugin loader. +func (loader *PluginLoader) LoadDirectory(pluginDir string) error { + if err := loader.assertState(loaderLoading); err != nil { + return err + } + newPls, err := loader.loadDynamicPlugins(pluginDir) + if err != nil { + return err + } + + for _, pl := range newPls { + if err := loader.Load(pl); err != nil { + return err + } + } + return nil +} + +func (loader *PluginLoader) loadDynamicPlugins(pluginDir string) ([]plugin.Plugin, error) { + _, err := os.Stat(pluginDir) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + + var plugins []plugin.Plugin + + err = filepath.Walk(pluginDir, func(fi string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() { + if fi != pluginDir { + loader.logger.Info("found directory inside plugins directory", "directory", fi) + } + return nil + } + + if info.Mode().Perm()&0111 == 0 { + // file is not executable let's not load it + // this is to prevent loading plugins from for example non-executable + // mounts, some /tmp mounts are marked as such for security + loader.logger.Error("non-executable file in plugins directory", "file", fi) + return nil + } + + if newPlugins, err := loadPluginFunc(fi); err == nil { + plugins = append(plugins, newPlugins...) + } else { + return fmt.Errorf("loading plugin %s: %s", fi, err) + } + return nil + }) + + return plugins, err +} + +// Initialize initializes all loaded plugins +func (loader *PluginLoader) Initialize() error { + if err := loader.transition(loaderLoading, loaderInitializing); err != nil { + return err + } + for name, p := range loader.plugins { + if err := p.Init(loader.opts); err != nil { + loader.state = loaderFailed + return fmt.Errorf("unable to initialize plugin %s: %v", name, err) + } + } + + return loader.transition(loaderInitializing, loaderInitialized) +} + +// Inject hooks all the plugins into the BaseApp. +func (loader *PluginLoader) Inject(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*storeTypes.KVStoreKey) error { + if err := loader.transition(loaderInitialized, loaderInjecting); err != nil { + return err + } + + for _, pl := range loader.plugins { + if pl, ok := pl.(plugin.StateStreamingPlugin); ok { + if err := pl.Register(bApp, marshaller, keys); err != nil { + loader.state = loaderFailed + return err + } + } + } + + return loader.transition(loaderInjecting, loaderInjected) +} + +// Start starts all long-running plugins. 
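+// Only plugins that implement the plugin.StateStreamingPlugin interface are started; each plugin that starts successfully is tracked so that Close can later stop it.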
+func (loader *PluginLoader) Start(wg *sync.WaitGroup) error { + if err := loader.transition(loaderInjected, loaderStarting); err != nil { + return err + } + for _, pl := range loader.plugins { + if pl, ok := pl.(plugin.StateStreamingPlugin); ok { + if err := pl.Start(wg); err != nil { + return err + } + loader.started = append(loader.started, pl) + } + } + + return loader.transition(loaderStarting, loaderStarted) +} + +// Close stops all long-running plugins. +func (loader *PluginLoader) Close() error { + switch loader.state { + case loaderClosing, loaderFailed, loaderClosed: + // nothing to do. + return nil + } + loader.state = loaderClosing + + var errs []string + started := loader.started + loader.started = nil + for _, pl := range started { + if err := pl.Close(); err != nil { + errs = append(errs, fmt.Sprintf( + "error closing plugin %s: %s", + pl.Name(), + err.Error(), + )) + } + } + if errs != nil { + loader.state = loaderFailed + return fmt.Errorf(strings.Join(errs, "\n")) + } + loader.state = loaderClosed + return nil +} diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go new file mode 100644 index 000000000000..c801954b61c7 --- /dev/null +++ b/plugin/loader/preload.go @@ -0,0 +1,17 @@ +package loader + +import ( + pluginfile "github.com/cosmos/cosmos-sdk/plugin/plugins/file" + pluginkafka "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka" + plugintrace "github.com/cosmos/cosmos-sdk/plugin/plugins/trace" +) + +// DO NOT EDIT THIS FILE +// This file is being generated as part of plugin build process +// To change it, modify the plugin/loader/preload.sh + +func init() { + Preload(pluginfile.Plugins...) + Preload(pluginkafka.Plugins...) + Preload(plugintrace.Plugins...) +} diff --git a/plugin/loader/preload.sh b/plugin/loader/preload.sh new file mode 100755 index 000000000000..52db5a345004 --- /dev/null +++ b/plugin/loader/preload.sh @@ -0,0 +1,43 @@ +#!/usr/bin/env bash + +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" + +LIST="$DIR/preload_list" + +to_preload() { + awk 'NF' $LIST | sed '/^#/d' + if [[ -n "$COSMOS_PLUGINS" ]]; then + for plugin in $COSMOS_PLUGINS; do + echo "$plugin github.com/cosmos/cosmos-sdk/plugin/plugins/$plugin *" + done + fi +} + +cat </dev/null + +$($(d)_plugins_so): %.so : %/main/main.go +$($(d)_plugins_so): $$(DEPS_GO) ALWAYS + $(GOCC) build -buildmode=plugin -pkgdir "$(GOPATH)/pkg/linux_amd64_dynlink" $(go-flags-with-tags) -o "$@" "$(call go-pkg-name,$(basename $@))/main" + chmod +x "$@" + +CLEAN += $($(d)_plugins_so) +CLEAN += $(foreach main_dir,$($(d)_plugins_main),$(dir $(main_dir))) + +build_plugins: $($(d)_plugins_so) + + +include mk/footer.mk diff --git a/plugin/plugins/file/file.go b/plugin/plugins/file/file.go new file mode 100644 index 000000000000..db754ff7568f --- /dev/null +++ b/plugin/plugins/file/file.go @@ -0,0 +1,113 @@ +package file + +import ( + "fmt" + "sync" + "time" + + "github.com/spf13/cast" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/plugin/plugins/file/service" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/types" +) + +// Plugin name and version +const ( + // PLUGIN_NAME is the name for this streaming service plugin + PLUGIN_NAME = "file" + + // PLUGIN_VERSION is the version for this streaming service plugin + PLUGIN_VERSION = "0.0.1" +) + +// TOML configuration parameter keys +const ( + // PREFIX_PARAM is an optional 
prefix to prepend to the files we write + PREFIX_PARAM = "prefix" + + // WRITE_DIR_PARAM is the directory we want to write files out to + WRITE_DIR_PARAM = "write_dir" + + // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service + KEYS_PARAM = "keys" + + // HALT_APP_ON_DELIVERY_ERROR whether or not to halt the application when plugin fails to deliver message(s) + HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error" +) + +const minWaitDuration = time.Millisecond * 10 + +// Plugins is the exported symbol for loading this plugin +var Plugins = []plugin.Plugin{ + &streamingServicePlugin{}, +} + +type streamingServicePlugin struct { + fss *service.FileStreamingService + opts serverTypes.AppOptions +} + +var _ plugin.StateStreamingPlugin = (*streamingServicePlugin)(nil) + +// Name satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Name() string { + return PLUGIN_NAME +} + +// Version satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Version() string { + return PLUGIN_VERSION +} + +// Init satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error { + ssp.opts = env + return nil +} + +// Register satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error { + // load all the params required for this plugin from the provided AppOptions + tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME) + filePrefix := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PREFIX_PARAM))) + fileDir := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, WRITE_DIR_PARAM))) + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) + var exposeStoreKeys []types.StoreKey + if len(exposeKeyStrings) > 0 { + exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrings)) + for _, keyStr := range exposeKeyStrings { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } else { // if none are specified, we expose all the keys + exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) + for _, storeKey := range keys { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + haltAppOnDeliveryError := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR))) + var err error + ssp.fss, err = service.NewFileStreamingService(fileDir, filePrefix, exposeStoreKeys, marshaller, haltAppOnDeliveryError) + if err != nil { + return err + } + // register the streaming service with the BaseApp + bApp.SetStreamingService(ssp.fss) + return nil +} + +// Start satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Start(wg *sync.WaitGroup) error { + return ssp.fss.Stream(wg) +} + +// Close satisfies io.Closer +func (ssp *streamingServicePlugin) Close() error { + return ssp.fss.Close() +} diff --git a/plugin/plugins/file/service/service.go b/plugin/plugins/file/service/service.go new file mode 100644 index 000000000000..104796707a56 --- /dev/null +++ b/plugin/plugins/file/service/service.go @@ -0,0 +1,318 @@ +package service + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" + + 
"github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +/* +The naming schema and data format for the files this service writes out to is as such: + +After every `BeginBlock` request a new file is created with the name `block-{N}-begin`, where N is the block number. All +subsequent state changes are written out to this file until the first `DeliverTx` request is received. At the head of these files, +the length-prefixed protobuf encoded `BeginBlock` request is written, and the response is written at the tail. + +After every `DeliverTx` request a new file is created with the name `block-{N}-tx-{M}` where N is the block number and M +is the tx number in the block (i.e. 0, 1, 2...). All subsequent state changes are written out to this file until the next +`DeliverTx` request is received or an `EndBlock` request is received. At the head of these files, the length-prefixed protobuf +encoded `DeliverTx` request is written, and the response is written at the tail. + +After every `EndBlock` request a new file is created with the name `block-{N}-end`, where N is the block number. All +subsequent state changes are written out to this file until the next `BeginBlock` request is received. At the head of these files, +the length-prefixed protobuf encoded `EndBlock` request is written, and the response is written at the tail. +*/ + +var _ baseapp.StreamingService = (*FileStreamingService)(nil) + +// FileStreamingService is a concrete implementation of streaming.Service that writes state changes out to files +type FileStreamingService struct { + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + filePrefix string // optional prefix for each of the generated files + writeDir string // directory to write files into + codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + haltAppOnDeliveryError bool // true if the app should be halted on streaming errors, false otherwise +} + +// IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener +// everytime we begin writing to a new file +type IntermediateWriter struct { + outChan chan<- []byte +} + +// NewIntermediateWriter create an instance of an intermediateWriter that sends to the provided channel +func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { + return &IntermediateWriter{ + outChan: outChan, + } +} + +// Write satisfies io.Writer +func (iw *IntermediateWriter) Write(b []byte) (int, error) { + iw.outChan <- b + return len(b), nil +} + +// NewFileStreamingService creates a new FileStreamingService for the provided writeDir, (optional) filePrefix, and storeKeys +func NewFileStreamingService( + writeDir, + filePrefix string, + storeKeys []types.StoreKey, + c codec.BinaryCodec, + haltAppOnDeliveryError bool, +) (*FileStreamingService, error) { + listenChan := make(chan []byte) + iw := 
NewIntermediateWriter(listenChan) + listener := types.NewStoreKVPairWriteListener(iw, c) + listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) + // in this case, we are using the same listener for each Store + for _, key := range storeKeys { + listeners[key] = append(listeners[key], listener) + } + // check that the writeDir exists and is writeable so that we can catch the error here at initialization if it is not + // we don't open a dstFile until we receive our first ABCI message + if err := isDirWriteable(writeDir); err != nil { + return nil, err + } + return &FileStreamingService{ + listeners: listeners, + srcChan: listenChan, + filePrefix: filePrefix, + writeDir: writeDir, + codec: c, + stateCache: make([][]byte, 0), + stateCacheLock: new(sync.Mutex), + haltAppOnDeliveryError: haltAppOnDeliveryError, + }, nil +} + +// Listeners returns the FileStreamingService's underlying WriteListeners, use for registering them with the BaseApp +func (fss *FileStreamingService) Listeners() map[types.StoreKey][]types.WriteListener { + return fss.listeners +} + +// ListenBeginBlock satisfies the Hook interface +// It writes out the received BeginBlock request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (fss *FileStreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { + // generate the new file + dstFile, err := fss.openBeginBlockFile(req) + if err != nil { + return err + } + // write req to file + lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) + if err != nil { + return err + } + if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { + return err + } + // write all state changes cached for this stage to file + fss.stateCacheLock.Lock() + for _, stateChange := range fss.stateCache { + if _, err = dstFile.Write(stateChange); err != nil { + fss.stateCache = nil + fss.stateCacheLock.Unlock() + return err + } + } + // reset cache + fss.stateCache = nil + fss.stateCacheLock.Unlock() + // write res to file + lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) + if err != nil { + return err + } + if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { + return err + } + // close file + if err := dstFile.Close(); err != nil { + return err + } + return nil +} + +func (fss *FileStreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) { + fss.currentBlockNumber = req.GetHeader().Height + fss.currentTxIndex = 0 + fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber) + if fss.filePrefix != "" { + fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) + } + return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) +} + +// ListenDeliverTx satisfies the Hook interface +// It writes out the received DeliverTx request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (fss *FileStreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error { + // generate the new file + dstFile, err := fss.openDeliverTxFile() + if err != nil { + return err + } + // write req to file + lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) + if err != nil { + return err + } + if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { + return err + } + // write all state changes cached for this stage to file + 
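+	// NOTE: the cache below is populated by the Stream goroutine, which reads the
+	// length-prefixed pairs emitted by the WriteListeners through the IntermediateWriter;
+	// it is drained and reset under stateCacheLock once per ABCI stage.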
fss.stateCacheLock.Lock() + for _, stateChange := range fss.stateCache { + if _, err = dstFile.Write(stateChange); err != nil { + fss.stateCache = nil + fss.stateCacheLock.Unlock() + return err + } + } + // reset cache + fss.stateCache = nil + fss.stateCacheLock.Unlock() + // write res to file + lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) + if err != nil { + return err + } + if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { + return err + } + // close file + if err := dstFile.Close(); err != nil { + return err + } + return nil +} + +func (fss *FileStreamingService) openDeliverTxFile() (*os.File, error) { + fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex) + if fss.filePrefix != "" { + fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) + } + fss.currentTxIndex++ + return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) +} + +// ListenEndBlock satisfies the Hook interface +// It writes out the received EndBlock request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (fss *FileStreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error { + // generate the new file + dstFile, err := fss.openEndBlockFile() + if err != nil { + return err + } + // write req to file + lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) + if err != nil { + return err + } + if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { + return err + } + // write all state changes cached for this stage to file + fss.stateCacheLock.Lock() + for _, stateChange := range fss.stateCache { + if _, err = dstFile.Write(stateChange); err != nil { + fss.stateCache = nil + fss.stateCacheLock.Unlock() + return err + } + } + // reset cache + fss.stateCache = nil + fss.stateCacheLock.Unlock() + // write res to file + lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) + if err != nil { + return err + } + if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { + return err + } + // close file + if err := dstFile.Close(); err != nil { + return err + } + return nil +} + +func (fss *FileStreamingService) openEndBlockFile() (*os.File, error) { + fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber) + if fss.filePrefix != "" { + fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) + } + return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) +} + +// Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received +// Do we need this and an intermediate writer? We could just write directly to the buffer on calls to Write +// But then we don't support a Stream interface, which could be needed for other types of streamers +func (fss *FileStreamingService) Stream(wg *sync.WaitGroup) error { + if fss.quitChan != nil { + return errors.New("`Stream` has already been called. 
The stream needs to be closed before it can be started again") + } + fss.quitChan = make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-fss.quitChan: + return + case by := <-fss.srcChan: + fss.stateCacheLock.Lock() + fss.stateCache = append(fss.stateCache, by) + fss.stateCacheLock.Unlock() + } + } + }() + return nil +} + +// Close satisfies the io.Closer interface +func (fss *FileStreamingService) Close() error { + close(fss.quitChan) + return nil +} + +// HaltAppOnDeliveryError whether or not to halt the application when delivery of massages fails +// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics. +// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will +// be replayed to all listeners when the node restarts and all successful listeners that received data +// prior to the halt will receive duplicate data. +func (fss *FileStreamingService) HaltAppOnDeliveryError() bool { + return fss.haltAppOnDeliveryError +} + +// isDirWriteable checks if dir is writable by writing and removing a file +// to dir. It returns nil if dir is writable. +func isDirWriteable(dir string) error { + f := path.Join(dir, ".touch") + if err := ioutil.WriteFile(f, []byte(""), 0600); err != nil { + return err + } + return os.Remove(f) +} diff --git a/store/streaming/file/service_test.go b/plugin/plugins/file/service/service_test.go similarity index 98% rename from store/streaming/file/service_test.go rename to plugin/plugins/file/service/service_test.go index 1276b163642d..81b081a45354 100644 --- a/store/streaming/file/service_test.go +++ b/plugin/plugins/file/service/service_test.go @@ -1,4 +1,4 @@ -package file +package service import ( "encoding/binary" @@ -22,7 +22,7 @@ import ( var ( interfaceRegistry = codecTypes.NewInterfaceRegistry() testMarshaller = codec.NewProtoCodec(interfaceRegistry) - testStreamingService *StreamingService + testStreamingService *FileStreamingService testListener1, testListener2 types.WriteListener emptyContext = sdk.Context{} @@ -133,9 +133,9 @@ func TestFileStreamingService(t *testing.T) { defer os.RemoveAll(testDir) testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} - testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller) + testStreamingService, err = NewFileStreamingService(testDir, testPrefix, testKeys, testMarshaller, true) require.Nil(t, err) - require.IsType(t, &StreamingService{}, testStreamingService) + require.IsType(t, &FileStreamingService{}, testStreamingService) require.Equal(t, testPrefix, testStreamingService.filePrefix) require.Equal(t, testDir, testStreamingService.writeDir) require.Equal(t, testMarshaller, testStreamingService.codec) diff --git a/plugin/plugins/gen_main.sh b/plugin/plugins/gen_main.sh new file mode 100644 index 000000000000..8fbc11465148 --- /dev/null +++ b/plugin/plugins/gen_main.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +dir=${1:?first parameter with dir to work in is required} +pkg=${2:?second parameter with full name of the package is required} +main_pkg="$dir/main" + +shortpkg="uniquepkgname" + +mkdir -p "$main_pkg" + +cat > "$main_pkg/main.go" < + - [Plugin design](#plugin-design) + - [Function-Based producer](#function-based-producer) + - [Delivery Report handler](#delivery-report-handler) + - [Message serde](#message-serde) + - [Message key](#message-key) + - [Example configuration](#example-configuration) + - [Testing the plugin](#testing-the-plugin) 
+  - [Confluent Platform](#confluent-platform)
+
+
+
+## Plugin design
+The plugin was built using [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go), a lightweight wrapper around [librdkafka](https://github.com/edenhill/librdkafka).
+
+This particular implementation uses:
+* `Function-Based producer` - Delivers messages to Kafka with `producer.Produce()` (librdkafka's channel-based producer is faster, but is not used here).
+* `Delivery report handler` - Notifies the application of success or failure to deliver messages to Kafka.
+
+### Function-Based producer
+The plugin uses `producer.Produce()` to deliver messages to Kafka. Delivery reports are emitted on the `producer.Events()` channel or on a dedicated private channel.
+Any errors that occur during delivery propagate up the stack and `halt` the app when `plugins.streaming.kafka.halt_app_on_delivery_error = true`.
+
+Pros:
+* Go-ish (idiomatic) API.
+
+Cons:
+* `Produce()` is a non-blocking call; if the internal librdkafka queue is full, the call will fail.
+
+The producer's queue is configurable with the `queue.buffering.max.messages` property (default: 100000). See the librdkafka [configuration docs](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) for details.
+
+### Delivery Report handler
+Producing is an asynchronous operation, so the client notifies the application of success or failure (per message) through delivery reports.
+Delivery reports are emitted by default on the `producer.Events()` channel as `*kafka.Message`; check `msg.TopicPartition.Error`, where a `nil` value indicates successful delivery.
+The plugin uses a private delivery channel, `Produce(msg *Message, deliveryChan chan Event)`, to confirm delivery of each message.
+When `plugins.streaming.kafka.halt_app_on_delivery_error = true`, the app will `halt` if delivery of any message fails. This helps keep state in sync between the node and Kafka.
+
+Pros:
+* Propagates success or failure to the application.
+* Allows the application to track the messages produced.
+* Can be turned off by setting `"go.delivery.reports": false` for a fire-and-forget scenario.
+
+Cons:
+* Slower than fire-and-forget mode (`plugins.streaming.kafka.halt_app_on_delivery_error = false`), because each message is checked for successful delivery.
+
+### Message serde
+
+As of this writing there is no Go support for serialization/deserialization of proto message schemas with the Confluent Schema Registry. Therefore, the Kafka plugin produces messages in proto binary format without a registered schema.
+
+Note: proto message schemas can be registered with the Confluent Schema Registry by [generating the Java code](https://developers.google.com/protocol-buffers/docs/javatutorial) for the Cosmos SDK proto files and then using the supported Java libraries. See the Confluent [docs](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html) for how to do this.
+
+#### Message `key`
+
+To identify and track messages in Kafka, a [msg_key.proto](./proto/msg_key.proto) message key was introduced to the plugin; a usage sketch and the key's definition follow.
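+
+For a rough sense of how such a key is used together with the per-message delivery report, here is a hedged sketch (not the plugin's actual code): the `produceKeyed` helper and its parameters are invented for illustration, and it assumes the `MsgKey` type generated from the proto definition shown after the sketch, plus an already-configured `*kafka.Producer` and a private delivery channel.
+
+```go
+package service
+
+import (
+	"github.com/confluentinc/confluent-kafka-go/kafka"
+	"google.golang.org/protobuf/proto"
+)
+
+// produceKeyed marshals a MsgKey, produces a single keyed message, and blocks
+// until the per-message delivery report arrives on deliveryChan.
+func produceKeyed(p *kafka.Producer, deliveryChan chan kafka.Event, topic string, value []byte) error {
+	key, err := proto.Marshal(&MsgKey{
+		BlockHeight: 1,
+		Event:       MsgKey_BEGIN_BLOCK,
+		EventId:     1,
+		EventType:   MsgKey_REQUEST,
+		EventTypeId: 1,
+	})
+	if err != nil {
+		return err
+	}
+	// Produce is non-blocking; it fails immediately if the internal librdkafka queue is full.
+	if err := p.Produce(&kafka.Message{
+		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+		Key:            key,
+		Value:          value, // proto-encoded payload, e.g. a RequestBeginBlock
+	}, deliveryChan); err != nil {
+		return err
+	}
+	// A non-nil TopicPartition.Error means delivery failed; with
+	// halt_app_on_delivery_error = true the app would halt in that case.
+	m := (<-deliveryChan).(*kafka.Message)
+	return m.TopicPartition.Error
+}
+```
+
+In the plugin itself this pattern is implemented by `writeAsJsonToKafka` and `checkDeliveryReport` in `service/service.go`.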
+
+```
+syntax = "proto3";
+
+option go_package = "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service";
+
+option java_multiple_files = true;
+option java_package = "network.cosmos.listening.plugins.kafka.service";
+
+message MsgKey {
+  int64 block_height = 1 [json_name = "block_height"];
+  enum Event {
+    BEGIN_BLOCK = 0;
+    END_BLOCK = 1;
+    DELIVER_TX = 2;
+  }
+  Event event = 2;
+  int64 event_id = 3 [json_name = "event_id"];
+  enum EventType {
+    REQUEST = 0;
+    RESPONSE = 1;
+    STATE_CHANGE = 2;
+  }
+  EventType event_type = 4 [json_name = "event_type"];
+  int64 event_type_id = 5 [json_name = "event_type_id"];
+}
+```
+
+### Example configuration
+
+Below is an example of how to configure the Kafka plugin.
+```
+# app.toml
+
+. . .
+
+###############################################################################
+###                     Plugin system configuration                        ###
+###############################################################################
+
+[plugins]
+
+# turn the plugin system, as a whole, on or off
+on = true
+
+# List of plugin names to enable from the plugin/plugins/*
+enabled = ["kafka"]
+
+# The directory to load non-preloaded plugins from; defaults to $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins
+dir = ""
+
+
+###############################################################################
+###                      Kafka Plugin configuration                        ###
+###############################################################################
+
+# The specific parameters for the kafka streaming service plugin
+[plugins.streaming.kafka]
+
+# List of store keys we want to expose for this streaming service.
+keys = []
+
+# Optional prefix for topic names where data will be stored.
+topic_prefix = "pio"
+
+# Flush and wait for outstanding messages and requests to complete delivery (milliseconds).
+flush_timeout_ms = 5000
+
+# Whether or not to halt the application when the plugin fails to deliver message(s).
+halt_app_on_delivery_error = true
+
+# Producer configuration properties.
+# The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka.
+# For a full list of producer configuration properties
+# see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+[plugins.streaming.kafka.producer]
+
+# Initial list of brokers as a comma separated list of broker host or host:port[, host:port[,...]]
+bootstrap_servers = "localhost:9092"
+
+# Client identifier
+client_id = "pio-state-listening"
+
+# This field indicates the number of acknowledgements the leader
+# broker must receive from ISR brokers before responding to the request
+acks = "all"
+
+# When set to true, the producer will ensure that messages
+# are successfully produced exactly once and in the original produce order.
+# The following configuration properties are adjusted automatically (if not modified by the user)
+# when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5),
+# retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo.
+# Producer instantiation will fail if user-supplied configuration is incompatible.
+enable_idempotence = true
+```
+
+## Testing the plugin
+
+Non-determinism testing has been set up to run with the Kafka plugin.
+
+To execute the tests, run:
+```
+make test-sim-nondeterminism-state-listening-kafka
+```
+
+### Confluent Platform
+
+If you're interested in viewing or querying events stored in Kafka, you can stand up the Confluent Platform stack with Docker.
+Visit the Confluent Platform [docs](https://docs.confluent.io/platform/current/quickstart/ce-docker-quickstart.html) for up to date docker instructions. diff --git a/plugin/plugins/kafka/docker-compose.yml b/plugin/plugins/kafka/docker-compose.yml new file mode 100644 index 000000000000..d8ee19bb7d38 --- /dev/null +++ b/plugin/plugins/kafka/docker-compose.yml @@ -0,0 +1,177 @@ +--- +version: '2' +services: + zookeeper: + image: confluentinc/cp-zookeeper:7.0.1 + hostname: zookeeper + container_name: zookeeper + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + + broker: + image: confluentinc/cp-server:7.0.1 + hostname: broker + container_name: broker + depends_on: + - zookeeper + ports: + - "9092:9092" + - "9101:9101" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092 + KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_JMX_PORT: 9101 + KAFKA_JMX_HOSTNAME: localhost + KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092 + CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1 + CONFLUENT_METRICS_ENABLE: 'true' + CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous' + + schema-registry: + image: confluentinc/cp-schema-registry:7.0.1 + hostname: schema-registry + container_name: schema-registry + depends_on: + - broker + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 + + connect: + image: cnfldemos/cp-server-connect-datagen:0.5.0-6.2.0 + hostname: connect + container_name: connect + depends_on: + - broker + - schema-registry + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + # CLASSPATH required due to CC-2422 + CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.0.1.jar + CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components" + CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + + control-center: + image: 
confluentinc/cp-enterprise-control-center:7.0.1 + hostname: control-center + container_name: control-center + depends_on: + - broker + - schema-registry + - connect + - ksqldb-server + ports: + - "9021:9021" + environment: + CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092' + CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083' + CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088" + CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088" + CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + CONTROL_CENTER_REPLICATION_FACTOR: 1 + CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 + CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 + CONFLUENT_METRICS_TOPIC_REPLICATION: 1 + PORT: 9021 + + ksqldb-server: + image: confluentinc/cp-ksqldb-server:7.0.1 + hostname: ksqldb-server + container_name: ksqldb-server + depends_on: + - broker + - connect + ports: + - "8088:8088" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_BOOTSTRAP_SERVERS: "broker:29092" + KSQL_HOST_NAME: ksqldb-server + KSQL_LISTENERS: "http://0.0.0.0:8088" + KSQL_CACHE_MAX_BYTES_BUFFERING: 0 + KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + KSQL_KSQL_CONNECT_URL: "http://connect:8083" + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1 + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true' + KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' + + ksqldb-cli: + image: confluentinc/cp-ksqldb-cli:7.0.1 + container_name: ksqldb-cli + depends_on: + - broker + - connect + - ksqldb-server + entrypoint: /bin/sh + tty: true + + ksql-datagen: + image: confluentinc/ksqldb-examples:7.0.1 + hostname: ksql-datagen + container_name: ksql-datagen + depends_on: + - ksqldb-server + - broker + - schema-registry + - connect + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b broker:29092 1 40 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 40 && \ + echo Waiting a few seconds for topic creation to finish... 
&& \ + sleep 11 && \ + tail -f /dev/null'" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + STREAMS_BOOTSTRAP_SERVERS: broker:29092 + STREAMS_SCHEMA_REGISTRY_HOST: schema-registry + STREAMS_SCHEMA_REGISTRY_PORT: 8081 + + rest-proxy: + image: confluentinc/cp-kafka-rest:7.0.1 + depends_on: + - broker + - schema-registry + ports: + - 8082:8082 + hostname: rest-proxy + container_name: rest-proxy + environment: + KAFKA_REST_HOST_NAME: rest-proxy + KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092' + KAFKA_REST_LISTENERS: "http://0.0.0.0:8082" + KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081' \ No newline at end of file diff --git a/plugin/plugins/kafka/kafka.go b/plugin/plugins/kafka/kafka.go new file mode 100644 index 000000000000..a5911b03d782 --- /dev/null +++ b/plugin/plugins/kafka/kafka.go @@ -0,0 +1,166 @@ +package file + +import ( + "errors" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "strings" + "sync" + + "github.com/spf13/cast" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/types" +) + +// Plugin name and version +const ( + // PLUGIN_NAME is the name for this streaming service plugin + PLUGIN_NAME = "kafka" + + // PLUGIN_VERSION is the version for this streaming service plugin + PLUGIN_VERSION = "0.0.1" +) + +// TOML configuration parameter keys +const ( + // TOPIC_PREFIX_PARAM is the Kafka topic where events will be streamed to + TOPIC_PREFIX_PARAM = "topic_prefix" + + // FLUSH_TIMEOUT_MS_PARAM is the timeout setting passed to the producer.Flush(timeoutMs) + FLUSH_TIMEOUT_MS_PARAM = "flush_timeout_ms" + + // PRODUCER_CONFIG_PARAM is a map of the Kafka Producer configuration properties + PRODUCER_CONFIG_PARAM = "producer" + + // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service + KEYS_PARAM = "keys" + + // HALT_APP_ON_DELIVERY_ERROR whether or not to halt the application when plugin fails to deliver message(s) + HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error" +) + +// Plugins is the exported symbol for loading this plugin +var Plugins = []plugin.Plugin{ + &streamingServicePlugin{}, +} + +type streamingServicePlugin struct { + kss *service.KafkaStreamingService + opts serverTypes.AppOptions +} + +var _ plugin.StateStreamingPlugin = (*streamingServicePlugin)(nil) + +// Name satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Name() string { + return PLUGIN_NAME +} + +// Version satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Version() string { + return PLUGIN_VERSION +} + +// Init satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error { + ssp.opts = env + return nil +} + +// Register satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Register( + bApp *baseapp.BaseApp, + marshaller codec.BinaryCodec, + keys map[string]*types.KVStoreKey, +) error { + // load all the params required for this plugin from the provided AppOptions + tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME) + topicPrefix := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, TOPIC_PREFIX_PARAM))) + flushTimeoutMs := cast.ToInt(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, 
FLUSH_TIMEOUT_MS_PARAM))) + haltAppOnDeliveryError := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR))) + producerConfig := cast.ToStringMap(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM))) + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) + var exposeStoreKeys []types.StoreKey + + if len(exposeKeyStrings) > 0 { + exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrings)) + for _, keyStr := range exposeKeyStrings { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } else { // if none are specified, we expose all the keys + exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) + for _, storeKey := range keys { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + + // Validate minimum producer config properties + producerConfigKey := fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM) + + if len(producerConfig) == 0 { + m := fmt.Sprintf("Failed to register plugin. Empty properties for '%s': "+ + "client will not be able to connect to Kafka cluster", producerConfigKey) + return errors.New(m) + } else { + bootstrapServers := cast.ToString(producerConfig["bootstrap_servers"]) + if len(bootstrapServers) == 0 { + m := fmt.Sprintf("Failed to register plugin. No \"%s.%s\" configured:"+ + " client will not be able to connect to Kafka cluster", producerConfigKey, "bootstrap_servers") + return errors.New(m) + } + if strings.TrimSpace(bootstrapServers) == "" { + m := fmt.Sprintf("Failed to register plugin. Empty \"%s.%s\" configured:"+ + " client will not be able to connect to Kafka cluster", producerConfigKey, "bootstrap_servers") + return errors.New(m) + } + } + + // load producer config into a kafka.ConfigMap + producerConfigMap := kafka.ConfigMap{} + for key, element := range producerConfig { + key = strings.ReplaceAll(key, "_", ".") + if err := producerConfigMap.SetKey(key, element); err != nil { + return err + } + if !haltAppOnDeliveryError { + // disable delivery reports when operating in fire-and-forget fashion + if err := producerConfigMap.SetKey("go.delivery.reports", false); err != nil { + return err + } + } + } + + var err error + ssp.kss, err = service.NewKafkaStreamingService( + producerConfigMap, + topicPrefix, + flushTimeoutMs, + exposeStoreKeys, + marshaller, + haltAppOnDeliveryError, + ) + if err != nil { + return err + } + // register the streaming service with the BaseApp + bApp.SetStreamingService(ssp.kss) + return nil +} + +// Start satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Start(wg *sync.WaitGroup) error { + return ssp.kss.Stream(wg) +} + +// Close satisfies io.Closer +func (ssp *streamingServicePlugin) Close() error { + return ssp.kss.Close() +} diff --git a/plugin/plugins/kafka/proto/msg_key.proto b/plugin/plugins/kafka/proto/msg_key.proto new file mode 100644 index 000000000000..bf91edb4e4a6 --- /dev/null +++ b/plugin/plugins/kafka/proto/msg_key.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +option go_package = "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service"; + +option java_multiple_files = true; +option java_package = "network.cosmos.listening.plugins.kafka.service"; + +message MsgKey { + int64 block_height = 1 [json_name = "block_height"]; + enum Event { + BEGIN_BLOCK = 0; + END_BLOCK = 1; + DELIVER_TX = 2; + } + Event event = 2; + int64 event_id 
= 3 [json_name = "event_id"]; + enum EventType { + REQUEST = 0; + RESPONSE = 1; + STATE_CHANGE = 2; + } + EventType event_type = 4 [json_name = "event_type"]; + int64 event_type_id = 5 [json_name = "event_type_id"]; +} \ No newline at end of file diff --git a/plugin/plugins/kafka/service/msg_key.pb.go b/plugin/plugins/kafka/service/msg_key.pb.go new file mode 100644 index 000000000000..dddfe588aefd --- /dev/null +++ b/plugin/plugins/kafka/service/msg_key.pb.go @@ -0,0 +1,300 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: msg_key.proto + +package service + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MsgKey_Event int32 + +const ( + MsgKey_BEGIN_BLOCK MsgKey_Event = 0 + MsgKey_END_BLOCK MsgKey_Event = 1 + MsgKey_DELIVER_TX MsgKey_Event = 2 +) + +// Enum value maps for MsgKey_Event. +var ( + MsgKey_Event_name = map[int32]string{ + 0: "BEGIN_BLOCK", + 1: "END_BLOCK", + 2: "DELIVER_TX", + } + MsgKey_Event_value = map[string]int32{ + "BEGIN_BLOCK": 0, + "END_BLOCK": 1, + "DELIVER_TX": 2, + } +) + +func (x MsgKey_Event) Enum() *MsgKey_Event { + p := new(MsgKey_Event) + *p = x + return p +} + +func (x MsgKey_Event) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MsgKey_Event) Descriptor() protoreflect.EnumDescriptor { + return file_msg_key_proto_enumTypes[0].Descriptor() +} + +func (MsgKey_Event) Type() protoreflect.EnumType { + return &file_msg_key_proto_enumTypes[0] +} + +func (x MsgKey_Event) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MsgKey_Event.Descriptor instead. +func (MsgKey_Event) EnumDescriptor() ([]byte, []int) { + return file_msg_key_proto_rawDescGZIP(), []int{0, 0} +} + +type MsgKey_EventType int32 + +const ( + MsgKey_REQUEST MsgKey_EventType = 0 + MsgKey_RESPONSE MsgKey_EventType = 1 + MsgKey_STATE_CHANGE MsgKey_EventType = 2 +) + +// Enum value maps for MsgKey_EventType. +var ( + MsgKey_EventType_name = map[int32]string{ + 0: "REQUEST", + 1: "RESPONSE", + 2: "STATE_CHANGE", + } + MsgKey_EventType_value = map[string]int32{ + "REQUEST": 0, + "RESPONSE": 1, + "STATE_CHANGE": 2, + } +) + +func (x MsgKey_EventType) Enum() *MsgKey_EventType { + p := new(MsgKey_EventType) + *p = x + return p +} + +func (x MsgKey_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MsgKey_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_msg_key_proto_enumTypes[1].Descriptor() +} + +func (MsgKey_EventType) Type() protoreflect.EnumType { + return &file_msg_key_proto_enumTypes[1] +} + +func (x MsgKey_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MsgKey_EventType.Descriptor instead. 
+func (MsgKey_EventType) EnumDescriptor() ([]byte, []int) { + return file_msg_key_proto_rawDescGZIP(), []int{0, 1} +} + +type MsgKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,proto3" json:"block_height,omitempty"` + Event MsgKey_Event `protobuf:"varint,2,opt,name=event,proto3,enum=MsgKey_Event" json:"event,omitempty"` + EventId int64 `protobuf:"varint,3,opt,name=event_id,proto3" json:"event_id,omitempty"` + EventType MsgKey_EventType `protobuf:"varint,4,opt,name=event_type,proto3,enum=MsgKey_EventType" json:"event_type,omitempty"` + EventTypeId int64 `protobuf:"varint,5,opt,name=event_type_id,proto3" json:"event_type_id,omitempty"` +} + +func (x *MsgKey) Reset() { + *x = MsgKey{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_key_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MsgKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MsgKey) ProtoMessage() {} + +func (x *MsgKey) ProtoReflect() protoreflect.Message { + mi := &file_msg_key_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MsgKey.ProtoReflect.Descriptor instead. +func (*MsgKey) Descriptor() ([]byte, []int) { + return file_msg_key_proto_rawDescGZIP(), []int{0} +} + +func (x *MsgKey) GetBlockHeight() int64 { + if x != nil { + return x.BlockHeight + } + return 0 +} + +func (x *MsgKey) GetEvent() MsgKey_Event { + if x != nil { + return x.Event + } + return MsgKey_BEGIN_BLOCK +} + +func (x *MsgKey) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *MsgKey) GetEventType() MsgKey_EventType { + if x != nil { + return x.EventType + } + return MsgKey_REQUEST +} + +func (x *MsgKey) GetEventTypeId() int64 { + if x != nil { + return x.EventTypeId + } + return 0 +} + +var File_msg_key_proto protoreflect.FileDescriptor + +var file_msg_key_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x6d, 0x73, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xb9, 0x02, 0x0a, 0x06, 0x4d, 0x73, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x23, + 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, + 0x4d, 0x73, 0x67, 0x4b, 0x65, 0x79, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x12, + 0x31, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x4d, 0x73, 0x67, 0x4b, 0x65, 0x79, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x22, 0x37, 0x0a, 0x05, 
0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x45, 0x4e, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x5f, 0x54, 0x58, 0x10, + 0x02, 0x22, 0x38, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, + 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x52, + 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x42, 0x65, 0x0a, 0x26, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_msg_key_proto_rawDescOnce sync.Once + file_msg_key_proto_rawDescData = file_msg_key_proto_rawDesc +) + +func file_msg_key_proto_rawDescGZIP() []byte { + file_msg_key_proto_rawDescOnce.Do(func() { + file_msg_key_proto_rawDescData = protoimpl.X.CompressGZIP(file_msg_key_proto_rawDescData) + }) + return file_msg_key_proto_rawDescData +} + +var file_msg_key_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_msg_key_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_msg_key_proto_goTypes = []interface{}{ + (MsgKey_Event)(0), // 0: MsgKey.Event + (MsgKey_EventType)(0), // 1: MsgKey.EventType + (*MsgKey)(nil), // 2: MsgKey +} +var file_msg_key_proto_depIdxs = []int32{ + 0, // 0: MsgKey.event:type_name -> MsgKey.Event + 1, // 1: MsgKey.event_type:type_name -> MsgKey.EventType + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_msg_key_proto_init() } +func file_msg_key_proto_init() { + if File_msg_key_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_msg_key_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MsgKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_msg_key_proto_rawDesc, + NumEnums: 2, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_msg_key_proto_goTypes, + DependencyIndexes: file_msg_key_proto_depIdxs, + EnumInfos: file_msg_key_proto_enumTypes, + MessageInfos: file_msg_key_proto_msgTypes, + }.Build() + File_msg_key_proto = out.File + file_msg_key_proto_rawDesc = nil + file_msg_key_proto_goTypes = nil + file_msg_key_proto_depIdxs = nil +} diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go new file mode 100644 index 
000000000000..94994cd22284 --- /dev/null +++ b/plugin/plugins/kafka/service/service.go @@ -0,0 +1,407 @@ +package service + +import ( + "errors" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" + "google.golang.org/protobuf/proto" + "sync" +) + +/* +This service writes all messages to a single topicPrefix with only one partition to maintain the order of messages. + +The naming schema and data format for the messages this service writes out to Kafka is as such: + +After every `BeginBlockEvent` request a new message key prefix is created with the name `block-{N}-begin`, where N is the block number. +All subsequent state changes are written out to this topicPrefix until the first `DeliverTxEvent` request is received. At the head of these files, +the length-prefixed protobuf encoded `BeginBlockEvent` request is written, and the response is written at the tail. + +After every `DeliverTxEvent` request a new file is created with the name `block-{N}-tx-{M}` where N is the block number and M +is the tx number in the block (i.e. 0, 1, 2...). All subsequent state changes are written out to this file until the next +`DeliverTxEvent` request is received or an `EndBlockEvent` request is received. At the head of these files, the length-prefixed protobuf +encoded `DeliverTxEvent` request is written, and the response is written at the tail. + +After every `EndBlockEvent` request a new file is created with the name `block-{N}-end`, where N is the block number. All +subsequent state changes are written out to this file until the next `BeginBlockEvent` request is received. At the head of these files, +the length-prefixed protobuf encoded `EndBlockEvent` request is written, and the response is written at the tail. +*/ + +// Event Kafka message key enum types for listen events. +type Event int64 +const ( + BEGIN_BLOCK Event = iota + END_BLOCK + DELIVER_TX +) + +// EventType Kafka message key enum types for the event types. 
+type EventType int64 +const ( + REQUEST EventType = iota + RESPONSE + STATE_CHANGE +) + +// EventTypeValueTypeTopic Kafka topic name enum types +type EventTypeValueTypeTopic string +const ( + BeginBlockReqTopic EventTypeValueTypeTopic = "begin-block-req" + BeginBlockResTopic = "begin-block-res" + EndBlockReqTopic = "end-block-req" + EndBlockResTopic = "end-block-res" + DeliverTxReqTopic = "deliver-tx-req" + DeliverTxResTopic = "deliver-tx-res" + StateChangeTopic = "state-change" +) + +// MsgKeyFtm Kafka message composite key format enum types +const ( + MsgKeyFtm = `{"block_height":%d,"event":%d,"event_id":%d,"event_type":%d,"event_type_id":%d}` +) + +var _ baseapp.StreamingService = (*KafkaStreamingService)(nil) + +// KafkaStreamingService is a concrete implementation of streaming.Service that writes state changes out to Kafka +type KafkaStreamingService struct { + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + topicPrefix string // topicPrefix prefix name + producer *kafka.Producer // the producer instance that will be used to send messages to Kafka + flushTimeoutMs int // the time to wait for outstanding messages and requests to complete delivery (milliseconds) + codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + deliveryChan chan kafka.Event // Kafka producer delivery report channel + haltAppOnDeliveryError bool // true if the app should be halted on streaming errors, false otherwise +} + +// IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener +// everytime we begin writing to Kafka topic(s) +type IntermediateWriter struct { + outChan chan<- []byte +} + +// NewIntermediateWriter create an instance of an intermediateWriter that sends to the provided channel +func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { + return &IntermediateWriter{ + outChan: outChan, + } +} + +// Write satisfies io.Writer +func (iw *IntermediateWriter) Write(b []byte) (int, error) { + iw.outChan <- b + return len(b), nil +} + +// NewKafkaStreamingService creates a new KafkaStreamingService +func NewKafkaStreamingService( + producerConfig kafka.ConfigMap, + topicPrefix string, + flushTimeoutMs int, + storeKeys []types.StoreKey, + c codec.BinaryCodec, + haltAppOnDeliveryError bool, +) (*KafkaStreamingService, error) { + listenChan := make(chan []byte) + iw := NewIntermediateWriter(listenChan) + listener := types.NewStoreKVPairWriteListener(iw, c) + listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) + // in this case, we are using the same listener for each Store + for _, key := range storeKeys { + listeners[key] = append(listeners[key], listener) + } + // Initialize the producer and connect to Kafka cluster + p, err := kafka.NewProducer(&producerConfig) + if err != nil { + return nil, err + } + + kss := &KafkaStreamingService{ + listeners: listeners, + srcChan: listenChan, + topicPrefix: topicPrefix, + producer: p, + flushTimeoutMs: 
flushTimeoutMs, + codec: c, + stateCache: make([][]byte, 0), + stateCacheLock: new(sync.Mutex), + haltAppOnDeliveryError: haltAppOnDeliveryError, + } + + // setup private delivery channel to listen for delivery errors. + if haltAppOnDeliveryError { + kss.deliveryChan = make(chan kafka.Event) + } + + return kss, nil +} + +// Listeners returns the KafkaStreamingService's underlying WriteListeners, use for registering them with the BaseApp +func (kss *KafkaStreamingService) Listeners() map[types.StoreKey][]types.WriteListener { + return kss.listeners +} + +// ListenBeginBlock satisfies the Hook interface +// It writes out the received BeginBlockEvent request and response and the resulting state changes out to a Kafka topicPrefix +// as described in the above the naming schema +func (kss *KafkaStreamingService) ListenBeginBlock( + ctx sdk.Context, + req abci.RequestBeginBlock, + res abci.ResponseBeginBlock, +) error { + kss.setBeginBlock(req) + event := int64(BEGIN_BLOCK) + eventId := int64(1) + eventTypeId := int64(1) + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(REQUEST), + EventTypeId: eventTypeId, + } + + // write req + if err := kss.writeAsJsonToKafka(ctx, string(BeginBlockReqTopic), key, &req); err != nil { + return err + } + + // write state changes + if err := kss.writeStateChange(ctx, event, eventId); err != nil { + return err + } + + // write res + key.EventType = MsgKey_EventType(RESPONSE) + if err := kss.writeAsJsonToKafka(ctx, BeginBlockResTopic, key, &res); err != nil { + return err + } + + return nil +} + +func (kss *KafkaStreamingService) setBeginBlock(req abci.RequestBeginBlock) { + kss.currentBlockNumber = req.GetHeader().Height + kss.currentTxIndex = 0 +} + +// ListenDeliverTx satisfies the Hook interface +// It writes out the received DeliverTxEvent request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (kss *KafkaStreamingService) ListenDeliverTx( + ctx sdk.Context, + req abci.RequestDeliverTx, + res abci.ResponseDeliverTx, +) error { + event := int64(DELIVER_TX) + eventId := kss.getDeliverTxId() + eventTypeId := int64(1) + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(REQUEST), + EventTypeId: eventTypeId, + } + + // write req + if err := kss.writeAsJsonToKafka(ctx, DeliverTxReqTopic, key, &req); err != nil { + return err + } + + // write state changes + if err := kss.writeStateChange(ctx, event, eventId); err != nil { + return err + } + + // write res + key.EventType = MsgKey_EventType(RESPONSE) + if err := kss.writeAsJsonToKafka(ctx, DeliverTxResTopic, key, &res); err != nil { + return err + } + + return nil +} + +func (kss *KafkaStreamingService) getDeliverTxId() int64 { + kss.currentTxIndex++ + return kss.currentTxIndex +} + +// ListenEndBlock satisfies the Hook interface +// It writes out the received EndBlockEvent request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (kss *KafkaStreamingService) ListenEndBlock( + ctx sdk.Context, + req abci.RequestEndBlock, + res abci.ResponseEndBlock, +) error { + event := int64(END_BLOCK) + eventId := int64(1) + eventTypeId := int64(1) + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(REQUEST), + EventTypeId: eventTypeId, + } + + // write 
req + if err := kss.writeAsJsonToKafka(ctx, EndBlockReqTopic, key, &req); err != nil { + return err + } + + // write state changes + if err := kss.writeStateChange(ctx, event, eventId); err != nil { + return err + } + + // write res + key.EventType = MsgKey_EventType(RESPONSE) + if err := kss.writeAsJsonToKafka(ctx, EndBlockResTopic, key, &res); err != nil { + return err + } + + return nil +} + +// HaltAppOnDeliveryError whether or not to halt the application when delivery of massages fails +// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics. +// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will +// be replayed to all listeners when the node restarts and all successful listeners that received data +// prior to the halt will receive duplicate data. +func (kss *KafkaStreamingService) HaltAppOnDeliveryError() bool { + return kss.haltAppOnDeliveryError +} + +// Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received +// Do we need this and an intermediate writer? We could just write directly to the buffer on calls to Write +// But then we don't support a Stream interface, which could be needed for other types of streamers +func (kss *KafkaStreamingService) Stream(wg *sync.WaitGroup) error { + if kss.quitChan != nil { + return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again") + } + kss.quitChan = make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-kss.quitChan: + return + case by := <-kss.srcChan: + kss.stateCacheLock.Lock() + kss.stateCache = append(kss.stateCache, by) + kss.stateCacheLock.Unlock() + } + } + }() + return nil +} + +// Close satisfies the io.Closer interface +func (kss *KafkaStreamingService) Close() error { + kss.producer.Flush(kss.flushTimeoutMs) + close(kss.quitChan) + close(kss.deliveryChan) + kss.producer.Close() + return nil +} + +func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event int64, eventId int64) error { + // write all state changes cached for this stage to Kafka + kss.stateCacheLock.Lock() + kvPair := new(types.StoreKVPair) + for i, stateChange := range kss.stateCache { + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(STATE_CHANGE), + EventTypeId: int64(i + 1), + } + if err := kss.codec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { + kss.stateCacheLock.Unlock() + return err + } + if err := kss.writeAsJsonToKafka(ctx, StateChangeTopic, key, kvPair); err != nil { + kss.stateCacheLock.Unlock() + return err + } + } + + // reset cache + kss.stateCache = nil + kss.stateCacheLock.Unlock() + + return nil +} + +func (kss *KafkaStreamingService) writeAsJsonToKafka( + ctx sdk.Context, + topic string, + msgKey *MsgKey, + msgValue codec.ProtoMarshaler, +) error { + key, err := proto.Marshal(msgKey) + if err != nil { + return err + } + value, err := kss.codec.Marshal(msgValue) + if err != nil { + return err + } + + if len(kss.topicPrefix) > 0 { + topic = fmt.Sprintf("%s-%s", kss.topicPrefix, topic) + } + + // produce message + // when `halt_app_on_delivery_error = false`, kss.deliveryChan is `nil` + // and the producer is configured with `go.delivery.reports: false` + // this means that the producer operates in a fire-and-forget mode + if err := 
kss.producer.Produce(&kafka.Message{ + TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, + Key: key, + Value: value, + }, kss.deliveryChan); err != nil { + return err + } + + return kss.checkDeliveryReport(ctx) +} + +// checkDeliveryReport checks kafka.Producer delivery report for successful or failed messages +func (kss *KafkaStreamingService) checkDeliveryReport(ctx sdk.Context) error { + if kss.deliveryChan == nil { + return nil + } + + e := <-kss.deliveryChan + m := e.(*kafka.Message) + topic := *m.TopicPartition.Topic + partition := m.TopicPartition.Partition + offset := m.TopicPartition.Offset + key := string(m.Key) + topicErr := m.TopicPartition.Error + logger := ctx.Logger() + + if topicErr != nil { + logger.Error("Delivery failed: ", "topic", topic, "partition", partition, "key", key, "err", topicErr) + } else { + logger.Debug("Delivered message:", "topic", topic, "partition", partition, "offset", offset, "key", key) + } + + return topicErr +} \ No newline at end of file diff --git a/plugin/plugins/kafka/service/service_test.go b/plugin/plugins/kafka/service/service_test.go new file mode 100644 index 000000000000..2305c9bc0c90 --- /dev/null +++ b/plugin/plugins/kafka/service/service_test.go @@ -0,0 +1,560 @@ +package service + +import ( + "context" + "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/tendermint/tendermint/libs/log" + "os" + "os/signal" + "sync" + "syscall" + "testing" + "time" + + "github.com/cosmos/cosmos-sdk/codec" + codecTypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + types1 "github.com/tendermint/tendermint/proto/tendermint/types" +) + +var ( + interfaceRegistry = codecTypes.NewInterfaceRegistry() + testMarshaller = codec.NewProtoCodec(interfaceRegistry) + testStreamingService *KafkaStreamingService + testListener1, testListener2 types.WriteListener + testingCtx sdk.Context + + // test abci message types + mockHash = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + testBeginBlockReq = abci.RequestBeginBlock{ + Header: types1.Header{ + Height: 1, + }, + ByzantineValidators: []abci.Evidence{}, + Hash: mockHash, + LastCommitInfo: abci.LastCommitInfo{ + Round: 1, + Votes: []abci.VoteInfo{}, + }, + } + testBeginBlockRes = abci.ResponseBeginBlock{ + Events: []abci.Event{ + { + Type: "testEventType1", + }, + { + Type: "testEventType2", + }, + }, + } + testEndBlockReq = abci.RequestEndBlock{ + Height: 1, + } + testEndBlockRes = abci.ResponseEndBlock{ + Events: []abci.Event{}, + ConsensusParamUpdates: &types1.ConsensusParams{}, + ValidatorUpdates: []abci.ValidatorUpdate{}, + } + mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} + testDeliverTxReq1 = abci.RequestDeliverTx{ + Tx: mockTxBytes1, + } + mockTxBytes2 = []byte{8, 7, 6, 5, 4, 3, 2} + testDeliverTxReq2 = abci.RequestDeliverTx{ + Tx: mockTxBytes2, + } + mockTxResponseData1 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes1 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData1, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + mockTxResponseData2 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes2 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData2, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + 
+ // mock store keys + mockStoreKey1 = sdk.NewKVStoreKey("mockStore1") + mockStoreKey2 = sdk.NewKVStoreKey("mockStore2") + + // Kafka stuff + bootstrapServers = "localhost:9092" + topicPrefix = "block" + flushTimeoutMs = 15000 + topics = []string{ + string(BeginBlockReqTopic), + BeginBlockResTopic, + DeliverTxReqTopic, + DeliverTxResTopic, + EndBlockReqTopic, + EndBlockResTopic, + StateChangeTopic, + } + + producerConfig = kafka.ConfigMap{ + "bootstrap.servers": bootstrapServers, + "client.id": "testKafkaStreamService", + "security.protocol": "PLAINTEXT", + "enable.idempotence": "true", + // Best practice for Kafka producer to prevent data loss + "acks": "all", + } + + // mock state changes + mockKey1 = []byte{1, 2, 3} + mockValue1 = []byte{3, 2, 1} + mockKey2 = []byte{2, 3, 4} + mockValue2 = []byte{4, 3, 2} + mockKey3 = []byte{3, 4, 5} + mockValue3 = []byte{5, 4, 3} +) + +func TestIntermediateWriter(t *testing.T) { + outChan := make(chan []byte, 0) + iw := NewIntermediateWriter(outChan) + require.IsType(t, &IntermediateWriter{}, iw) + testBytes := []byte{1, 2, 3, 4, 5} + var length int + var err error + waitChan := make(chan struct{}, 0) + go func() { + length, err = iw.Write(testBytes) + waitChan <- struct{}{} + }() + receivedBytes := <-outChan + <-waitChan + require.Equal(t, len(testBytes), length) + require.Equal(t, testBytes, receivedBytes) + require.Nil(t, err) +} + +// change this to write to in-memory io.Writer (e.g. bytes.Buffer) +func TestKafkaStreamingService(t *testing.T) { + testingCtx = sdk.NewContext(nil, types1.Header{}, false, log.TestingLogger()) + testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} + kss, err := NewKafkaStreamingService(producerConfig, topicPrefix, flushTimeoutMs, testKeys, testMarshaller, true) + testStreamingService = kss + require.Nil(t, err) + require.IsType(t, &KafkaStreamingService{}, testStreamingService) + require.Equal(t, topicPrefix, testStreamingService.topicPrefix) + require.Equal(t, testMarshaller, testStreamingService.codec) + deleteTopics(t, topics, bootstrapServers) + createTopics(t, topics, bootstrapServers) + testListener1 = testStreamingService.listeners[mockStoreKey1][0] + testListener2 = testStreamingService.listeners[mockStoreKey2][0] + wg := new(sync.WaitGroup) + testStreamingService.Stream(wg) + testListenBeginBlock(t) + testListenDeliverTx1(t) + testListenDeliverTx2(t) + testListenEndBlock(t) + testStreamingService.Close() + wg.Wait() +} + +func testListenBeginBlock(t *testing.T) { + expectedBeginBlockReqBytes, err := testMarshaller.MarshalJSON(&testBeginBlockReq) + require.Nil(t, err) + expectedBeginBlockResBytes, err := testMarshaller.MarshalJSON(&testBeginBlockRes) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI 
messages + err = testStreamingService.ListenBeginBlock(testingCtx, testBeginBlockReq, testBeginBlockRes) + require.Nil(t, err) + + // consume stored messages + topics := []string{string(BeginBlockReqTopic), BeginBlockResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedBeginBlockReqBytes, getMessageValueForTopic(msgs, string(BeginBlockReqTopic), 0)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 0)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 1)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 2)) + require.Equal(t, expectedBeginBlockResBytes, getMessageValueForTopic(msgs, BeginBlockResTopic, 0)) +} + +func testListenDeliverTx1(t *testing.T) { + expectedDeliverTxReq1Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxReq1) + require.Nil(t, err) + expectedDeliverTxRes1Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxRes1) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenDeliverTx(testingCtx, testDeliverTxReq1, testDeliverTxRes1) + require.Nil(t, err) + + // consume stored messages + topics := []string{DeliverTxReqTopic, DeliverTxResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedDeliverTxReq1Bytes, getMessageValueForTopic(msgs, DeliverTxReqTopic, 0)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 3)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 4)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 5)) + require.Equal(t, expectedDeliverTxRes1Bytes, getMessageValueForTopic(msgs, DeliverTxResTopic, 0)) +} + +func testListenDeliverTx2(t *testing.T) { + expectedDeliverTxReq2Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxReq2) + require.Nil(t, err) + expectedDeliverTxRes2Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxRes2) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + 
Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenDeliverTx(testingCtx, testDeliverTxReq2, testDeliverTxRes2) + require.Nil(t, err) + + // consume stored messages + topics := []string{DeliverTxReqTopic, DeliverTxResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedDeliverTxReq2Bytes, getMessageValueForTopic(msgs, DeliverTxReqTopic, 1)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 6)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 7)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 8)) + require.Equal(t, expectedDeliverTxRes2Bytes, getMessageValueForTopic(msgs, DeliverTxResTopic, 1)) +} + +func testListenEndBlock(t *testing.T) { + expectedEndBlockReqBytes, err := testMarshaller.MarshalJSON(&testEndBlockReq) + require.Nil(t, err) + expectedEndBlockResBytes, err := testMarshaller.MarshalJSON(&testEndBlockRes) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenEndBlock(testingCtx, testEndBlockReq, testEndBlockRes) + require.Nil(t, err) + + // consume stored messages + topics := []string{EndBlockReqTopic, EndBlockResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedEndBlockReqBytes, getMessageValueForTopic(msgs, EndBlockReqTopic, 0)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 9)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 10)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 11)) + require.Equal(t, expectedEndBlockResBytes, getMessageValueForTopic(msgs, EndBlockResTopic, 0)) +} + +func getMessageValueForTopic(msgs []*kafka.Message, topic string, offset int64) []byte { + topic = fmt.Sprintf("%s-%s", topicPrefix, topic) + for _, m := range msgs { + t := *m.TopicPartition.Topic + o := int64(m.TopicPartition.Offset) + if t == topic && o == offset { + return m.Value + } + } + return []byte{0} +} + +func poll(bootstrapServers string, topics []string, expectedMsgCnt int) ([]*kafka.Message, error) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) + + c, err := kafka.NewConsumer(&kafka.ConfigMap{ 
+ "bootstrap.servers": bootstrapServers, + // Avoid connecting to IPv6 brokers: + // This is needed for the ErrAllBrokersDown show-case below + // when using localhost brokers on OSX, since the OSX resolver + // will return the IPv6 addresses first. + // You typically don't need to specify this configuration property. + "broker.address.family": "v4", + "group.id": fmt.Sprintf("testGroup-%d", os.Process{}.Pid), + "auto.offset.reset": "earliest"}) + + if err != nil { + panic(fmt.Sprintf("Failed to create consumer: %s\n", err)) + } + + fmt.Printf("Created Consumer %v\n", c) + + var _topics []string + for _, t := range topics { + _topics = append(_topics, fmt.Sprintf("%s-%s", topicPrefix, t)) + } + + if err = c.SubscribeTopics(_topics, nil); err != nil { + panic(fmt.Sprintf("Failed to subscribe to consumer: %s\n", err)) + } + + msgs := make([]*kafka.Message, 0) + + run := true + + for run { + select { + case sig := <-sigchan: + fmt.Printf("Caught signal %v: terminating\n", sig) + run = false + default: + ev := c.Poll(100) + if ev == nil { + continue + } + + switch e := ev.(type) { + case *kafka.Message: + msgs = append(msgs, e) + case kafka.Error: + // Errors should generally be considered + // informational, the client will try to + // automatically recover. + // But in this example we choose to terminate + // the application if all brokers are down. + fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + if e.Code() == kafka.ErrAllBrokersDown { + run = false + } + default: + fmt.Printf("Ignored %v\n", e) + + // Workaround so our tests pass. + // Wait for the expected messages to be delivered before closing the consumer + if expectedMsgCnt == len(msgs) { + run = false + } + } + } + } + + fmt.Printf("Closing consumer\n") + if err := c.Close(); err != nil { + return nil, err + } + + return msgs, nil +} + +func createTopics(t *testing.T, topics []string, bootstrapServers string) { + + adminClient, err := kafka.NewAdminClient(&kafka.ConfigMap{ + "bootstrap.servers": bootstrapServers, + "broker.version.fallback": "0.10.0.0", + "api.version.fallback.ms": 0, + }) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + t.Fail() + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create topics on cluster. + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDuration, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("time.ParseDuration(60s)") + t.Fail() + } + + var _topics []kafka.TopicSpecification + for _, s := range topics { + _topics = append(_topics, + kafka.TopicSpecification{ + Topic: fmt.Sprintf("%s-%s", topicPrefix, s), + NumPartitions: 1, + ReplicationFactor: 1}) + } + results, err := adminClient.CreateTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDuration)) + if err != nil { + fmt.Printf("Problem during the topicPrefix creation: %v\n", err) + t.Fail() + } + + // Check for specific topicPrefix errors + for _, result := range results { + if result.Error.Code() != kafka.ErrNoError && + result.Error.Code() != kafka.ErrTopicAlreadyExists { + fmt.Printf("Topic creation failed for %s: %v", + result.Topic, result.Error.String()) + t.Fail() + } + } + + adminClient.Close() +} + +func deleteTopics(t *testing.T, topics []string, bootstrapServers string) { + // Create a new AdminClient. 
+ // AdminClient can also be instantiated using an existing + // Producer or Consumer instance, see NewAdminClientFromProducer and + // NewAdminClientFromConsumer. + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + t.Fail() + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Delete topics on cluster + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("ParseDuration(60s)") + t.Fail() + } + + var _topics []string + for _, s := range topics { + _topics = append(_topics, fmt.Sprintf("%s-%s", topicPrefix, s)) + } + + results, err := a.DeleteTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDur)) + if err != nil { + fmt.Printf("Failed to delete topics: %v\n", err) + t.Fail() + } + + // Print results + for _, result := range results { + fmt.Printf("%s\n", result) + } + + a.Close() +} diff --git a/plugin/plugins/trace/README.md b/plugin/plugins/trace/README.md new file mode 100644 index 000000000000..aec40cfe1450 --- /dev/null +++ b/plugin/plugins/trace/README.md @@ -0,0 +1,59 @@ +# Trace Plugin + +This plugin demonstrates how to listen to state changes of individual `KVStores` as described in [ADR-038 State Listening](https://github.com/vulcanize/cosmos-sdk/blob/adr038_plugin_proposal/docs/architecture/adr-038-state-listening.md). + + + + +- [Running the plugin](#running-the-plugin) +- [Plugin design](#plugin-design) + + +## Running the plugin + +The plugin is setup to run as the `default` plugin. See `./plugin/loader/preload_list` for how to enable and disable default plugins. For lighter unit test run: `./plugin/plugins/kafka/service/service_test.go`. + +1. Copy the content below to `~/app.toml`. + + ``` + # app.toml + + . . . + + ############################################################################### + ### Plugin system configuration ### + ############################################################################### + + [plugins] + + # turn the plugin system, as a whole, on or off + on = true + + # List of plugin names to enable from the plugin/plugins/* + enabled = ["kafka"] + + # The directory to load non-preloaded plugins from; defaults $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins + dir = "" + + ############################################################################### + ### Trace Plugin configuration ### + ############################################################################### + + # The specific parameters for the trace streaming service plugin + [plugins.streaming.trace] + + # List of store keys we want to expose for this streaming service. + keys = [] + + # In addition to block event info, print the data to stdout as well. + print_data_to_stdout = false + + # Whether or not to halt the application when plugin fails to deliver message(s). + halt_app_on_delivery_error = false + ``` + +2. Run `make test-sim-nondeterminism-state-listening-trace` and wait for the tests to finish. + + +## Plugin design +The plugin is an example implementation of [ADR-038 State Listening](https://docs.cosmos.network/master/architecture/adr-038-state-listening.html) where state change events get logged at `DEBUG` level. 
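For orientation, a minimal sketch of what the emitted log lines look like, assuming the `LogMsgFmt` layout used in `plugin/plugins/trace/service/service.go`; heights and event ids here are illustrative only:

```
block_height:1 => event:BEGIN_BLOCK => event_id:1 => event_type:REQUEST => event_type_id:1 => data:omitted
block_height:1 => event:BEGIN_BLOCK => event_id:1 => event_type:STATE_CHANGE => event_type_id:1 => data:omitted
block_height:1 => event:DELIVER_TX => event_id:1 => event_type:RESPONSE => event_type_id:1 => data:omitted
```

With `print_data_to_stdout = true` the `data:omitted` suffix is replaced by the marshalled request, response, or `StoreKVPair` contents.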
diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go new file mode 100644 index 000000000000..a299e71cd338 --- /dev/null +++ b/plugin/plugins/trace/service/service.go @@ -0,0 +1,282 @@ +package service + +import ( + "errors" + "fmt" + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/gogo/protobuf/proto" + abci "github.com/tendermint/tendermint/abci/types" + "sync" + + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +var _ baseapp.StreamingService = (*TraceStreamingService)(nil) + +// Event message key enum types for listen events. +type Event string +const ( + BeginBlockEvent Event = "BEGIN_BLOCK" + EndBlockEvent = "END_BLOCK" + DeliverTxEvent = "DELIVER_TX" +) + +// EventType message key enum types for the event types. +type EventType string +const ( + RequestEventType EventType = "REQUEST" + ResponseEventType = "RESPONSE" + StateChangeEventType = "STATE_CHANGE" +) + +// LogMsgFmt message output format +const ( + LogMsgFmt = `block_height:%d => event:%s => event_id:%d => event_type:%s => event_type_id:%d` +) + +// TraceStreamingService is a concrete implementation of streaming.Service that writes state changes to log file. +type TraceStreamingService struct { + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + printDataToStdout bool // Print types.StoreKVPair data stored in each event to stdout. 
+ haltAppOnDeliveryError bool // true if the app should be halted on streaming errors, false otherwise +} + +// IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener +// everytime we begin writing +type IntermediateWriter struct { + outChan chan<- []byte +} + +// NewIntermediateWriter create an instance of an intermediateWriter that sends to the provided channel +func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { + return &IntermediateWriter{ + outChan: outChan, + } +} + +// Write satisfies io.Writer +func (iw *IntermediateWriter) Write(b []byte) (int, error) { + iw.outChan <- b + return len(b), nil +} + +// NewTraceStreamingService creates a new TraceStreamingService for the provided +// storeKeys, BinaryCodec and deliverBlockWaitLimit (in milliseconds) +func NewTraceStreamingService( + storeKeys []types.StoreKey, + c codec.BinaryCodec, + printDataToStdout bool, + haltAppOnDeliveryError bool, +) (*TraceStreamingService, error) { + listenChan := make(chan []byte) + iw := NewIntermediateWriter(listenChan) + listener := types.NewStoreKVPairWriteListener(iw, c) + listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) + // in this case, we are using the same listener for each Store + for _, key := range storeKeys { + listeners[key] = append(listeners[key], listener) + } + + tss := &TraceStreamingService{ + listeners: listeners, + srcChan: listenChan, + codec: c, + stateCache: make([][]byte, 0), + stateCacheLock: new(sync.Mutex), + printDataToStdout: printDataToStdout, + haltAppOnDeliveryError: haltAppOnDeliveryError, + } + + return tss, nil +} + +// Listeners returns the TraceStreamingService's underlying WriteListeners, use for registering them with the BaseApp +func (tss *TraceStreamingService) Listeners() map[types.StoreKey][]types.WriteListener { + return tss.listeners +} + +// ListenBeginBlock satisfies the Hook interface +// It writes out the received BeginBlockEvent request and response and the resulting state changes to the log +func (tss *TraceStreamingService) ListenBeginBlock( + ctx sdk.Context, + req abci.RequestBeginBlock, + res abci.ResponseBeginBlock, +) error { + tss.setBeginBlock(req) + eventId := int64(1) + eventTypeId := 1 + + // write req + key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, BeginBlockEvent, eventId, RequestEventType, eventTypeId) + if err := tss.writeEventReqRes(ctx, key, &req); err != nil { + return err + } + + // write state changes + if err := tss.writeStateChange(ctx, string(BeginBlockEvent), eventId); err != nil { + return err + } + + // write res + key = fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, EndBlockEvent, 1, ResponseEventType, 1) + if err := tss.writeEventReqRes(ctx, key, &res); err != nil { + return err + } + + return nil +} + +func (tss *TraceStreamingService) setBeginBlock(req abci.RequestBeginBlock) { + tss.currentBlockNumber = req.GetHeader().Height + // reset on new block + tss.currentTxIndex = 0 +} + +// ListenDeliverTx satisfies the Hook interface +// It writes out the received DeliverTxEvent request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (tss *TraceStreamingService) ListenDeliverTx( + ctx sdk.Context, + req abci.RequestDeliverTx, + res abci.ResponseDeliverTx, +) error { + eventId := tss.getDeliverTxId() + eventTypeId := 1 + + // write req + key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, DeliverTxEvent, eventId, RequestEventType, eventTypeId) + 
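+	// illustrative example of the resulting key (values are hypothetical): block_height:12 => event:DELIVER_TX => event_id:1 => event_type:REQUEST => event_type_id:1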
if err := tss.writeEventReqRes(ctx, key, &req); err != nil { + return err + } + + // write state changes + if err := tss.writeStateChange(ctx, DeliverTxEvent, eventId); err != nil { + return err + } + + // write res + key = fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, DeliverTxEvent, eventId, ResponseEventType, 1) + if err := tss.writeEventReqRes(ctx, key, &res); err != nil { + return err + } + + return nil +} + +func (tss *TraceStreamingService) getDeliverTxId() int64 { + tss.currentTxIndex++ + return tss.currentTxIndex +} + +// ListenEndBlock satisfies the Hook interface +// It writes out the received EndBlockEvent request and response and the resulting state changes out to a file as described +// in the above the naming schema +func (tss *TraceStreamingService) ListenEndBlock( + ctx sdk.Context, + req abci.RequestEndBlock, + res abci.ResponseEndBlock, +) error { + eventId := int64(1) + eventTypeId := 1 + + // write req + key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, EndBlockEvent, eventId, RequestEventType, eventTypeId) + if err := tss.writeEventReqRes(ctx, key, &req); err != nil { + return err + } + + // write state changes + if err := tss.writeStateChange(ctx, EndBlockEvent, eventId); err != nil { + return err + } + + // write res + key = fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, EndBlockEvent, eventId, ResponseEventType, eventTypeId) + if err := tss.writeEventReqRes(ctx, key, &res); err != nil { + return err + } + + return nil +} + +// HaltAppOnDeliveryError whether or not to halt the application when delivery of massages fails +// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics. +// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will +// be replayed to all listeners when the node restarts and all successful listeners that received data +// prior to the halt will receive duplicate data. +func (tss *TraceStreamingService) HaltAppOnDeliveryError() bool { + return tss.haltAppOnDeliveryError +} + +// Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received +// Do we need this and an intermediate writer? We could just write directly to the buffer on calls to Write +// But then we don't support a Stream interface, which could be needed for other types of streamers +func (tss *TraceStreamingService) Stream(wg *sync.WaitGroup) error { + if tss.quitChan != nil { + return errors.New("`Stream` has already been called. 
The stream needs to be closed before it can be started again") + } + tss.quitChan = make(chan struct{}) + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-tss.quitChan: + return + case by := <-tss.srcChan: + tss.stateCacheLock.Lock() + tss.stateCache = append(tss.stateCache, by) + tss.stateCacheLock.Unlock() + } + } + }() + return nil +} + +// Close satisfies the io.Closer interface +func (tss *TraceStreamingService) Close() error { + close(tss.quitChan) + return nil +} + +func (tss *TraceStreamingService) writeStateChange(ctx sdk.Context, event string, eventId int64) error { + // write all state changes cached for this stage + tss.stateCacheLock.Lock() + kodec := tss.codec.(*codec.ProtoCodec) + kvPair := new(types.StoreKVPair) + for i, stateChange := range tss.stateCache { + key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, event, eventId, StateChangeEventType, i+1) + if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { + tss.stateCacheLock.Unlock() + return err + } + if err := tss.writeEventReqRes(ctx, key, kvPair); err != nil { + tss.stateCacheLock.Unlock() + return err + } + } + + // reset cache + tss.stateCache = nil + tss.stateCacheLock.Unlock() + + return nil +} + +func (tss *TraceStreamingService) writeEventReqRes(ctx sdk.Context, key string, data proto.Message) error { + var m = fmt.Sprintf("%v => data:omitted", key) + if tss.printDataToStdout { + m = fmt.Sprintf("%v => data:%v", key, data) + } + ctx.Logger().Debug(m) + return nil +} \ No newline at end of file diff --git a/plugin/plugins/trace/service/service_test.go b/plugin/plugins/trace/service/service_test.go new file mode 100644 index 000000000000..6f8243557f06 --- /dev/null +++ b/plugin/plugins/trace/service/service_test.go @@ -0,0 +1,181 @@ +package service + +import ( + "github.com/cosmos/cosmos-sdk/codec" + codecTypes "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" + "sync" + "testing" + + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + types1 "github.com/tendermint/tendermint/proto/tendermint/types" +) + +var ( + interfaceRegistry = codecTypes.NewInterfaceRegistry() + testMarshaller = codec.NewProtoCodec(interfaceRegistry) + testStreamingService *TraceStreamingService + testListener1, testListener2 types.WriteListener + emptyContext = sdk.Context{} + loggerContext sdk.Context + + // test abci message types + mockHash = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + testBeginBlockReq = abci.RequestBeginBlock{ + Header: types1.Header{ + Height: 1, + }, + ByzantineValidators: []abci.Evidence{}, + Hash: mockHash, + LastCommitInfo: abci.LastCommitInfo{ + Round: 1, + Votes: []abci.VoteInfo{}, + }, + } + testBeginBlockRes = abci.ResponseBeginBlock{ + Events: []abci.Event{ + { + Type: "testEventType1", + }, + { + Type: "testEventType2", + }, + }, + } + testEndBlockReq = abci.RequestEndBlock{ + Height: 1, + } + testEndBlockRes = abci.ResponseEndBlock{ + Events: []abci.Event{}, + ConsensusParamUpdates: &types1.ConsensusParams{}, + ValidatorUpdates: []abci.ValidatorUpdate{}, + } + mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} + testDeliverTxReq1 = abci.RequestDeliverTx{ + Tx: mockTxBytes1, + } + mockTxBytes2 = []byte{8, 7, 6, 5, 4, 3, 2} + testDeliverTxReq2 = abci.RequestDeliverTx{ + Tx: mockTxBytes2, + } + mockTxResponseData1 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes1 = abci.ResponseDeliverTx{ + 
Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData1, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + mockTxResponseData2 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes2 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData2, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + + // mock store keys + mockStoreKey1 = sdk.NewKVStoreKey("mockStore1") + mockStoreKey2 = sdk.NewKVStoreKey("mockStore2") + + // mock state changes + mockKey1 = []byte{1, 2, 3} + mockValue1 = []byte{3, 2, 1} + mockKey2 = []byte{2, 3, 4} + mockValue2 = []byte{4, 3, 2} + mockKey3 = []byte{3, 4, 5} + mockValue3 = []byte{5, 4, 3} +) + +func TestIntermediateWriter(t *testing.T) { + outChan := make(chan []byte, 0) + iw := NewIntermediateWriter(outChan) + require.IsType(t, &IntermediateWriter{}, iw) + testBytes := []byte{1, 2, 3, 4, 5} + var length int + var err error + waitChan := make(chan struct{}, 0) + go func() { + length, err = iw.Write(testBytes) + waitChan <- struct{}{} + }() + receivedBytes := <-outChan + <-waitChan + require.Equal(t, len(testBytes), length) + require.Equal(t, testBytes, receivedBytes) + require.Nil(t, err) +} + +func TestKafkaStreamingService(t *testing.T) { + loggerContext = emptyContext.WithLogger(log.TestingLogger()) + testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} + tss, err := NewTraceStreamingService(testKeys, testMarshaller, true, false) + testStreamingService = tss + require.Nil(t, err) + require.IsType(t, &TraceStreamingService{}, testStreamingService) + require.Equal(t, testMarshaller, testStreamingService.codec) + testListener1 = testStreamingService.listeners[mockStoreKey1][0] + testListener2 = testStreamingService.listeners[mockStoreKey2][0] + wg := new(sync.WaitGroup) + testStreamingService.Stream(wg) + testListenBeginBlock(t) + testListenDeliverTx1(t) + testListenDeliverTx2(t) + testListenEndBlock(t) + testStreamingService.Close() + wg.Wait() +} + +func testListenBeginBlock(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenBeginBlock(loggerContext, testBeginBlockReq, testBeginBlockRes) + require.Nil(t, err) +} + +func testListenDeliverTx1(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenDeliverTx(loggerContext, testDeliverTxReq1, testDeliverTxRes1) + require.Nil(t, err) +} + +func testListenDeliverTx2(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenDeliverTx(loggerContext, testDeliverTxReq2, testDeliverTxRes2) + require.Nil(t, err) +} + +func testListenEndBlock(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + 
testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenEndBlock(loggerContext, testEndBlockReq, testEndBlockRes) + require.Nil(t, err) +} diff --git a/plugin/plugins/trace/trace.go b/plugin/plugins/trace/trace.go new file mode 100644 index 000000000000..36d63305b886 --- /dev/null +++ b/plugin/plugins/trace/trace.go @@ -0,0 +1,113 @@ +package file + +import ( + "fmt" + "sync" + + "github.com/spf13/cast" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/plugin/plugins/trace/service" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/types" +) + +// Plugin name and version +const ( + // PLUGIN_NAME is the name for this streaming service plugin + PLUGIN_NAME = "trace" + + // PLUGIN_VERSION is the version for this streaming service plugin + PLUGIN_VERSION = "0.0.1" +) + +// TOML configuration parameter keys +const ( + // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service + KEYS_PARAM = "keys" + + PRINT_DATA_TO_STDOUT_PARAM = "print_data_to_stdout" + + // HALT_APP_ON_DELIVERY_ERROR whether or not to halt the application when plugin fails to deliver message(s) + HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error" +) + +// Plugins is the exported symbol for loading this plugin +var Plugins = []plugin.Plugin{ + &streamingServicePlugin{}, +} + +type streamingServicePlugin struct { + tss *service.TraceStreamingService + opts serverTypes.AppOptions +} + +var _ plugin.StateStreamingPlugin = (*streamingServicePlugin)(nil) + +// Name satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Name() string { + return PLUGIN_NAME +} + +// Version satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Version() string { + return PLUGIN_VERSION +} + +// Init satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error { + ssp.opts = env + return nil +} + +// Register satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Register( + bApp *baseapp.BaseApp, + marshaller codec.BinaryCodec, + keys map[string]*types.KVStoreKey, +) error { + // load all the params required for this plugin from the provided AppOptions + tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME) + printDataToStdout := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRINT_DATA_TO_STDOUT_PARAM))) + haltAppOnDeliveryError := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR))) + + + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) + var exposeStoreKeys []types.StoreKey + + if len(exposeKeyStrings) > 0 { + exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrings)) + for _, keyStr := range exposeKeyStrings { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } else { // if none are specified, we expose all the keys + exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) + for _, storeKey := range keys { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + + var err error + ssp.tss, err = service.NewTraceStreamingService(exposeStoreKeys, marshaller, 
printDataToStdout, haltAppOnDeliveryError) + if err != nil { + return err + } + // register the streaming service with the BaseApp + bApp.SetStreamingService(ssp.tss) + return nil +} + +// Start satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Start(wg *sync.WaitGroup) error { + return ssp.tss.Stream(wg) +} + +// Close satisfies io.Closer +func (ssp *streamingServicePlugin) Close() error { + return ssp.tss.Close() +} diff --git a/plugin/streaming.go b/plugin/streaming.go new file mode 100644 index 000000000000..b0c3b7fc6652 --- /dev/null +++ b/plugin/streaming.go @@ -0,0 +1,27 @@ +package plugin + +import ( + "sync" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/types" +) + +// STREAMING_TOML_KEY is the top-level TOML key for configuring streaming service plugins +const STREAMING_TOML_KEY = "streaming" + +// GLOBAL_WAIT_LIMIT_TOML_KEY is the TOML key for configuring the global wait limit +const GLOBAL_WAIT_LIMIT_TOML_KEY = "global_wait_limit" + +// StateStreamingPlugin interface for plugins that load a baseapp.StreamingService implementation from a plugin onto a baseapp.BaseApp +type StateStreamingPlugin interface { + // Register configures and registers the plugin streaming service with the BaseApp + Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error + + // Start starts the background streaming process of the plugin streaming service + Start(wg *sync.WaitGroup) error + + // Plugin is the base Plugin interface + Plugin +} diff --git a/sim-state-listening.mk b/sim-state-listening.mk new file mode 100644 index 000000000000..e01d67e91605 --- /dev/null +++ b/sim-state-listening.mk @@ -0,0 +1,42 @@ +#!/usr/bin/make -f + +################################################ +# Simulation tests with State Listening plugins +# +# This file is an extension for sims.mk +################################################ + +test-sim-nondeterminism-state-listening-file: + @echo "Running non-determinism-state-listening-file test..." + @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ + -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ + -StateListeningPlugin=file -HaltAppOnDeliveryError=true + +test-sim-nondeterminism-state-listening-trace: + @echo "Running non-determinism-state-listening-trace test..." + @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ + -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ + -StateListeningPlugin=trace -HaltAppOnDeliveryError=true + +test-sim-nondeterminism-state-listening-kafka: + @echo "Running non-determinism-state-listening-kafka test..." + @echo "Starting Kafka..." + @-docker-compose -f plugin/plugins/kafka/docker-compose.yml up -d zookeeper broker + + @-go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ + -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ + -StateListeningPlugin=kafka -HaltAppOnDeliveryError=false + + @echo "Stopping Kafka..." 
+ @-docker-compose -f plugin/plugins/kafka/docker-compose.yml down + +test-sim-nondeterminism-state-listening-all: \ + test-sim-nondeterminism-state-listening-file \ + test-sim-nondeterminism-state-listening-trace \ + test-sim-nondeterminism-state-listening-kafka + +.PHONY: \ +test-sim-nondeterminism-state-listening-all \ +test-sim-nondeterminism-state-listening-file \ +test-sim-nondeterminism-state-listening-trace \ +test-sim-nondeterminism-state-listening-kafka diff --git a/simapp/app.go b/simapp/app.go index 8e9b94075df8..6d6f13cf0233 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -2,10 +2,12 @@ package simapp import ( "encoding/json" + "fmt" "io" "net/http" "os" "path/filepath" + "sync" "github.com/cosmos/cosmos-sdk/testutil/testdata_pulsar" @@ -22,12 +24,13 @@ import ( "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/plugin/loader" "github.com/cosmos/cosmos-sdk/server" "github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" servertypes "github.com/cosmos/cosmos-sdk/server/types" simappparams "github.com/cosmos/cosmos-sdk/simapp/params" - "github.com/cosmos/cosmos-sdk/store/streaming" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" @@ -231,10 +234,30 @@ func NewSimApp( // not include this key. memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, "testingkey") - // configure state listening capabilities using AppOptions - // we are doing nothing with the returned streamingServices and waitGroup in this case - if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, keys); err != nil { - tmos.Exit(err.Error()) + pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) + if cast.ToBool(appOpts.Get(pluginsOnKey)) { + // this loads the preloaded and any plugins found in `plugins.dir` + // if their names match those in the `plugins.enabled` list. 
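+		// The calls below follow the plugin lifecycle: NewPluginLoader (load) -> Initialize -> Inject (register with the BaseApp) -> Start.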
+ pluginLoader, err := loader.NewPluginLoader(appOpts, logger) + if err != nil { + tmos.Exit(err.Error()) + } + + // initialize the loaded plugins + if err := pluginLoader.Initialize(); err != nil { + tmos.Exit(err.Error()) + } + + // register the plugin(s) with the BaseApp + if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { + tmos.Exit(err.Error()) + } + + // start the plugin services, optionally use wg to synchronize shutdown using io.Closer + wg := new(sync.WaitGroup) + if err := pluginLoader.Start(wg); err != nil { + tmos.Exit(err.Error()) + } } app := &SimApp{ diff --git a/simapp/sim_test.go b/simapp/sim_test.go index 70b2d9644a52..7a724c393ca6 100644 --- a/simapp/sim_test.go +++ b/simapp/sim_test.go @@ -1,13 +1,22 @@ package simapp import ( + "context" "encoding/json" + "flag" "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/server/types" + "github.com/spf13/cast" + "github.com/spf13/viper" + tmos "github.com/tendermint/tendermint/libs/os" "math/rand" "os" "runtime/debug" "strings" "testing" + "time" storetypes "github.com/cosmos/cosmos-sdk/store/types" "github.com/stretchr/testify/require" @@ -33,11 +42,22 @@ import ( "github.com/cosmos/cosmos-sdk/x/simulation" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + kafkaplugin "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka" + kafkaservice "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service" +) + +var ( + StateListeningPlugin string + HaltAppOnDeliveryError bool ) // Get flags every time the simulator is run func init() { GetSimulatorFlags() + // State listening flags + flag.StringVar(&StateListeningPlugin, "StateListeningPlugin", "", "State listening plugin name") + flag.BoolVar(&HaltAppOnDeliveryError, "HaltAppOnDeliveryError", true, "Halt app when state listeners fail") } type StoreKeysPrefixes struct { @@ -347,3 +367,245 @@ func TestAppStateDeterminism(t *testing.T) { } } } + +// TODO: Make another test for the fuzzer itself, which just has noOp txs +// and doesn't depend on the application. 
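+// TestAppStateDeterminismWithStateListening mirrors TestAppStateDeterminism but runs the simulation with the
+// state listening plugin named by the -StateListeningPlugin flag enabled, asserting that the app hash stays
+// identical across repeated runs for each seed.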
+func TestAppStateDeterminismWithStateListening(t *testing.T) { + if !FlagEnabledValue { + t.Skip("skipping application simulation") + } + + if StateListeningPlugin == "" { + t.Skip("state listening plugin flag not provided: -StateListeningPlugin=name") + } + + config := NewConfigFromFlags() + config.InitialBlockHeight = 1 + config.ExportParamsPath = "" + config.OnOperation = false + config.AllInvariants = false + config.ChainID = helpers.SimAppChainID + + numSeeds := 3 + numTimesToRunPerSeed := 5 + appHashList := make([]json.RawMessage, numTimesToRunPerSeed) + + // State listening plugin config + appOpts := loadAppOptions() + key := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ENABLED_TOML_KEY) + enabledPlugins := cast.ToStringSlice(appOpts.Get(key)) + for _, p := range enabledPlugins { + // Kafka plugin topic configuration + if kafkaplugin.PLUGIN_NAME == p { + prepKafkaTopics(appOpts) + break + } + } + + for i := 0; i < numSeeds; i++ { + config.Seed = rand.Int63() + + for j := 0; j < numTimesToRunPerSeed; j++ { + var logger log.Logger + if FlagVerboseValue { + logger = log.TestingLogger() + } else { + logger = log.NewNopLogger() + } + + db := dbm.NewMemDB() + app := NewSimApp( + logger, + db, + nil, + true, + map[int64]bool{}, + DefaultNodeHome, + FlagPeriodValue, + MakeTestEncodingConfig(), + appOpts, + interBlockCacheOpt(), + ) + + fmt.Printf( + "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n", + config.Seed, i+1, numSeeds, j+1, numTimesToRunPerSeed, + ) + + _, _, err := simulation.SimulateFromSeed( + t, + os.Stdout, + app.BaseApp, + AppStateFn(app.AppCodec(), app.SimulationManager()), + simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1 + SimulationOperations(app, app.AppCodec(), config), + app.ModuleAccountAddrs(), + config, + app.AppCodec(), + ) + require.NoError(t, err) + + if config.Commit { + PrintStats(db) + } + + appHash := app.LastCommitID().Hash + appHashList[j] = appHash + + if j != 0 { + require.Equal( + t, string(appHashList[0]), string(appHashList[j]), + "non-determinism in seed %d: %d/%d, attempt: %d/%d\n", config.Seed, i+1, numSeeds, j+1, numTimesToRunPerSeed, + ) + } + } + } +} + +func loadAppOptions() types.AppOptions { + // load plugin config + keys := make([]string, 0) // leave empty to listen to all store keys + m := make(map[string]interface{}) + m["plugins.on"] = true + m["plugins.enabled"] = []string{StateListeningPlugin} + m["plugins.dir"] = "" + // file plugin + m["plugins.streaming.file.keys"] = keys + m["plugins.streaming.file.write_dir"] = "" + m["plugins.streaming.file.prefix"] = "" + m["plugins.streaming.file.halt_app_on_delivery_error"] = HaltAppOnDeliveryError + // trace plugin + m["plugins.streaming.trace.keys"] = keys + m["plugins.streaming.trace.print_data_to_stdout"] = false + m["plugins.streaming.trace.halt_app_on_delivery_error"] = HaltAppOnDeliveryError + // kafka plugin + m["plugins.streaming.kafka.keys"] = keys + m["plugins.streaming.kafka.topic_prefix"] = "sim" + m["plugins.streaming.kafka.flush_timeout_ms"] = 5000 + m["plugins.streaming.kafka.halt_app_on_delivery_error"] = HaltAppOnDeliveryError + // Kafka plugin producer + m["plugins.streaming.kafka.producer.bootstrap_servers"] = "localhost:9092" + m["plugins.streaming.kafka.producer.client_id"] = "may-app-id" + m["plugins.streaming.kafka.producer.acks"] = "all" + m["plugins.streaming.kafka.producer.enable_idempotence"] = true + + vpr := viper.New() + for key, value := range m { + vpr.SetDefault(key, 
value) + } + + return vpr +} + +func prepKafkaTopics(opts types.AppOptions) { + // kafka topic setup + topicPrefix := cast.ToString(opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, kafkaplugin.PLUGIN_NAME, kafkaplugin.TOPIC_PREFIX_PARAM))) + bootstrapServers := cast.ToString(opts.Get(fmt.Sprintf("%s.%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, kafkaplugin.PLUGIN_NAME, kafkaplugin.PRODUCER_CONFIG_PARAM, "bootstrap_servers"))) + bootstrapServers = strings.ReplaceAll(bootstrapServers, "_", ".") + topics := []string{ + string(kafkaservice.BeginBlockReqTopic), + kafkaservice.BeginBlockResTopic, + kafkaservice.DeliverTxReqTopic, + kafkaservice.DeliverTxResTopic, + kafkaservice.EndBlockReqTopic, + kafkaservice.EndBlockResTopic, + kafkaservice.StateChangeTopic, + } + deleteTopics(topicPrefix, topics, bootstrapServers) + createTopics(topicPrefix, topics, bootstrapServers) +} + +func createTopics(topicPrefix string, topics []string, bootstrapServers string) { + + adminClient, err := kafka.NewAdminClient(&kafka.ConfigMap{ + "bootstrap.servers": bootstrapServers, + "broker.version.fallback": "0.10.0.0", + "api.version.fallback.ms": 0, + }) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + tmos.Exit(err.Error()) + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create topics on cluster. + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDuration, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("time.ParseDuration(60s)") + tmos.Exit(err.Error()) + } + + var _topics []kafka.TopicSpecification + for _, s := range topics { + _topics = append(_topics, + kafka.TopicSpecification{ + Topic: fmt.Sprintf("%s-%s", topicPrefix, s), + NumPartitions: 1, + ReplicationFactor: 1}) + } + results, err := adminClient.CreateTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDuration)) + if err != nil { + fmt.Printf("Problem during the topicPrefix creation: %v\n", err) + tmos.Exit(err.Error()) + } + + // Check for specific topicPrefix errors + for _, result := range results { + if result.Error.Code() != kafka.ErrNoError && + result.Error.Code() != kafka.ErrTopicAlreadyExists { + fmt.Printf("Topic creation failed for %s: %v", + result.Topic, result.Error.String()) + tmos.Exit(err.Error()) + } + } + + adminClient.Close() +} + +func deleteTopics(topicPrefix string, topics []string, bootstrapServers string) { + // Create a new AdminClient. + // AdminClient can also be instantiated using an existing + // Producer or Consumer instance, see NewAdminClientFromProducer and + // NewAdminClientFromConsumer. + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + tmos.Exit(err.Error()) + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Delete topics on cluster + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("ParseDuration(60s)") + tmos.Exit(err.Error()) + } + + var _topics []string + for _, s := range topics { + _topics = append(_topics, fmt.Sprintf("%s-%s", topicPrefix, s)) + } + + results, err := a.DeleteTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDur)) + if err != nil { + fmt.Printf("Failed to delete topics: %v\n", err) + tmos.Exit(err.Error()) + } + + // Print results + for _, result := range results { + fmt.Printf("%s\n", result) + } + + a.Close() +} diff --git a/store/streaming/README.md b/store/streaming/README.md deleted file mode 100644 index 46e343416a52..000000000000 --- a/store/streaming/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# State Streaming Service - -This package contains the constructors for the `StreamingService`s used to write state changes out from individual KVStores to a -file or stream, as described in [ADR-038](../../docs/architecture/adr-038-state-listening.md) and defined in [types/streaming.go](../../baseapp/streaming.go). -The child directories contain the implementations for specific output destinations. - -Currently, a `StreamingService` implementation that writes state changes out to files is supported, in the future support for additional -output destinations can be added. - -The `StreamingService` is configured from within an App using the `AppOptions` loaded from the app.toml file: - -```toml -[store] - streamers = [ # if len(streamers) > 0 we are streaming - "file", # name of the streaming service, used by constructor - ] - -[streamers] - [streamers.file] - keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - write_dir = "path to the write directory" - prefix = "optional prefix to prepend to the generated file names" -``` - -`store.streamers` contains a list of the names of the `StreamingService` implementations to employ which are used by `ServiceTypeFromString` -to return the `ServiceConstructor` for that particular implementation: - -```go -listeners := cast.ToStringSlice(appOpts.Get("store.streamers")) -for _, listenerName := range listeners { - constructor, err := ServiceTypeFromString(listenerName) - if err != nil { - // handle error - } -} -``` - -`streamers` contains a mapping of the specific `StreamingService` implementation name to the configuration parameters for that specific service. -`streamers.x.keys` contains the list of `StoreKey` names for the KVStores to expose using this service and is required by every type of `StreamingService`. -In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off. - -Additional configuration parameters are optional and specific to the implementation. -In the case of the file streaming service, `streamers.file.write_dir` contains the path to the -directory to write the files to, and `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions -with other App `StreamingService` output files. - -The `ServiceConstructor` accepts `AppOptions`, the store keys collected using `streamers.x.keys`, a `BinaryMarshaller` and -returns a `StreamingService` implementation. The `AppOptions` are passed in to provide access to any implementation specific configuration options, -e.g. 
in the case of the file streaming service the `streamers.file.write_dir` and `streamers.file.prefix`. - -```go -streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) -if err != nil { - // handler error -} -``` - -The returned `StreamingService` is loaded into the BaseApp using the BaseApp's `SetStreamingService` method. -The `Stream` method is called on the service to begin the streaming process. Depending on the implementation this process -may be synchronous or asynchronous with the message processing of the state machine. - -```go -bApp.SetStreamingService(streamingService) -wg := new(sync.WaitGroup) -quitChan := make(chan struct{}) -streamingService.Stream(wg, quitChan) -``` diff --git a/store/streaming/constructor.go b/store/streaming/constructor.go deleted file mode 100644 index e576f84b83d1..000000000000 --- a/store/streaming/constructor.go +++ /dev/null @@ -1,137 +0,0 @@ -package streaming - -import ( - "fmt" - "strings" - "sync" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - serverTypes "github.com/cosmos/cosmos-sdk/server/types" - "github.com/cosmos/cosmos-sdk/store/streaming/file" - "github.com/cosmos/cosmos-sdk/store/types" - - "github.com/spf13/cast" -) - -// ServiceConstructor is used to construct a streaming service -type ServiceConstructor func(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) - -// ServiceType enum for specifying the type of StreamingService -type ServiceType int - -const ( - Unknown ServiceType = iota - File - // add more in the future -) - -// ServiceTypeFromString returns the streaming.ServiceType corresponding to the provided name -func ServiceTypeFromString(name string) ServiceType { - switch strings.ToLower(name) { - case "file", "f": - return File - default: - return Unknown - } -} - -// String returns the string name of a streaming.ServiceType -func (sst ServiceType) String() string { - switch sst { - case File: - return "file" - default: - return "unknown" - } -} - -// ServiceConstructorLookupTable is a mapping of streaming.ServiceTypes to streaming.ServiceConstructors -var ServiceConstructorLookupTable = map[ServiceType]ServiceConstructor{ - File: NewFileStreamingService, -} - -// NewServiceConstructor returns the streaming.ServiceConstructor corresponding to the provided name -func NewServiceConstructor(name string) (ServiceConstructor, error) { - ssType := ServiceTypeFromString(name) - if ssType == Unknown { - return nil, fmt.Errorf("unrecognized streaming service name %s", name) - } - if constructor, ok := ServiceConstructorLookupTable[ssType]; ok && constructor != nil { - return constructor, nil - } - return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String()) -} - -// NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService -func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) { - filePrefix := cast.ToString(opts.Get("streamers.file.prefix")) - fileDir := cast.ToString(opts.Get("streamers.file.write_dir")) - return file.NewStreamingService(fileDir, filePrefix, keys, marshaller) -} - -// LoadStreamingServices is a function for loading StreamingServices onto the BaseApp using the provided AppOptions, codec, and keys -// It returns the WaitGroup and quit channel used to synchronize with the streaming services and any error that occurs 
during the setup -func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions, appCodec codec.BinaryCodec, keys map[string]*types.KVStoreKey) ([]baseapp.StreamingService, *sync.WaitGroup, error) { - // waitgroup and quit channel for optional shutdown coordination of the streaming service(s) - wg := new(sync.WaitGroup) - // configure state listening capabilities using AppOptions - streamers := cast.ToStringSlice(appOpts.Get("store.streamers")) - activeStreamers := make([]baseapp.StreamingService, 0, len(streamers)) - for _, streamerName := range streamers { - // get the store keys allowed to be exposed for this streaming service - exposeKeyStrs := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("streamers.%s.keys", streamerName))) - var exposeStoreKeys []types.StoreKey - if exposeAll(exposeKeyStrs) { // if list contains `*`, expose all StoreKeys - exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) - for _, storeKey := range keys { - exposeStoreKeys = append(exposeStoreKeys, storeKey) - } - } else { - exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrs)) - for _, keyStr := range exposeKeyStrs { - if storeKey, ok := keys[keyStr]; ok { - exposeStoreKeys = append(exposeStoreKeys, storeKey) - } - } - } - if len(exposeStoreKeys) == 0 { // short circuit if we are not exposing anything - continue - } - // get the constructor for this streamer name - constructor, err := NewServiceConstructor(streamerName) - if err != nil { - // close any services we may have already spun up before hitting the error on this one - for _, activeStreamer := range activeStreamers { - activeStreamer.Close() - } - return nil, nil, err - } - // generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose - streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) - if err != nil { - // close any services we may have already spun up before hitting the error on this one - for _, activeStreamer := range activeStreamers { - activeStreamer.Close() - } - return nil, nil, err - } - // register the streaming service with the BaseApp - bApp.SetStreamingService(streamingService) - // kick off the background streaming service loop - streamingService.Stream(wg) - // add to the list of active streamers - activeStreamers = append(activeStreamers, streamingService) - } - // if there are no active streamers, activeStreamers is empty (len == 0) and the waitGroup is not waiting on anything - return activeStreamers, wg, nil -} - -func exposeAll(list []string) bool { - for _, ele := range list { - if ele == "*" { - return true - } - } - return false -} diff --git a/store/streaming/constructor_test.go b/store/streaming/constructor_test.go deleted file mode 100644 index 5f9d58016f68..000000000000 --- a/store/streaming/constructor_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package streaming - -import ( - "testing" - - "github.com/cosmos/cosmos-sdk/codec" - codecTypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/store/streaming/file" - "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/stretchr/testify/require" -) - -type fakeOptions struct{} - -func (f *fakeOptions) Get(string) interface{} { return nil } - -var ( - mockOptions = new(fakeOptions) - mockKeys = []types.StoreKey{sdk.NewKVStoreKey("mockKey1"), sdk.NewKVStoreKey("mockKey2")} - interfaceRegistry = codecTypes.NewInterfaceRegistry() - testMarshaller = codec.NewProtoCodec(interfaceRegistry) -) - -func 
TestStreamingServiceConstructor(t *testing.T) { - _, err := NewServiceConstructor("unexpectedName") - require.NotNil(t, err) - - constructor, err := NewServiceConstructor("file") - require.Nil(t, err) - var expectedType ServiceConstructor - require.IsType(t, expectedType, constructor) - - serv, err := constructor(mockOptions, mockKeys, testMarshaller) - require.Nil(t, err) - require.IsType(t, &file.StreamingService{}, serv) - listeners := serv.Listeners() - for _, key := range mockKeys { - _, ok := listeners[key] - require.True(t, ok) - } -} diff --git a/store/streaming/file/example_config.toml b/store/streaming/file/example_config.toml deleted file mode 100644 index 8202bd8ef559..000000000000 --- a/store/streaming/file/example_config.toml +++ /dev/null @@ -1,10 +0,0 @@ -[store] - streamers = [ # if len(streamers) > 0 we are streaming - "file", # name of the streaming service, used by constructor - ] - -[streamers] - [streamers.file] - keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - write_dir = "path to the write directory" - prefix = "optional prefix to prepend to the generated file names" diff --git a/store/streaming/file/service.go b/store/streaming/file/service.go deleted file mode 100644 index 02feb403e99b..000000000000 --- a/store/streaming/file/service.go +++ /dev/null @@ -1,279 +0,0 @@ -package file - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "sync" - - abci "github.com/tendermint/tendermint/abci/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var _ baseapp.StreamingService = &StreamingService{} - -// StreamingService is a concrete implementation of StreamingService that writes state changes out to files -type StreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all the WriteListeners write their data out to - filePrefix string // optional prefix for each of the generated files - writeDir string // directory to write files into - codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel to synchronize closure -} - -// IntermediateWriter is used so that we do not need to update the underlying io.Writer -// inside the StoreKVPairWriteListener everytime we begin writing to a new file -type IntermediateWriter struct { - outChan chan<- []byte -} - -// NewIntermediateWriter create an instance of an intermediateWriter that sends to the provided channel -func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { - return &IntermediateWriter{ - outChan: outChan, - } -} - -// Write satisfies io.Writer -func (iw *IntermediateWriter) Write(b []byte) (int, error) { - iw.outChan <- b - return len(b), nil -} - -// NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys -func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec) (*StreamingService, 
error) { - listenChan := make(chan []byte) - iw := NewIntermediateWriter(listenChan) - listener := types.NewStoreKVPairWriteListener(iw, c) - listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) - // in this case, we are using the same listener for each Store - for _, key := range storeKeys { - listeners[key] = append(listeners[key], listener) - } - // check that the writeDir exists and is writeable so that we can catch the error here at initialization if it is not - // we don't open a dstFile until we receive our first ABCI message - if err := isDirWriteable(writeDir); err != nil { - return nil, err - } - return &StreamingService{ - listeners: listeners, - srcChan: listenChan, - filePrefix: filePrefix, - writeDir: writeDir, - codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), - }, nil -} - -// Listeners satisfies the baseapp.StreamingService interface -// It returns the StreamingService's underlying WriteListeners -// Use for registering the underlying WriteListeners with the BaseApp -func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener { - return fss.listeners -} - -// ListenBeginBlock satisfies the baseapp.ABCIListener interface -// It writes the received BeginBlock request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { - // generate the new file - dstFile, err := fss.openBeginBlockFile(req) - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss *StreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) { - fss.currentBlockNumber = req.GetHeader().Height - fss.currentTxIndex = 0 - fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) -} - -// ListenDeliverTx satisfies the baseapp.ABCIListener interface -// It writes the received DeliverTx request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error { - // generate the new file - dstFile, err := fss.openDeliverTxFile() - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this 
stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss *StreamingService) openDeliverTxFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - fss.currentTxIndex++ - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) -} - -// ListenEndBlock satisfies the baseapp.ABCIListener interface -// It writes the received EndBlock request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error { - // generate the new file - dstFile, err := fss.openEndBlockFile() - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss *StreamingService) openEndBlockFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) -} - -// Stream satisfies the baseapp.StreamingService interface -// It spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs -// and caches them in the order they were received -// returns an error if it is called twice -func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { - if fss.quitChan != nil { - return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again") - } - fss.quitChan = make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-fss.quitChan: - fss.quitChan = nil - return - case by := <-fss.srcChan: - fss.stateCacheLock.Lock() - fss.stateCache = append(fss.stateCache, by) - fss.stateCacheLock.Unlock() - } - } - }() - return nil -} - -// Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface -func (fss *StreamingService) Close() error { - close(fss.quitChan) - return nil -} - -// isDirWriteable checks if dir is writable by writing and removing a file -// to dir. 
It returns nil if dir is writable. -func isDirWriteable(dir string) error { - f := path.Join(dir, ".touch") - if err := ioutil.WriteFile(f, []byte(""), 0600); err != nil { - return err - } - return os.Remove(f) -} diff --git a/store/types/listening.go b/store/types/listening.go index 2294a5ada531..230374d50465 100644 --- a/store/types/listening.go +++ b/store/types/listening.go @@ -8,9 +8,10 @@ import ( // WriteListener interface for streaming data out from a listenkv.Store type WriteListener interface { + // OnWrite is used for emitting updated KVPairs // if value is nil then it was deleted - // storeKey indicates the source KVStore, to facilitate using the the same WriteListener across separate KVStores - // delete bool indicates if it was a delete; true: delete, false: set + // storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores + // delete bool indicates if it was a delete operation; true: delete, false: set OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error }
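For readers of the `WriteListener` hunk above: a custom listener only has to implement `OnWrite`. The sketch below is a minimal, hypothetical example (the `memListener` type and its fields are illustrative and not part of the SDK) that simply buffers every write it observes; the SDK's own `StoreKVPairWriteListener` instead marshals each pair and writes it to an underlying `io.Writer`.

```go
package example

import (
	"sync"

	"github.com/cosmos/cosmos-sdk/store/types"
)

// pair is an in-memory record of a single KVStore write.
type pair struct {
	store  string
	key    []byte
	value  []byte // nil when delete is true
	delete bool
}

// memListener is a hypothetical WriteListener that buffers observed writes in memory.
type memListener struct {
	mu    sync.Mutex
	pairs []pair
}

// compile-time check that memListener satisfies the WriteListener interface
var _ types.WriteListener = &memListener{}

// OnWrite records the write (or delete) and never fails.
func (l *memListener) OnWrite(storeKey types.StoreKey, key, value []byte, delete bool) error {
	l.mu.Lock()
	defer l.mu.Unlock()
	l.pairs = append(l.pairs, pair{
		store:  storeKey.Name(),
		key:    key,
		value:  value,
		delete: delete,
	})
	return nil
}
```

Listeners like this are invoked by `listenkv.Store` on every set and delete, so keeping `OnWrite` cheap (or handing the data off to a channel, as the file service's `IntermediateWriter` does) helps avoid slowing down state-machine execution.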