From 885b45ad0c466f1aafca1f9d9004163023815c81 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 29 Nov 2021 12:04:36 -0600 Subject: [PATCH 01/43] patch adr-038 doc --- docs/architecture/adr-038-state-listening.md | 98 +++++++++----------- 1 file changed, 45 insertions(+), 53 deletions(-) diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md index b9c209b4aa32..440faf01e726 100644 --- a/docs/architecture/adr-038-state-listening.md +++ b/docs/architecture/adr-038-state-listening.md @@ -338,18 +338,8 @@ off the channel despite the success status of the service. ```go func (app *BaseApp) Commit() (res abci.ResponseCommit) { - - ... - - var halt bool - - switch { - case app.haltHeight > 0 && uint64(header.Height) >= app.haltHeight: - halt = true - case app.haltTime > 0 && header.Time.Unix() >= int64(app.haltTime): - halt = true - } + ... // each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel // but the BaseApp also imposes a global wait limit @@ -358,23 +348,13 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { select { case success := <- lis.ListenSuccess(): if success == false { - halt = true - break + app.halt() } case <- maxWait.C: - halt = true - break + app.halt() } } - if halt { - // Halt the binary and allow Tendermint to receive the ResponseCommit - // response with the commit ID hash. This will allow the node to successfully - // restart and process blocks assuming the halt configuration has been - // reset or moved to a more distant value. - app.halt() - } - ... } @@ -421,7 +401,7 @@ type StateStreamingPlugin interface { Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error // Start starts the background streaming process of the plugin streaming service - Start(wg *sync.WaitGroup) + Start(wg *sync.WaitGroup) error // Plugin is the base Plugin interface Plugin @@ -442,34 +422,37 @@ func NewSimApp( ... 
- // this loads the preloaded and any plugins found in `plugins.dir`
- pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
- if err != nil {
- // handle error
- }
-
- // initialize the loaded plugins
- if err := pluginLoader.Initialize(); err != nil {
- // hanlde error
- }
-
 keys := sdk.NewKVStoreKeys(
- authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
- minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
- govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey,
- evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey,
+ authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
+ minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
+ govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey,
+ evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey,
 )

- // register the plugin(s) with the BaseApp
- if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil {
- // handle error
- }
+ pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY)
+ if cast.ToBool(appOpts.Get(pluginsOnKey)) {
+ // this loads the preloaded and any plugins found in `plugins.dir`
+ pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
+ if err != nil {
+ // handle error
+ }

- // start the plugin services, optionally use wg to synchronize shutdown using io.Closer
- wg := new(sync.WaitGroup)
- if err := pluginLoader.Start(wg); err != nil {
- // handler error
- }
+ // initialize the loaded plugins
+ if err := pluginLoader.Initialize(); err != nil {
+ // handle error
+ }
+
+ // register the plugin(s) with the BaseApp
+ if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil {
+ // handle error
+ }
+
+ // start the plugin services, optionally use wg to synchronize shutdown using io.Closer
+ wg := new(sync.WaitGroup)
+ if err := pluginLoader.Start(wg); err != nil {
+ // handle error
+ }
+ }

 ...

@@ -496,11 +479,17 @@ to load plugins from, and `plugins.disabled` is a list of names for the plugins
 Configuration of a given plugin is ultimately specific to the plugin, but we will introduce some standards here:
 Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`).
+For streaming plugins, a parameter `plugins.streaming.global_ack_wait_limit` is used to configure the maximum amount of time
+the BaseApp will wait for positive acknowledgement of receipt by the external streaming services before it considers
+the message relay to be a failure.
+
 Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`).
 It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys
-(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a mode (e.g. `plugins.streaming.file.mode`)
-that signifies whether the service operates in a fire-and-forget capacity (`faf`) or the BaseApp should require positive
-acknowledgement of message receipt by the service (`ack`).
+(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.ack`)
+that signifies whether the service operates in a fire-and-forget capacity or the BaseApp should require positive
+acknowledgement of message receipt by the service.
In the case of "ack" mode, the service may also need to be +configured with an acknowledgement wait limit specific to that individual service (e.g. `plugins.streaming.kafka.ack_wait_limit`). +The file `StreamingService` does not have an individual `ack_wait_limit` since it operates synchronously with the App. e.g. @@ -510,11 +499,14 @@ e.g. disabled = ["list", "of", "plugin", "names", "to", "disable"] dir = "the directory to load non-preloaded plugins from; defaults to " [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their plugin name + # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services + # in milliseconds + global_ack_wait_limit = 500 [plugins.streaming.file] # the specific parameters for the file streaming service plugin keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - writeDir = "path to the write directory" + write_dir = "path to the write directory" prefix = "optional prefix to prepend to the generated file names" - mode = "faf" # faf == fire-and-forget; ack == require positive acknowledge of receipt + ack = "false" # false == fire-and-forget; true == sends a message receipt success/fail signal [plugins.streaming.kafka] ... [plugins.modules] From 00756c780510bf4eb4cbf2338784a73a32506ab5 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 29 Nov 2021 12:06:01 -0600 Subject: [PATCH 02/43] plugin interfaces --- plugin/plugin.go | 45 +++++++++++++++++++++++++++++++++++++++++++++ plugin/streaming.go | 27 +++++++++++++++++++++++++++ 2 files changed, 72 insertions(+) create mode 100644 plugin/plugin.go create mode 100644 plugin/streaming.go diff --git a/plugin/plugin.go b/plugin/plugin.go new file mode 100644 index 000000000000..4145cb48147f --- /dev/null +++ b/plugin/plugin.go @@ -0,0 +1,45 @@ +package plugin + +import ( + "io" + + serverTypes "github.com/cosmos/cosmos-sdk/server/types" +) + +const ( + // PLUGINS_SYMBOL is the symbol for loading Cosmos-SDK plugins from a linked .so file + PLUGINS_SYMBOL = "Plugins" + + // PLUGINS_TOML_KEY is the top-level TOML key for plugin configuration + PLUGINS_TOML_KEY = "plugins" + + // PLUGINS_ON_TOML_KEY is the second-level TOML key for turning on the plugin system as a whole + PLUGINS_ON_TOML_KEY = "on" + + // PLUGINS_DIR_TOML_KEY is the second-level TOML key for the directory to load plugins from + PLUGINS_DIR_TOML_KEY = "dir" + + // PLUGINS_DISABLED_TOML_KEY is the second-level TOML key for a list of plugins to disable + PLUGINS_DISABLED_TOML_KEY = "disabled" + + // DEFAULT_PLUGINS_DIRECTORY is the default directory to load plugins from + DEFAULT_PLUGINS_DIRECTORY = "src/github.com/cosmos/cosmos-sdk/plugin/plugins" +) + +// Plugin is the base interface for all kinds of cosmos-sdk plugins +// It will be included in interfaces of different Plugins +type Plugin interface { + // Name should return unique name of the plugin + Name() string + + // Version returns current version of the plugin + Version() string + + // Init is called once when the Plugin is being loaded + // The plugin is passed the AppOptions for configuration + // A plugin will not necessarily have a functional Init + Init(env serverTypes.AppOptions) error + + // Closer interface for shutting down the plugin process + io.Closer +} diff --git a/plugin/streaming.go b/plugin/streaming.go new file mode 100644 index 000000000000..e230aa524bde --- /dev/null +++ b/plugin/streaming.go @@ -0,0 +1,27 @@ +package plugin 
+
+import (
+ "sync"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/store/types"
+)
+
+// STREAMING_TOML_KEY is the second-level TOML key for configuring streaming service plugins
+const STREAMING_TOML_KEY = "streaming"
+
+// GLOBAL_ACK_WAIT_LIMIT_TOML_KEY is the TOML key for configuring the global ack wait limit
+const GLOBAL_ACK_WAIT_LIMIT_TOML_KEY = "global_ack_wait_limit"
+
+// StateStreamingPlugin interface for plugins that load a baseapp.StreamingService implementation from a plugin onto a baseapp.BaseApp
+type StateStreamingPlugin interface {
+ // Register configures and registers the plugin streaming service with the BaseApp
+ Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error
+
+ // Start starts the background streaming process of the plugin streaming service
+ Start(wg *sync.WaitGroup) error
+
+ // Plugin is the base Plugin interface
+ Plugin
+}
From daed581666b8f1dc847b3e9388cfd0348256ece9 Mon Sep 17 00:00:00 2001
From: i-norden
Date: Mon, 29 Nov 2021 12:06:47 -0600
Subject: [PATCH 03/43] plugin loader/preloader

---
 plugin/Rules.mk | 9 +
 plugin/loader/Rules.mk | 13 ++
 plugin/loader/load_nocgo.go | 18 ++
 plugin/loader/load_noplugin.go | 17 ++
 plugin/loader/load_unix.go | 33 ++++
 plugin/loader/loader.go | 293 +++++++++++++++++++++++++++++++++
 plugin/loader/preload.go | 13 ++
 plugin/loader/preload.sh | 40 +++++
 plugin/loader/preload_list | 6 +
 9 files changed, 442 insertions(+)
 create mode 100644 plugin/Rules.mk
 create mode 100644 plugin/loader/Rules.mk
 create mode 100644 plugin/loader/load_nocgo.go
 create mode 100644 plugin/loader/load_noplugin.go
 create mode 100644 plugin/loader/load_unix.go
 create mode 100644 plugin/loader/loader.go
 create mode 100644 plugin/loader/preload.go
 create mode 100644 plugin/loader/preload.sh
 create mode 100644 plugin/loader/preload_list

diff --git a/plugin/Rules.mk b/plugin/Rules.mk
new file mode 100644
index 000000000000..1e26d2a3c692
--- /dev/null
+++ b/plugin/Rules.mk
@@ -0,0 +1,9 @@
+include mk/header.mk
+
+dir := $(d)/loader
+include $(dir)/Rules.mk
+
+dir := $(d)/plugins
+include $(dir)/Rules.mk
+
+include mk/footer.mk
diff --git a/plugin/loader/Rules.mk b/plugin/loader/Rules.mk
new file mode 100644
index 000000000000..dd842bfa1892
--- /dev/null
+++ b/plugin/loader/Rules.mk
@@ -0,0 +1,13 @@
+include mk/header.mk
+
+COSMOS_PLUGINS ?=
+export COSMOS_PLUGINS
+
+$(d)/preload.go: d:=$(d)
+$(d)/preload.go: $(d)/preload_list $(d)/preload.sh ALWAYS
+ $(d)/preload.sh > $@
+ go fmt $@ >/dev/null
+
+DEPS_GO += $(d)/preload.go
+
+include mk/footer.mk
diff --git a/plugin/loader/load_nocgo.go b/plugin/loader/load_nocgo.go
new file mode 100644
index 000000000000..51d7c4a6e63f
--- /dev/null
+++ b/plugin/loader/load_nocgo.go
@@ -0,0 +1,18 @@
+// +build !cgo,!noplugin
+// +build linux darwin freebsd
+
+package loader
+
+import (
+ "errors"
+
+ cplugin "github.com/cosmos/cosmos-sdk/plugin"
+)
+
+func init() {
+ loadPluginFunc = nocgoLoadPlugin
+}
+
+func nocgoLoadPlugin(fi string) ([]cplugin.Plugin, error) {
+ return nil, errors.New("not built with cgo support")
+}
diff --git a/plugin/loader/load_noplugin.go b/plugin/loader/load_noplugin.go
new file mode 100644
index 000000000000..1ababd520f2a
--- /dev/null
+++ b/plugin/loader/load_noplugin.go
@@ -0,0 +1,17 @@
+// +build noplugin
+
+package loader
+
+import (
+ "errors"
+
+ cplugin "github.com/cosmos/cosmos-sdk/plugin"
+)
+
+func init() {
+ loadPluginFunc = nopluginLoadPlugin
+}
+
+func nopluginLoadPlugin(string) ([]cplugin.Plugin, error) {
+ return nil, errors.New("not built with plugin support")
+}
diff --git a/plugin/loader/load_unix.go b/plugin/loader/load_unix.go
new file mode 100644
index 000000000000..9740e9d42a2f
--- /dev/null
+++ b/plugin/loader/load_unix.go
@@ -0,0 +1,33 @@
+// +build cgo,!noplugin
+// +build linux darwin freebsd
+
+package loader
+
+import (
+ "errors"
+ "plugin"
+
+ cplugin "github.com/cosmos/cosmos-sdk/plugin"
+)
+
+func init() {
+ loadPluginFunc = unixLoadPlugin
+}
+
+func unixLoadPlugin(fi string) ([]cplugin.Plugin, error) {
+ pl, err := plugin.Open(fi)
+ if err != nil {
+ return nil, err
+ }
+ pls, err := pl.Lookup(cplugin.PLUGINS_SYMBOL)
+ if err != nil {
+ return nil, err
+ }
+
+ typePls, ok := pls.(*[]cplugin.Plugin)
+ if !ok {
+ return nil, errors.New("field 'Plugins' didn't contain correct type")
+ }
+
+ return *typePls, nil
+}
diff --git a/plugin/loader/loader.go b/plugin/loader/loader.go
new file mode 100644
index 000000000000..3fd633d72f92
--- /dev/null
+++ b/plugin/loader/loader.go
@@ -0,0 +1,293 @@
+package loader
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "sync"
+
+ "github.com/spf13/cast"
+ logging "github.com/tendermint/tendermint/libs/log"
+
+ "github.com/cosmos/cosmos-sdk/baseapp"
+ "github.com/cosmos/cosmos-sdk/codec"
+ "github.com/cosmos/cosmos-sdk/plugin"
+ serverTypes "github.com/cosmos/cosmos-sdk/server/types"
+ storeTypes "github.com/cosmos/cosmos-sdk/store/types"
+)
+
+var preloadPlugins []plugin.Plugin
+
+// Preload adds one or more plugins to the preload list. This should _only_ be called during init.
+func Preload(plugins ...plugin.Plugin) {
+ preloadPlugins = append(preloadPlugins, plugins...)
+}
+
+var loadPluginFunc = func(string) ([]plugin.Plugin, error) {
+ return nil, fmt.Errorf("unsupported platform %s", runtime.GOOS)
+}
+
+type loaderState int
+
+const (
+ loaderLoading loaderState = iota
+ loaderInitializing
+ loaderInitialized
+ loaderInjecting
+ loaderInjected
+ loaderStarting
+ loaderStarted
+ loaderClosing
+ loaderClosed
+ loaderFailed
+)
+
+func (ls loaderState) String() string {
+ switch ls {
+ case loaderLoading:
+ return "Loading"
+ case loaderInitializing:
+ return "Initializing"
+ case loaderInitialized:
+ return "Initialized"
+ case loaderInjecting:
+ return "Injecting"
+ case loaderInjected:
+ return "Injected"
+ case loaderStarting:
+ return "Starting"
+ case loaderStarted:
+ return "Started"
+ case loaderClosing:
+ return "Closing"
+ case loaderClosed:
+ return "Closed"
+ case loaderFailed:
+ return "Failed"
+ default:
+ return "Unknown"
+ }
+}
+
+// PluginLoader keeps track of loaded plugins.
+//
+// To use:
+// 1. Load any desired plugins with Load and LoadDirectory. Preloaded plugins
+// will automatically be loaded.
+// 2. Call Initialize to run all initialization logic.
+// 3. Call Inject to register the plugins.
+// 4. Optionally call Start to start plugins.
+// 5. Call Close to close all plugins.
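+//
+// For illustration, typical usage looks like the following sketch (assuming the
+// caller supplies appOpts, logger, bApp, appCodec, and keys):
+//
+//  loader, err := NewPluginLoader(appOpts, logger)
+//  if err != nil { /* handle error */ }
+//  if err := loader.Initialize(); err != nil { /* handle error */ }
+//  if err := loader.Inject(bApp, appCodec, keys); err != nil { /* handle error */ }
+//  wg := new(sync.WaitGroup)
+//  if err := loader.Start(wg); err != nil { /* handle error */ }
+//  defer loader.Close()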
+type PluginLoader struct {
+ state loaderState
+ plugins map[string]plugin.Plugin
+ started []plugin.Plugin
+ opts serverTypes.AppOptions
+ logger logging.Logger
+ disabled []string
+}
+
+// NewPluginLoader creates a new plugin loader
+func NewPluginLoader(opts serverTypes.AppOptions, logger logging.Logger) (*PluginLoader, error) {
+ loader := &PluginLoader{plugins: make(map[string]plugin.Plugin, len(preloadPlugins)), opts: opts, logger: logger}
+ for _, v := range preloadPlugins {
+ if err := loader.Load(v); err != nil {
+ return nil, err
+ }
+ }
+ // the `disabled` and `dir` keys are second-level keys nested under the top-level `plugins` key
+ loader.disabled = cast.ToStringSlice(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DISABLED_TOML_KEY)))
+ pluginDir := cast.ToString(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DIR_TOML_KEY)))
+ if pluginDir == "" {
+ pluginDir = filepath.Join(os.Getenv("GOPATH"), plugin.DEFAULT_PLUGINS_DIRECTORY)
+ }
+ if err := loader.LoadDirectory(pluginDir); err != nil {
+ return nil, err
+ }
+ return loader, nil
+}
+
+func (loader *PluginLoader) assertState(state loaderState) error {
+ if loader.state != state {
+ return fmt.Errorf("loader state must be %s, was %s", state, loader.state)
+ }
+ return nil
+}
+
+func (loader *PluginLoader) transition(from, to loaderState) error {
+ if err := loader.assertState(from); err != nil {
+ return err
+ }
+ loader.state = to
+ return nil
+}
+
+// Load loads a plugin into the plugin loader.
+func (loader *PluginLoader) Load(pl plugin.Plugin) error {
+ if err := loader.assertState(loaderLoading); err != nil {
+ return err
+ }
+
+ name := pl.Name()
+ if ppl, ok := loader.plugins[name]; ok {
+ // plugin is already loaded
+ return fmt.Errorf(
+ "plugin: %s, is duplicated in version: %s, "+
+ "while trying to load dynamically: %s",
+ name, ppl.Version(), pl.Version())
+ }
+ if sliceContainsStr(loader.disabled, name) {
+ loader.logger.Info("not loading disabled plugin", "plugin name", name)
+ return nil
+ }
+ loader.plugins[name] = pl
+ return nil
+}
+
+func sliceContainsStr(slice []string, str string) bool {
+ for _, ele := range slice {
+ if ele == str {
+ return true
+ }
+ }
+ return false
+}
+
+// LoadDirectory loads a directory of plugins into the plugin loader.
+func (loader *PluginLoader) LoadDirectory(pluginDir string) error {
+ if err := loader.assertState(loaderLoading); err != nil {
+ return err
+ }
+ newPls, err := loader.loadDynamicPlugins(pluginDir)
+ if err != nil {
+ return err
+ }
+
+ for _, pl := range newPls {
+ if err := loader.Load(pl); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (loader *PluginLoader) loadDynamicPlugins(pluginDir string) ([]plugin.Plugin, error) {
+ _, err := os.Stat(pluginDir)
+ if os.IsNotExist(err) {
+ return nil, nil
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ var plugins []plugin.Plugin
+
+ err = filepath.Walk(pluginDir, func(fi string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ if fi != pluginDir {
+ loader.logger.Info("found directory inside plugins directory", "directory", fi)
+ }
+ return nil
+ }
+
+ if info.Mode().Perm()&0111 == 0 {
+ // the file is not executable; let's not load it
+ // this is to prevent loading plugins from, for example, non-executable
+ // mounts; some /tmp mounts are marked as such for security
+ loader.logger.Error("non-executable file in plugins directory", "file", fi)
+ return nil
+ }
+
+ if newPlugins, err := loadPluginFunc(fi); err == nil {
+ plugins = append(plugins, newPlugins...)
+ } else { + return fmt.Errorf("loading plugin %s: %s", fi, err) + } + return nil + }) + + return plugins, err +} + +// Initialize initializes all loaded plugins +func (loader *PluginLoader) Initialize() error { + if err := loader.transition(loaderLoading, loaderInitializing); err != nil { + return err + } + for name, p := range loader.plugins { + if err := p.Init(loader.opts); err != nil { + loader.state = loaderFailed + return fmt.Errorf("unable to initialize plugin %s: %v", name, err) + } + } + + return loader.transition(loaderInitializing, loaderInitialized) +} + +// Inject hooks all the plugins into the BaseApp. +func (loader *PluginLoader) Inject(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*storeTypes.KVStoreKey) error { + if err := loader.transition(loaderInitialized, loaderInjecting); err != nil { + return err + } + + for _, pl := range loader.plugins { + if pl, ok := pl.(plugin.StateStreamingPlugin); ok { + if err := pl.Register(bApp, marshaller, keys); err != nil { + loader.state = loaderFailed + return err + } + } + } + + return loader.transition(loaderInjecting, loaderInjected) +} + +// Start starts all long-running plugins. +func (loader *PluginLoader) Start(wg *sync.WaitGroup) error { + if err := loader.transition(loaderInjected, loaderStarting); err != nil { + return err + } + for _, pl := range loader.plugins { + if pl, ok := pl.(plugin.StateStreamingPlugin); ok { + if err := pl.Start(wg); err != nil { + return err + } + loader.started = append(loader.started, pl) + } + } + + return loader.transition(loaderStarting, loaderStarted) +} + +// Close stops all long-running plugins. +func (loader *PluginLoader) Close() error { + switch loader.state { + case loaderClosing, loaderFailed, loaderClosed: + // nothing to do. + return nil + } + loader.state = loaderClosing + + var errs []string + started := loader.started + loader.started = nil + for _, pl := range started { + if err := pl.Close(); err != nil { + errs = append(errs, fmt.Sprintf( + "error closing plugin %s: %s", + pl.Name(), + err.Error(), + )) + } + } + if errs != nil { + loader.state = loaderFailed + return fmt.Errorf(strings.Join(errs, "\n")) + } + loader.state = loaderClosed + return nil +} diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go new file mode 100644 index 000000000000..3669b2505465 --- /dev/null +++ b/plugin/loader/preload.go @@ -0,0 +1,13 @@ +package loader + +import ( + file "github.com/cosmos/cosmos-sdk/plugin/plugins/file" +) + +// DO NOT EDIT THIS FILE +// This file is being generated as part of plugin build process +// To change it, modify the plugin/loader/preload.sh + +func init() { + Preload(file.Plugins...) 
+}
diff --git a/plugin/loader/preload.sh b/plugin/loader/preload.sh
new file mode 100644
index 000000000000..1cf448152050
--- /dev/null
+++ b/plugin/loader/preload.sh
@@ -0,0 +1,40 @@
+#!/usr/bin/env bash
+
+DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+
+LIST="$DIR/preload_list"
+
+to_preload() {
+ awk 'NF' $LIST | sed '/^#/d'
+ if [[ -n "$COSMOS_PLUGINS" ]]; then
+ for plugin in $COSMOS_PLUGINS; do
+ echo "$plugin github.com/cosmos/cosmos-sdk/plugin/plugins/$plugin *"
+ done
+ fi
+}
+
+cat < Date: Mon, 29 Nov 2021 12:07:08 -0600
Subject: [PATCH 04/43] plugin documentation

---
 plugin/README.md | 155 +++++++++++++++++++++++++++++++++++++
 plugin/example_config.toml | 13 ++++
 2 files changed, 168 insertions(+)
 create mode 100644 plugin/README.md
 create mode 100644 plugin/example_config.toml

diff --git a/plugin/README.md b/plugin/README.md
new file mode 100644
index 000000000000..9069ea7ab2ef
--- /dev/null
+++ b/plugin/README.md
@@ -0,0 +1,155 @@
+# Cosmos-SDK Plugins
This package contains an extensible plugin system for the Cosmos-SDK. Included in this top-level package is the base interface
for a Cosmos-SDK plugin, as well as more specific plugin interface definitions that build on top of this base interface.
The [loader](./loader) sub-directory contains the Go package and scripts for loading plugins into the SDK. The [plugins](./plugins)
sub-directory contains the preloaded plugins and a script for building them; this is also the directory in which the plugin loader
looks for non-preloaded plugins by default.

The base plugin interface is defined as:
```go
// Plugin is the base interface for all kinds of cosmos-sdk plugins
// It will be included in interfaces of different Plugins
type Plugin interface {
 // Name should return unique name of the plugin
 Name() string

 // Version returns current version of the plugin
 Version() string

 // Init is called once when the Plugin is being loaded
 // The plugin is passed the AppOptions for configuration
 // A plugin will not necessarily have a functional Init
 Init(env serverTypes.AppOptions) error

 // Closer interface for shutting down the plugin process
 io.Closer
}
```

Specific plugin types extend this interface, enabling them to work with the loader tooling defined in the [loader sub-directory](./loader).

The plugin system itself is configured using the `plugins` TOML mapping in the App's app.toml file. There are three
parameters for configuring the plugins: `plugins.on`, `plugins.disabled` and `plugins.dir`. `plugins.on` is a bool that
turns on or off the plugin system at large, `plugins.dir` directs the system to a directory to load plugins from, and
`plugins.disabled` is a list of names for the plugins we want to disable (useful for disabling preloaded plugins).

```toml
[plugins]
 on = false # turn the plugin system, as a whole, on or off
 disabled = ["list", "of", "plugin", "names", "to", "disable"]
 dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins"
```

As mentioned above, some plugins can be preloaded. This means they do not need to be loaded from the specified `plugins.dir` and instead
are loaded by default. At this time, the only preloaded plugin is the [file streaming service plugin](./plugins/file).
Plugins can be added to the preloaded set by adding the plugin to the [plugins dir](../../plugin/plugin.go) and modifying the [preload_list](../../plugin/loader/preload_list).
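
For illustration only, a minimal plugin satisfying the base interface might look like the following sketch (the `hello` package and its details are hypothetical, not part of the SDK):

```go
package hello

import (
	"github.com/cosmos/cosmos-sdk/plugin"
	serverTypes "github.com/cosmos/cosmos-sdk/server/types"
)

type helloPlugin struct{}

// Name returns the unique name the loader will index this plugin under
func (p *helloPlugin) Name() string { return "hello" }

// Version returns the current version of this plugin
func (p *helloPlugin) Version() string { return "0.0.1" }

// Init would read any plugin-specific parameters from the AppOptions; a no-op here
func (p *helloPlugin) Init(env serverTypes.AppOptions) error { return nil }

// Close would tear down any background processes; a no-op here
func (p *helloPlugin) Close() error { return nil }

// Plugins is the exported symbol (PLUGINS_SYMBOL) the loader looks up in the built .so file
var Plugins = []plugin.Plugin{&helloPlugin{}}
```

Built with `go build -buildmode=plugin`, the resulting `.so` file can be dropped into the configured `plugins.dir` for the loader to pick up.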
In your application, if `plugins.on` is set to `true`, use this setting to direct the invocation of `NewPluginLoader` and walk through
the steps of plugin loading, initialization, injection, starting, and closure.

e.g. in `NewSimApp`:

```go
func NewSimApp(
 logger log.Logger, db dbm.DB, traceStore io.Writer, loadLatest bool, skipUpgradeHeights map[int64]bool,
 homePath string, invCheckPeriod uint, encodingConfig simappparams.EncodingConfig,
 appOpts servertypes.AppOptions, baseAppOptions ...func(*baseapp.BaseApp),
) *SimApp {

 ...

 keys := sdk.NewKVStoreKeys(
 authtypes.StoreKey, banktypes.StoreKey, stakingtypes.StoreKey,
 minttypes.StoreKey, distrtypes.StoreKey, slashingtypes.StoreKey,
 govtypes.StoreKey, paramstypes.StoreKey, ibchost.StoreKey, upgradetypes.StoreKey,
 evidencetypes.StoreKey, ibctransfertypes.StoreKey, capabilitytypes.StoreKey,
 )

 pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY)
 if cast.ToBool(appOpts.Get(pluginsOnKey)) {
 // this loads the preloaded and any plugins found in `plugins.dir`
 pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
 if err != nil {
 // handle error
 }

 // initialize the loaded plugins
 if err := pluginLoader.Initialize(); err != nil {
 // handle error
 }

 // register the plugin(s) with the BaseApp
 if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil {
 // handle error
 }

 // start the plugin services, optionally use wg to synchronize shutdown using io.Closer
 wg := new(sync.WaitGroup)
 if err := pluginLoader.Start(wg); err != nil {
 // handle error
 }
 }

 ...

 return app
}
```

# State Streaming Plugin
The `BaseApp` package contains the interface for a `StreamingService` used to write state changes out from individual KVStores to a
file or stream, as described in [ADR-038](../docs/architecture/adr-038-state-listening.md).

Specific `StreamingService` implementations are written and loaded as plugins by extending the above interface with a
`StateStreamingPlugin` interface that adds a `Register` method used to register the plugin's `StreamingService` with the
`BaseApp` and a `Start` method to start the streaming service.

```go
// StateStreamingPlugin interface for plugins that load a streaming.Service onto a baseapp.BaseApp
type StateStreamingPlugin interface {
 // Register configures and registers the plugin streaming service with the BaseApp
 Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error

 // Start starts the background streaming process of the plugin streaming service
 Start(wg *sync.WaitGroup) error

 // Plugin is the base Plugin interface
 Plugin
}
```

A `StateStreamingPlugin` is configured from within an App using the `AppOptions` loaded from the app.toml file.
Every `StateStreamingPlugin` will be configured within the `plugins.streaming` TOML mapping. The exact keys/parameters
present in this mapping will be dependent on the specific `StateStreamingPlugin`, but we will introduce some standards
here using the file `StateStreamingPlugin`:

Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`).
For streaming plugins, a parameter `plugins.streaming.global_ack_wait_limit` is used to configure the maximum amount of time
the BaseApp will wait for positive acknowledgement of receipt by the external streaming services before it considers
the message relay to be a failure.
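
Conceptually, the BaseApp-side wait mirrors the `Commit` hook shown in ADR-038; as a sketch (not the verbatim BaseApp code):

```go
// await an ack from every registered listener, halting on failure or on the global timeout
maxWait := time.NewTicker(app.globalWaitLimit)
defer maxWait.Stop()
for _, lis := range app.abciListeners {
	select {
	case success := <-lis.ListenSuccess():
		if !success {
			app.halt()
		}
	case <-maxWait.C:
		app.halt()
	}
}
```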
+ +Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`). +It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys +(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.ack`) +that signifies whether the service operates in a fire-and-forget capacity or the BaseApp should require positive +acknowledgement of message receipt by the service. In the case of "ack" mode, the service may also need to be +configured with an acknowledgement wait limit specific to that individual service (e.g. `plugins.streaming.kafka.ack_wait_limit`). +The file `StreamingService` does not have an individual `ack_wait_limit` since it operates synchronously with the App. + +e.g. + +```toml +[plugins] + on = false # turn the plugin system, as a whole, on or off + disabled = ["list", "of", "plugin", "names", "to", "disable"] + dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" + [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName + # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services + # in milliseconds + global_ack_wait_limit = 500 + [plugins.streaming.file] # the specific parameters for the file streaming service plugin + keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] + write_dir = "path to the write directory" + prefix = "optional prefix to prepend to the generated file names" + ack = "false" # false == fire-and-forget; true == sends a message receipt success/fail signal +``` diff --git a/plugin/example_config.toml b/plugin/example_config.toml new file mode 100644 index 000000000000..2ae13b9fd2ee --- /dev/null +++ b/plugin/example_config.toml @@ -0,0 +1,13 @@ +[plugins] + on = false # turn the plugin system, as a whole, on or off + disabled = ["list", "of", "plugin", "names", "to", "disable"] + dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" + [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName + # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services + # in milliseconds + global_ack_wait_limit = 500 + [plugins.streaming.file] # the specific parameters for the file streaming service plugin + keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] + write_dir = "path to the write directory" + prefix = "optional prefix to prepend to the generated file names" + ack = "false" # false == fire-and-forget; true == sends a message receipt success/fail signal From 5d33e23152767f5b4ca63952362fba4d42959c06 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 29 Nov 2021 12:07:20 -0600 Subject: [PATCH 05/43] file writing plugin --- plugin/plugins/.gitignore | 2 + plugin/plugins/Rules.mk | 24 ++ plugin/plugins/file/file.go | 113 ++++++ plugin/plugins/file/service/service.go | 359 +++++++++++++++++ plugin/plugins/file/service/service_test.go | 421 ++++++++++++++++++++ plugin/plugins/gen_main.sh | 20 + 6 files changed, 939 insertions(+) create mode 100644 plugin/plugins/.gitignore create mode 100644 plugin/plugins/Rules.mk create mode 100644 plugin/plugins/file/file.go create mode 
100644 plugin/plugins/file/service/service.go create mode 100644 plugin/plugins/file/service/service_test.go create mode 100644 plugin/plugins/gen_main.sh diff --git a/plugin/plugins/.gitignore b/plugin/plugins/.gitignore new file mode 100644 index 000000000000..641a6c03eeba --- /dev/null +++ b/plugin/plugins/.gitignore @@ -0,0 +1,2 @@ +*.so +*/main diff --git a/plugin/plugins/Rules.mk b/plugin/plugins/Rules.mk new file mode 100644 index 000000000000..22553718c41c --- /dev/null +++ b/plugin/plugins/Rules.mk @@ -0,0 +1,24 @@ +include mk/header.mk + +$(d)_plugins:=$(d)/file +$(d)_plugins_so:=$(addsuffix .so,$($(d)_plugins)) +$(d)_plugins_main:=$(addsuffix /main/main.go,$($(d)_plugins)) + + +$($(d)_plugins_main): d:=$(d) +$($(d)_plugins_main): + $(d)/gen_main.sh "$(dir $@).." "$(call go-pkg-name,$(dir $@)/..)" + $(GOCC) fmt $@ >/dev/null + +$($(d)_plugins_so): %.so : %/main/main.go +$($(d)_plugins_so): $$(DEPS_GO) ALWAYS + $(GOCC) build -buildmode=plugin -pkgdir "$(GOPATH)/pkg/linux_amd64_dynlink" $(go-flags-with-tags) -o "$@" "$(call go-pkg-name,$(basename $@))/main" + chmod +x "$@" + +CLEAN += $($(d)_plugins_so) +CLEAN += $(foreach main_dir,$($(d)_plugins_main),$(dir $(main_dir))) + +build_plugins: $($(d)_plugins_so) + + +include mk/footer.mk diff --git a/plugin/plugins/file/file.go b/plugin/plugins/file/file.go new file mode 100644 index 000000000000..c898e8e0029b --- /dev/null +++ b/plugin/plugins/file/file.go @@ -0,0 +1,113 @@ +package file + +import ( + "fmt" + "sync" + "time" + + "github.com/spf13/cast" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/plugin/plugins/file/service" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/types" +) + +// Plugin name and version +const ( + // PLUGIN_NAME is the name for this streaming service plugin + PLUGIN_NAME = "file" + + // PLUGIN_VERSION is the version for this streaming service plugin + PLUGIN_VERSION = "0.0.1" +) + +// TOML configuration parameter keys +const ( + // PREFIX_PARAM is an optional prefix to prepend to the files we write + PREFIX_PARAM = "prefix" + + // WRITE_DIR_PARAM is the directory we want to write files out to + WRITE_DIR_PARAM = "write_dir" + + // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service + KEYS_PARAM = "keys" + + // ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode + ACK_MODE = "ack" +) + +const minWaitDuration = time.Millisecond * 10 + +// Plugins is the exported symbol for loading this plugin +var Plugins = []plugin.Plugin{ + &streamingServicePlugin{}, +} + +type streamingServicePlugin struct { + fss *service.FileStreamingService + opts serverTypes.AppOptions +} + +var _ plugin.StateStreamingPlugin = (*streamingServicePlugin)(nil) + +// Name satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Name() string { + return PLUGIN_NAME +} + +// Version satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Version() string { + return PLUGIN_VERSION +} + +// Init satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error { + ssp.opts = env + return nil +} + +// Register satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error { + // load all the 
params required for this plugin from the provided AppOptions + tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME) + filePrefix := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PREFIX_PARAM))) + fileDir := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, WRITE_DIR_PARAM))) + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) + var exposeStoreKeys []types.StoreKey + if len(exposeKeyStrings) > 0 { + exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrings)) + for _, keyStr := range exposeKeyStrings { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } else { // if none are specified, we expose all the keys + exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) + for _, storeKey := range keys { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + ack := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE))) + var err error + ssp.fss, err = service.NewFileStreamingService(fileDir, filePrefix, exposeStoreKeys, marshaller, ack) + if err != nil { + return err + } + // register the streaming service with the BaseApp + bApp.SetStreamingService(ssp.fss) + return nil +} + +// Start satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Start(wg *sync.WaitGroup) error { + return ssp.fss.Stream(wg) +} + +// Close satisfies io.Closer +func (ssp *streamingServicePlugin) Close() error { + return ssp.fss.Close() +} diff --git a/plugin/plugins/file/service/service.go b/plugin/plugins/file/service/service.go new file mode 100644 index 000000000000..c8b7923e1dd2 --- /dev/null +++ b/plugin/plugins/file/service/service.go @@ -0,0 +1,359 @@ +package service + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/store/types" + sdk "github.com/cosmos/cosmos-sdk/types" +) + +/* +The naming schema and data format for the files this service writes out to is as such: + +After every `BeginBlock` request a new file is created with the name `block-{N}-begin`, where N is the block number. All +subsequent state changes are written out to this file until the first `DeliverTx` request is received. At the head of these files, +the length-prefixed protobuf encoded `BeginBlock` request is written, and the response is written at the tail. + +After every `DeliverTx` request a new file is created with the name `block-{N}-tx-{M}` where N is the block number and M +is the tx number in the block (i.e. 0, 1, 2...). All subsequent state changes are written out to this file until the next +`DeliverTx` request is received or an `EndBlock` request is received. At the head of these files, the length-prefixed protobuf +encoded `DeliverTx` request is written, and the response is written at the tail. + +After every `EndBlock` request a new file is created with the name `block-{N}-end`, where N is the block number. All +subsequent state changes are written out to this file until the next `BeginBlock` request is received. At the head of these files, +the length-prefixed protobuf encoded `EndBlock` request is written, and the response is written at the tail. 
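+
+For example (illustrative), with the file prefix "sim" and a block at height 10 containing two txs, the files written out would be:
+
+sim-block-10-begin
+sim-block-10-tx-0
+sim-block-10-tx-1
+sim-block-10-end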
+*/
+
+var _ baseapp.StreamingService = (*FileStreamingService)(nil)
+
+// FileStreamingService is a concrete implementation of streaming.Service that writes state changes out to files
+type FileStreamingService struct {
+ listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp
+ srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to
+ filePrefix string // optional prefix for each of the generated files
+ writeDir string // directory to write files into
+ codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files
+ stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received
+ stateCacheLock *sync.Mutex // mutex for the state cache
+ currentBlockNumber int64 // the current block number
+ currentTxIndex int64 // the index of the current tx
+ quitChan chan struct{} // channel used to synchronize closure
+
+ ack bool // false == fire-and-forget; true == sends success/failure signal
+ ackStatus bool // success/failure status, to be sent to ackChan
+ ackChan chan bool // the channel used to send the success/failure signal
+}
+
+// IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener
+// every time we begin writing to a new file
+type IntermediateWriter struct {
+ outChan chan<- []byte
+}
+
+// NewIntermediateWriter creates an instance of an IntermediateWriter that sends to the provided channel
+func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter {
+ return &IntermediateWriter{
+ outChan: outChan,
+ }
+}
+
+// Write satisfies io.Writer
+func (iw *IntermediateWriter) Write(b []byte) (int, error) {
+ iw.outChan <- b
+ return len(b), nil
+}
+
+// NewFileStreamingService creates a new FileStreamingService for the provided writeDir, (optional) filePrefix, and storeKeys
+func NewFileStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec,
+ ack bool) (*FileStreamingService, error) {
+ listenChan := make(chan []byte)
+ iw := NewIntermediateWriter(listenChan)
+ listener := types.NewStoreKVPairWriteListener(iw, c)
+ listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys))
+ // in this case, we are using the same listener for each Store
+ for _, key := range storeKeys {
+ listeners[key] = append(listeners[key], listener)
+ }
+ // check that the writeDir exists and is writeable so that we can catch the error here at initialization if it is not
+ // we don't open a dstFile until we receive our first ABCI message
+ if err := isDirWriteable(writeDir); err != nil {
+ return nil, err
+ }
+ return &FileStreamingService{
+ listeners: listeners,
+ srcChan: listenChan,
+ filePrefix: filePrefix,
+ writeDir: writeDir,
+ codec: c,
+ stateCache: make([][]byte, 0),
+ stateCacheLock: new(sync.Mutex),
+ ack: ack,
+ ackChan: make(chan bool),
+ }, nil
+}
+
+// Listeners returns the FileStreamingService's underlying WriteListeners, used for registering them with the BaseApp
+func (fss *FileStreamingService) Listeners() map[types.StoreKey][]types.WriteListener {
+ return fss.listeners
+}
+
+// ListenBeginBlock satisfies the Hook interface
+// It writes out the received BeginBlock request and response and the resulting state changes out to a file as described
+// in the naming schema above
+func (fss *FileStreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res
abci.ResponseBeginBlock) error {
+ // reset the ack status
+ fss.ackStatus = true
+ // generate the new file
+ dstFile, err := fss.openBeginBlockFile(req)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // write req to file
+ lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // write all state changes cached for this stage to file
+ fss.stateCacheLock.Lock()
+ for _, stateChange := range fss.stateCache {
+ if _, err = dstFile.Write(stateChange); err != nil {
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ fss.ackStatus = false
+ return err
+ }
+ }
+ // reset cache
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ // write res to file
+ lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // close file
+ if err := dstFile.Close(); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ return nil
+}
+
+func (fss *FileStreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) {
+ fss.currentBlockNumber = req.GetHeader().Height
+ fss.currentTxIndex = 0
+ fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber)
+ if fss.filePrefix != "" {
+ fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
+ }
+ return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600)
+}
+
+// ListenDeliverTx satisfies the Hook interface
+// It writes out the received DeliverTx request and response and the resulting state changes out to a file as described
+// in the naming schema above
+func (fss *FileStreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error {
+ // generate the new file
+ dstFile, err := fss.openDeliverTxFile()
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // write req to file
+ lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // write all state changes cached for this stage to file
+ fss.stateCacheLock.Lock()
+ for _, stateChange := range fss.stateCache {
+ if _, err = dstFile.Write(stateChange); err != nil {
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ fss.ackStatus = false
+ return err
+ }
+ }
+ // reset cache
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ // write res to file
+ lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // close file
+ if err := dstFile.Close(); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ return nil
+}
+
+func (fss *FileStreamingService) openDeliverTxFile() (*os.File, error) {
+ fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex)
+ if fss.filePrefix != "" {
+ fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
+ }
+ fss.currentTxIndex++
+ return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600)
+}
+
+// ListenEndBlock satisfies the Hook interface
+// It writes out the received
EndBlock request and response and the resulting state changes out to a file as described
+// in the naming schema above
+func (fss *FileStreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error {
+ // generate the new file
+ dstFile, err := fss.openEndBlockFile()
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // write req to file
+ lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // write all state changes cached for this stage to file
+ fss.stateCacheLock.Lock()
+ for _, stateChange := range fss.stateCache {
+ if _, err = dstFile.Write(stateChange); err != nil {
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ fss.ackStatus = false
+ return err
+ }
+ }
+ // reset cache
+ fss.stateCache = nil
+ fss.stateCacheLock.Unlock()
+ // write res to file
+ lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res)
+ if err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ // close file
+ if err := dstFile.Close(); err != nil {
+ fss.ackStatus = false
+ return err
+ }
+ return nil
+}
+
+func (fss *FileStreamingService) openEndBlockFile() (*os.File, error) {
+ fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber)
+ if fss.filePrefix != "" {
+ fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName)
+ }
+ return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600)
+}
+
+// Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received
+// Do we need this and an intermediate writer? We could just write directly to the buffer on calls to Write
+// But then we don't support a Stream interface, which could be needed for other types of streamers
+func (fss *FileStreamingService) Stream(wg *sync.WaitGroup) error {
+ if fss.quitChan != nil {
+ return errors.New("`Stream` has already been called.
The stream needs to be closed before it can be started again")
+ }
+ fss.quitChan = make(chan struct{})
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ for {
+ select {
+ case <-fss.quitChan:
+ return
+ case by := <-fss.srcChan:
+ fss.stateCacheLock.Lock()
+ fss.stateCache = append(fss.stateCache, by)
+ fss.stateCacheLock.Unlock()
+ }
+ }
+ }()
+ return nil
+}
+
+// Close satisfies the io.Closer interface
+func (fss *FileStreamingService) Close() error {
+ // guard against Close being called before Stream has created the quit channel
+ if fss.quitChan != nil {
+ close(fss.quitChan)
+ }
+ return nil
+}
+
+// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service;
+// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt
+func (fss *FileStreamingService) ListenSuccess() <-chan bool {
+ // if we are operating in fire-and-forget mode, immediately send a "success" signal
+ if !fss.ack {
+ go func() {
+ fss.ackChan <- true
+ }()
+ } else {
+ go func() {
+ // the FileStreamingService operates synchronously with the App, but this signal signifies whether an error occurred
+ // during its processing cycle
+ fss.ackChan <- fss.ackStatus
+ }()
+ }
+ return fss.ackChan
+}
+
+// SetAckMode is used to set the ack mode for testing purposes
+func (fss *FileStreamingService) SetAckMode(on bool) {
+ fss.ack = on
+}
+
+// SetAckStatus is used to set the ack status for testing purposes
+func (fss *FileStreamingService) SetAckStatus(status bool) {
+ fss.ackStatus = status
+}
+
+// isDirWriteable checks if dir is writable by writing and removing a file
+// to dir. It returns nil if dir is writable.
+func isDirWriteable(dir string) error {
+ f := path.Join(dir, ".touch")
+ if err := ioutil.WriteFile(f, []byte(""), 0600); err != nil {
+ return err
+ }
+ return os.Remove(f)
+}
diff --git a/plugin/plugins/file/service/service_test.go b/plugin/plugins/file/service/service_test.go
new file mode 100644
index 000000000000..c7deab81791f
--- /dev/null
+++ b/plugin/plugins/file/service/service_test.go
@@ -0,0 +1,421 @@
+package service
+
+import (
+ "encoding/binary"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sync"
+ "testing"
+
+ "github.com/cosmos/cosmos-sdk/codec"
+ codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
+ "github.com/cosmos/cosmos-sdk/store/types"
+ sdk "github.com/cosmos/cosmos-sdk/types"
+
+ "github.com/stretchr/testify/require"
+ abci "github.com/tendermint/tendermint/abci/types"
+ types1 "github.com/tendermint/tendermint/proto/tendermint/types"
+)
+
+var (
+ interfaceRegistry = codecTypes.NewInterfaceRegistry()
+ testMarshaller = codec.NewProtoCodec(interfaceRegistry)
+ testStreamingService *FileStreamingService
+ testListener1, testListener2 types.WriteListener
+ emptyContext = sdk.Context{}
+
+ // test abci message types
+ mockHash = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
+ testBeginBlockReq = abci.RequestBeginBlock{
+ Header: types1.Header{
+ Height: 1,
+ },
+ ByzantineValidators: []abci.Evidence{},
+ Hash: mockHash,
+ LastCommitInfo: abci.LastCommitInfo{
+ Round: 1,
+ Votes: []abci.VoteInfo{},
+ },
+ }
+ testBeginBlockRes = abci.ResponseBeginBlock{
+ Events: []abci.Event{
+ {
+ Type: "testEventType1",
+ },
+ {
+ Type: "testEventType2",
+ },
+ },
+ }
+ testEndBlockReq = abci.RequestEndBlock{
+ Height: 1,
+ }
+ testEndBlockRes = abci.ResponseEndBlock{
+ Events: []abci.Event{},
+ ConsensusParamUpdates: &types1.ConsensusParams{},
+ ValidatorUpdates: []abci.ValidatorUpdate{},
+ }
+ mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1}
+ testDeliverTxReq1 = abci.RequestDeliverTx{
+ Tx: mockTxBytes1,
+ } + mockTxBytes2 = []byte{8, 7, 6, 5, 4, 3, 2} + testDeliverTxReq2 = abci.RequestDeliverTx{ + Tx: mockTxBytes2, + } + mockTxResponseData1 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes1 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData1, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + mockTxResponseData2 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes2 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData2, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + + // mock store keys + mockStoreKey1 = sdk.NewKVStoreKey("mockStore1") + mockStoreKey2 = sdk.NewKVStoreKey("mockStore2") + + // file stuff + testPrefix = "testPrefix" + testDir = "./.test" + + // mock state changes + mockKey1 = []byte{1, 2, 3} + mockValue1 = []byte{3, 2, 1} + mockKey2 = []byte{2, 3, 4} + mockValue2 = []byte{4, 3, 2} + mockKey3 = []byte{3, 4, 5} + mockValue3 = []byte{5, 4, 3} +) + +func TestIntermediateWriter(t *testing.T) { + outChan := make(chan []byte, 0) + iw := NewIntermediateWriter(outChan) + require.IsType(t, &IntermediateWriter{}, iw) + testBytes := []byte{1, 2, 3, 4, 5} + var length int + var err error + waitChan := make(chan struct{}, 0) + go func() { + length, err = iw.Write(testBytes) + waitChan <- struct{}{} + }() + receivedBytes := <-outChan + <-waitChan + require.Equal(t, len(testBytes), length) + require.Equal(t, testBytes, receivedBytes) + require.Nil(t, err) +} + +func TestFileStreamingService(t *testing.T) { + if os.Getenv("CI") != "" { + t.Skip("Skipping TestFileStreamingService in CI environment") + } + err := os.Mkdir(testDir, 0700) + require.Nil(t, err) + defer os.RemoveAll(testDir) + + testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} + testStreamingService, err = NewFileStreamingService(testDir, testPrefix, testKeys, testMarshaller, false) + require.Nil(t, err) + require.IsType(t, &FileStreamingService{}, testStreamingService) + require.Equal(t, testPrefix, testStreamingService.filePrefix) + require.Equal(t, testDir, testStreamingService.writeDir) + require.Equal(t, testMarshaller, testStreamingService.codec) + testListener1 = testStreamingService.listeners[mockStoreKey1][0] + testListener2 = testStreamingService.listeners[mockStoreKey2][0] + wg := new(sync.WaitGroup) + testStreamingService.Stream(wg) + testListenBeginBlock(t) + testListenDeliverTx1(t) + testListenDeliverTx2(t) + testListenEndBlock(t) + + // status is success but not operating in ack mode + success := <-testStreamingService.ListenSuccess() + require.Equal(t, success, true) + + // status is failure but not operating in ack mode + testStreamingService.SetAckStatus(false) + success = <-testStreamingService.ListenSuccess() + require.Equal(t, success, true) + + // status is failure and operating in ack mode + testStreamingService.SetAckMode(true) + success = <-testStreamingService.ListenSuccess() + require.Equal(t, success, false) + + // status is success and operating in ack mode + testStreamingService.SetAckStatus(true) + success = <-testStreamingService.ListenSuccess() + require.Equal(t, success, true) + + testStreamingService.Close() + wg.Wait() +} + +func testListenBeginBlock(t *testing.T) { + expectedBeginBlockReqBytes, err := testMarshaller.Marshal(&testBeginBlockReq) + require.Nil(t, err) + expectedBeginBlockResBytes, err := testMarshaller.Marshal(&testBeginBlockRes) + require.Nil(t, err) + + // write state changes + 
testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes) + require.Nil(t, err) + + // load the file, checking that it was created with the expected name + fileName := fmt.Sprintf("%s-block-%d-begin", testPrefix, testBeginBlockReq.GetHeader().Height) + fileBytes, err := readInFile(fileName) + require.Nil(t, err) + + // segment the file into the separate gRPC messages and check the correctness of each + segments, err := segmentBytes(fileBytes) + require.Nil(t, err) + require.Equal(t, 5, len(segments)) + require.Equal(t, expectedBeginBlockReqBytes, segments[0]) + require.Equal(t, expectedKVPair1, segments[1]) + require.Equal(t, expectedKVPair2, segments[2]) + require.Equal(t, expectedKVPair3, segments[3]) + require.Equal(t, expectedBeginBlockResBytes, segments[4]) +} + +func testListenDeliverTx1(t *testing.T) { + expectedDeliverTxReq1Bytes, err := testMarshaller.Marshal(&testDeliverTxReq1) + require.Nil(t, err) + expectedDeliverTxRes1Bytes, err := testMarshaller.Marshal(&testDeliverTxRes1) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1) + require.Nil(t, err) + + // load the file, checking that it was created with the expected name + fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 0) + fileBytes, err := readInFile(fileName) + require.Nil(t, err) + + // segment the file into the separate gRPC messages and check the correctness of each + segments, err := segmentBytes(fileBytes) + require.Nil(t, err) + require.Equal(t, 5, len(segments)) + require.Equal(t, expectedDeliverTxReq1Bytes, segments[0]) + require.Equal(t, expectedKVPair1, segments[1]) + require.Equal(t, expectedKVPair2, segments[2]) + require.Equal(t, expectedKVPair3, segments[3]) + require.Equal(t, expectedDeliverTxRes1Bytes, segments[4]) +} + +func 
testListenDeliverTx2(t *testing.T) { + expectedDeliverTxReq2Bytes, err := testMarshaller.Marshal(&testDeliverTxReq2) + require.Nil(t, err) + expectedDeliverTxRes2Bytes, err := testMarshaller.Marshal(&testDeliverTxRes2) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2) + require.Nil(t, err) + + // load the file, checking that it was created with the expected name + fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 1) + fileBytes, err := readInFile(fileName) + require.Nil(t, err) + + // segment the file into the separate gRPC messages and check the correctness of each + segments, err := segmentBytes(fileBytes) + require.Nil(t, err) + require.Equal(t, 5, len(segments)) + require.Equal(t, expectedDeliverTxReq2Bytes, segments[0]) + require.Equal(t, expectedKVPair1, segments[1]) + require.Equal(t, expectedKVPair2, segments[2]) + require.Equal(t, expectedKVPair3, segments[3]) + require.Equal(t, expectedDeliverTxRes2Bytes, segments[4]) +} + +func testListenEndBlock(t *testing.T) { + expectedEndBlockReqBytes, err := testMarshaller.Marshal(&testEndBlockReq) + require.Nil(t, err) + expectedEndBlockResBytes, err := testMarshaller.Marshal(&testEndBlockRes) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes) + require.Nil(t, err) + + // load the file, checking that it was created with the expected name + fileName := fmt.Sprintf("%s-block-%d-end", testPrefix, testEndBlockReq.Height) + fileBytes, err := readInFile(fileName) + require.Nil(t, err) + + // segment the file into the separate gRPC messages and check the correctness of each + segments, err := segmentBytes(fileBytes) + require.Nil(t, err) + require.Equal(t, 5, len(segments)) + require.Equal(t, expectedEndBlockReqBytes, 
segments[0])
+	require.Equal(t, expectedKVPair1, segments[1])
+	require.Equal(t, expectedKVPair2, segments[2])
+	require.Equal(t, expectedKVPair3, segments[3])
+	require.Equal(t, expectedEndBlockResBytes, segments[4])
+}
+
+func readInFile(name string) ([]byte, error) {
+	path := filepath.Join(testDir, name)
+	return ioutil.ReadFile(path)
+}
+
+// Returns all of the protobuf messages contained in the byte array as an array of byte arrays
+// The messages have their length prefix removed
+func segmentBytes(bz []byte) ([][]byte, error) {
+	var err error
+	segments := make([][]byte, 0)
+	for len(bz) > 0 {
+		var segment []byte
+		segment, bz, err = getHeadSegment(bz)
+		if err != nil {
+			return nil, err
+		}
+		segments = append(segments, segment)
+	}
+	return segments, nil
+}
+
+// Returns the bytes for the leading protobuf message in the byte array, with its length prefix removed, along with the remainder of the byte array
+func getHeadSegment(bz []byte) ([]byte, []byte, error) {
+	size, prefixSize := binary.Uvarint(bz)
+	if prefixSize < 0 {
+		return nil, nil, fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", prefixSize)
+	}
+	if size > uint64(len(bz)-prefixSize) {
+		return nil, nil, fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-prefixSize)
+	}
+	return bz[prefixSize:(uint64(prefixSize) + size)], bz[uint64(prefixSize)+size:], nil
+}
diff --git a/plugin/plugins/gen_main.sh b/plugin/plugins/gen_main.sh
new file mode 100644
index 000000000000..8fbc11465148
--- /dev/null
+++ b/plugin/plugins/gen_main.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+
+dir=${1:?first parameter with dir to work in is required}
+pkg=${2:?second parameter with full name of the package is required}
+main_pkg="$dir/main"
+
+shortpkg="uniquepkgname"
+
+mkdir -p "$main_pkg"
+
+cat > "$main_pkg/main.go" < Date: Mon, 29 Nov 2021 12:07:54 -0600
Subject: [PATCH 06/43] baseapp pkg updates

---
 baseapp/abci.go          | 16 ++++++++++++++++
 baseapp/baseapp.go       |  5 +++++
 baseapp/options.go       |  7 +++++++
 baseapp/streaming.go     |  3 +++
 store/types/listening.go |  5 +++--
 5 files changed, 34 insertions(+), 2 deletions(-)

diff --git a/baseapp/abci.go b/baseapp/abci.go
index c9b1a6fad98a..079c150294be 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -338,6 +338,22 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) {
 		app.halt()
 	}
 
+	// each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel
+	// but the BaseApp also imposes a global wait limit
+	if app.globalWaitLimit > 0 {
+		maxWait := time.NewTicker(app.globalWaitLimit)
+		for _, lis := range app.abciListeners {
+			select {
+			case success := <-lis.ListenSuccess():
+				if success == false {
+					app.halt()
+				}
+			case <-maxWait.C:
+				app.halt()
+			}
+		}
+	}
+
 	if app.snapshotInterval > 0 && uint64(header.Height)%app.snapshotInterval == 0 {
 		go app.snapshot(header.Height)
 	}
diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go
index 45c65b8030ee..b49cc1f43d1b 100644
--- a/baseapp/baseapp.go
+++ b/baseapp/baseapp.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"time"
 
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/log"
@@ -128,6 +129,10 @@ type BaseApp struct { // nolint: maligned
 	// abciListeners for hooking into the ABCI message processing of the BaseApp
 	// and exposing the requests and responses to external consumers
 	abciListeners []ABCIListener
+
+	// globalWaitLimit is the maximum amount of time the BaseApp will wait for positive acknowledgement of message
+	
// receipt from ABCIListeners before halting + globalWaitLimit time.Duration } // NewBaseApp returns a reference to an initialized BaseApp. It accepts a diff --git a/baseapp/options.go b/baseapp/options.go index 4b24c108da06..b21fb39034f2 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -3,6 +3,7 @@ package baseapp import ( "fmt" "io" + "time" dbm "github.com/tendermint/tm-db" @@ -245,3 +246,9 @@ func (app *BaseApp) SetStreamingService(s StreamingService) { // BaseApp will pass BeginBlock, DeliverTx, and EndBlock requests and responses to the streaming services to update their ABCI context app.abciListeners = append(app.abciListeners, s) } + +// SetGlobalWaitLimit is used to set the maximum amount of time the BaseApp will wait for positive acknowledgement +// of message receipt from ABCIListeners before halting +func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) { + app.globalWaitLimit = t +} diff --git a/baseapp/streaming.go b/baseapp/streaming.go index 39e0f1ca6e9b..f37de0d87928 100644 --- a/baseapp/streaming.go +++ b/baseapp/streaming.go @@ -18,6 +18,9 @@ type ABCIListener interface { ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error // ListenDeliverTx updates the steaming service with the latest DeliverTx messages ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error + // ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service + // after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt + ListenSuccess() <-chan bool } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks diff --git a/store/types/listening.go b/store/types/listening.go index 2294a5ada531..230374d50465 100644 --- a/store/types/listening.go +++ b/store/types/listening.go @@ -8,9 +8,10 @@ import ( // WriteListener interface for streaming data out from a listenkv.Store type WriteListener interface { + // OnWrite interface used for emitting updated KVPairs // if value is nil then it was deleted - // storeKey indicates the source KVStore, to facilitate using the the same WriteListener across separate KVStores - // delete bool indicates if it was a delete; true: delete, false: set + // storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores + // delete bool indicates if it was a delete operation; true: delete, false: set OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error } From bdbeaabdfbd798ab3c929007d29fcdaf9787f8fa Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 29 Nov 2021 12:08:05 -0600 Subject: [PATCH 07/43] simapp integration --- simapp/app.go | 39 ++++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/simapp/app.go b/simapp/app.go index 8e9b94075df8..7f1490c0b60d 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -2,10 +2,12 @@ package simapp import ( "encoding/json" + "fmt" "io" "net/http" "os" "path/filepath" + "sync" "github.com/cosmos/cosmos-sdk/testutil/testdata_pulsar" @@ -22,12 +24,13 @@ import ( "github.com/cosmos/cosmos-sdk/client/grpc/tmservice" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/codec/types" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/plugin/loader" "github.com/cosmos/cosmos-sdk/server" 
"github.com/cosmos/cosmos-sdk/server/api" "github.com/cosmos/cosmos-sdk/server/config" servertypes "github.com/cosmos/cosmos-sdk/server/types" simappparams "github.com/cosmos/cosmos-sdk/simapp/params" - "github.com/cosmos/cosmos-sdk/store/streaming" storetypes "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/module" @@ -231,10 +234,36 @@ func NewSimApp( // not include this key. memKeys := sdk.NewMemoryStoreKeys(capabilitytypes.MemStoreKey, "testingkey") - // configure state listening capabilities using AppOptions - // we are doing nothing with the returned streamingServices and waitGroup in this case - if _, _, err := streaming.LoadStreamingServices(bApp, appOpts, appCodec, keys); err != nil { - tmos.Exit(err.Error()) + pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) + if cast.ToBool(appOpts.Get(pluginsOnKey)) { + // set the global wait limit for state streaming plugin message receipt acknowledgement + globalWaitLimitKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.GLOBAL_ACK_WAIT_LIMIT_TOML_KEY) + globalWaitLimit := cast.ToDuration(appOpts.Get(globalWaitLimitKey)) + if globalWaitLimit > 0 { + bApp.SetGlobalWaitLimit(globalWaitLimit) + } + + // this loads the preloaded and any plugins found in `plugins.dir` + pluginLoader, err := loader.NewPluginLoader(appOpts, logger) + if err != nil { + // handle error + } + + // initialize the loaded plugins + if err := pluginLoader.Initialize(); err != nil { + // handle error + } + + // register the plugin(s) with the BaseApp + if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil { + // handle error + } + + // start the plugin services, optionally use wg to synchronize shutdown using io.Closer + wg := new(sync.WaitGroup) + if err := pluginLoader.Start(wg); err != nil { + // handler error + } } app := &SimApp{ From f2306ff4476f502d383a5b96c22bbfa8eb70a025 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 29 Nov 2021 12:14:54 -0600 Subject: [PATCH 08/43] remove old store/streaming pkg --- store/streaming/README.md | 67 ---- store/streaming/constructor.go | 137 -------- store/streaming/constructor_test.go | 43 --- store/streaming/file/example_config.toml | 10 - store/streaming/file/service.go | 279 ---------------- store/streaming/file/service_test.go | 401 ----------------------- 6 files changed, 937 deletions(-) delete mode 100644 store/streaming/README.md delete mode 100644 store/streaming/constructor.go delete mode 100644 store/streaming/constructor_test.go delete mode 100644 store/streaming/file/example_config.toml delete mode 100644 store/streaming/file/service.go delete mode 100644 store/streaming/file/service_test.go diff --git a/store/streaming/README.md b/store/streaming/README.md deleted file mode 100644 index 46e343416a52..000000000000 --- a/store/streaming/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# State Streaming Service - -This package contains the constructors for the `StreamingService`s used to write state changes out from individual KVStores to a -file or stream, as described in [ADR-038](../../docs/architecture/adr-038-state-listening.md) and defined in [types/streaming.go](../../baseapp/streaming.go). -The child directories contain the implementations for specific output destinations. - -Currently, a `StreamingService` implementation that writes state changes out to files is supported, in the future support for additional -output destinations can be added. 
- -The `StreamingService` is configured from within an App using the `AppOptions` loaded from the app.toml file: - -```toml -[store] - streamers = [ # if len(streamers) > 0 we are streaming - "file", # name of the streaming service, used by constructor - ] - -[streamers] - [streamers.file] - keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - write_dir = "path to the write directory" - prefix = "optional prefix to prepend to the generated file names" -``` - -`store.streamers` contains a list of the names of the `StreamingService` implementations to employ which are used by `ServiceTypeFromString` -to return the `ServiceConstructor` for that particular implementation: - -```go -listeners := cast.ToStringSlice(appOpts.Get("store.streamers")) -for _, listenerName := range listeners { - constructor, err := ServiceTypeFromString(listenerName) - if err != nil { - // handle error - } -} -``` - -`streamers` contains a mapping of the specific `StreamingService` implementation name to the configuration parameters for that specific service. -`streamers.x.keys` contains the list of `StoreKey` names for the KVStores to expose using this service and is required by every type of `StreamingService`. -In order to expose *all* KVStores, we can include `*` in this list. An empty list is equivalent to turning the service off. - -Additional configuration parameters are optional and specific to the implementation. -In the case of the file streaming service, `streamers.file.write_dir` contains the path to the -directory to write the files to, and `streamers.file.prefix` contains an optional prefix to prepend to the output files to prevent potential collisions -with other App `StreamingService` output files. - -The `ServiceConstructor` accepts `AppOptions`, the store keys collected using `streamers.x.keys`, a `BinaryMarshaller` and -returns a `StreamingService` implementation. The `AppOptions` are passed in to provide access to any implementation specific configuration options, -e.g. in the case of the file streaming service the `streamers.file.write_dir` and `streamers.file.prefix`. - -```go -streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) -if err != nil { - // handler error -} -``` - -The returned `StreamingService` is loaded into the BaseApp using the BaseApp's `SetStreamingService` method. -The `Stream` method is called on the service to begin the streaming process. Depending on the implementation this process -may be synchronous or asynchronous with the message processing of the state machine. 
- -```go -bApp.SetStreamingService(streamingService) -wg := new(sync.WaitGroup) -quitChan := make(chan struct{}) -streamingService.Stream(wg, quitChan) -``` diff --git a/store/streaming/constructor.go b/store/streaming/constructor.go deleted file mode 100644 index e576f84b83d1..000000000000 --- a/store/streaming/constructor.go +++ /dev/null @@ -1,137 +0,0 @@ -package streaming - -import ( - "fmt" - "strings" - "sync" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - serverTypes "github.com/cosmos/cosmos-sdk/server/types" - "github.com/cosmos/cosmos-sdk/store/streaming/file" - "github.com/cosmos/cosmos-sdk/store/types" - - "github.com/spf13/cast" -) - -// ServiceConstructor is used to construct a streaming service -type ServiceConstructor func(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) - -// ServiceType enum for specifying the type of StreamingService -type ServiceType int - -const ( - Unknown ServiceType = iota - File - // add more in the future -) - -// ServiceTypeFromString returns the streaming.ServiceType corresponding to the provided name -func ServiceTypeFromString(name string) ServiceType { - switch strings.ToLower(name) { - case "file", "f": - return File - default: - return Unknown - } -} - -// String returns the string name of a streaming.ServiceType -func (sst ServiceType) String() string { - switch sst { - case File: - return "file" - default: - return "unknown" - } -} - -// ServiceConstructorLookupTable is a mapping of streaming.ServiceTypes to streaming.ServiceConstructors -var ServiceConstructorLookupTable = map[ServiceType]ServiceConstructor{ - File: NewFileStreamingService, -} - -// NewServiceConstructor returns the streaming.ServiceConstructor corresponding to the provided name -func NewServiceConstructor(name string) (ServiceConstructor, error) { - ssType := ServiceTypeFromString(name) - if ssType == Unknown { - return nil, fmt.Errorf("unrecognized streaming service name %s", name) - } - if constructor, ok := ServiceConstructorLookupTable[ssType]; ok && constructor != nil { - return constructor, nil - } - return nil, fmt.Errorf("streaming service constructor of type %s not found", ssType.String()) -} - -// NewFileStreamingService is the streaming.ServiceConstructor function for creating a FileStreamingService -func NewFileStreamingService(opts serverTypes.AppOptions, keys []types.StoreKey, marshaller codec.BinaryCodec) (baseapp.StreamingService, error) { - filePrefix := cast.ToString(opts.Get("streamers.file.prefix")) - fileDir := cast.ToString(opts.Get("streamers.file.write_dir")) - return file.NewStreamingService(fileDir, filePrefix, keys, marshaller) -} - -// LoadStreamingServices is a function for loading StreamingServices onto the BaseApp using the provided AppOptions, codec, and keys -// It returns the WaitGroup and quit channel used to synchronize with the streaming services and any error that occurs during the setup -func LoadStreamingServices(bApp *baseapp.BaseApp, appOpts serverTypes.AppOptions, appCodec codec.BinaryCodec, keys map[string]*types.KVStoreKey) ([]baseapp.StreamingService, *sync.WaitGroup, error) { - // waitgroup and quit channel for optional shutdown coordination of the streaming service(s) - wg := new(sync.WaitGroup) - // configure state listening capabilities using AppOptions - streamers := cast.ToStringSlice(appOpts.Get("store.streamers")) - activeStreamers := make([]baseapp.StreamingService, 0, len(streamers)) - for _, 
streamerName := range streamers { - // get the store keys allowed to be exposed for this streaming service - exposeKeyStrs := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("streamers.%s.keys", streamerName))) - var exposeStoreKeys []types.StoreKey - if exposeAll(exposeKeyStrs) { // if list contains `*`, expose all StoreKeys - exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) - for _, storeKey := range keys { - exposeStoreKeys = append(exposeStoreKeys, storeKey) - } - } else { - exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrs)) - for _, keyStr := range exposeKeyStrs { - if storeKey, ok := keys[keyStr]; ok { - exposeStoreKeys = append(exposeStoreKeys, storeKey) - } - } - } - if len(exposeStoreKeys) == 0 { // short circuit if we are not exposing anything - continue - } - // get the constructor for this streamer name - constructor, err := NewServiceConstructor(streamerName) - if err != nil { - // close any services we may have already spun up before hitting the error on this one - for _, activeStreamer := range activeStreamers { - activeStreamer.Close() - } - return nil, nil, err - } - // generate the streaming service using the constructor, appOptions, and the StoreKeys we want to expose - streamingService, err := constructor(appOpts, exposeStoreKeys, appCodec) - if err != nil { - // close any services we may have already spun up before hitting the error on this one - for _, activeStreamer := range activeStreamers { - activeStreamer.Close() - } - return nil, nil, err - } - // register the streaming service with the BaseApp - bApp.SetStreamingService(streamingService) - // kick off the background streaming service loop - streamingService.Stream(wg) - // add to the list of active streamers - activeStreamers = append(activeStreamers, streamingService) - } - // if there are no active streamers, activeStreamers is empty (len == 0) and the waitGroup is not waiting on anything - return activeStreamers, wg, nil -} - -func exposeAll(list []string) bool { - for _, ele := range list { - if ele == "*" { - return true - } - } - return false -} diff --git a/store/streaming/constructor_test.go b/store/streaming/constructor_test.go deleted file mode 100644 index 5f9d58016f68..000000000000 --- a/store/streaming/constructor_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package streaming - -import ( - "testing" - - "github.com/cosmos/cosmos-sdk/codec" - codecTypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/store/streaming/file" - "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/stretchr/testify/require" -) - -type fakeOptions struct{} - -func (f *fakeOptions) Get(string) interface{} { return nil } - -var ( - mockOptions = new(fakeOptions) - mockKeys = []types.StoreKey{sdk.NewKVStoreKey("mockKey1"), sdk.NewKVStoreKey("mockKey2")} - interfaceRegistry = codecTypes.NewInterfaceRegistry() - testMarshaller = codec.NewProtoCodec(interfaceRegistry) -) - -func TestStreamingServiceConstructor(t *testing.T) { - _, err := NewServiceConstructor("unexpectedName") - require.NotNil(t, err) - - constructor, err := NewServiceConstructor("file") - require.Nil(t, err) - var expectedType ServiceConstructor - require.IsType(t, expectedType, constructor) - - serv, err := constructor(mockOptions, mockKeys, testMarshaller) - require.Nil(t, err) - require.IsType(t, &file.StreamingService{}, serv) - listeners := serv.Listeners() - for _, key := range mockKeys { - _, ok := listeners[key] - require.True(t, ok) - } -} diff --git 
a/store/streaming/file/example_config.toml b/store/streaming/file/example_config.toml deleted file mode 100644 index 8202bd8ef559..000000000000 --- a/store/streaming/file/example_config.toml +++ /dev/null @@ -1,10 +0,0 @@ -[store] - streamers = [ # if len(streamers) > 0 we are streaming - "file", # name of the streaming service, used by constructor - ] - -[streamers] - [streamers.file] - keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"] - write_dir = "path to the write directory" - prefix = "optional prefix to prepend to the generated file names" diff --git a/store/streaming/file/service.go b/store/streaming/file/service.go deleted file mode 100644 index 02feb403e99b..000000000000 --- a/store/streaming/file/service.go +++ /dev/null @@ -1,279 +0,0 @@ -package file - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path" - "path/filepath" - "sync" - - abci "github.com/tendermint/tendermint/abci/types" - - "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/cosmos/cosmos-sdk/codec" - "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" -) - -var _ baseapp.StreamingService = &StreamingService{} - -// StreamingService is a concrete implementation of StreamingService that writes state changes out to files -type StreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all the WriteListeners write their data out to - filePrefix string // optional prefix for each of the generated files - writeDir string // directory to write files into - codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel to synchronize closure -} - -// IntermediateWriter is used so that we do not need to update the underlying io.Writer -// inside the StoreKVPairWriteListener everytime we begin writing to a new file -type IntermediateWriter struct { - outChan chan<- []byte -} - -// NewIntermediateWriter create an instance of an intermediateWriter that sends to the provided channel -func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter { - return &IntermediateWriter{ - outChan: outChan, - } -} - -// Write satisfies io.Writer -func (iw *IntermediateWriter) Write(b []byte) (int, error) { - iw.outChan <- b - return len(b), nil -} - -// NewStreamingService creates a new StreamingService for the provided writeDir, (optional) filePrefix, and storeKeys -func NewStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec) (*StreamingService, error) { - listenChan := make(chan []byte) - iw := NewIntermediateWriter(listenChan) - listener := types.NewStoreKVPairWriteListener(iw, c) - listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys)) - // in this case, we are using the same listener for each Store - for _, key := range storeKeys { - listeners[key] = append(listeners[key], listener) - } - // check that the writeDir exists and is writeable so that we can catch the error here at initialization if it is not - // we don't open a dstFile until we receive our first ABCI message - 
if err := isDirWriteable(writeDir); err != nil { - return nil, err - } - return &StreamingService{ - listeners: listeners, - srcChan: listenChan, - filePrefix: filePrefix, - writeDir: writeDir, - codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), - }, nil -} - -// Listeners satisfies the baseapp.StreamingService interface -// It returns the StreamingService's underlying WriteListeners -// Use for registering the underlying WriteListeners with the BaseApp -func (fss *StreamingService) Listeners() map[types.StoreKey][]types.WriteListener { - return fss.listeners -} - -// ListenBeginBlock satisfies the baseapp.ABCIListener interface -// It writes the received BeginBlock request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { - // generate the new file - dstFile, err := fss.openBeginBlockFile(req) - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss *StreamingService) openBeginBlockFile(req abci.RequestBeginBlock) (*os.File, error) { - fss.currentBlockNumber = req.GetHeader().Height - fss.currentTxIndex = 0 - fileName := fmt.Sprintf("block-%d-begin", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) -} - -// ListenDeliverTx satisfies the baseapp.ABCIListener interface -// It writes the received DeliverTx request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenDeliverTx(ctx sdk.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error { - // generate the new file - dstFile, err := fss.openDeliverTxFile() - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss 
*StreamingService) openDeliverTxFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-tx-%d", fss.currentBlockNumber, fss.currentTxIndex) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - fss.currentTxIndex++ - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) -} - -// ListenEndBlock satisfies the baseapp.ABCIListener interface -// It writes the received EndBlock request and response and the resulting state changes -// out to a file as described in the above the naming schema -func (fss *StreamingService) ListenEndBlock(ctx sdk.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error { - // generate the new file - dstFile, err := fss.openEndBlockFile() - if err != nil { - return err - } - // write req to file - lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - return err - } - // write all state changes cached for this stage to file - fss.stateCacheLock.Lock() - for _, stateChange := range fss.stateCache { - if _, err = dstFile.Write(stateChange); err != nil { - fss.stateCache = nil - fss.stateCacheLock.Unlock() - return err - } - } - // reset cache - fss.stateCache = nil - fss.stateCacheLock.Unlock() - // write res to file - lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) - if err != nil { - return err - } - if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - return err - } - // close file - return dstFile.Close() -} - -func (fss *StreamingService) openEndBlockFile() (*os.File, error) { - fileName := fmt.Sprintf("block-%d-end", fss.currentBlockNumber) - if fss.filePrefix != "" { - fileName = fmt.Sprintf("%s-%s", fss.filePrefix, fileName) - } - return os.OpenFile(filepath.Join(fss.writeDir, fileName), os.O_CREATE|os.O_WRONLY, 0600) -} - -// Stream satisfies the baseapp.StreamingService interface -// It spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs -// and caches them in the order they were received -// returns an error if it is called twice -func (fss *StreamingService) Stream(wg *sync.WaitGroup) error { - if fss.quitChan != nil { - return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again") - } - fss.quitChan = make(chan struct{}) - wg.Add(1) - go func() { - defer wg.Done() - for { - select { - case <-fss.quitChan: - fss.quitChan = nil - return - case by := <-fss.srcChan: - fss.stateCacheLock.Lock() - fss.stateCache = append(fss.stateCache, by) - fss.stateCacheLock.Unlock() - } - } - }() - return nil -} - -// Close satisfies the io.Closer interface, which satisfies the baseapp.StreamingService interface -func (fss *StreamingService) Close() error { - close(fss.quitChan) - return nil -} - -// isDirWriteable checks if dir is writable by writing and removing a file -// to dir. It returns nil if dir is writable. 
-func isDirWriteable(dir string) error { - f := path.Join(dir, ".touch") - if err := ioutil.WriteFile(f, []byte(""), 0600); err != nil { - return err - } - return os.Remove(f) -} diff --git a/store/streaming/file/service_test.go b/store/streaming/file/service_test.go deleted file mode 100644 index 1276b163642d..000000000000 --- a/store/streaming/file/service_test.go +++ /dev/null @@ -1,401 +0,0 @@ -package file - -import ( - "encoding/binary" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/cosmos/cosmos-sdk/codec" - codecTypes "github.com/cosmos/cosmos-sdk/codec/types" - "github.com/cosmos/cosmos-sdk/store/types" - sdk "github.com/cosmos/cosmos-sdk/types" - - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - types1 "github.com/tendermint/tendermint/proto/tendermint/types" -) - -var ( - interfaceRegistry = codecTypes.NewInterfaceRegistry() - testMarshaller = codec.NewProtoCodec(interfaceRegistry) - testStreamingService *StreamingService - testListener1, testListener2 types.WriteListener - emptyContext = sdk.Context{} - - // test abci message types - mockHash = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} - testBeginBlockReq = abci.RequestBeginBlock{ - Header: types1.Header{ - Height: 1, - }, - ByzantineValidators: []abci.Evidence{}, - Hash: mockHash, - LastCommitInfo: abci.LastCommitInfo{ - Round: 1, - Votes: []abci.VoteInfo{}, - }, - } - testBeginBlockRes = abci.ResponseBeginBlock{ - Events: []abci.Event{ - { - Type: "testEventType1", - }, - { - Type: "testEventType2", - }, - }, - } - testEndBlockReq = abci.RequestEndBlock{ - Height: 1, - } - testEndBlockRes = abci.ResponseEndBlock{ - Events: []abci.Event{}, - ConsensusParamUpdates: &types1.ConsensusParams{}, - ValidatorUpdates: []abci.ValidatorUpdate{}, - } - mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} - testDeliverTxReq1 = abci.RequestDeliverTx{ - Tx: mockTxBytes1, - } - mockTxBytes2 = []byte{8, 7, 6, 5, 4, 3, 2} - testDeliverTxReq2 = abci.RequestDeliverTx{ - Tx: mockTxBytes2, - } - mockTxResponseData1 = []byte{1, 3, 5, 7, 9} - testDeliverTxRes1 = abci.ResponseDeliverTx{ - Events: []abci.Event{}, - Code: 1, - Codespace: "mockCodeSpace", - Data: mockTxResponseData1, - GasUsed: 2, - GasWanted: 3, - Info: "mockInfo", - Log: "mockLog", - } - mockTxResponseData2 = []byte{1, 3, 5, 7, 9} - testDeliverTxRes2 = abci.ResponseDeliverTx{ - Events: []abci.Event{}, - Code: 1, - Codespace: "mockCodeSpace", - Data: mockTxResponseData2, - GasUsed: 2, - GasWanted: 3, - Info: "mockInfo", - Log: "mockLog", - } - - // mock store keys - mockStoreKey1 = sdk.NewKVStoreKey("mockStore1") - mockStoreKey2 = sdk.NewKVStoreKey("mockStore2") - - // file stuff - testPrefix = "testPrefix" - testDir = "./.test" - - // mock state changes - mockKey1 = []byte{1, 2, 3} - mockValue1 = []byte{3, 2, 1} - mockKey2 = []byte{2, 3, 4} - mockValue2 = []byte{4, 3, 2} - mockKey3 = []byte{3, 4, 5} - mockValue3 = []byte{5, 4, 3} -) - -func TestIntermediateWriter(t *testing.T) { - outChan := make(chan []byte, 0) - iw := NewIntermediateWriter(outChan) - require.IsType(t, &IntermediateWriter{}, iw) - testBytes := []byte{1, 2, 3, 4, 5} - var length int - var err error - waitChan := make(chan struct{}, 0) - go func() { - length, err = iw.Write(testBytes) - waitChan <- struct{}{} - }() - receivedBytes := <-outChan - <-waitChan - require.Equal(t, len(testBytes), length) - require.Equal(t, testBytes, receivedBytes) - require.Nil(t, err) -} - -func TestFileStreamingService(t *testing.T) { - if 
os.Getenv("CI") != "" { - t.Skip("Skipping TestFileStreamingService in CI environment") - } - err := os.Mkdir(testDir, 0700) - require.Nil(t, err) - defer os.RemoveAll(testDir) - - testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} - testStreamingService, err = NewStreamingService(testDir, testPrefix, testKeys, testMarshaller) - require.Nil(t, err) - require.IsType(t, &StreamingService{}, testStreamingService) - require.Equal(t, testPrefix, testStreamingService.filePrefix) - require.Equal(t, testDir, testStreamingService.writeDir) - require.Equal(t, testMarshaller, testStreamingService.codec) - testListener1 = testStreamingService.listeners[mockStoreKey1][0] - testListener2 = testStreamingService.listeners[mockStoreKey2][0] - wg := new(sync.WaitGroup) - testStreamingService.Stream(wg) - testListenBeginBlock(t) - testListenDeliverTx1(t) - testListenDeliverTx2(t) - testListenEndBlock(t) - testStreamingService.Close() - wg.Wait() -} - -func testListenBeginBlock(t *testing.T) { - expectedBeginBlockReqBytes, err := testMarshaller.Marshal(&testBeginBlockReq) - require.Nil(t, err) - expectedBeginBlockResBytes, err := testMarshaller.Marshal(&testBeginBlockRes) - require.Nil(t, err) - - // write state changes - testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false) - - // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey1.Name(), - Key: mockKey1, - Value: mockValue1, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey2.Name(), - Key: mockKey2, - Value: mockValue2, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey1.Name(), - Key: mockKey3, - Value: mockValue3, - Delete: false, - }) - require.Nil(t, err) - - // send the ABCI messages - err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes) - require.Nil(t, err) - - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-begin", testPrefix, testBeginBlockReq.GetHeader().Height) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedBeginBlockReqBytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedBeginBlockResBytes, segments[4]) -} - -func testListenDeliverTx1(t *testing.T) { - expectedDeliverTxReq1Bytes, err := testMarshaller.Marshal(&testDeliverTxReq1) - require.Nil(t, err) - expectedDeliverTxRes1Bytes, err := testMarshaller.Marshal(&testDeliverTxRes1) - require.Nil(t, err) - - // write state changes - testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) - - // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey1.Name(), - Key: mockKey1, - Value: mockValue1, - Delete: false, - }) - require.Nil(t, err) - 
expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey2.Name(), - Key: mockKey2, - Value: mockValue2, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey2.Name(), - Key: mockKey3, - Value: mockValue3, - Delete: false, - }) - require.Nil(t, err) - - // send the ABCI messages - err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1) - require.Nil(t, err) - - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 0) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedDeliverTxReq1Bytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedDeliverTxRes1Bytes, segments[4]) -} - -func testListenDeliverTx2(t *testing.T) { - expectedDeliverTxReq2Bytes, err := testMarshaller.Marshal(&testDeliverTxReq2) - require.Nil(t, err) - expectedDeliverTxRes2Bytes, err := testMarshaller.Marshal(&testDeliverTxRes2) - require.Nil(t, err) - - // write state changes - testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) - testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) - - // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey2.Name(), - Key: mockKey1, - Value: mockValue1, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey1.Name(), - Key: mockKey2, - Value: mockValue2, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey2.Name(), - Key: mockKey3, - Value: mockValue3, - Delete: false, - }) - require.Nil(t, err) - - // send the ABCI messages - err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2) - require.Nil(t, err) - - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-tx-%d", testPrefix, testBeginBlockReq.GetHeader().Height, 1) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedDeliverTxReq2Bytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedDeliverTxRes2Bytes, segments[4]) -} - -func testListenEndBlock(t *testing.T) { - expectedEndBlockReqBytes, err := testMarshaller.Marshal(&testEndBlockReq) - require.Nil(t, err) - expectedEndBlockResBytes, err := testMarshaller.Marshal(&testEndBlockRes) - require.Nil(t, err) - - // write state changes - testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) - testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) - 
testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) - - // expected KV pairs - expectedKVPair1, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey1.Name(), - Key: mockKey1, - Value: mockValue1, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair2, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey1.Name(), - Key: mockKey2, - Value: mockValue2, - Delete: false, - }) - require.Nil(t, err) - expectedKVPair3, err := testMarshaller.Marshal(&types.StoreKVPair{ - StoreKey: mockStoreKey2.Name(), - Key: mockKey3, - Value: mockValue3, - Delete: false, - }) - require.Nil(t, err) - - // send the ABCI messages - err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes) - require.Nil(t, err) - - // load the file, checking that it was created with the expected name - fileName := fmt.Sprintf("%s-block-%d-end", testPrefix, testEndBlockReq.Height) - fileBytes, err := readInFile(fileName) - require.Nil(t, err) - - // segment the file into the separate gRPC messages and check the correctness of each - segments, err := segmentBytes(fileBytes) - require.Nil(t, err) - require.Equal(t, 5, len(segments)) - require.Equal(t, expectedEndBlockReqBytes, segments[0]) - require.Equal(t, expectedKVPair1, segments[1]) - require.Equal(t, expectedKVPair2, segments[2]) - require.Equal(t, expectedKVPair3, segments[3]) - require.Equal(t, expectedEndBlockResBytes, segments[4]) -} - -func readInFile(name string) ([]byte, error) { - path := filepath.Join(testDir, name) - return ioutil.ReadFile(path) -} - -// Returns all of the protobuf messages contained in the byte array as an array of byte arrays -// The messages have their length prefix removed -func segmentBytes(bz []byte) ([][]byte, error) { - var err error - segments := make([][]byte, 0) - for len(bz) > 0 { - var segment []byte - segment, bz, err = getHeadSegment(bz) - if err != nil { - return nil, err - } - segments = append(segments, segment) - } - return segments, nil -} - -// Returns the bytes for the leading protobuf object in the byte array (removing the length prefix) and returns the remainder of the byte array -func getHeadSegment(bz []byte) ([]byte, []byte, error) { - size, prefixSize := binary.Uvarint(bz) - if prefixSize < 0 { - return nil, nil, fmt.Errorf("invalid number of bytes read from length-prefixed encoding: %d", prefixSize) - } - if size > uint64(len(bz)-prefixSize) { - return nil, nil, fmt.Errorf("not enough bytes to read; want: %v, got: %v", size, len(bz)-prefixSize) - } - return bz[prefixSize:(uint64(prefixSize) + size)], bz[uint64(prefixSize)+size:], nil -} From 72f213b081f1ada2f0c4a377fb457ca2b8358536 Mon Sep 17 00:00:00 2001 From: i-norden Date: Mon, 29 Nov 2021 12:21:26 -0600 Subject: [PATCH 09/43] fixes --- simapp/app.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/simapp/app.go b/simapp/app.go index 7f1490c0b60d..c29f7ce0cdce 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -237,7 +237,7 @@ func NewSimApp( pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) if cast.ToBool(appOpts.Get(pluginsOnKey)) { // set the global wait limit for state streaming plugin message receipt acknowledgement - globalWaitLimitKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.GLOBAL_ACK_WAIT_LIMIT_TOML_KEY) + globalWaitLimitKey := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, plugin.GLOBAL_ACK_WAIT_LIMIT_TOML_KEY) globalWaitLimit := 
cast.ToDuration(appOpts.Get(globalWaitLimitKey))
 		if globalWaitLimit > 0 {
 			bApp.SetGlobalWaitLimit(globalWaitLimit)
@@ -246,23 +246,23 @@ func NewSimApp(
 		// this loads the preloaded and any plugins found in `plugins.dir`
 		pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
 		if err != nil {
-			// handle error
+			tmos.Exit(err.Error())
 		}
 
 		// initialize the loaded plugins
 		if err := pluginLoader.Initialize(); err != nil {
-			// handle error
+			tmos.Exit(err.Error())
 		}
 
 		// register the plugin(s) with the BaseApp
 		if err := pluginLoader.Inject(bApp, appCodec, keys); err != nil {
-			// handle error
+			tmos.Exit(err.Error())
 		}
 
 		// start the plugin services, optionally use wg to synchronize shutdown using io.Closer
 		wg := new(sync.WaitGroup)
 		if err := pluginLoader.Start(wg); err != nil {
-			// handler error
+			tmos.Exit(err.Error())
 		}
 	}
 
From 3879ae0a6e1fd473ca6cae9940bde5937b3b4a04 Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Thu, 3 Feb 2022 16:32:37 -0600
Subject: [PATCH 10/43] did not port from cherry pick

---
 baseapp/abci.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/baseapp/abci.go b/baseapp/abci.go
index 079c150294be..d5db413d6d26 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -8,6 +8,7 @@ import (
 	"sort"
 	"strings"
 	"syscall"
+	"time"
 
 	"github.com/gogo/protobuf/proto"
 	abci "github.com/tendermint/tendermint/abci/types"

From 283f2c4329e37b344ae025050b2682fb4ec42a54 Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Thu, 3 Feb 2022 16:53:24 -0600
Subject: [PATCH 11/43] add trace plugin

---
 .gitignore                                   |   2 +
 plugin/loader/preload.go                     |   6 +-
 plugin/loader/preload.sh                     |   3 +
 plugin/loader/preload_list                   |   1 +
 plugin/plugins/trace/README.md               |  77 +++++
 plugin/plugins/trace/service/service.go      | 316 +++++++++++++++++++
 plugin/plugins/trace/service/service_test.go | 190 +++++++++++
 plugin/plugins/trace/trace.go                | 106 +++++++
 8 files changed, 699 insertions(+), 2 deletions(-)
 mode change 100644 => 100755 plugin/loader/preload.sh
 create mode 100644 plugin/plugins/trace/README.md
 create mode 100644 plugin/plugins/trace/service/service.go
 create mode 100644 plugin/plugins/trace/service/service_test.go
 create mode 100644 plugin/plugins/trace/trace.go

diff --git a/.gitignore b/.gitignore
index 146e44fc94f5..695dcd8cfb27 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,6 +45,8 @@ vagrant
 # IDE
 .idea
 *.iml
+*.ipr
+*.iws
 .dir-locals.el
 .vscode
 
diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go
index 3669b2505465..c1db03b66d66 100644
--- a/plugin/loader/preload.go
+++ b/plugin/loader/preload.go
@@ -1,7 +1,8 @@
 package loader
 
 import (
-	file "github.com/cosmos/cosmos-sdk/plugin/plugins/file"
+	pluginfile "github.com/cosmos/cosmos-sdk/plugin/plugins/file"
+	plugintrace "github.com/cosmos/cosmos-sdk/plugin/plugins/trace"
 )
 
 // DO NOT EDIT THIS FILE
@@ -9,5 +10,6 @@ import (
 // To change it, modify the plugin/loader/preload.sh
 
 func init() {
-	Preload(file.Plugins...)
+	Preload(pluginfile.Plugins...)
+	Preload(plugintrace.Plugins...)
 }
diff --git a/plugin/loader/preload.sh b/plugin/loader/preload.sh
old mode 100644
new mode 100755
index 1cf448152050..52db5a345004
--- a/plugin/loader/preload.sh
+++ b/plugin/loader/preload.sh
@@ -15,6 +15,7 @@ to_preload() {
 cat < +- [Running the plugin](#running-the-plugin)
+- [Plugin design](#plugin-design)
+
+
+## Running the plugin
+
+The plugin is set up to run as the `default` plugin. See `./plugin/loader/preload_list` for how to enable and disable default plugins. For a lighter unit test, run `./plugin/plugins/trace/service/service_test.go`.
+
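+For example, with a standard Go toolchain this can be invoked as `go test ./plugin/plugins/trace/service/` (an illustrative command; adjust to your setup).
+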
+1. Copy the content below to `~/app.toml`.
+
+    ```
+    # app.toml
+
+    # This is a TOML config file.
+    # For more information, see https://github.com/toml-lang/toml
+
+    ###############################################################################
+    ###                           Base Configuration                            ###
+    ###############################################################################
+
+    # Impose a global wait limit threshold, in seconds, for ListenSuccess() messages from external streaming services.
+    # It is recommended to set this higher than the average block commit time.
+    globalWaitLimit = 30
+
+
+    ###############################################################################
+    ###                       Plugin system configuration                       ###
+    ###############################################################################
+
+    [plugins]
+
+    # turn the plugin system, as a whole, on or off
+    on = true
+
+    # list of plugins to disable
+    disabled = []
+
+    # The directory to load non-preloaded plugins from; defaults to
+    dir = ""
+
+    # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
+    [plugins.streaming]
+
+
+    ###############################################################################
+    ###                        Trace Plugin configuration                       ###
+    ###############################################################################
+
+    # The specific parameters for the trace streaming service plugin
+    [plugins.streaming.trace]
+
+    # List of store keys we want to expose for this streaming service.
+    keys = []
+
+    # Timeout threshold for which a particular block's messages must be delivered to
+    # the external streaming service before signaling back to the `app.Commit()` call.
+    # This threshold is used to synchronize the work between `app.Commit()` and the
+    # `ABCIListener.ListenSuccess()` call. `ListenSuccess()` will allow up to the
+    # specified threshold for services to complete writing messages. The completion
+    # is signaled when `ListenEndBlock` has finished writing.
+    # This value MUST BE less than the 'globalWaitLimit' threshold so as not to trigger
+    # the 'globalWaitLimit' timeout, which would halt the app.
+    deliveredBlockTimeoutSeconds = 2
+    ```
+
+2. Run `make test-sim-nondeterminism` and wait for the tests to finish.
+
+
+## Plugin design
+The plugin is an example implementation of [ADR-038 State Listening](https://docs.cosmos.network/master/architecture/adr-038-state-listening.html) in which state change events are logged at `DEBUG` level.
diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go
new file mode 100644
index 000000000000..a31073c749d2
--- /dev/null
+++ b/plugin/plugins/trace/service/service.go
@@ -0,0 +1,316 @@
+package service
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/gogo/protobuf/proto"
+	abci "github.com/tendermint/tendermint/abci/types"
+
+	"github.com/cosmos/cosmos-sdk/baseapp"
+	"github.com/cosmos/cosmos-sdk/codec"
+	"github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+var _ baseapp.StreamingService = (*TraceStreamingService)(nil)
+
+// Event is the message key enum for the listen events.
+type Event string
+const (
+	BeginBlockEvent Event = "BEGIN_BLOCK"
+	EndBlockEvent         = "END_BLOCK"
+	DeliverTxEvent        = "DELIVER_TX"
+)
+
+// EventType is the message key enum for the event types.
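+// Paired with an Event, an EventType identifies which part of a stage a logged
+// message represents; for example, per the LogMsgFmt defined below, the BeginBlock
+// request of block 1 is keyed as (illustrative):
+//   block_height:1 => event:BEGIN_BLOCK => event_id:1 => event_type:REQUEST => event_type_id:1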
+type EventType string
+const (
+	RequestEventType     EventType = "REQUEST"
+	ResponseEventType              = "RESPONSE"
+	StateChangeEventType           = "STATE_CHANGE"
+)
+
+// LogMsgFmt message output format
+const (
+	LogMsgFmt = `block_height:%d => event:%s => event_id:%d => event_type:%s => event_type_id:%d`
+)
+
+// TraceStreamingService is a concrete implementation of baseapp.StreamingService that writes state changes out to the log.
+type TraceStreamingService struct {
+	listeners             map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp
+	srcChan               <-chan []byte                            // the channel that all of the WriteListeners write their data out to
+	codec                 codec.BinaryCodec                        // binary marshaller used for re-marshalling the ABCI messages before writing them out
+	stateCache            [][]byte                                 // cache the protobuf binary encoded StoreKVPairs in the order they are received
+	stateCacheLock        *sync.Mutex                              // mutex for the state cache
+	currentBlockNumber    int64                                    // the current block number
+	currentTxIndex        int64                                    // the index of the current tx
+	quitChan              chan struct{}                            // channel used to synchronize closure
+	successChan           chan bool                                // channel used for signaling success or failure of message delivery to the external service
+	deliveredMessages     bool                                     // true if messages were delivered, false otherwise
+	deliveredBlockChan    chan struct{}                            // channel used for signaling the delivery of all messages for the current block
+	deliverBlockWaitLimit time.Duration                            // the time to wait for the service to deliver the current block's messages before timing out
+	printDataToStdout     bool                                     // print the types.StoreKVPair data stored in each event to stdout
+}
+
+// IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener
+// every time we begin writing
+type IntermediateWriter struct {
+	outChan chan<- []byte
+}
+
+// NewIntermediateWriter creates an instance of an IntermediateWriter that sends to the provided channel
+func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter {
+	return &IntermediateWriter{
+		outChan: outChan,
+	}
+}
+
+// Write satisfies io.Writer
+func (iw *IntermediateWriter) Write(b []byte) (int, error) {
+	iw.outChan <- b
+	return len(b), nil
+}
+
+// NewTraceStreamingService creates a new TraceStreamingService for the provided storeKeys,
+// BinaryCodec, deliverBlockWaitLimit and printDataToStdout flag
+func NewTraceStreamingService(
+	storeKeys []types.StoreKey,
+	c codec.BinaryCodec,
+	deliverBlockWaitLimit time.Duration,
+	printDataToStdout bool,
+) (*TraceStreamingService, error) {
+	successChan := make(chan bool, 1)
+	listenChan := make(chan []byte)
+	iw := NewIntermediateWriter(listenChan)
+	listener := types.NewStoreKVPairWriteListener(iw, c)
+	listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys))
+	// in this case, we are using the same listener for each Store
+	for _, key := range storeKeys {
+		listeners[key] = append(listeners[key], listener)
+	}
+
+	tss := &TraceStreamingService{
+		listeners:             listeners,
+		srcChan:               listenChan,
+		codec:                 c,
+		stateCache:            make([][]byte, 0),
+		stateCacheLock:        new(sync.Mutex),
+		successChan:           successChan,
+		deliveredMessages:     true,
+		deliverBlockWaitLimit: deliverBlockWaitLimit,
+		printDataToStdout:     printDataToStdout,
+	}
+
+	return tss, nil
+}
+
+// Listeners returns the TraceStreamingService's underlying WriteListeners, used for registering them with the BaseApp
+func (tss *TraceStreamingService) Listeners() map[types.StoreKey][]types.WriteListener {
+	return tss.listeners
+}
+
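+// The ListenBeginBlock, ListenDeliverTx and ListenEndBlock hooks below each emit
+// their stage's messages in a fixed order: the request (REQUEST), the state changes
+// cached since the previous stage (STATE_CHANGE), and finally the response (RESPONSE).
+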
ListenBeginBlock satisfies the Hook interface
+// It writes out the received BeginBlockEvent request and response and the resulting state changes to the log
+func (tss *TraceStreamingService) ListenBeginBlock(
+	ctx sdk.Context,
+	req abci.RequestBeginBlock,
+	res abci.ResponseBeginBlock,
+) error {
+	tss.setBeginBlock(req)
+	eventId := int64(1)
+	eventTypeId := 1
+
+	// write req
+	key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, BeginBlockEvent, eventId, RequestEventType, eventTypeId)
+	if err := tss.writeEventReqRes(ctx, key, &req); err != nil {
+		return err
+	}
+
+	// write state changes
+	if err := tss.writeStateChange(ctx, string(BeginBlockEvent), eventId); err != nil {
+		return err
+	}
+
+	// write res
+	key = fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, BeginBlockEvent, eventId, ResponseEventType, eventTypeId)
+	if err := tss.writeEventReqRes(ctx, key, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (tss *TraceStreamingService) setBeginBlock(req abci.RequestBeginBlock) {
+	tss.currentBlockNumber = req.GetHeader().Height
+	tss.currentTxIndex = 0
+	tss.deliveredBlockChan = make(chan struct{})
+	tss.deliveredMessages = true // Reset to true. Will be set to false when delivery of any message fails.
+}
+
+// ListenDeliverTx satisfies the Hook interface
+// It writes out the received DeliverTxEvent request and response and the resulting state changes to the log,
+// keyed as described by the LogMsgFmt schema above
+func (tss *TraceStreamingService) ListenDeliverTx(
+	ctx sdk.Context,
+	req abci.RequestDeliverTx,
+	res abci.ResponseDeliverTx,
+) error {
+	eventId := tss.getDeliverTxId()
+	eventTypeId := 1
+
+	// write req
+	key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, DeliverTxEvent, eventId, RequestEventType, eventTypeId)
+	if err := tss.writeEventReqRes(ctx, key, &req); err != nil {
+		return err
+	}
+
+	// write state changes
+	if err := tss.writeStateChange(ctx, DeliverTxEvent, eventId); err != nil {
+		return err
+	}
+
+	// write res
+	key = fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, DeliverTxEvent, eventId, ResponseEventType, eventTypeId)
+	if err := tss.writeEventReqRes(ctx, key, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (tss *TraceStreamingService) getDeliverTxId() int64 {
+	tss.currentTxIndex++
+	return tss.currentTxIndex
+}
+
+// ListenEndBlock satisfies the Hook interface
+// It writes out the received EndBlockEvent request and response and the resulting state changes to the log,
+// keyed as described by the LogMsgFmt schema above
+func (tss *TraceStreamingService) ListenEndBlock(
+	ctx sdk.Context,
+	req abci.RequestEndBlock,
+	res abci.ResponseEndBlock,
+) error {
+	eventId := int64(1)
+	eventTypeId := 1
+
+	// write req
+	key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, EndBlockEvent, eventId, RequestEventType, eventTypeId)
+	if err := tss.writeEventReqRes(ctx, key, &req); err != nil {
+		return err
+	}
+
+	// write state changes
+	if err := tss.writeStateChange(ctx, EndBlockEvent, eventId); err != nil {
+		return err
+	}
+
+	// write res
+	key = fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, EndBlockEvent, eventId, ResponseEventType, eventTypeId)
+	if err := tss.writeEventReqRes(ctx, key, &res); err != nil {
+		return err
+	}
+
+	// Acknowledge that the EndBlockEvent request, response and state changes have been written
+	close(tss.deliveredBlockChan)
+	return nil
+}
+
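+// Note: ListenEndBlock closes deliveredBlockChan rather than sending on it;
+// in Go, closing a channel acts as a broadcast: every receiver blocked on
+// <-tss.deliveredBlockChan in ListenSuccess is released at once, and any
+// later receive returns immediately.
+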
+// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service
+// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt.
+// For fire-and-forget model, set the chan to always be `true`:
+//
+// func (tss *TraceStreamingService) ListenSuccess() <-chan bool {
+//	tss.successChan <- true
+//	return tss.successChan
+// }
+func (tss *TraceStreamingService) ListenSuccess() <-chan bool {
+	// Synchronize the work between app.Commit() and message writes for the current block.
+	// Wait until ListenEndBlock() is finished or timeout is reached before responding back.
+	var deliveredBlock bool
+	maxWait := time.NewTicker(tss.deliverBlockWaitLimit)
+	defer maxWait.Stop()
+	loop:
+	for {
+		select {
+		case <-tss.deliveredBlockChan:
+			deliveredBlock = true
+			break loop
+		case <-maxWait.C:
+			deliveredBlock = false
+			break loop
+		}
+	}
+
+	if deliveredBlock == false {
+		tss.deliveredMessages = false
+	}
+
+	tss.successChan <- tss.deliveredMessages
+	return tss.successChan
+}
+
+// Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received
+// Do we need this and an intermediate writer? We could just write directly to the buffer on calls to Write
+// But then we don't support a Stream interface, which could be needed for other types of streamers
+func (tss *TraceStreamingService) Stream(wg *sync.WaitGroup) error {
+	if tss.quitChan != nil {
+		return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again")
+	}
+	tss.quitChan = make(chan struct{})
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-tss.quitChan:
+				return
+			case by := <-tss.srcChan:
+				tss.stateCacheLock.Lock()
+				tss.stateCache = append(tss.stateCache, by)
+				tss.stateCacheLock.Unlock()
+			}
+		}
+	}()
+	return nil
+}
+
+// Close satisfies the io.Closer interface
+func (tss *TraceStreamingService) Close() error {
+	close(tss.quitChan)
+	return nil
+}
+
+func (tss *TraceStreamingService) writeStateChange(ctx sdk.Context, event string, eventId int64) error {
+	// write all state changes cached for this stage
+	tss.stateCacheLock.Lock()
+	// unlock via defer so the early error returns below cannot leak the lock
+	defer tss.stateCacheLock.Unlock()
+	kodec := tss.codec.(*codec.ProtoCodec)
+	kvPair := new(types.StoreKVPair)
+	for i, stateChange := range tss.stateCache {
+		key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, event, eventId, StateChangeEventType, i+1)
+		if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil {
+			return err
+		}
+		if err := tss.writeEventReqRes(ctx, key, kvPair); err != nil {
+			return err
+		}
+	}
+
+	// reset cache
+	tss.stateCache = nil
+
+	return nil
+}
+
+func (tss *TraceStreamingService) writeEventReqRes(ctx sdk.Context, key string, data proto.Message) error {
+	var m = fmt.Sprintf("%v => data:omitted", key)
+	if tss.printDataToStdout {
+		m = fmt.Sprintf("%v => data:%v", key, data)
+	}
+	ctx.Logger().Debug(m)
+	return nil
+}
\ No newline at end of file
diff --git a/plugin/plugins/trace/service/service_test.go b/plugin/plugins/trace/service/service_test.go
new file mode 100644
index 000000000000..65e6e6c7b83c
--- /dev/null
+++ b/plugin/plugins/trace/service/service_test.go
@@ -0,0 +1,190 @@
+package service
+
+import (
+	"github.com/tendermint/tendermint/libs/log"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
+	"github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+	types1 
"github.com/tendermint/tendermint/proto/tendermint/types" +) + +var ( + interfaceRegistry = codecTypes.NewInterfaceRegistry() + testMarshaller = codec.NewProtoCodec(interfaceRegistry) + testStreamingService *TraceStreamingService + testListener1, testListener2 types.WriteListener + emptyContext = sdk.Context{} + loggerContext sdk.Context + + // test abci message types + mockHash = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} + testBeginBlockReq = abci.RequestBeginBlock{ + Header: types1.Header{ + Height: 1, + }, + ByzantineValidators: []abci.Evidence{}, + Hash: mockHash, + LastCommitInfo: abci.LastCommitInfo{ + Round: 1, + Votes: []abci.VoteInfo{}, + }, + } + testBeginBlockRes = abci.ResponseBeginBlock{ + Events: []abci.Event{ + { + Type: "testEventType1", + }, + { + Type: "testEventType2", + }, + }, + } + testEndBlockReq = abci.RequestEndBlock{ + Height: 1, + } + testEndBlockRes = abci.ResponseEndBlock{ + Events: []abci.Event{}, + ConsensusParamUpdates: &types1.ConsensusParams{}, + ValidatorUpdates: []abci.ValidatorUpdate{}, + } + mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} + testDeliverTxReq1 = abci.RequestDeliverTx{ + Tx: mockTxBytes1, + } + mockTxBytes2 = []byte{8, 7, 6, 5, 4, 3, 2} + testDeliverTxReq2 = abci.RequestDeliverTx{ + Tx: mockTxBytes2, + } + mockTxResponseData1 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes1 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData1, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + mockTxResponseData2 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes2 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData2, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + + // mock store keys + mockStoreKey1 = sdk.NewKVStoreKey("mockStore1") + mockStoreKey2 = sdk.NewKVStoreKey("mockStore2") + + // mock state changes + mockKey1 = []byte{1, 2, 3} + mockValue1 = []byte{3, 2, 1} + mockKey2 = []byte{2, 3, 4} + mockValue2 = []byte{4, 3, 2} + mockKey3 = []byte{3, 4, 5} + mockValue3 = []byte{5, 4, 3} + + // maximum amount of time ListenSuccess() will wait receipt + // that all current block messages were delivered to the service. 
+ deliverBlockWaitLimit = time.Duration(1000) + + // print event data in stdout + printDataToStdout = true +) + +func TestIntermediateWriter(t *testing.T) { + outChan := make(chan []byte, 0) + iw := NewIntermediateWriter(outChan) + require.IsType(t, &IntermediateWriter{}, iw) + testBytes := []byte{1, 2, 3, 4, 5} + var length int + var err error + waitChan := make(chan struct{}, 0) + go func() { + length, err = iw.Write(testBytes) + waitChan <- struct{}{} + }() + receivedBytes := <-outChan + <-waitChan + require.Equal(t, len(testBytes), length) + require.Equal(t, testBytes, receivedBytes) + require.Nil(t, err) +} + +func TestKafkaStreamingService(t *testing.T) { + loggerContext = emptyContext.WithLogger(log.TestingLogger()) + testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} + tss, err := NewTraceStreamingService(testKeys, testMarshaller, deliverBlockWaitLimit, printDataToStdout) + testStreamingService = tss + require.Nil(t, err) + require.IsType(t, &TraceStreamingService{}, testStreamingService) + require.Equal(t, testMarshaller, testStreamingService.codec) + testListener1 = testStreamingService.listeners[mockStoreKey1][0] + testListener2 = testStreamingService.listeners[mockStoreKey2][0] + wg := new(sync.WaitGroup) + testStreamingService.Stream(wg) + testListenBeginBlock(t) + testListenDeliverTx1(t) + testListenDeliverTx2(t) + testListenEndBlock(t) + testStreamingService.Close() + wg.Wait() +} + +func testListenBeginBlock(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenBeginBlock(loggerContext, testBeginBlockReq, testBeginBlockRes) + require.Nil(t, err) +} + +func testListenDeliverTx1(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenDeliverTx(loggerContext, testDeliverTxReq1, testDeliverTxRes1) + require.Nil(t, err) +} + +func testListenDeliverTx2(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenDeliverTx(loggerContext, testDeliverTxReq2, testDeliverTxRes2) + require.Nil(t, err) +} + +func testListenEndBlock(t *testing.T) { + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // send the ABCI messages + err := testStreamingService.ListenEndBlock(loggerContext, testEndBlockReq, testEndBlockRes) + require.Nil(t, err) +} diff --git a/plugin/plugins/trace/trace.go b/plugin/plugins/trace/trace.go new file mode 100644 index 000000000000..5dc06def4fe8 --- /dev/null +++ b/plugin/plugins/trace/trace.go @@ -0,0 +1,106 @@ +package file + +import ( + "fmt" + "sync" + + "github.com/spf13/cast" + + "github.com/cosmos/cosmos-sdk/baseapp" + "github.com/cosmos/cosmos-sdk/codec" + "github.com/cosmos/cosmos-sdk/plugin" + 
"github.com/cosmos/cosmos-sdk/plugin/plugins/trace/service" + serverTypes "github.com/cosmos/cosmos-sdk/server/types" + "github.com/cosmos/cosmos-sdk/store/types" +) + +// Plugin name and version +const ( + // PLUGIN_NAME is the name for this streaming service plugin + PLUGIN_NAME = "trace" + + // PLUGIN_VERSION is the version for this streaming service plugin + PLUGIN_VERSION = "0.0.1" +) + +// TOML configuration parameter keys +const ( + // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service + KEYS_PARAM = "keys" + + // DELIVER_BLOCK_WAIT_LIMIT_PARAM is the timeout setting used in the implementation of ABCIListener.ListenSuccess() + DELIVER_BLOCK_WAIT_LIMIT_PARAM = "deliver_block_wait_limit" + + PRINT_DATA_TO_STDOUT_PARAM = "print_data_to_stdout" +) + +// Plugins is the exported symbol for loading this plugin +var Plugins = []plugin.Plugin{ + &streamingServicePlugin{}, +} + +type streamingServicePlugin struct { + tss *service.TraceStreamingService + opts serverTypes.AppOptions +} + +var _ plugin.StateStreamingPlugin = (*streamingServicePlugin)(nil) + +// Name satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Name() string { + return PLUGIN_NAME +} + +// Version satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Version() string { + return PLUGIN_VERSION +} + +// Init satisfies the plugin.Plugin interface +func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error { + ssp.opts = env + return nil +} + +// Register satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error { + // load all the params required for this plugin from the provided AppOptions + deliverBlockWaitLimit := cast.ToDuration(ssp.opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME, DELIVER_BLOCK_WAIT_LIMIT_PARAM))) + printDataToStdout := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME, PRINT_DATA_TO_STDOUT_PARAM))) + // get the store keys allowed to be exposed for this streaming service + exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME, KEYS_PARAM))) + var exposeStoreKeys []types.StoreKey + + if len(exposeKeyStrings) > 0 { + exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrings)) + for _, keyStr := range exposeKeyStrings { + if storeKey, ok := keys[keyStr]; ok { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + } else { // if none are specified, we expose all the keys + exposeStoreKeys = make([]types.StoreKey, 0, len(keys)) + for _, storeKey := range keys { + exposeStoreKeys = append(exposeStoreKeys, storeKey) + } + } + + var err error + ssp.tss, err = service.NewTraceStreamingService(exposeStoreKeys, marshaller, deliverBlockWaitLimit, printDataToStdout) + if err != nil { + return err + } + // register the streaming service with the BaseApp + bApp.SetStreamingService(ssp.tss) + return nil +} + +// Start satisfies the plugin.StateStreamingPlugin interface +func (ssp *streamingServicePlugin) Start(wg *sync.WaitGroup) error { + return ssp.tss.Stream(wg) +} + +// Close satisfies io.Closer +func (ssp *streamingServicePlugin) Close() error { + return ssp.tss.Close() +} From 6d1944c18a6db58b8a90511db71346b1ed4a4226 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: 
Thu, 3 Feb 2022 17:22:36 -0600
Subject: [PATCH 12/43] update readme

---
 plugin/plugins/trace/README.md | 84 ++++++++++++++++------------------
 1 file changed, 39 insertions(+), 45 deletions(-)

diff --git a/plugin/plugins/trace/README.md b/plugin/plugins/trace/README.md
index 8dd0f59bbebc..754c0db04f39 100644
--- a/plugin/plugins/trace/README.md
+++ b/plugin/plugins/trace/README.md
@@ -15,59 +15,53 @@ The plugin is setup to run as the `default` plugin. See `./plugin/loader/preload
 1. Copy the content below to `~/app.toml`.
 
-   ```
-   # app.toml
+   ```
+   # app.toml
 
-   # This is a TOML config file.
-   # For more information, see https://github.com/toml-lang/toml
-
-   ###############################################################################
-   ###                           Base Configuration                            ###
-   ###############################################################################
-
-   # Impose a global wait limit threshold for ListenSuccess() messages of external streaming services. (seconds)
-   # It is recommended to set this higher than the average block commit time.
-   globalWaitLimit = 30
-
-
-   ###############################################################################
-   ###                      Plugin system configuration                        ###
-   ###############################################################################
-
-   [plugins]
-
-   # turn the plugin system, as a whole, on or off
-   on = true
+   ...
 
-   # list of plugins to disable
-   disabled = []
+   ###############################################################################
+   ###                      Plugin system configuration                        ###
+   ###############################################################################
 
-   # The directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins
-   dir = ""
+   [plugins]
 
-   # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
-   [plugins.streaming]
+   # turn the plugin system, as a whole, on or off
+   on = true
 
+   # list of plugins to disable
+   disabled = []
 
-   ###############################################################################
-   ###                        Trace Plugin configuration                       ###
-   ###############################################################################
+   # The directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins
+   dir = ""
 
-   # The specific parameters for the trace streaming service plugin
-   [plugins.streaming.trace]
+   # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
+   [plugins.streaming]
 
-   # List of store keys we want to expose for this streaming service.
-   keys = []
-
-   # Timeout threshold for which a particular block's messages must be delivered to
-   # external streaming service before signaling back to the `app.Commit()` call.
-   # This threshold is used to synchronize the work between `app.Commit()` and the
-   # `ABCIListener.ListenSuccess()` call. `ListenSuccess()` will allow up to the
-   # specified threshold for services to complete writing messages. The completion
-   # is signaled when `ListenEndBlock` has finished writing.
-   # This value MUST BE less than the 'globalWaitLimit' threshold so as not to trigger
-   # the 'globalWaitLimit' timeout which will halt the app.
-   deliveredBlockTimeoutSeconds = 2
+   ###############################################################################
+   ###                        Trace Plugin configuration                       ###
+   ###############################################################################
+
+   # The specific parameters for the trace streaming service plugin
+   [plugins.streaming.trace]
+
+   # List of store keys we want to expose for this streaming service.
+   keys = []
+
+   # Timeout threshold for which a particular block's messages must be delivered to
+   # external streaming service before signaling back to the `ack` channel.
+   # If the `ack` is set to `false` this setting will be ignored.
+   # Note: This setting MUST be less than `plugins.global_ack_wait_limit`.
+   # Otherwise, the application will halt without committing blocks.
+   # In milliseconds.
+   deliver_block_wait_limit = 2000
+
+   # In addition to block event info, print the data to stdout as well.
+   print_data_to_stdout = false
+
+   # whether to operate in fire-and-forget or success/failure acknowledgement mode
+   # false == fire-and-forget; true == sends a message receipt success/fail signal
+   ack = "false"
    ```
 
 2. Run `make test-sim-nondeterminism` and wait for the tests to finish.
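The quoted form `ack = "false"` works because the plugin reads the flag through `spf13/cast` rather than requiring a native TOML boolean. A minimal, self-contained sketch of that coercion (the `main` wrapper is illustrative; only the `github.com/spf13/cast` dependency already used by the plugins is assumed):

```go
package main

import (
	"fmt"

	"github.com/spf13/cast"
)

func main() {
	// cast.ToBool accepts bools, numbers, and strings alike, so the quoted
	// TOML form (ack = "false") and a bare boolean (ack = false) decode to
	// the same Go value when the plugin calls cast.ToBool on the AppOptions value.
	fmt.Println(cast.ToBool("false")) // false
	fmt.Println(cast.ToBool("true"))  // true
	fmt.Println(cast.ToBool(1))       // true
}
```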
From 98240f5e6f776feac131ed3342da5369b4b3c0f6 Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Thu, 3 Feb 2022 20:44:17 -0600
Subject: [PATCH 13/43] updates trace plugin

---
 baseapp/abci.go                         |  1 +
 plugin/example_config.toml              | 70 ++++++++++++++++++++-----
 plugin/plugins/trace/README.md          | 10 +---
 plugin/plugins/trace/service/service.go | 58 ++++++++------------
 plugin/plugins/trace/trace.go           | 22 +++++---
 5 files changed, 97 insertions(+), 64 deletions(-)

diff --git a/baseapp/abci.go b/baseapp/abci.go
index d5db413d6d26..571d63084f14 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -343,6 +343,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) {
 	// but the BaseApp also imposes a global wait limit
 	if app.globalWaitLimit > 0 {
 		maxWait := time.NewTicker(app.globalWaitLimit)
+		defer maxWait.Stop()
 		for _, lis := range app.abciListeners {
 			select {
 			case success := <-lis.ListenSuccess():
diff --git a/plugin/example_config.toml b/plugin/example_config.toml
index 2ae13b9fd2ee..9fa53604f386 100644
--- a/plugin/example_config.toml
+++ b/plugin/example_config.toml
@@ -1,13 +1,59 @@
+###############################################################################
+###                       Plugin system configuration                       ###
+###############################################################################
+
 [plugins]
-    on = false # turn the plugin system, as a whole, on or off
-    disabled = ["list", "of", "plugin", "names", "to", "disable"]
-    dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins"
-    [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
-        # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
-        # in milliseconds
-        global_ack_wait_limit = 500
-        [plugins.streaming.file] # the specific parameters for the file streaming service plugin
-            keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"]
-            write_dir = "path to the write directory"
-            prefix = "optional prefix to prepend to the generated file names"
-            ack = "false" # false == fire-and-forget; true == sends a message receipt success/fail signal
+
+# turn the plugin system, as a whole, on or off
+on = true
+
+# List of plugin names from plugin/plugins/* to disable
+disabled = []
+
+# The directory to load non-preloaded plugins from; defaults to $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins
+dir = ""
+
+# a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
+[plugins.streaming]
+
+# maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
+# in milliseconds
+global_ack_wait_limit = 5000
+
+###############################################################################
+###                         File plugin configuration                       ###
+###############################################################################
+
+# the specific parameters for the file streaming service plugin
+[plugins.streaming.file]
+
+# List of store keys to expose to this streaming service.
+# Leaving this blank will include all store keys.
+keys = []
+
+# Path to the write directory
+write_dir = ""
+
+# Optional prefix to prepend to the generated file names
+prefix = ""
+
+# whether to operate in fire-and-forget or success/failure acknowledgement mode
+# false == fire-and-forget; true == sends a message receipt success/fail signal
+ack = "false"
+
+###############################################################################
+###                        Trace Plugin configuration                       ###
+###############################################################################
+
+# The specific parameters for the trace streaming service plugin
+[plugins.streaming.trace]
+
+# List of store keys we want to expose for this streaming service.
+keys = []
+
+# In addition to block event info, print the data to stdout as well.
+print_data_to_stdout = false
+
+# whether to operate in fire-and-forget or success/failure acknowledgement mode
+# false == fire-and-forget; true == sends a message receipt success/fail signal
+ack = "false"
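For reference, once this configuration is in place the trace plugin's `DEBUG` output follows the `LogMsgFmt` key layout. An illustrative trace for one block containing a single transaction (heights and state-change counts will vary; `data` appears as `omitted` because `print_data_to_stdout = false`):

```
block_height:2 => event:BEGIN_BLOCK => event_id:1 => event_type:REQUEST => event_type_id:1 => data:omitted
block_height:2 => event:BEGIN_BLOCK => event_id:1 => event_type:STATE_CHANGE => event_type_id:1 => data:omitted
block_height:2 => event:BEGIN_BLOCK => event_id:1 => event_type:RESPONSE => event_type_id:1 => data:omitted
block_height:2 => event:DELIVER_TX => event_id:1 => event_type:REQUEST => event_type_id:1 => data:omitted
block_height:2 => event:DELIVER_TX => event_id:1 => event_type:RESPONSE => event_type_id:1 => data:omitted
block_height:2 => event:END_BLOCK => event_id:1 => event_type:REQUEST => event_type_id:1 => data:omitted
block_height:2 => event:END_BLOCK => event_id:1 => event_type:RESPONSE => event_type_id:1 => data:omitted
```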
diff --git a/plugin/plugins/trace/README.md b/plugin/plugins/trace/README.md
index 754c0db04f39..09e51fadbe9b 100644
--- a/plugin/plugins/trace/README.md
+++ b/plugin/plugins/trace/README.md
@@ -48,21 +48,13 @@ The plugin is setup to run as the `default` plugin. See `./plugin/loader/preload
    # List of store keys we want to expose for this streaming service.
    keys = []
 
-   # Timeout threshold for which a particular block's messages must be delivered to
-   # external streaming service before signaling back to the `ack` channel.
-   # If the `ack` is set to `false` this setting will be ignored.
-   # Note: This setting MUST be less than `plugins.global_ack_wait_limit`.
-   # Otherwise, the application will halt without committing blocks.
-   # In milliseconds.
-   deliver_block_wait_limit = 2000
-
    # In addition to block event info, print the data to stdout as well.
    print_data_to_stdout = false
 
    # whether to operate in fire-and-forget or success/failure acknowledgement mode
    # false == fire-and-forget; true == sends a message receipt success/fail signal
    ack = "false"
-   ```
+   ```
 
 2. Run `make test-sim-nondeterminism` and wait for the tests to finish.
 
diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go
index a31073c749d2..073a38a8a45f 100644
--- a/plugin/plugins/trace/service/service.go
+++ b/plugin/plugins/trace/service/service.go
@@ -5,10 +5,8 @@ import (
 	"fmt"
 	"github.com/cosmos/cosmos-sdk/baseapp"
 	"github.com/gogo/protobuf/proto"
-	"sync"
-	"time"
-
 	abci "github.com/tendermint/tendermint/abci/types"
+	"sync"
 
 	"github.com/cosmos/cosmos-sdk/codec"
 	"github.com/cosmos/cosmos-sdk/store/types"
@@ -49,10 +47,10 @@ type TraceStreamingService struct {
 	currentTxIndex     int64         // the index of the current tx
 	quitChan           chan struct{} // channel used for synchronize closure
 	successChan        chan bool     // channel used for signaling success or failure of message delivery to external service
-	deliveredMessages  bool          // True if messages were delivered, false otherwise.
-	deliveredBlockChan chan struct{} // channel used for signaling the delivery of all messages for the current block.
- deliverBlockWaitLimit time.Duration // the time to wait for service to deliver current block messages before timing out. printDataToStdout bool // Print types.StoreKVPair data stored in each event to stdout. + ack bool // true == fire-and-forget; false == sends success/failure signal + ackStatus bool // success/failure status to be sent to ackChan + ackChan chan bool // channel used to send a success/failure signal } // IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener @@ -79,8 +77,8 @@ func (iw *IntermediateWriter) Write(b []byte) (int, error) { func NewTraceStreamingService( storeKeys []types.StoreKey, c codec.BinaryCodec, - deliverBlockWaitLimit time.Duration, printDataToStdout bool, + ack bool, ) (*TraceStreamingService, error) { successChan := make(chan bool, 1) listenChan := make(chan []byte) @@ -99,9 +97,9 @@ func NewTraceStreamingService( stateCache: make([][]byte, 0), stateCacheLock: new(sync.Mutex), successChan: successChan, - deliveredMessages: true, - deliverBlockWaitLimit: deliverBlockWaitLimit, printDataToStdout: printDataToStdout, + ack: ack, + ackChan: make(chan bool), } return tss, nil @@ -145,9 +143,9 @@ func (tss *TraceStreamingService) ListenBeginBlock( func (tss *TraceStreamingService) setBeginBlock(req abci.RequestBeginBlock) { tss.currentBlockNumber = req.GetHeader().Height + // reset on new block tss.currentTxIndex = 0 - tss.deliveredBlockChan = make(chan struct{}) - tss.deliveredMessages = true // Reset to true. Will be set to false when delivery of any message fails. + tss.ackStatus = true } // ListenDeliverTx satisfies the Hook interface @@ -214,8 +212,6 @@ func (tss *TraceStreamingService) ListenEndBlock( return err } - // Acknowledge that the EndBlockEvent request, response and state changes have been written - close(tss.deliveredBlockChan) return nil } @@ -228,29 +224,19 @@ func (tss *TraceStreamingService) ListenEndBlock( // return tss.successChan // } func (tss *TraceStreamingService) ListenSuccess() <-chan bool { - // Synchronize the work between app.Commit() and message writes for the current block. - // Wait until ListenEndBlock() is finished or timeout is reached before responding back. 
-	var deliveredBlock bool
-	maxWait := time.NewTicker(tss.deliverBlockWaitLimit)
-	defer maxWait.Stop()
-	loop:
-	for {
-		select {
-		case <-tss.deliveredBlockChan:
-			deliveredBlock = true
-			break loop
-		case <-maxWait.C:
-			deliveredBlock = false
-			break loop
-		}
-	}
-
-	if deliveredBlock == false {
-		tss.deliveredMessages = false
+	// if we are operating in fire-and-forget mode, immediately send a "success" signal
+	if !tss.ack {
+		go func() {
+			tss.ackChan <- true
+		}()
+	} else {
+		go func() {
+			// the TraceStreamingService operates synchronously, but this signals whether an error
+			// occurred during its processing cycle
+			tss.ackChan <- tss.ackStatus
+		}()
 	}
-
-	tss.successChan <- tss.deliveredMessages
-	return tss.successChan
+	return tss.ackChan
 }
 
 // Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received
@@ -292,9 +278,11 @@ func (tss *TraceStreamingService) writeStateChange(ctx sdk.Context, event string
 	for i, stateChange := range tss.stateCache {
 		key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, event, eventId, StateChangeEventType, i+1)
 		if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil {
+			tss.ackStatus = false
 			return err
 		}
 		if err := tss.writeEventReqRes(ctx, key, kvPair); err != nil {
+			tss.ackStatus = false
 			return err
 		}
 	}
diff --git a/plugin/plugins/trace/trace.go b/plugin/plugins/trace/trace.go
index 5dc06def4fe8..2c55ca1aa20e 100644
--- a/plugin/plugins/trace/trace.go
+++ b/plugin/plugins/trace/trace.go
@@ -28,10 +28,10 @@ const (
 	// KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service
 	KEYS_PARAM = "keys"
 
-	// DELIVER_BLOCK_WAIT_LIMIT_PARAM is the timeout setting used in the implementation of ABCIListener.ListenSuccess()
-	DELIVER_BLOCK_WAIT_LIMIT_PARAM = "deliver_block_wait_limit"
-
 	PRINT_DATA_TO_STDOUT_PARAM = "print_data_to_stdout"
+
+	// ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode
+	ACK_MODE = "ack"
 )
 
 // Plugins is the exported symbol for loading this plugin
@@ -63,12 +63,18 @@ func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error {
 }
 
 // Register satisfies the plugin.StateStreamingPlugin interface
-func (ssp *streamingServicePlugin) Register(bApp *baseapp.BaseApp, marshaller codec.BinaryCodec, keys map[string]*types.KVStoreKey) error {
+func (ssp *streamingServicePlugin) Register(
+	bApp *baseapp.BaseApp,
+	marshaller codec.BinaryCodec,
+	keys map[string]*types.KVStoreKey,
+) error {
 	// load all the params required for this plugin from the provided AppOptions
-	deliverBlockWaitLimit := cast.ToDuration(ssp.opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME, DELIVER_BLOCK_WAIT_LIMIT_PARAM)))
-	printDataToStdout := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME, PRINT_DATA_TO_STDOUT_PARAM)))
+	tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME)
+	printDataToStdout := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRINT_DATA_TO_STDOUT_PARAM)))
+	ack := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE)))
+
 	// get the store keys allowed to be exposed for this streaming service
-	exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME, KEYS_PARAM)))
+	exposeKeyStrings := 
cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) var exposeStoreKeys []types.StoreKey if len(exposeKeyStrings) > 0 { @@ -86,7 +92,7 @@ func (ssp *streamingServicePlugin) Register(bApp *baseapp.BaseApp, marshaller co } var err error - ssp.tss, err = service.NewTraceStreamingService(exposeStoreKeys, marshaller, deliverBlockWaitLimit, printDataToStdout) + ssp.tss, err = service.NewTraceStreamingService(exposeStoreKeys, marshaller, printDataToStdout, ack) if err != nil { return err } From 2421b33843af5d9909e895042f7cf8cea7adea7b Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 3 Feb 2022 20:50:12 -0600 Subject: [PATCH 14/43] fix trace test --- plugin/plugins/trace/service/service_test.go | 18 ++++++++---------- 1 file changed, 8 insertions(+), 10 deletions(-) diff --git a/plugin/plugins/trace/service/service_test.go b/plugin/plugins/trace/service/service_test.go index 65e6e6c7b83c..86f6e31881e2 100644 --- a/plugin/plugins/trace/service/service_test.go +++ b/plugin/plugins/trace/service/service_test.go @@ -1,15 +1,13 @@ package service import ( - "github.com/tendermint/tendermint/libs/log" - "sync" - "testing" - "time" - "github.com/cosmos/cosmos-sdk/codec" codecTypes "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/log" + "sync" + "testing" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" @@ -98,12 +96,12 @@ var ( mockKey3 = []byte{3, 4, 5} mockValue3 = []byte{5, 4, 3} - // maximum amount of time ListenSuccess() will wait receipt - // that all current block messages were delivered to the service. - deliverBlockWaitLimit = time.Duration(1000) - // print event data in stdout printDataToStdout = true + + // false == fire-and-forget; true == sends a message receipt success/fail signal + ack = false + ) func TestIntermediateWriter(t *testing.T) { @@ -128,7 +126,7 @@ func TestIntermediateWriter(t *testing.T) { func TestKafkaStreamingService(t *testing.T) { loggerContext = emptyContext.WithLogger(log.TestingLogger()) testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} - tss, err := NewTraceStreamingService(testKeys, testMarshaller, deliverBlockWaitLimit, printDataToStdout) + tss, err := NewTraceStreamingService(testKeys, testMarshaller, printDataToStdout, ack) testStreamingService = tss require.Nil(t, err) require.IsType(t, &TraceStreamingService{}, testStreamingService) From 07aaf169703b72032f810845dccd2dba6781cc5d Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 3 Feb 2022 20:54:10 -0600 Subject: [PATCH 15/43] remove unused params --- plugin/plugins/trace/service/service.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go index 073a38a8a45f..0d9d008d7746 100644 --- a/plugin/plugins/trace/service/service.go +++ b/plugin/plugins/trace/service/service.go @@ -46,7 +46,6 @@ type TraceStreamingService struct { currentBlockNumber int64 // the current block number currentTxIndex int64 // the index of the current tx quitChan chan struct{} // channel used for synchronize closure - successChan chan bool // channel used for signaling success or failure of message delivery to external service printDataToStdout bool // Print types.StoreKVPair data stored in each event to stdout. 
ack bool // true == fire-and-forget; false == sends success/failure signal ackStatus bool // success/failure status to be sent to ackChan @@ -80,7 +79,6 @@ func NewTraceStreamingService( printDataToStdout bool, ack bool, ) (*TraceStreamingService, error) { - successChan := make(chan bool, 1) listenChan := make(chan []byte) iw := NewIntermediateWriter(listenChan) listener := types.NewStoreKVPairWriteListener(iw, c) @@ -96,7 +94,6 @@ func NewTraceStreamingService( codec: c, stateCache: make([][]byte, 0), stateCacheLock: new(sync.Mutex), - successChan: successChan, printDataToStdout: printDataToStdout, ack: ack, ackChan: make(chan bool), @@ -218,11 +215,6 @@ func (tss *TraceStreamingService) ListenEndBlock( // ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service // after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt. // For fire-and-forget model, set the chan to always be `true`: -// -// func (tss *TraceStreamingService) ListenSuccess() <-chan bool { -// tss.successChan <- true -// return tss.successChan -// } func (tss *TraceStreamingService) ListenSuccess() <-chan bool { // if we are operating in fire-and-forget mode, immediately send a "success" signal if !tss.ack { From 70b3a08bf2aec429c30fc911df5c28a7d18f8890 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 3 Feb 2022 21:49:43 -0600 Subject: [PATCH 16/43] add kafka plugin --- go.mod | 1 + go.sum | 45 +- plugin/example_config.toml | 44 ++ plugin/loader/preload.go | 2 + plugin/loader/preload_list | 1 + plugin/plugins/kafka/README.md | 317 +++++++++++ plugin/plugins/kafka/docker-compose.yml | 178 ++++++ plugin/plugins/kafka/kafka.go | 154 +++++ plugin/plugins/kafka/service/service.go | 371 ++++++++++++ plugin/plugins/kafka/service/service_test.go | 564 +++++++++++++++++++ 10 files changed, 1634 insertions(+), 43 deletions(-) create mode 100644 plugin/plugins/kafka/README.md create mode 100644 plugin/plugins/kafka/docker-compose.yml create mode 100644 plugin/plugins/kafka/kafka.go create mode 100644 plugin/plugins/kafka/service/service.go create mode 100644 plugin/plugins/kafka/service/service_test.go diff --git a/go.mod b/go.mod index 62e25f28ea8d..5471f3adeac0 100644 --- a/go.mod +++ b/go.mod @@ -10,6 +10,7 @@ require ( github.com/cockroachdb/apd/v2 v2.0.2 github.com/coinbase/rosetta-sdk-go v0.7.8 github.com/confio/ics23/go v0.7.0 + github.com/confluentinc/confluent-kafka-go v1.8.2 github.com/cosmos/btcutil v1.0.4 github.com/cosmos/cosmos-proto v1.0.0-alpha7 github.com/cosmos/cosmos-sdk/api v0.1.0 diff --git a/go.sum b/go.sum index 0cafef3401a8..109483b982fd 100644 --- a/go.sum +++ b/go.sum @@ -221,8 +221,9 @@ github.com/coinbase/rosetta-sdk-go v0.7.8 h1:op/O3/ZngTfcrZnp3p/TziRfKGdo7AUZGUm github.com/coinbase/rosetta-sdk-go v0.7.8/go.mod h1:vB6hZ0ZnZmln3ThA4x0mZvOAPDJ5BhfgnjH76hxoy10= github.com/confio/ics23/go v0.7.0 h1:00d2kukk7sPoHWL4zZBZwzxnpA2pec1NPdwbSokJ5w8= github.com/confio/ics23/go v0.7.0/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4uraToOaKg= +github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E= +github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= -github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= github.com/containerd/continuity 
v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -289,9 +290,7 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -318,12 +317,9 @@ github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= -github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= -github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -334,7 +330,6 @@ github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4 github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= @@ -348,9 +343,7 @@ 
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -369,11 +362,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -393,11 +383,8 @@ github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2 github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -483,16 +470,12 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp 
v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= @@ -607,7 +590,6 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -668,7 +650,6 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= @@ -684,7 +665,6 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= 
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -720,7 +700,6 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kulti/thelper v0.5.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= @@ -730,7 +709,6 @@ github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554 h1:nDOkLO7klmnEw1 github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554/go.mod h1:9+Pb2/tg1PvEgW7aFx4bFhDE4bvbI03zuJ8kb7nJ9Jc= github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= @@ -810,11 +788,9 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= @@ -826,7 +802,6 @@ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2 github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f 
h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= @@ -846,13 +821,11 @@ github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1t github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= @@ -869,7 +842,6 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= -github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= @@ -879,14 +851,10 @@ github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= -github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.0.2 
h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= @@ -900,7 +868,6 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= -github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -909,7 +876,6 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -1042,7 +1008,6 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= @@ -1054,7 +1019,6 @@ github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4l github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod 
h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1093,7 +1057,6 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1142,10 +1105,8 @@ github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+l github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1847,7 +1808,6 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1861,7 +1821,6 @@ gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= diff --git 
a/plugin/example_config.toml b/plugin/example_config.toml
index 9fa53604f386..101dc2ce0949 100644
--- a/plugin/example_config.toml
+++ b/plugin/example_config.toml
@@ -57,3 +57,47 @@ print_data_to_stdout = false
 # whether to operate in fire-and-forget or success/failure acknowledgement mode
 # false == fire-and-forget; true == sends a message receipt success/fail signal
 ack = "false"
+
+###############################################################################
+###                       Kafka Plugin configuration                        ###
+###############################################################################
+
+# The specific parameters for the Kafka streaming service plugin
+[plugins.streaming.kafka]
+
+# List of store keys we want to expose for this streaming service.
+keys = []
+
+# Optional topic prefix for the topic(s) where data will be stored
+topic_prefix = "block"
+
+# Flush and wait for outstanding messages and requests to complete delivery. (milliseconds)
+flush_timeout_ms = 1500
+
+# whether to operate in fire-and-forget or success/failure acknowledgement mode
+# false == fire-and-forget; true == sends a message receipt success/fail signal
+ack = "false"
+
+# Producer configuration properties.
+# The plugin uses confluent-kafka-go, which is a lightweight wrapper around librdkafka.
+# For a full list of producer configuration properties
+# see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+[plugins.streaming.kafka.producer]
+
+# Initial list of brokers as a comma-separated list of broker host or host:port[, host:port[,...]]
+bootstrap_servers = "localhost:9092"
+
+# Client identifier
+client_id = "my-app-id"
+
+# This field indicates the number of acknowledgements the leader
+# broker must receive from ISR brokers before responding to the request
+acks = "all"
+
+# When set to true, the producer will ensure that messages
+# are successfully produced exactly once and in the original produce order.
+# The following configuration properties are adjusted automatically (if not modified by the user)
+# when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5),
+# retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo.
+# Producer instantiation will fail if user-supplied configuration is incompatible.
+enable_idempotence = true
diff --git a/plugin/loader/preload.go b/plugin/loader/preload.go
index c1db03b66d66..c801954b61c7 100644
--- a/plugin/loader/preload.go
+++ b/plugin/loader/preload.go
@@ -2,6 +2,7 @@ package loader
 
 import (
 	pluginfile "github.com/cosmos/cosmos-sdk/plugin/plugins/file"
+	pluginkafka "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka"
 	plugintrace "github.com/cosmos/cosmos-sdk/plugin/plugins/trace"
 )
 
@@ -11,5 +12,6 @@ import (
 func init() {
 	Preload(pluginfile.Plugins...)
+	Preload(pluginkafka.Plugins...)
 	Preload(plugintrace.Plugins...)
 }
diff --git a/plugin/loader/preload_list b/plugin/loader/preload_list
index 50083006a4e7..79741e5f097a 100644
--- a/plugin/loader/preload_list
+++ b/plugin/loader/preload_list
@@ -4,4 +4,5 @@
 # name go-path number of the sub-plugin or *
 file github.com/cosmos/cosmos-sdk/plugin/plugins/file *
+kafka github.com/cosmos/cosmos-sdk/plugin/plugins/kafka *
 trace github.com/cosmos/cosmos-sdk/plugin/plugins/trace *
diff --git a/plugin/plugins/kafka/README.md b/plugin/plugins/kafka/README.md
new file mode 100644
index 000000000000..e82ee49a6961
--- /dev/null
+++ b/plugin/plugins/kafka/README.md
@@ -0,0 +1,317 @@
+# Kafka Indexing Plugin
+
+This plugin demonstrates how to listen to state changes of individual `KVStores` as described in [ADR-038 State Listening](https://github.com/vulcanize/cosmos-sdk/blob/adr038_plugin_proposal/docs/architecture/adr-038-state-listening.md) and index the data in Kafka.
+
+  - [Dependencies](#dependencies)
+  - [Running the plugin](#running-the-plugin)
+  - [Plugin design](#plugin-design)
+    - [Channel-Based producer](#channel-based-producer)
+    - [Delivery Report handler](#delivery-report-handler)
+    - [Message serde](#message-serde)
+  - [Confluent Platform](#confluent-platform)
+    - [Docker](#docker)
+    - [Schema Registry](#schema-registry)
+    - [KSQL examples](#ksql-examples)
+
+## Dependencies
+
+To test and run the examples, you must have `docker` and `docker-compose` installed on your system. Use the links below for installation instructions.
+
+* [Docker](https://www.docker.com/get-started)
+* [Docker Compose](https://docs.docker.com/compose/install/)
+
+## Running the plugin
+
+The plugin has been hooked up to run with the `test-sim-nondeterminism` task. For a lighter test you can run `./plugin/plugins/kafka/service/service_test.go`. The [KSQL examples](#ksql-examples) below will work with both test scenarios.
+
+1. Spin up the docker images of the Confluent Platform following the instructions in the [Confluent Platform](#confluent-platform) section. Once the docker images are up and running you'll be able to access the platform on [localhost:9021](http://localhost:9021).
+2. Copy the content below to `~/app.toml`.
+
+    ```
+    # app.toml
+
+    ...
+
+    ###############################################################################
+    ###                      Plugin system configuration                        ###
+    ###############################################################################
+
+    [plugins]
+
+    # turn the plugin system, as a whole, on or off
+    on = true
+
+    # List of plugin names to disable
+    disabled = ["file", "trace"]
+
+    # The directory to load non-preloaded plugins from; defaults to ./plugin/plugins
+    dir = ""
+
+    # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
+    [plugins.streaming]
+
+    # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
+    # in milliseconds
+    global_ack_wait_limit = 500
+
+    ###############################################################################
+    ###                       Kafka Plugin configuration                        ###
+    ###############################################################################
+
+    # The specific parameters for the Kafka streaming service plugin
+    [plugins.streaming.kafka]
+
+    # List of store keys we want to expose for this streaming service.
+    keys = []
+
+    # Optional topic prefix for the topic(s) where data will be stored
+    topic_prefix = "block"
+
+    # Flush and wait for outstanding messages and requests to complete delivery.
(milliseconds)
+    flush_timeout_ms = 1500
+
+    # whether to operate in fire-and-forget or success/failure acknowledgement mode
+    # false == fire-and-forget; true == sends a message receipt success/fail signal
+    ack = "false"
+
+    # Producer configuration properties.
+    # The plugin uses confluent-kafka-go, which is a lightweight wrapper around librdkafka.
+    # For a full list of producer configuration properties
+    # see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+    [plugins.streaming.kafka.producer]
+
+    # Initial list of brokers as a comma-separated list of broker host or host:port[, host:port[,...]]
+    bootstrap_servers = "localhost:9092"
+
+    # Client identifier
+    client_id = "my-app-id"
+
+    # This field indicates the number of acknowledgements the leader
+    # broker must receive from ISR brokers before responding to the request
+    acks = "all"
+
+    # When set to true, the producer will ensure that messages
+    # are successfully produced exactly once and in the original produce order.
+    # The following configuration properties are adjusted automatically (if not modified by the user)
+    # when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5),
+    # retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo.
+    # Producer instantiation will fail if user-supplied configuration is incompatible.
+    enable_idempotence = true
+    ```
+3. Run `make test-sim-nondeterminism` and wait for the tests to finish.
+4. Go to the [KSQL examples](#ksql-examples) section and go through the examples.
+
+## Plugin design
+
+The plugin was built using [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go), a lightweight wrapper around [librdkafka](https://github.com/edenhill/librdkafka).
+
+This particular implementation uses:
+* `Channel-Based producer` - Faster than the function-based `producer.Produce()`.
+* `Delivery reports handler` - Notifies the application of success or failure to deliver messages to Kafka.
+
+### Channel-Based producer
+
+The plugin uses `producer.ProduceChannel()` to deliver messages to Kafka.
+
+Pros:
+* Proper channel backpressure if `librdkafka`'s internal queue is full. The queue size can be controlled by setting `queue.buffering.max.messages`.
+* Message order is preserved (guaranteed by the producer API).
+* Faster than the function-based async producer.
+
+Cons:
+* Double queueing: messages are first queued in the channel and then inside librdkafka. The size of the channel is configurable via `queue.buffering.max.messages`.
+
+### Delivery Report handler
+
+Producing is an asynchronous operation. Therefore, the client notifies the application (per message) of success or failure through delivery reports. Delivery reports are emitted by default on the `producer.Events()` channel as `*kafka.Message`. One needs to check `msg.TopicPartition.Error` for `nil` to find out whether the message was successfully delivered or not.
+
+Pros:
+* Can be used to propagate successes or failures to the application.
+* Can be used to track the messages produced.
+* Can be turned off by setting `"go.delivery.reports": false` for a fire-and-forget scenario.
+
+Cons:
+* Must be handled in a goroutine, which makes it difficult to propagate errors to `WriteListener.OnWrite()`.
+
+### Message serde
+
+As of this writing there is no `golang` support for `serialization/deserialization` of proto messages for the Confluent Schema Registry. Because of this limitation, the marshalled JSON data is saved instead.
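+
+To make the serde concrete, here is a minimal sketch (illustrative only, not part of the plugin) of how a state change is marshalled to JSON and paired with the composite message key described under Message `key` below; all values are placeholders:
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
+	"github.com/cosmos/cosmos-sdk/store/types"
+)
+
+// exampleSerde mirrors what writeStateChange/writeAsJsonToKafka in service.go do for a single KV pair
+func exampleSerde() ([]byte, string, error) {
+	cdc := codec.NewProtoCodec(codecTypes.NewInterfaceRegistry())
+	pair := &types.StoreKVPair{StoreKey: "mockStore1", Key: []byte{1, 2, 3}, Value: []byte{3, 2, 1}}
+	// the message value is the proto message marshalled to JSON (no Schema Registry involved)
+	value, err := cdc.MarshalJSON(pair)
+	if err != nil {
+		return nil, "", err
+	}
+	// the message key is the composite JSON key: block 1, first state change of BEGIN_BLOCK
+	key := fmt.Sprintf(`{"block_height":%d,"event":"%s","event_id":%d,"event_type":"%s","event_type_id":%d}`,
+		1, "BEGIN_BLOCK", 1, "STATE_CHANGE", 1)
+	return value, key, nil
+}
+
+func main() {
+	value, key, err := exampleSerde()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%s => %s\n", key, value)
+}
+```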
+
+Note: you can still register the proto messages with the schema registry by generating the `Java` code and using the supported [Java](https://github.com/confluentinc/schema-registry/blob/master/protobuf-serializer/src/main/java/io/confluent/kafka/serializers/protobuf/KafkaProtobufSerializer.java) client library for the schema registry, which registers the proto messages automatically.
+
+#### Message `key`
+To be able to identify and track messages in Kafka, the `key` is made up of the following properties:
+* `block_height` - BIGINT
+* `event` - BEGIN_BLOCK, END_BLOCK, DELIVER_TX
+* `event_id` - BIGINT (increments for DELIVER_TX)
+* `event_type` - REQUEST, RESPONSE, STATE_CHANGE
+* `event_type_id` - BIGINT (increments for STATE_CHANGE)
+
+Example:
+```
+// first tx
+{
+  "block_height": 1,
+  "event": "DELIVER_TX",
+  "event_id": 1,
+  "event_type": "REQUEST",
+  "event_type_id": 1
+}
+
+// second tx
+{
+  "block_height": 1,
+  "event": "DELIVER_TX",
+  "event_id": 2, // incrementing
+  "event_type": "REQUEST",
+  "event_type_id": 1
+}
+```
+
+#### Message `value`
+
+The `value` is the marshalled JSON form of the request, response, or state change for the begin block, end block, and deliver tx events.
+
+Example:
+```
+{
+  "BLOCK_HEIGHT": 1,
+  "EVENT": "BEGIN_BLOCK",
+  "EVENT_ID": 1,
+  "EVENT_TYPE": "STATE_CHANGE",
+  "EVENT_TYPE_ID": 1,
+  "STORE_KEY": "mockStore1",
+  "DELETE": false,
+  "KEY": "AQID",
+  "VALUE": "AwIB"
+}
+```
+
+## Confluent Platform
+
+### Docker
+
+Spin up the Confluent Platform from the directory that contains the compose file:
+```
+cd .../cosmos-sdk/plugin/plugins/kafka
+```
+
+```
+docker-compose up -d
+Creating network "kafka_default" with the default driver
+Creating zookeeper ... done
+Creating broker    ... done
+Creating schema-registry ... done
+Creating rest-proxy      ... done
+Creating connect         ... done
+Creating ksqldb-server   ... done
+Creating ksql-datagen    ... done
+Creating ksqldb-cli      ... done
+Creating control-center  ... done
+```
+
+Check the status:
+```
+docker-compose ps
+     Name                    Command               State                       Ports
+---------------------------------------------------------------------------------------------------------
+broker            /etc/confluent/docker/run        Up      0.0.0.0:9092->9092/tcp, 0.0.0.0:9101->9101/tcp
+connect           /etc/confluent/docker/run        Up      0.0.0.0:8083->8083/tcp, 9092/tcp
+control-center    /etc/confluent/docker/run        Up      0.0.0.0:9021->9021/tcp
+ksql-datagen      bash -c echo Waiting for K ...   Up
+ksqldb-cli        /bin/sh                          Up
+ksqldb-server     /etc/confluent/docker/run        Up      0.0.0.0:8088->8088/tcp
+rest-proxy        /etc/confluent/docker/run        Up      0.0.0.0:8082->8082/tcp
+schema-registry   /etc/confluent/docker/run        Up      0.0.0.0:8081->8081/tcp
+zookeeper         /etc/confluent/docker/run        Up      0.0.0.0:2181->2181/tcp, 2888/tcp, 3888/tcp
+```
+
+### Schema Registry
+
+Because `golang` lacks support for registering Protobuf messages with the schema registry, one needs to generate the Java code from the proto messages and use the [KafkaProtobufSerializer.java](https://github.com/confluentinc/schema-registry/blob/master/protobuf-serializer/src/main/java/io/confluent/kafka/serializers/protobuf/KafkaProtobufSerializer.java) to register them automatically. The Java libraries make this process extremely easy. Take a look [here](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html) for an example of how this is achieved.
+
+### KSQL examples
+
+One huge advantage of using Kafka with the Confluent Platform is the KSQL streaming engine.
KSQL allows us to write queries and create streams or tables from one or multiple Kafka topics (through joins) without having to write any code.
+
+Examples:
+
+Create a structured stream from the `block-state-change` topic containing the raw data. This will make it easier to filter out specific events.
+```
+CREATE OR REPLACE STREAM state_change_stream (
+    block_height BIGINT KEY,  /* k1 */
+    event STRING KEY,         /* k2 */
+    event_id BIGINT KEY,      /* k3 */
+    event_type STRING KEY,    /* k4 */
+    event_type_id BIGINT KEY, /* k5 */
+    store_key STRING,
+    `delete` BOOLEAN,
+    key STRING,
+    value STRING /* this may be a STRUCT depending on the store type */
+) WITH (KAFKA_TOPIC='block-state-change', KEY_FORMAT='JSON', VALUE_FORMAT='JSON');
+```
+
+Run the query below to see the messages in this new stream.
+
+```
+SELECT * FROM state_change_stream EMIT CHANGES LIMIT 1;
+```
+
+Result:
+```
+{
+  "BLOCK_HEIGHT": 1,
+  "EVENT": "BEGIN_BLOCK",
+  "EVENT_ID": 1,
+  "EVENT_TYPE": "STATE_CHANGE",
+  "EVENT_TYPE_ID": 1,
+  "STORE_KEY": "mockStore1",
+  "delete": false,
+  "KEY": "AQID",
+  "VALUE": "AwIB"
+}
+```
+
+Let's take it one step further and create a stream that contains only `DELIVER_TX` events.
+
+```
+SET 'processing.guarantee' = 'exactly_once';
+
+CREATE OR REPLACE STREAM deliver_tx_state_change_stream
+  AS SELECT *
+  FROM STATE_CHANGE_STREAM
+  WHERE event = 'DELIVER_TX'
+  EMIT CHANGES;
+```
+
+Let's take a look at what the data looks like.
+
+```
+SELECT * FROM deliver_tx_state_change_stream EMIT CHANGES LIMIT 1;
+```
+
+Result:
+
+```
+{
+  "BLOCK_HEIGHT": 2,
+  "EVENT": "DELIVER_TX",
+  "EVENT_ID": 1,
+  "EVENT_TYPE": "STATE_CHANGE",
+  "EVENT_TYPE_ID": 1,
+  "STORE_KEY": "acc",
+  "delete": false,
+  "KEY": "AQBhNv4khMI7PylvV6i1lSlSCleL",
+  "VALUE": "CiAvY29zbW9zLmF1dGgudjFiZXRhMS5CYXNlQWNjb3VudBJ8Ci1jb3Ntb3MxcXBzbmRsM3lzbnByazBlZmRhdDYzZHY0OTlmcTU0dXR0eWdncGsSRgofL2Nvc21vcy5jcnlwdG8uc2VjcDI1NmsxLlB1YktleRIjCiECcyIkZHE6G+gkK2TJEjko3LjNFgZ4Fmfu90jDkjlbojcYygEgAQ=="
+}
+```
+
+Check out the [docs](https://docs.ksqldb.io/en/latest/) and this [post](https://www.confluent.io/blog/ksqldb-0-15-reads-more-message-keys-supports-more-data-types/) for more complex examples and a deeper understanding of KSQL.
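+
+If you would rather consume the data programmatically than through KSQL, here is a minimal Go consumer sketch (illustrative only, not part of the plugin; the broker address, group id, and the default `block` topic prefix are assumptions):
+
+```
+package main
+
+import (
+	"fmt"
+
+	"github.com/confluentinc/confluent-kafka-go/kafka"
+)
+
+func main() {
+	c, err := kafka.NewConsumer(&kafka.ConfigMap{
+		"bootstrap.servers": "localhost:9092",
+		"group.id":          "readme-example",
+		"auto.offset.reset": "earliest",
+	})
+	if err != nil {
+		panic(err)
+	}
+	defer c.Close()
+
+	if err := c.SubscribeTopics([]string{"block-state-change"}, nil); err != nil {
+		panic(err)
+	}
+
+	// read a handful of state-change messages and print their composite keys and JSON values
+	for i := 0; i < 10; i++ {
+		msg, err := c.ReadMessage(-1) // block until a message arrives
+		if err != nil {
+			fmt.Println("consumer error:", err)
+			continue
+		}
+		fmt.Printf("%s => %s\n", string(msg.Key), string(msg.Value))
+	}
+}
+```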
diff --git a/plugin/plugins/kafka/docker-compose.yml b/plugin/plugins/kafka/docker-compose.yml new file mode 100644 index 000000000000..017fe9a4d621 --- /dev/null +++ b/plugin/plugins/kafka/docker-compose.yml @@ -0,0 +1,178 @@ +--- +version: '2' +services: + zookeeper: + image: confluentinc/cp-zookeeper:7.0.0 + hostname: zookeeper + container_name: zookeeper + ports: + - "2181:2181" + environment: + ZOOKEEPER_CLIENT_PORT: 2181 + ZOOKEEPER_TICK_TIME: 2000 + + broker: + image: confluentinc/cp-server:7.0.0 + hostname: broker + container_name: broker + depends_on: + - zookeeper + ports: + - "9092:9092" + - "9101:9101" + environment: + KAFKA_BROKER_ID: 1 + KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181' + KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT + KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://broker:29092,PLAINTEXT_HOST://localhost:9092 + KAFKA_METRIC_REPORTERS: io.confluent.metrics.reporter.ConfluentMetricsReporter + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 + KAFKA_CONFLUENT_LICENSE_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_CONFLUENT_BALANCER_TOPIC_REPLICATION_FACTOR: 1 + KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 + KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 + KAFKA_JMX_PORT: 9101 + KAFKA_JMX_HOSTNAME: localhost + KAFKA_CONFLUENT_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + CONFLUENT_METRICS_REPORTER_BOOTSTRAP_SERVERS: broker:29092 + CONFLUENT_METRICS_REPORTER_TOPIC_REPLICAS: 1 + CONFLUENT_METRICS_ENABLE: 'true' + CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous' + + schema-registry: + image: confluentinc/cp-schema-registry:7.0.0 + hostname: schema-registry + container_name: schema-registry + depends_on: + - broker + ports: + - "8081:8081" + environment: + SCHEMA_REGISTRY_HOST_NAME: schema-registry + SCHEMA_REGISTRY_KAFKASTORE_BOOTSTRAP_SERVERS: 'broker:29092' + SCHEMA_REGISTRY_LISTENERS: http://0.0.0.0:8081 + + connect: + image: cnfldemos/cp-server-connect-datagen:0.5.0-6.2.0 + hostname: connect + container_name: connect + depends_on: + - broker + - schema-registry + ports: + - "8083:8083" + environment: + CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' + CONNECT_REST_ADVERTISED_HOST_NAME: connect + CONNECT_REST_PORT: 8083 + CONNECT_GROUP_ID: compose-connect-group + CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs + CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_OFFSET_FLUSH_INTERVAL_MS: 10000 + CONNECT_OFFSET_STORAGE_TOPIC: docker-connect-offsets + CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_STATUS_STORAGE_TOPIC: docker-connect-status + CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: 1 + CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter + CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter + CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 + # CLASSPATH required due to CC-2422 + CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.0.0.jar + CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components" + CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR + + control-center: + image: confluentinc/cp-enterprise-control-center:7.0.0 + hostname: control-center + container_name: control-center + depends_on: + - broker + - schema-registry + - 
connect + - ksqldb-server + ports: + - "9021:9021" + environment: + CONTROL_CENTER_BOOTSTRAP_SERVERS: 'broker:29092' + CONTROL_CENTER_CONNECT_CONNECT-DEFAULT_CLUSTER: 'connect:8083' + CONTROL_CENTER_KSQL_KSQLDB1_URL: "http://ksqldb-server:8088" + CONTROL_CENTER_KSQL_KSQLDB1_ADVERTISED_URL: "http://localhost:8088" + CONTROL_CENTER_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + CONTROL_CENTER_REPLICATION_FACTOR: 1 + CONTROL_CENTER_INTERNAL_TOPICS_PARTITIONS: 1 + CONTROL_CENTER_MONITORING_INTERCEPTOR_TOPIC_PARTITIONS: 1 + CONFLUENT_METRICS_TOPIC_REPLICATION: 1 + PORT: 9021 + + ksqldb-server: + image: confluentinc/cp-ksqldb-server:7.0.0 + hostname: ksqldb-server + container_name: ksqldb-server + depends_on: + - broker + - connect + ports: + - "8088:8088" + environment: + KSQL_CONFIG_DIR: "/etc/ksql" + KSQL_BOOTSTRAP_SERVERS: "broker:29092" + KSQL_HOST_NAME: ksqldb-server + KSQL_LISTENERS: "http://0.0.0.0:8088" + KSQL_CACHE_MAX_BYTES_BUFFERING: 0 + KSQL_KSQL_SCHEMA_REGISTRY_URL: "http://schema-registry:8081" + KSQL_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" + KSQL_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" + KSQL_KSQL_CONNECT_URL: "http://connect:8083" + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_REPLICATION_FACTOR: 1 + KSQL_KSQL_LOGGING_PROCESSING_TOPIC_AUTO_CREATE: 'true' + KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' + + ksqldb-cli: + image: confluentinc/cp-ksqldb-cli:7.0.0 + container_name: ksqldb-cli + depends_on: + - broker + - connect + - ksqldb-server + entrypoint: /bin/sh + tty: true + + ksql-datagen: + image: confluentinc/ksqldb-examples:7.0.0 + hostname: ksql-datagen + container_name: ksql-datagen + depends_on: + - ksqldb-server + - broker + - schema-registry + - connect + command: "bash -c 'echo Waiting for Kafka to be ready... && \ + cub kafka-ready -b broker:29092 1 40 && \ + echo Waiting for Confluent Schema Registry to be ready... && \ + cub sr-ready schema-registry 8081 40 && \ + echo Waiting a few seconds for topic creation to finish... 
&& \
+                       sleep 11 && \
+                       tail -f /dev/null'"
+    environment:
+      KSQL_CONFIG_DIR: "/etc/ksql"
+      STREAMS_BOOTSTRAP_SERVERS: broker:29092
+      STREAMS_SCHEMA_REGISTRY_HOST: schema-registry
+      STREAMS_SCHEMA_REGISTRY_PORT: 8081
+
+  rest-proxy:
+    image: confluentinc/cp-kafka-rest:7.0.0
+    depends_on:
+      - broker
+      - schema-registry
+    ports:
+      - 8082:8082
+    hostname: rest-proxy
+    container_name: rest-proxy
+    environment:
+      KAFKA_REST_HOST_NAME: rest-proxy
+      KAFKA_REST_BOOTSTRAP_SERVERS: 'broker:29092'
+      KAFKA_REST_LISTENERS: "http://0.0.0.0:8082"
+      KAFKA_REST_SCHEMA_REGISTRY_URL: 'http://schema-registry:8081'
\ No newline at end of file
diff --git a/plugin/plugins/kafka/kafka.go b/plugin/plugins/kafka/kafka.go
new file mode 100644
index 000000000000..714b61178869
--- /dev/null
+++ b/plugin/plugins/kafka/kafka.go
@@ -0,0 +1,154 @@
+package kafka
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/confluentinc/confluent-kafka-go/kafka"
+	"github.com/spf13/cast"
+
+	"github.com/cosmos/cosmos-sdk/baseapp"
+	"github.com/cosmos/cosmos-sdk/codec"
+	"github.com/cosmos/cosmos-sdk/plugin"
+	"github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service"
+	serverTypes "github.com/cosmos/cosmos-sdk/server/types"
+	"github.com/cosmos/cosmos-sdk/store/types"
+)
+
+// Plugin name and version
+const (
+	// PLUGIN_NAME is the name for this streaming service plugin
+	PLUGIN_NAME = "kafka"
+
+	// PLUGIN_VERSION is the version for this streaming service plugin
+	PLUGIN_VERSION = "0.0.1"
+)
+
+// TOML configuration parameter keys
+const (
+	// TOPIC_PREFIX_PARAM is the optional prefix for the Kafka topic(s) that events will be streamed to
+	TOPIC_PREFIX_PARAM = "topic_prefix"
+
+	// FLUSH_TIMEOUT_MS_PARAM is the timeout setting passed to producer.Flush(timeoutMs)
+	FLUSH_TIMEOUT_MS_PARAM = "flush_timeout_ms"
+
+	// PRODUCER_CONFIG_PARAM is a map of the Kafka Producer configuration properties
+	PRODUCER_CONFIG_PARAM = "producer"
+
+	// KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service
+	KEYS_PARAM = "keys"
+
+	// ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode
+	ACK_MODE = "ack"
+)
+
+// Plugins is the exported symbol for loading this plugin
+var Plugins = []plugin.Plugin{
+	&streamingServicePlugin{},
+}
+
+type streamingServicePlugin struct {
+	kss  *service.KafkaStreamingService
+	opts serverTypes.AppOptions
+}
+
+var _ plugin.StateStreamingPlugin = (*streamingServicePlugin)(nil)
+
+// Name satisfies the plugin.Plugin interface
+func (ssp *streamingServicePlugin) Name() string {
+	return PLUGIN_NAME
+}
+
+// Version satisfies the plugin.Plugin interface
+func (ssp *streamingServicePlugin) Version() string {
+	return PLUGIN_VERSION
+}
+
+// Init satisfies the plugin.Plugin interface
+func (ssp *streamingServicePlugin) Init(env serverTypes.AppOptions) error {
+	ssp.opts = env
+	return nil
+}
+
+// Register satisfies the plugin.StateStreamingPlugin interface
+func (ssp *streamingServicePlugin) Register(
+	bApp *baseapp.BaseApp,
+	marshaller codec.BinaryCodec,
+	keys map[string]*types.KVStoreKey,
+) error {
+	// load all the params required for this plugin from the provided AppOptions
+	tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME)
+	topicPrefix := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, TOPIC_PREFIX_PARAM)))
+	flushTimeoutMs := cast.ToInt(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, FLUSH_TIMEOUT_MS_PARAM)))
+	ack := 
cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE)))
+	producerConfig := cast.ToStringMap(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM)))
+	// get the store keys allowed to be exposed for this streaming service
+	exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM)))
+	var exposeStoreKeys []types.StoreKey
+
+	if len(exposeKeyStrings) > 0 {
+		exposeStoreKeys = make([]types.StoreKey, 0, len(exposeKeyStrings))
+		for _, keyStr := range exposeKeyStrings {
+			if storeKey, ok := keys[keyStr]; ok {
+				exposeStoreKeys = append(exposeStoreKeys, storeKey)
+			}
+		}
+	} else { // if none are specified, we expose all the keys
+		exposeStoreKeys = make([]types.StoreKey, 0, len(keys))
+		for _, storeKey := range keys {
+			exposeStoreKeys = append(exposeStoreKeys, storeKey)
+		}
+	}
+
+	// Validate minimum producer config properties
+	producerConfigKey := fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM)
+
+	if len(producerConfig) == 0 {
+		return fmt.Errorf("failed to register plugin: empty properties for %s; "+
+			"client will not be able to connect to Kafka cluster", producerConfigKey)
+	}
+	bootstrapServers := cast.ToString(producerConfig["bootstrap_servers"])
+	if strings.TrimSpace(bootstrapServers) == "" {
+		return fmt.Errorf("failed to register plugin: missing or empty %s.bootstrap_servers; "+
+			"client will not be able to connect to Kafka cluster", producerConfigKey)
+	}
+
+	// load producer config into a kafka.ConfigMap
+	producerConfigMap := kafka.ConfigMap{}
+	for key, element := range producerConfig {
+		key = strings.ReplaceAll(key, "_", ".")
+		if err := producerConfigMap.SetKey(key, element); err != nil {
+			return err
+		}
+	}
+
+	var err error
+	ssp.kss, err = service.NewKafkaStreamingService(
+		bApp.Logger(), producerConfigMap, topicPrefix, flushTimeoutMs, exposeStoreKeys, marshaller, ack)
+	if err != nil {
+		return err
+	}
+	// register the streaming service with the BaseApp
+	bApp.SetStreamingService(ssp.kss)
+	return nil
+}
+
+// Start satisfies the plugin.StateStreamingPlugin interface
+func (ssp *streamingServicePlugin) Start(wg *sync.WaitGroup) error {
+	return ssp.kss.Stream(wg)
+}
+
+// Close satisfies io.Closer
+func (ssp *streamingServicePlugin) Close() error {
+	return ssp.kss.Close()
+}
diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go
new file mode 100644
index 000000000000..0dc7441c0002
--- /dev/null
+++ b/plugin/plugins/kafka/service/service.go
@@ -0,0 +1,371 @@
+package service
+
+import (
+	"errors"
+	"fmt"
+	"sync"
+
+	"github.com/confluentinc/confluent-kafka-go/kafka"
+	"github.com/gogo/protobuf/proto"
+	abci "github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/libs/log"
+
+	"github.com/cosmos/cosmos-sdk/baseapp"
+	"github.com/cosmos/cosmos-sdk/codec"
+	"github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+)
+
+/*
+This service writes all messages to Kafka topics, each with a single partition, so that message order is maintained.
+
+The naming schema and data format for the messages this service writes out to Kafka are as follows:
+
+For each ABCI hook (`BeginBlock`, `DeliverTx`, `EndBlock`), the JSON-marshalled request is produced to the
+corresponding `*-req` topic and the JSON-marshalled response to the corresponding `*-res` topic
+(`begin-block-req`/`begin-block-res`, `deliver-tx-req`/`deliver-tx-res`, `end-block-req`/`end-block-res`).
+The state changes cached between the request and the response are produced to the `state-change` topic.
+When a topicPrefix is configured, it is prepended to every topic name (e.g. `block-state-change`).
+
+Every message is keyed by the composite JSON key
+`{"block_height":N,"event":E,"event_id":I,"event_type":T,"event_type_id":D}`, where N is the block number,
+E is one of BEGIN_BLOCK, DELIVER_TX or END_BLOCK (with I incrementing for each tx in the block),
+and T is one of REQUEST, RESPONSE or STATE_CHANGE (with D incrementing for each state change within the event).
+*/
+
+// Event Kafka message key enum types for listen events.
+type Event string
+const (
+	BeginBlockEvent Event = "BEGIN_BLOCK"
+	EndBlockEvent         = "END_BLOCK"
+	DeliverTxEvent        = "DELIVER_TX"
+)
+
+// EventType Kafka message key enum types for the event types.
+type EventType string
+const (
+	RequestEventType     EventType = "REQUEST"
+	ResponseEventType              = "RESPONSE"
+	StateChangeEventType           = "STATE_CHANGE"
+)
+
+// EventTypeValueTypeTopic Kafka topic name enum types
+type EventTypeValueTypeTopic string
+const (
+	BeginBlockReqTopic EventTypeValueTypeTopic = "begin-block-req"
+	BeginBlockResTopic                         = "begin-block-res"
+	EndBlockReqTopic                           = "end-block-req"
+	EndBlockResTopic                           = "end-block-res"
+	DeliverTxReqTopic                          = "deliver-tx-req"
+	DeliverTxResTopic                          = "deliver-tx-res"
+	StateChangeTopic                           = "state-change"
+)
+
+// MsgKeyFtm is the Kafka message composite key format
+const (
+	MsgKeyFtm = `{"block_height":%d,"event":"%s","event_id":%d,"event_type":"%s","event_type_id":%d}`
+)
+
+var _ baseapp.StreamingService = (*KafkaStreamingService)(nil)
+
+// KafkaStreamingService is a concrete implementation of streaming.Service that writes state changes out to Kafka
+type KafkaStreamingService struct {
+	listeners          map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp
+	srcChan            <-chan []byte                            // the channel that all of the WriteListeners write their data out to
+	topicPrefix        string                                   // optional prefix for the Kafka topic names
+	producer           *kafka.Producer                          // the producer instance that will be used to send messages to Kafka
+	flushTimeoutMs     int                                      // the time to wait for outstanding messages and requests to complete delivery (milliseconds)
+	codec              codec.BinaryCodec                        // binary marshaller used for re-marshalling the ABCI messages before writing them out to Kafka
+	stateCache         [][]byte                                 // cache the protobuf binary encoded StoreKVPairs in the order they are received
+	stateCacheLock     *sync.Mutex                              // mutex for the state cache
+	currentBlockNumber int64                                    // the current block number
+	currentTxIndex     int64                                    // the index of the current tx
+	quitChan           chan struct{}                            // channel used to synchronize closure
+	ack                bool                                     // false == fire-and-forget; true == sends 
success/failure signal
+	ackStatus          bool                                     // success/failure status to be sent to ackChan
+	ackChan            chan bool                                // channel used to send a success/failure signal
+}
+
+// IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener
+// every time we begin writing to a Kafka topic
+type IntermediateWriter struct {
+	outChan chan<- []byte
+}
+
+// NewIntermediateWriter creates an instance of an IntermediateWriter that sends to the provided channel
+func NewIntermediateWriter(outChan chan<- []byte) *IntermediateWriter {
+	return &IntermediateWriter{
+		outChan: outChan,
+	}
+}
+
+// Write satisfies io.Writer
+func (iw *IntermediateWriter) Write(b []byte) (int, error) {
+	iw.outChan <- b
+	return len(b), nil
+}
+
+// NewKafkaStreamingService creates a new KafkaStreamingService
+func NewKafkaStreamingService(
+	logger log.Logger,
+	producerConfig kafka.ConfigMap,
+	topicPrefix string,
+	flushTimeoutMs int,
+	storeKeys []types.StoreKey,
+	c codec.BinaryCodec,
+	ack bool,
+) (*KafkaStreamingService, error) {
+	listenChan := make(chan []byte)
+	iw := NewIntermediateWriter(listenChan)
+	listener := types.NewStoreKVPairWriteListener(iw, c)
+	listeners := make(map[types.StoreKey][]types.WriteListener, len(storeKeys))
+	// in this case, we are using the same listener for each Store
+	for _, key := range storeKeys {
+		listeners[key] = append(listeners[key], listener)
+	}
+	// Initialize the producer and connect to the Kafka cluster
+	p, err := kafka.NewProducer(&producerConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Debug("Created Producer: ", "producer", p)
+
+	kss := &KafkaStreamingService{
+		listeners:      listeners,
+		srcChan:        listenChan,
+		topicPrefix:    topicPrefix,
+		producer:       p,
+		flushTimeoutMs: flushTimeoutMs,
+		codec:          c,
+		stateCache:     make([][]byte, 0),
+		stateCacheLock: new(sync.Mutex),
+		ack:            ack,
+		ackChan:        make(chan bool),
+	}
+
+	// handle delivery reports: a failed delivery marks the ack status for the current block as failed
+	go func() {
+		for e := range p.Events() {
+			switch ev := e.(type) {
+			case *kafka.Message:
+				pTopic := ev.TopicPartition.Topic
+				partition := ev.TopicPartition.Partition
+				offset := ev.TopicPartition.Offset
+				key := string(ev.Key)
+				if err := ev.TopicPartition.Error; err != nil {
+					logger.Debug("Delivery failed: ", "topic", pTopic, "partition", partition, "key", key, "err", err)
+					kss.ackStatus = false
+				} else {
+					logger.Debug("Delivered message:", "topic", pTopic, "partition", partition, "offset", offset, "key", key)
+				}
+			}
+		}
+	}()
+
+	return kss, nil
+}
+
+// Listeners returns the KafkaStreamingService's underlying WriteListeners; use it for registering them with the BaseApp
+func (kss *KafkaStreamingService) Listeners() map[types.StoreKey][]types.WriteListener {
+	return kss.listeners
+}
+
+// ListenBeginBlock satisfies the Hook interface
+// It writes out the received BeginBlockEvent request and response and the resulting state changes to Kafka topics
+// as described in the naming schema above
+func (kss *KafkaStreamingService) ListenBeginBlock(
+	ctx sdk.Context,
+	req abci.RequestBeginBlock,
+	res abci.ResponseBeginBlock,
+) error {
+	kss.setBeginBlock(req)
+	eventId := int64(1)
+	eventTypeId := 1
+
+	// write req
+	key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, BeginBlockEvent, eventId, RequestEventType, eventTypeId)
+	if err := kss.writeAsJsonToKafka(ctx, string(BeginBlockReqTopic), key, &req); err != nil {
+		return err
+	}
+
+	// write state changes
+	if err := kss.writeStateChange(ctx, string(BeginBlockEvent), eventId); err != nil {
+		return err
+	}
+
+	// write res
+	key 
= fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, BeginBlockEvent, eventId, ResponseEventType, eventTypeId)
+	if err := kss.writeAsJsonToKafka(ctx, BeginBlockResTopic, key, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (kss *KafkaStreamingService) setBeginBlock(req abci.RequestBeginBlock) {
+	kss.currentBlockNumber = req.GetHeader().Height
+	kss.currentTxIndex = 0
+	kss.ackStatus = true
+}
+
+// ListenDeliverTx satisfies the Hook interface
+// It writes out the received DeliverTxEvent request and response and the resulting state changes to Kafka topics
+// as described in the naming schema above
+func (kss *KafkaStreamingService) ListenDeliverTx(
+	ctx sdk.Context,
+	req abci.RequestDeliverTx,
+	res abci.ResponseDeliverTx,
+) error {
+	eventId := kss.getDeliverTxId()
+	eventTypeId := 1
+
+	// write req
+	key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, DeliverTxEvent, eventId, RequestEventType, eventTypeId)
+	if err := kss.writeAsJsonToKafka(ctx, DeliverTxReqTopic, key, &req); err != nil {
+		return err
+	}
+
+	// write state changes
+	if err := kss.writeStateChange(ctx, DeliverTxEvent, eventId); err != nil {
+		return err
+	}
+
+	// write res
+	key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, DeliverTxEvent, eventId, ResponseEventType, eventTypeId)
+	if err := kss.writeAsJsonToKafka(ctx, DeliverTxResTopic, key, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (kss *KafkaStreamingService) getDeliverTxId() int64 {
+	kss.currentTxIndex++
+	return kss.currentTxIndex
+}
+
+// ListenEndBlock satisfies the Hook interface
+// It writes out the received EndBlockEvent request and response and the resulting state changes to Kafka topics
+// as described in the naming schema above
+func (kss *KafkaStreamingService) ListenEndBlock(
+	ctx sdk.Context,
+	req abci.RequestEndBlock,
+	res abci.ResponseEndBlock,
+) error {
+	eventId := int64(1)
+	eventTypeId := 1
+
+	// write req
+	key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, EndBlockEvent, eventId, RequestEventType, eventTypeId)
+	if err := kss.writeAsJsonToKafka(ctx, EndBlockReqTopic, key, &req); err != nil {
+		return err
+	}
+
+	// write state changes
+	if err := kss.writeStateChange(ctx, EndBlockEvent, eventId); err != nil {
+		return err
+	}
+
+	// write res
+	key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, EndBlockEvent, eventId, ResponseEventType, eventTypeId)
+	if err := kss.writeAsJsonToKafka(ctx, EndBlockResTopic, key, &res); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service;
+// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt
+func (kss *KafkaStreamingService) ListenSuccess() <-chan bool {
+	// if we are operating in fire-and-forget mode, immediately send a "success" signal
+	if !kss.ack {
+		go func() {
+			kss.ackChan <- true
+		}()
+	} else {
+		go func() {
+			// the KafkaStreamingService operates synchronously, but this signifies whether an error occurred
+			// during its processing cycle
+			kss.ackChan <- kss.ackStatus
+		}()
+	}
+	return kss.ackChan
+}
+
+// Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received
+// Do we need this and an intermediate writer? 
We could just write directly to the buffer on calls to Write
+// But then we don't support a Stream interface, which could be needed for other types of streamers
+func (kss *KafkaStreamingService) Stream(wg *sync.WaitGroup) error {
+	if kss.quitChan != nil {
+		return errors.New("`Stream` has already been called. The stream needs to be closed before it can be started again")
+	}
+	kss.quitChan = make(chan struct{})
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		for {
+			select {
+			case <-kss.quitChan:
+				return
+			case by := <-kss.srcChan:
+				kss.stateCacheLock.Lock()
+				kss.stateCache = append(kss.stateCache, by)
+				kss.stateCacheLock.Unlock()
+			}
+		}
+	}()
+	return nil
+}
+
+// Close satisfies the io.Closer interface
+func (kss *KafkaStreamingService) Close() error {
+	// guard against Close being called before Stream
+	if kss.quitChan != nil {
+		close(kss.quitChan)
+	}
+	kss.producer.Flush(kss.flushTimeoutMs)
+	kss.producer.Close()
+	return nil
+}
+
+func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event string, eventId int64) error {
+	// write all state changes cached for this stage to Kafka
+	kss.stateCacheLock.Lock()
+	// unlock on every return path so that an error does not leave the cache mutex held
+	defer kss.stateCacheLock.Unlock()
+	kodec := kss.codec.(*codec.ProtoCodec)
+	kvPair := new(types.StoreKVPair)
+	for i, stateChange := range kss.stateCache {
+		key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, StateChangeEventType, i+1)
+		if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil {
+			kss.ackStatus = false
+			return err
+		}
+		if err := kss.writeAsJsonToKafka(ctx, StateChangeTopic, key, kvPair); err != nil {
+			kss.ackStatus = false
+			return err
+		}
+	}
+
+	// reset cache
+	kss.stateCache = nil
+
+	return nil
+}
+
+func (kss *KafkaStreamingService) writeAsJsonToKafka(ctx sdk.Context, topic string, key string, data proto.Message) error {
+	kodec := kss.codec.(*codec.ProtoCodec)
+	json, err := kodec.MarshalJSON(data)
+	if err != nil {
+		kss.ackStatus = false
+		return err
+	}
+	if len(kss.topicPrefix) > 0 {
+		topic = fmt.Sprintf("%s-%s", kss.topicPrefix, topic)
+	}
+	kss.producer.ProduceChannel() <- &kafka.Message{
+		TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny},
+		Value:          json,
+		Key:            []byte(key),
+	}
+
+	return nil
+}
\ No newline at end of file
diff --git a/plugin/plugins/kafka/service/service_test.go b/plugin/plugins/kafka/service/service_test.go
new file mode 100644
index 000000000000..012959a0698f
--- /dev/null
+++ b/plugin/plugins/kafka/service/service_test.go
@@ -0,0 +1,564 @@
+package service
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"os/signal"
+	"sync"
+	"syscall"
+	"testing"
+	"time"
+
+	"github.com/confluentinc/confluent-kafka-go/kafka"
+	"github.com/tendermint/tendermint/libs/log"
+
+	"github.com/cosmos/cosmos-sdk/codec"
+	codecTypes "github.com/cosmos/cosmos-sdk/codec/types"
+	"github.com/cosmos/cosmos-sdk/store/types"
+	sdk "github.com/cosmos/cosmos-sdk/types"
+
+	"github.com/stretchr/testify/require"
+	abci "github.com/tendermint/tendermint/abci/types"
+	types1 "github.com/tendermint/tendermint/proto/tendermint/types"
+)
+
+var (
+	interfaceRegistry            = codecTypes.NewInterfaceRegistry()
+	testMarshaller               = codec.NewProtoCodec(interfaceRegistry)
+	testStreamingService         *KafkaStreamingService
+	testListener1, testListener2 types.WriteListener
+	emptyContext                 = sdk.Context{}
+	logger                       log.Logger
+
+	// test abci message types
+	mockHash          = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
+	testBeginBlockReq = abci.RequestBeginBlock{
+		Header: types1.Header{
+			Height: 1,
+		},
+		ByzantineValidators: []abci.Evidence{},
+		Hash:                mockHash,
+		LastCommitInfo: 
abci.LastCommitInfo{ + Round: 1, + Votes: []abci.VoteInfo{}, + }, + } + testBeginBlockRes = abci.ResponseBeginBlock{ + Events: []abci.Event{ + { + Type: "testEventType1", + }, + { + Type: "testEventType2", + }, + }, + } + testEndBlockReq = abci.RequestEndBlock{ + Height: 1, + } + testEndBlockRes = abci.ResponseEndBlock{ + Events: []abci.Event{}, + ConsensusParamUpdates: &types1.ConsensusParams{}, + ValidatorUpdates: []abci.ValidatorUpdate{}, + } + mockTxBytes1 = []byte{9, 8, 7, 6, 5, 4, 3, 2, 1} + testDeliverTxReq1 = abci.RequestDeliverTx{ + Tx: mockTxBytes1, + } + mockTxBytes2 = []byte{8, 7, 6, 5, 4, 3, 2} + testDeliverTxReq2 = abci.RequestDeliverTx{ + Tx: mockTxBytes2, + } + mockTxResponseData1 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes1 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData1, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + mockTxResponseData2 = []byte{1, 3, 5, 7, 9} + testDeliverTxRes2 = abci.ResponseDeliverTx{ + Events: []abci.Event{}, + Code: 1, + Codespace: "mockCodeSpace", + Data: mockTxResponseData2, + GasUsed: 2, + GasWanted: 3, + Info: "mockInfo", + Log: "mockLog", + } + + // mock store keys + mockStoreKey1 = sdk.NewKVStoreKey("mockStore1") + mockStoreKey2 = sdk.NewKVStoreKey("mockStore2") + + // Kafka stuff + bootstrapServers = "localhost:9092" + topicPrefix = "block" + flushTimeoutMs = 15000 + topics = []string{ + string(BeginBlockReqTopic), + BeginBlockResTopic, + DeliverTxReqTopic, + DeliverTxResTopic, + EndBlockReqTopic, + EndBlockResTopic, + StateChangeTopic, + } + + producerConfig = kafka.ConfigMap{ + "bootstrap.servers": bootstrapServers, + "client.id": "testKafkaStreamService", + "security.protocol": "PLAINTEXT", + "enable.idempotence": "true", + // Best practice for Kafka producer to prevent data loss + "acks": "all", + } + + // mock state changes + mockKey1 = []byte{1, 2, 3} + mockValue1 = []byte{3, 2, 1} + mockKey2 = []byte{2, 3, 4} + mockValue2 = []byte{4, 3, 2} + mockKey3 = []byte{3, 4, 5} + mockValue3 = []byte{5, 4, 3} + + // false == fire-and-forget; true == sends a message receipt success/fail signal + ack = false +) + +func TestIntermediateWriter(t *testing.T) { + outChan := make(chan []byte, 0) + iw := NewIntermediateWriter(outChan) + require.IsType(t, &IntermediateWriter{}, iw) + testBytes := []byte{1, 2, 3, 4, 5} + var length int + var err error + waitChan := make(chan struct{}, 0) + go func() { + length, err = iw.Write(testBytes) + waitChan <- struct{}{} + }() + receivedBytes := <-outChan + <-waitChan + require.Equal(t, len(testBytes), length) + require.Equal(t, testBytes, receivedBytes) + require.Nil(t, err) +} + +// change this to write to in-memory io.Writer (e.g. 
bytes.Buffer) +func TestKafkaStreamingService(t *testing.T) { + testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} + logger = log.TestingLogger() + kss, err := NewKafkaStreamingService(logger, producerConfig, topicPrefix, flushTimeoutMs, testKeys, testMarshaller, ack) + testStreamingService = kss + require.Nil(t, err) + require.IsType(t, &KafkaStreamingService{}, testStreamingService) + require.Equal(t, topicPrefix, testStreamingService.topicPrefix) + require.Equal(t, testMarshaller, testStreamingService.codec) + deleteTopics(t, topics, bootstrapServers) + createTopics(t, topics, bootstrapServers) + testListener1 = testStreamingService.listeners[mockStoreKey1][0] + testListener2 = testStreamingService.listeners[mockStoreKey2][0] + wg := new(sync.WaitGroup) + testStreamingService.Stream(wg) + testListenBeginBlock(t) + testListenDeliverTx1(t) + testListenDeliverTx2(t) + testListenEndBlock(t) + testStreamingService.Close() + wg.Wait() +} + +func testListenBeginBlock(t *testing.T) { + expectedBeginBlockReqBytes, err := testMarshaller.MarshalJSON(&testBeginBlockReq) + require.Nil(t, err) + expectedBeginBlockResBytes, err := testMarshaller.MarshalJSON(&testBeginBlockRes) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey1, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes) + require.Nil(t, err) + + // consume stored messages + topics := []string{string(BeginBlockReqTopic), BeginBlockResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedBeginBlockReqBytes, getMessageValueForTopic(msgs, string(BeginBlockReqTopic), 0)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 0)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 1)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 2)) + require.Equal(t, expectedBeginBlockResBytes, getMessageValueForTopic(msgs, BeginBlockResTopic, 0)) +} + +func testListenDeliverTx1(t *testing.T) { + expectedDeliverTxReq1Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxReq1) + require.Nil(t, err) + expectedDeliverTxRes1Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxRes1) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey2, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey1, + Value: mockValue1, + 
Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1) + require.Nil(t, err) + + // consume stored messages + topics := []string{DeliverTxReqTopic, DeliverTxResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedDeliverTxReq1Bytes, getMessageValueForTopic(msgs, DeliverTxReqTopic, 0)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 3)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 4)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 5)) + require.Equal(t, expectedDeliverTxRes1Bytes, getMessageValueForTopic(msgs, DeliverTxResTopic, 0)) +} + +func testListenDeliverTx2(t *testing.T) { + expectedDeliverTxReq2Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxReq2) + require.Nil(t, err) + expectedDeliverTxRes2Bytes, err := testMarshaller.MarshalJSON(&testDeliverTxRes2) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey2, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, mockValue2, false) + testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false) + + // expected KV pairs + expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey1, + Value: mockValue1, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey1.Name(), + Key: mockKey2, + Value: mockValue2, + Delete: false, + }) + require.Nil(t, err) + expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{ + StoreKey: mockStoreKey2.Name(), + Key: mockKey3, + Value: mockValue3, + Delete: false, + }) + require.Nil(t, err) + + // send the ABCI messages + err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2) + require.Nil(t, err) + + // consume stored messages + topics := []string{DeliverTxReqTopic, DeliverTxResTopic, StateChangeTopic} + msgs, err := poll(bootstrapServers, topics, 5) + require.Nil(t, err) + + // validate data stored in Kafka + require.Equal(t, expectedDeliverTxReq2Bytes, getMessageValueForTopic(msgs, DeliverTxReqTopic, 1)) + require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 6)) + require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 7)) + require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 8)) + require.Equal(t, expectedDeliverTxRes2Bytes, getMessageValueForTopic(msgs, DeliverTxResTopic, 1)) +} + +func testListenEndBlock(t *testing.T) { + expectedEndBlockReqBytes, err := testMarshaller.MarshalJSON(&testEndBlockReq) + require.Nil(t, err) + expectedEndBlockResBytes, err := testMarshaller.MarshalJSON(&testEndBlockRes) + require.Nil(t, err) + + // write state changes + testListener1.OnWrite(mockStoreKey1, mockKey1, mockValue1, false) + testListener2.OnWrite(mockStoreKey1, mockKey2, 
mockValue2, false)
+	testListener1.OnWrite(mockStoreKey2, mockKey3, mockValue3, false)
+
+	// expected KV pairs
+	expectedKVPair1, err := testMarshaller.MarshalJSON(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey1,
+		Value:    mockValue1,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair2, err := testMarshaller.MarshalJSON(&types.StoreKVPair{
+		StoreKey: mockStoreKey1.Name(),
+		Key:      mockKey2,
+		Value:    mockValue2,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+	expectedKVPair3, err := testMarshaller.MarshalJSON(&types.StoreKVPair{
+		StoreKey: mockStoreKey2.Name(),
+		Key:      mockKey3,
+		Value:    mockValue3,
+		Delete:   false,
+	})
+	require.Nil(t, err)
+
+	// send the ABCI messages
+	err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes)
+	require.Nil(t, err)
+
+	// consume stored messages
+	topics := []string{EndBlockReqTopic, EndBlockResTopic, StateChangeTopic}
+	msgs, err := poll(bootstrapServers, topics, 5)
+	require.Nil(t, err)
+
+	// validate data stored in Kafka
+	require.Equal(t, expectedEndBlockReqBytes, getMessageValueForTopic(msgs, EndBlockReqTopic, 0))
+	require.Equal(t, expectedKVPair1, getMessageValueForTopic(msgs, StateChangeTopic, 9))
+	require.Equal(t, expectedKVPair2, getMessageValueForTopic(msgs, StateChangeTopic, 10))
+	require.Equal(t, expectedKVPair3, getMessageValueForTopic(msgs, StateChangeTopic, 11))
+	require.Equal(t, expectedEndBlockResBytes, getMessageValueForTopic(msgs, EndBlockResTopic, 0))
+}
+
+func getMessageValueForTopic(msgs []*kafka.Message, topic string, offset int64) []byte {
+	topic = fmt.Sprintf("%s-%s", topicPrefix, topic)
+	for _, m := range msgs {
+		t := *m.TopicPartition.Topic
+		o := int64(m.TopicPartition.Offset)
+		if t == topic && o == offset {
+			return m.Value
+		}
+	}
+	return []byte{0}
+}
+
+func poll(bootstrapServers string, topics []string, expectedMsgCnt int) ([]*kafka.Message, error) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
+
+	c, err := kafka.NewConsumer(&kafka.ConfigMap{
+		"bootstrap.servers": bootstrapServers,
+		// Avoid connecting to IPv6 brokers:
+		// This is needed for the ErrAllBrokersDown show-case below
+		// when using localhost brokers on OSX, since the OSX resolver
+		// will return the IPv6 addresses first.
+		// You typically don't need to specify this configuration property.
+		"broker.address.family": "v4",
+		"group.id":              fmt.Sprintf("testGroup-%d", os.Getpid()),
+		"auto.offset.reset":     "earliest"})
+
+	if err != nil {
+		panic(fmt.Sprintf("Failed to create consumer: %s\n", err))
+	}
+
+	fmt.Printf("Created Consumer %v\n", c)
+
+	var _topics []string
+	for _, t := range topics {
+		_topics = append(_topics, fmt.Sprintf("%s-%s", topicPrefix, t))
+	}
+
+	if err = c.SubscribeTopics(_topics, nil); err != nil {
+		panic(fmt.Sprintf("Failed to subscribe to topics: %s\n", err))
+	}
+
+	msgs := make([]*kafka.Message, 0)
+
+	run := true
+
+	for run {
+		select {
+		case sig := <-sigchan:
+			fmt.Printf("Caught signal %v: terminating\n", sig)
+			run = false
+		default:
+			ev := c.Poll(100)
+			if ev == nil {
+				continue
+			}
+
+			switch e := ev.(type) {
+			case *kafka.Message:
+				msgs = append(msgs, e)
+			case kafka.Error:
+				// Errors should generally be considered
+				// informational, the client will try to
+				// automatically recover.
+				// But in this example we choose to terminate
+				// the application if all brokers are down.
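+				// Any other error code is only logged here and polling continues.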
+ fmt.Printf("%% Error: %v: %v\n", e.Code(), e) + if e.Code() == kafka.ErrAllBrokersDown { + run = false + } + default: + fmt.Printf("Ignored %v\n", e) + + // Workaround so our tests pass. + // Wait for the expected messages to be delivered before closing the consumer + if expectedMsgCnt == len(msgs) { + run = false + } + } + } + } + + fmt.Printf("Closing consumer\n") + if err := c.Close(); err != nil { + return nil, err + } + + return msgs, nil +} + +func createTopics(t *testing.T, topics []string, bootstrapServers string) { + + adminClient, err := kafka.NewAdminClient(&kafka.ConfigMap{ + "bootstrap.servers": bootstrapServers, + "broker.version.fallback": "0.10.0.0", + "api.version.fallback.ms": 0, + }) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + t.Fail() + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Create topics on cluster. + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDuration, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("time.ParseDuration(60s)") + t.Fail() + } + + var _topics []kafka.TopicSpecification + for _, s := range topics { + _topics = append(_topics, + kafka.TopicSpecification{ + Topic: fmt.Sprintf("%s-%s", topicPrefix, s), + NumPartitions: 1, + ReplicationFactor: 1}) + } + results, err := adminClient.CreateTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDuration)) + if err != nil { + fmt.Printf("Problem during the topicPrefix creation: %v\n", err) + t.Fail() + } + + // Check for specific topicPrefix errors + for _, result := range results { + if result.Error.Code() != kafka.ErrNoError && + result.Error.Code() != kafka.ErrTopicAlreadyExists { + fmt.Printf("Topic creation failed for %s: %v", + result.Topic, result.Error.String()) + t.Fail() + } + } + + adminClient.Close() +} + +func deleteTopics(t *testing.T, topics []string, bootstrapServers string) { + // Create a new AdminClient. + // AdminClient can also be instantiated using an existing + // Producer or Consumer instance, see NewAdminClientFromProducer and + // NewAdminClientFromConsumer. + a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers}) + if err != nil { + fmt.Printf("Failed to create Admin client: %s\n", err) + t.Fail() + } + + // Contexts are used to abort or limit the amount of time + // the Admin call blocks waiting for a result. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Delete topics on cluster + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("ParseDuration(60s)") + t.Fail() + } + + var _topics []string + for _, s := range topics { + _topics = append(_topics, fmt.Sprintf("%s-%s", topicPrefix, s)) + } + + results, err := a.DeleteTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDur)) + if err != nil { + fmt.Printf("Failed to delete topics: %v\n", err) + t.Fail() + } + + // Print results + for _, result := range results { + fmt.Printf("%s\n", result) + } + + a.Close() +} From d79f52a5ca4d182de6474018a7ccd7ec20ffdd21 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 3 Feb 2022 22:40:46 -0600 Subject: [PATCH 17/43] setup non-deterministic testing --- baseapp/abci.go | 2 +- baseapp/baseapp.go | 2 +- plugin/loader/loader.go | 4 +- plugin/plugins/file/service/service.go | 2 +- plugin/plugins/kafka/service/service.go | 2 +- plugin/plugins/trace/service/service.go | 2 +- simapp/sim_test.go | 153 +++++++++++++++++++++++- 7 files changed, 159 insertions(+), 8 deletions(-) diff --git a/baseapp/abci.go b/baseapp/abci.go index 571d63084f14..b9f26dc4e8a2 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -342,7 +342,7 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { // each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel // but the BaseApp also imposes a global wait limit if app.globalWaitLimit > 0 { - maxWait := time.NewTicker(app.globalWaitLimit) + maxWait := time.NewTicker(app.globalWaitLimit * time.Millisecond) defer maxWait.Stop() for _, lis := range app.abciListeners { select { diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index b49cc1f43d1b..b26fe9ffe5ae 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -130,7 +130,7 @@ type BaseApp struct { // nolint: maligned // and exposing the requests and responses to external consumers abciListeners []ABCIListener - // globalWaitTime is the maximum amount of time the BaseApp will wait for positive acknowledgement of message + // globalWaitLimit is the maximum amount of time the BaseApp will wait for positive acknowledgement of message // receipt from ABCIListeners before halting globalWaitLimit time.Duration } diff --git a/plugin/loader/loader.go b/plugin/loader/loader.go index 3fd633d72f92..1d9c9b11fb30 100644 --- a/plugin/loader/loader.go +++ b/plugin/loader/loader.go @@ -92,12 +92,12 @@ type PluginLoader struct { // NewPluginLoader creates new plugin loader func NewPluginLoader(opts serverTypes.AppOptions, logger logging.Logger) (*PluginLoader, error) { loader := &PluginLoader{plugins: make(map[string]plugin.Plugin, len(preloadPlugins)), opts: opts, logger: logger} + loader.disabled = cast.ToStringSlice(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DISABLED_TOML_KEY))) for _, v := range preloadPlugins { if err := loader.Load(v); err != nil { return nil, err } } - loader.disabled = cast.ToStringSlice(opts.Get(plugin.PLUGINS_DISABLED_TOML_KEY)) pluginDir := cast.ToString(opts.Get(plugin.PLUGINS_DIR_TOML_KEY)) if pluginDir == "" { pluginDir = filepath.Join(os.Getenv("GOPATH"), plugin.DEFAULT_PLUGINS_DIRECTORY) @@ -138,7 +138,7 @@ func (loader *PluginLoader) Load(pl plugin.Plugin) error { name, ppl.Version(), pl.Version()) } if sliceContainsStr(loader.disabled, name) { - loader.logger.Info("not loading disabled plugin", "plugin 
name", name) + loader.logger.Info("not loading disabled plugin", "name", name) return nil } loader.plugins[name] = pl diff --git a/plugin/plugins/file/service/service.go b/plugin/plugins/file/service/service.go index c8b7923e1dd2..b417188279ae 100644 --- a/plugin/plugins/file/service/service.go +++ b/plugin/plugins/file/service/service.go @@ -98,7 +98,7 @@ func NewFileStreamingService(writeDir, filePrefix string, storeKeys []types.Stor stateCache: make([][]byte, 0), stateCacheLock: new(sync.Mutex), ack: ack, - ackChan: make(chan bool), + ackChan: make(chan bool, 1), }, nil } diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 0dc7441c0002..03f8aee8ca30 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -142,7 +142,7 @@ func NewKafkaStreamingService( stateCache: make([][]byte, 0), stateCacheLock: new(sync.Mutex), ack: ack, - ackChan: make (chan bool), + ackChan: make (chan bool, 1), } go func() { diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go index 0d9d008d7746..32798443c330 100644 --- a/plugin/plugins/trace/service/service.go +++ b/plugin/plugins/trace/service/service.go @@ -96,7 +96,7 @@ func NewTraceStreamingService( stateCacheLock: new(sync.Mutex), printDataToStdout: printDataToStdout, ack: ack, - ackChan: make(chan bool), + ackChan: make(chan bool, 1), } return tss, nil diff --git a/simapp/sim_test.go b/simapp/sim_test.go index 70b2d9644a52..1f88bb3d5ca1 100644 --- a/simapp/sim_test.go +++ b/simapp/sim_test.go @@ -1,13 +1,22 @@ package simapp import ( + "context" "encoding/json" "fmt" + "github.com/confluentinc/confluent-kafka-go/kafka" + "github.com/cosmos/cosmos-sdk/plugin" + "github.com/cosmos/cosmos-sdk/server/types" + "github.com/spf13/cast" + "github.com/spf13/viper" + tmos "github.com/tendermint/tendermint/libs/os" "math/rand" "os" + "path/filepath" "runtime/debug" "strings" "testing" + "time" storetypes "github.com/cosmos/cosmos-sdk/store/types" "github.com/stretchr/testify/require" @@ -33,6 +42,9 @@ import ( "github.com/cosmos/cosmos-sdk/x/simulation" slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + kafkaplugin "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka" + kafkaservice "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service" ) // Get flags every time the simulator is run @@ -310,8 +322,21 @@ func TestAppStateDeterminism(t *testing.T) { logger = log.NewNopLogger() } + appOpts := loadAppOptions() + disabledPlugins := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DISABLED_TOML_KEY))) + var kafkaDisabled bool = false + for _, p := range disabledPlugins { + if kafkaplugin.PLUGIN_NAME == p { + kafkaDisabled = true + break + } + } + if !kafkaDisabled { + prepKafkaTopics(appOpts) + } + db := dbm.NewMemDB() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, interBlockCacheOpt()) + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), appOpts, interBlockCacheOpt()) fmt.Printf( "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n", @@ -347,3 +372,129 @@ func TestAppStateDeterminism(t *testing.T) { } } } + +func loadAppOptions() types.AppOptions { + // load plugin config + usrHomeDir, _ := os.UserHomeDir() + 
confFile := filepath.Join(usrHomeDir, "app.toml")
+	vpr := viper.New()
+	vpr.SetConfigFile(confFile)
+	err := vpr.ReadInConfig()
+	if err != nil {
+		tmos.Exit(err.Error())
+	}
+	return vpr
+}
+
+func prepKafkaTopics(opts types.AppOptions) {
+	// kafka topic setup
+	topicPrefix := cast.ToString(opts.Get(fmt.Sprintf("%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, kafkaplugin.PLUGIN_NAME, kafkaplugin.TOPIC_PREFIX_PARAM)))
+	bootstrapServers := cast.ToString(opts.Get(fmt.Sprintf("%s.%s.%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, kafkaplugin.PLUGIN_NAME, kafkaplugin.PRODUCER_CONFIG_PARAM, "bootstrap_servers")))
+	bootstrapServers = strings.ReplaceAll(bootstrapServers, "_", ".")
+	topics := []string{
+		kafkaservice.BeginBlockReqTopic,
+		kafkaservice.BeginBlockResTopic,
+		kafkaservice.DeliverTxReqTopic,
+		kafkaservice.DeliverTxResTopic,
+		kafkaservice.EndBlockReqTopic,
+		kafkaservice.EndBlockResTopic,
+		kafkaservice.StateChangeTopic,
+	}
+	deleteTopics(topicPrefix, topics, bootstrapServers)
+	createTopics(topicPrefix, topics, bootstrapServers)
+}
+
+func createTopics(topicPrefix string, topics []string, bootstrapServers string) {
+
+	adminClient, err := kafka.NewAdminClient(&kafka.ConfigMap{
+		"bootstrap.servers":       bootstrapServers,
+		"broker.version.fallback": "0.10.0.0",
+		"api.version.fallback.ms": 0,
+	})
+	if err != nil {
+		fmt.Printf("Failed to create Admin client: %s\n", err)
+		tmos.Exit(err.Error())
+	}
+
+	// Contexts are used to abort or limit the amount of time
+	// the Admin call blocks waiting for a result.
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	// Create topics on cluster.
+	// Set Admin options to wait for the operation to finish (or at most 60s)
+	maxDuration, err := time.ParseDuration("60s")
+	if err != nil {
+		fmt.Printf("time.ParseDuration(60s)")
+		tmos.Exit(err.Error())
+	}
+
+	var _topics []kafka.TopicSpecification
+	for _, s := range topics {
+		_topics = append(_topics,
+			kafka.TopicSpecification{
+				Topic:             fmt.Sprintf("%s-%s", topicPrefix, s),
+				NumPartitions:     1,
+				ReplicationFactor: 1})
+	}
+	results, err := adminClient.CreateTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDuration))
+	if err != nil {
+		fmt.Printf("Problem during topic creation: %v\n", err)
+		tmos.Exit(err.Error())
+	}
+
+	// Check for specific topic errors
+	for _, result := range results {
+		if result.Error.Code() != kafka.ErrNoError &&
+			result.Error.Code() != kafka.ErrTopicAlreadyExists {
+			fmt.Printf("Topic creation failed for %s: %v",
+				result.Topic, result.Error.String())
+			tmos.Exit(result.Error.String())
+		}
+	}
+
+	adminClient.Close()
+}
+
+func deleteTopics(topicPrefix string, topics []string, bootstrapServers string) {
+	// Create a new AdminClient.
+	// AdminClient can also be instantiated using an existing
+	// Producer or Consumer instance, see NewAdminClientFromProducer and
+	// NewAdminClientFromConsumer.
+	a, err := kafka.NewAdminClient(&kafka.ConfigMap{"bootstrap.servers": bootstrapServers})
+	if err != nil {
+		fmt.Printf("Failed to create Admin client: %s\n", err)
+		tmos.Exit(err.Error())
+	}
+
+	// Contexts are used to abort or limit the amount of time
+	// the Admin call blocks waiting for a result.
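+	// As in createTopics above, the wait is bounded by the admin operation
+	// timeout handed to DeleteTopics rather than by this context.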
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + // Delete topics on cluster + // Set Admin options to wait for the operation to finish (or at most 60s) + maxDur, err := time.ParseDuration("60s") + if err != nil { + fmt.Printf("ParseDuration(60s)") + tmos.Exit(err.Error()) + } + + var _topics []string + for _, s := range topics { + _topics = append(_topics, fmt.Sprintf("%s-%s", topicPrefix, s)) + } + + results, err := a.DeleteTopics(ctx, _topics, kafka.SetAdminOperationTimeout(maxDur)) + if err != nil { + fmt.Printf("Failed to delete topics: %v\n", err) + tmos.Exit(err.Error()) + } + + // Print results + for _, result := range results { + fmt.Printf("%s\n", result) + } + + a.Close() +} From be4bb2ad0f33571f6bb34d7e38e64f8e34e440f7 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 3 Feb 2022 22:50:47 -0600 Subject: [PATCH 18/43] opt-in approach to enabling plugins --- plugin/example_config.toml | 2 +- plugin/loader/loader.go | 22 +++++++++++----------- plugin/plugin.go | 4 ++-- plugin/plugins/kafka/README.md | 4 ++-- plugin/plugins/trace/README.md | 4 ++-- simapp/sim_test.go | 10 +++------- 6 files changed, 21 insertions(+), 25 deletions(-) diff --git a/plugin/example_config.toml b/plugin/example_config.toml index 101dc2ce0949..5e67282a8f50 100644 --- a/plugin/example_config.toml +++ b/plugin/example_config.toml @@ -8,7 +8,7 @@ on = true # List of plugin names to enable from the plugin/plugins/* -disabled = [] +enabled = ["trace"] # The directory to load non-preloaded plugins from; defaults $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins dir = "" diff --git a/plugin/loader/loader.go b/plugin/loader/loader.go index 1d9c9b11fb30..5c24710c9683 100644 --- a/plugin/loader/loader.go +++ b/plugin/loader/loader.go @@ -81,24 +81,24 @@ func (ls loaderState) String() string { // 4. Optionally call Start to start plugins. // 5. Call Close to close all plugins. 
type PluginLoader struct {
-	state    loaderState
-	plugins  map[string]plugin.Plugin
-	started  []plugin.Plugin
-	opts     serverTypes.AppOptions
-	logger   logging.Logger
-	disabled []string
+	state   loaderState
+	plugins map[string]plugin.Plugin
+	started []plugin.Plugin
+	opts    serverTypes.AppOptions
+	logger  logging.Logger
+	enabled []string
 }
 
 // NewPluginLoader creates new plugin loader
 func NewPluginLoader(opts serverTypes.AppOptions, logger logging.Logger) (*PluginLoader, error) {
 	loader := &PluginLoader{plugins: make(map[string]plugin.Plugin, len(preloadPlugins)), opts: opts, logger: logger}
-	loader.disabled = cast.ToStringSlice(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DISABLED_TOML_KEY)))
+	loader.enabled = cast.ToStringSlice(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ENABLED_TOML_KEY)))
 	for _, v := range preloadPlugins {
 		if err := loader.Load(v); err != nil {
 			return nil, err
 		}
 	}
-	pluginDir := cast.ToString(opts.Get(plugin.PLUGINS_DIR_TOML_KEY))
+	pluginDir := cast.ToString(opts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DIR_TOML_KEY)))
 	if pluginDir == "" {
 		pluginDir = filepath.Join(os.Getenv("GOPATH"), plugin.DEFAULT_PLUGINS_DIRECTORY)
 	}
@@ -137,11 +137,11 @@ func (loader *PluginLoader) Load(pl plugin.Plugin) error {
 			"while trying to load dynamically: %s", name, ppl.Version(), pl.Version())
 	}
-	if sliceContainsStr(loader.disabled, name) {
-		loader.logger.Info("not loading disabled plugin", "name", name)
+	if sliceContainsStr(loader.enabled, name) {
+		loader.plugins[name] = pl
+		loader.logger.Info("loading enabled plugin", "name", name)
 		return nil
 	}
-	loader.plugins[name] = pl
 	return nil
 }
diff --git a/plugin/plugin.go b/plugin/plugin.go
index 4145cb48147f..b3c852d56451 100644
--- a/plugin/plugin.go
+++ b/plugin/plugin.go
@@ -19,8 +19,8 @@ const (
 	// PLUGINS_DIR_TOML_KEY is the second-level TOML key for the directory to load plugins from
 	PLUGINS_DIR_TOML_KEY = "dir"
 
-	// PLUGINS_DISABLED_TOML_KEY is the second-level TOML key for a list of plugins to disable
-	PLUGINS_DISABLED_TOML_KEY = "disabled"
+	// PLUGINS_ENABLED_TOML_KEY is the second-level TOML key for a list of plugins to enable
+	PLUGINS_ENABLED_TOML_KEY = "enabled"
 
 	// DEFAULT_PLUGINS_DIRECTORY is the default directory to load plugins from
 	DEFAULT_PLUGINS_DIRECTORY = "src/github.com/cosmos/cosmos-sdk/plugin/plugins"
diff --git a/plugin/plugins/kafka/README.md b/plugin/plugins/kafka/README.md
index e82ee49a6961..fa653a62a382 100644
--- a/plugin/plugins/kafka/README.md
+++ b/plugin/plugins/kafka/README.md
@@ -47,8 +47,8 @@ The plugin has been hooked up to run with `test-sim-nondeterminism` task. For a
 # turn the plugin system, as a whole, on or off
 on = true
 
-# List of plugin names to disable
-disabled = ["file", "trace"]
+# List of plugin names to enable from the plugin/plugins/*
+enabled = ["kafka"]
 
 # The directory to load non-preloaded plugins from; defaults to ./plugin/plugins
 dir = ""
diff --git a/plugin/plugins/trace/README.md b/plugin/plugins/trace/README.md
index 09e51fadbe9b..6b98a83d3f42 100644
--- a/plugin/plugins/trace/README.md
+++ b/plugin/plugins/trace/README.md
@@ -29,8 +29,8 @@ The plugin is setup to run as the `default` plugin.
See `./plugin/loader/preload # turn the plugin system, as a whole, on or off on = true - # list of plugins to disable - disabled = [] + # List of plugin names to enable from the plugin/plugins/* + enabled = ["trace"] # The directory to load non-preloaded plugins from; defaults to dir = "" diff --git a/simapp/sim_test.go b/simapp/sim_test.go index 1f88bb3d5ca1..4c34ae627b67 100644 --- a/simapp/sim_test.go +++ b/simapp/sim_test.go @@ -323,17 +323,13 @@ func TestAppStateDeterminism(t *testing.T) { } appOpts := loadAppOptions() - disabledPlugins := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_DISABLED_TOML_KEY))) - var kafkaDisabled bool = false - for _, p := range disabledPlugins { + enabledPlugins := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ENABLED_TOML_KEY))) + for _, p := range enabledPlugins { if kafkaplugin.PLUGIN_NAME == p { - kafkaDisabled = true + prepKafkaTopics(appOpts) break } } - if !kafkaDisabled { - prepKafkaTopics(appOpts) - } db := dbm.NewMemDB() app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), appOpts, interBlockCacheOpt()) From f21ff7ea74232b2e7a5c1994cd4dc147e25cab69 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Fri, 4 Feb 2022 13:00:31 -0600 Subject: [PATCH 19/43] synchronize work between kafka and app.Commit() --- plugin/example_config.toml | 15 ++- plugin/plugins/kafka/README.md | 7 +- plugin/plugins/kafka/kafka.go | 15 ++- plugin/plugins/kafka/service/service.go | 116 ++++++++++++++++-------- 4 files changed, 109 insertions(+), 44 deletions(-) diff --git a/plugin/example_config.toml b/plugin/example_config.toml index 5e67282a8f50..eaffc06dcf48 100644 --- a/plugin/example_config.toml +++ b/plugin/example_config.toml @@ -8,7 +8,7 @@ on = true # List of plugin names to enable from the plugin/plugins/* -enabled = ["trace"] +enabled = ["kafka"] # The directory to load non-preloaded plugins from; defaults $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins dir = "" @@ -18,7 +18,7 @@ dir = "" # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services # in milliseconds -global_ack_wait_limit = 5000 +global_ack_wait_limit = 2000 ############################################################################### ### File plugin configuration ### @@ -39,7 +39,7 @@ prefix = "" # whether to operate in fire-and-forget or success/failure acknowledgement mode # false == fire-and-forget; true == sends a message receipt success/fail signal -ack = "false" +ack = "true" ############################################################################### ### Trace Plugin configuration ### @@ -56,7 +56,7 @@ print_data_to_stdout = false # whether to operate in fire-and-forget or success/failure acknowledgement mode # false == fire-and-forget; true == sends a message receipt success/fail signal -ack = "false" +ack = "true" ############################################################################### ### Kafka Plugin configuration ### @@ -76,7 +76,12 @@ flush_timeout_ms = 1500 # whether to operate in fire-and-forget or success/failure acknowledgement mode # false == fire-and-forget; true == sends a message receipt success/fail signal -ack = "false" +ack = "true" + +# The amount of time to wait for acknowledgment of success/failure of message +# delivery of the current block before considering the delivery of messages failed. 
(In +# milliseconds) +delivered_block_wait_limit = 1000 # Producer configuration properties. # The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka. diff --git a/plugin/plugins/kafka/README.md b/plugin/plugins/kafka/README.md index fa653a62a382..017cb96b5e02 100644 --- a/plugin/plugins/kafka/README.md +++ b/plugin/plugins/kafka/README.md @@ -58,7 +58,7 @@ The plugin has been hooked up to run with `test-sim-nondeterminism` task. For a # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services # in milliseconds - global_ack_wait_limit = 500 + global_ack_wait_limit = 2000 ############################################################################### ### Kafka Plugin configuration ### @@ -79,6 +79,11 @@ The plugin has been hooked up to run with `test-sim-nondeterminism` task. For a # whether to operate in fire-and-forget or success/failure acknowledgement mode # false == fire-and-forget; true == sends a message receipt success/fail signal ack = "false" + + # The amount of time to wait for acknowledgment of success/failure of message + # delivery of the current block before considering the delivery of messages failed. (In + # milliseconds) + delivered_block_wait_limit = 1000 # Producer configuration properties. # The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka. diff --git a/plugin/plugins/kafka/kafka.go b/plugin/plugins/kafka/kafka.go index 714b61178869..a42554b9eca3 100644 --- a/plugin/plugins/kafka/kafka.go +++ b/plugin/plugins/kafka/kafka.go @@ -42,6 +42,10 @@ const ( // ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode ACK_MODE = "ack" + + // DELIVERED_BLOCK_WAIT_LIMIT the amount of time to wait for acknowledgment of success/failure of + // message delivery of the current block before considering the delivery of messages failed. 
+ DELIVERED_BLOCK_WAIT_LIMIT = "delivered_block_wait_limit" ) // Plugins is the exported symbol for loading this plugin @@ -83,6 +87,7 @@ func (ssp *streamingServicePlugin) Register( topicPrefix := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, TOPIC_PREFIX_PARAM))) flushTimeoutMs := cast.ToInt(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, FLUSH_TIMEOUT_MS_PARAM))) ack := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE))) + deliveredBlockWaitLimit := cast.ToDuration(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, DELIVERED_BLOCK_WAIT_LIMIT))) producerConfig := cast.ToStringMap(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM))) // get the store keys allowed to be exposed for this streaming service exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) @@ -134,7 +139,15 @@ func (ssp *streamingServicePlugin) Register( var err error ssp.kss, err = service.NewKafkaStreamingService( - bApp.Logger(), producerConfigMap, topicPrefix, flushTimeoutMs, exposeStoreKeys, marshaller, ack) + bApp.Logger(), + producerConfigMap, + topicPrefix, + flushTimeoutMs, + exposeStoreKeys, + marshaller, + ack, + deliveredBlockWaitLimit, + ) if err != nil { return err } diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 03f8aee8ca30..48da13f37771 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -8,7 +8,9 @@ import ( "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "strings" "sync" + "time" "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/types" @@ -71,20 +73,22 @@ var _ baseapp.StreamingService = (*KafkaStreamingService)(nil) // KafkaStreamingService is a concrete implementation of streaming.Service that writes state changes out to Kafka type KafkaStreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to - topicPrefix string // topicPrefix prefix name - producer *kafka.Producer // the producer instance that will be used to send messages to Kafka - flushTimeoutMs int // the time to wait for outstanding messages and requests to complete delivery (milliseconds) - codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel used for synchronize closure - ack bool // true == fire-and-forget; false == sends success/failure signal - ackStatus bool // success/failure status to be sent to ackChan - ackChan chan bool // channel used to send a success/failure signal + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + topicPrefix string // topicPrefix prefix name + producer *kafka.Producer // the producer instance that will be used to send messages to Kafka + flushTimeoutMs int // the time to wait for outstanding 
messages and requests to complete delivery (milliseconds) + codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + ack bool // true == fire-and-forget; false == sends success/failure signal + ackStatus bool // success/failure status to be sent to ackChan + ackChan chan bool // channel used to send a success/failure signal + deliveredBlockChan chan struct{} // channel used for signaling the delivery of all messages for the current block to Kafka. + deliveredBlockWaitLimit time.Duration // the time to wait for Kafka service to deliver current block messages before timing out. } // IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener @@ -108,13 +112,14 @@ func (iw *IntermediateWriter) Write(b []byte) (int, error) { // NewKafkaStreamingService creates a new KafkaStreamingService func NewKafkaStreamingService( - logger log.Logger, - producerConfig kafka.ConfigMap, - topicPrefix string, - flushTimeoutMs int, - storeKeys []types.StoreKey, - c codec.BinaryCodec, - ack bool, + logger log.Logger, + producerConfig kafka.ConfigMap, + topicPrefix string, + flushTimeoutMs int, + storeKeys []types.StoreKey, + c codec.BinaryCodec, + ack bool, + deliveredBlockWaitLimit time.Duration, ) (*KafkaStreamingService, error) { listenChan := make(chan []byte) iw := NewIntermediateWriter(listenChan) @@ -133,31 +138,41 @@ func NewKafkaStreamingService( logger.Debug("Created Producer: ", "producer", p) kss := &KafkaStreamingService{ - listeners: listeners, - srcChan: listenChan, - topicPrefix: topicPrefix, - producer: p, - flushTimeoutMs: flushTimeoutMs, - codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), - ack: ack, - ackChan: make (chan bool, 1), + listeners: listeners, + srcChan: listenChan, + topicPrefix: topicPrefix, + producer: p, + flushTimeoutMs: flushTimeoutMs, + codec: c, + stateCache: make([][]byte, 0), + stateCacheLock: new(sync.Mutex), + ack: ack, + ackChan: make (chan bool, 1), + deliveredBlockWaitLimit: deliveredBlockWaitLimit, } + var endBlockResTopic string + if len(kss.topicPrefix) > 0 { + endBlockResTopic = fmt.Sprintf("%s-%s", kss.topicPrefix, EndBlockResTopic) + } go func() { for e := range p.Events() { switch ev := e.(type) { case *kafka.Message: pTopic := ev.TopicPartition.Topic partition := ev.TopicPartition.Partition - offset := ev.TopicPartition.Offset + //offset := ev.TopicPartition.Offset key := string(ev.Key) if err := ev.TopicPartition.Error; err != nil { - logger.Debug("Delivery failed: ", "topic", pTopic, "partition", partition, "key", key, "err", err) + logger.Error("Delivery failed: ", "topic", pTopic, "partition", partition, "key", key, "err", err) kss.ackStatus = false } else { - logger.Debug("Delivered message:", "topic", pTopic, "partition", partition, "offset", offset, "key", key) + //logger.Debug("Delivered message:", "topic", pTopic, "partition", partition, "offset", offset, "key", key) + // signal delivery of the block's messages + if strings.Compare(endBlockResTopic, *pTopic) == 0 { + logger.Debug("====== EndBlock Delivered ======") + close(kss.deliveredBlockChan) + } } } } @@ 
-207,6 +222,7 @@ func (kss *KafkaStreamingService) setBeginBlock(req abci.RequestBeginBlock) { kss.currentBlockNumber = req.GetHeader().Height kss.currentTxIndex = 0 kss.ackStatus = true + kss.deliveredBlockChan = make(chan struct{}) } // ListenDeliverTx satisfies the Hook interface @@ -282,15 +298,41 @@ func (kss *KafkaStreamingService) ListenSuccess() <-chan bool { // if we are operating in fire-and-forget mode, immediately send a "success" signal if !kss.ack { go func() { + fmt.Printf("%s", "\r------ inside listenSuccess ------\n") kss.ackChan <- true }() } else { go func() { - // the KafkaStreamingService operating synchronously, but this will signify whether an error occurred - // during it's processing cycle - kss.ackChan <- kss.ackStatus + // Synchronize the work deliver of all block's messages. + // Force call to ListenSuccess() from within app.Commit() + // to wait {n} milliseconds before failing. + var deliveredBlock = false + maxWait := time.NewTicker(kss.deliveredBlockWaitLimit * time.Millisecond) + defer maxWait.Stop() + loop: + for { + // No reason to wait for block data to finish writing + // if any of the block's messages failed to be delivered + if !kss.ackStatus { + break loop + } + select { + case <-kss.deliveredBlockChan: + deliveredBlock = true + break loop + case <-maxWait.C: + break loop + } + } + + if deliveredBlock == false { + kss.ackChan <- false + } else { + kss.ackChan <- kss.ackStatus + } }() } + return kss.ackChan } From 222394b24bdd0389323daa997d12bf34d65664d9 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Fri, 4 Feb 2022 19:02:05 -0600 Subject: [PATCH 20/43] remove dependency on ack channel and use listener error response to act --- baseapp/abci.go | 27 +-- baseapp/baseapp.go | 6 - baseapp/options.go | 10 +- baseapp/streaming.go | 6 +- docs/architecture/adr-038-state-listening.md | 125 ++++-------- plugin/example_config.toml | 24 +-- plugin/plugins/file/file.go | 8 +- plugin/plugins/file/service/service.go | 104 +++------- plugin/plugins/file/service/service_test.go | 22 +-- plugin/plugins/kafka/README.md | 10 +- plugin/plugins/kafka/kafka.go | 15 +- plugin/plugins/kafka/service/service.go | 191 +++++++------------ plugin/plugins/kafka/service/service_test.go | 18 +- plugin/plugins/trace/README.md | 7 +- plugin/plugins/trace/service/service.go | 69 +++---- plugin/plugins/trace/service/service_test.go | 9 +- simapp/app.go | 8 +- 17 files changed, 206 insertions(+), 453 deletions(-) diff --git a/baseapp/abci.go b/baseapp/abci.go index b9f26dc4e8a2..b794ff890276 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -8,7 +8,6 @@ import ( "sort" "strings" "syscall" - "time" "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" @@ -194,6 +193,9 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + if streamingListener.HaltAppOnDeliveryError() { + app.halt() + } } } @@ -220,6 +222,9 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + if streamingListener.HaltAppOnDeliveryError() { + app.halt() + } } } 
@@ -273,6 +278,9 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx for _, streamingListener := range app.abciListeners { if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { app.logger.Error("DeliverTx listening hook failed", "err", err) + if streamingListener.HaltAppOnDeliveryError() { + app.halt() + } } } }() @@ -339,23 +347,6 @@ func (app *BaseApp) Commit() (res abci.ResponseCommit) { app.halt() } - // each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel - // but the BaseApp also imposes a global wait limit - if app.globalWaitLimit > 0 { - maxWait := time.NewTicker(app.globalWaitLimit * time.Millisecond) - defer maxWait.Stop() - for _, lis := range app.abciListeners { - select { - case success := <-lis.ListenSuccess(): - if success == false { - app.halt() - } - case <-maxWait.C: - app.halt() - } - } - } - if app.snapshotInterval > 0 && uint64(header.Height)%app.snapshotInterval == 0 { go app.snapshot(header.Height) } diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index b26fe9ffe5ae..ba60a614b684 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -4,8 +4,6 @@ import ( "context" "errors" "fmt" - "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -129,10 +127,6 @@ type BaseApp struct { // nolint: maligned // abciListeners for hooking into the ABCI message processing of the BaseApp // and exposing the requests and responses to external consumers abciListeners []ABCIListener - - // globalWaitLimit is the maximum amount of time the BaseApp will wait for positive acknowledgement of message - // receipt from ABCIListeners before halting - globalWaitLimit time.Duration } // NewBaseApp returns a reference to an initialized BaseApp. 
It accepts a
diff --git a/baseapp/options.go b/baseapp/options.go
index b21fb39034f2..e477171e5486 100644
--- a/baseapp/options.go
+++ b/baseapp/options.go
@@ -2,10 +2,8 @@ package baseapp
 
 import (
 	"fmt"
-	"io"
-	"time"
-
 	dbm "github.com/tendermint/tm-db"
+	"io"
 
 	"github.com/cosmos/cosmos-sdk/codec/types"
 	"github.com/cosmos/cosmos-sdk/snapshots"
@@ -246,9 +244,3 @@ func (app *BaseApp) SetStreamingService(s StreamingService) {
 	// BaseApp will pass BeginBlock, DeliverTx, and EndBlock requests and responses to the streaming services to update their ABCI context
 	app.abciListeners = append(app.abciListeners, s)
 }
-
-// SetGlobalWaitLimit is used to set the maximum amount of time the BaseApp will wait for positive acknowledgement
-// of message receipt from ABCIListeners before halting
-func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) {
-	app.globalWaitLimit = t
-}
diff --git a/baseapp/streaming.go b/baseapp/streaming.go
index f37de0d87928..2d3b9d036da5 100644
--- a/baseapp/streaming.go
+++ b/baseapp/streaming.go
@@ -18,9 +18,9 @@ type ABCIListener interface {
 	ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
 	// ListenDeliverTx updates the steaming service with the latest DeliverTx messages
 	ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
-	// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service
-	// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt
-	ListenSuccess() <-chan bool
+	// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
+	// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+	HaltAppOnDeliveryError() bool
 }
 
 // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks
diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md
index 440faf01e726..ab4e308cb0ac 100644
--- a/docs/architecture/adr-038-state-listening.md
+++ b/docs/architecture/adr-038-state-listening.md
@@ -222,9 +222,9 @@ type ABCIListener interface {
 	ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
 	// ListenDeliverTx updates the steaming service with the latest DeliverTx messages
 	ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
-	// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service
-	// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt
-	ListenSuccess() <-chan bool
+	// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
+	// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+	HaltAppOnDeliveryError() bool
 }
 
 // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks
@@ -257,15 +257,6 @@ func (app *BaseApp) SetStreamingService(s StreamingService) {
 	}
 ```
-We will add a new method to the `BaseApp` that is used to configure a global wait limit for receiving positive acknowledgement
-of message receipt from the integrated `StreamingService`s.
- -```go -func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) { - app.globalWaitLimit = t -} -``` - We will also modify the `BeginBlock`, `EndBlock`, and `DeliverTx` methods to pass ABCI requests and responses to any streaming service hooks registered with the `BaseApp`. @@ -276,7 +267,12 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg // Call the streaming service hooks with the BeginBlock messages for _, listener := range app.abciListeners { - listener.ListenBeginBlock(app.deliverState.ctx, req, res) + if err := listener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("ListenBeginBlock listening hook failed", "err", err) + if listener.HaltAppOnDeliveryError() { + app.halt() + } + } } return res @@ -290,73 +286,36 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc // Call the streaming service hooks with the EndBlock messages for _, listener := range app.abciListeners { - listener.ListenEndBlock(app.deliverState.ctx, req, res) - } - - return res -} -``` - -```go -func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { - - ... - - gInfo, result, err := app.runTx(runTxModeDeliver, req.Tx) - if err != nil { - resultStr = "failed" - res := sdkerrors.ResponseDeliverTx(err, gInfo.GasWanted, gInfo.GasUsed, app.trace) - // If we throw an error, be sure to still call the streaming service's hook - for _, listener := range app.abciListeners { - listener.ListenDeliverTx(app.deliverState.ctx, req, res) + if err := listener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("ListenEndBlock listening hook failed", "err", err) + if listener.HaltAppOnDeliveryError() { + app.halt() + } } - return res - } - - res := abci.ResponseDeliverTx{ - GasWanted: int64(gInfo.GasWanted), // TODO: Should type accept unsigned ints? - GasUsed: int64(gInfo.GasUsed), // TODO: Should type accept unsigned ints? - Log: result.Log, - Data: result.Data, - Events: sdk.MarkEventsToIndex(result.Events, app.indexEvents), - } - - // Call the streaming service hooks with the DeliverTx messages - for _, listener := range app.abciListeners { - listener.ListenDeliverTx(app.deliverState.ctx, req, res) } return res } ``` -We will also modify the `Commit` method to process `success/failure` signals from the integrated `StreamingService`s using -the `ABCIListener.ListenSuccess()` method. Each `StreamingService` has an internal wait threshold after which it sends -`false` to the `ListenSuccess()` channel, and the BaseApp also imposes a configurable global wait limit. -If the `StreamingService` is operating in a "fire-and-forget" mode, `ListenSuccess()` should immediately return `true` -off the channel despite the success status of the service. - ```go -func (app *BaseApp) Commit() (res abci.ResponseCommit) { - - ... 
- - // each listener has an internal wait threshold after which it sends `false` to the ListenSuccess() channel - // but the BaseApp also imposes a global wait limit - maxWait := time.NewTicker(app.globalWaitLimit) - for _, lis := range app.abciListeners { - select { - case success := <- lis.ListenSuccess(): - if success == false { - app.halt() +func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { + + var abciRes abci.ResponseDeliverTx + defer func() { + for _, streamingListener := range app.abciListeners { + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { + app.logger.Error("DeliverTx listening hook failed", "err", err) + if streamingListener.HaltAppOnDeliveryError() { + app.halt() + } } - case <- maxWait.C: - app.halt() } - } - + }() + ... + return res } ``` @@ -468,48 +427,42 @@ The plugin system will be configured within an app's app.toml file. ```toml [plugins] on = false # turn the plugin system, as a whole, on or off - disabled = ["list", "of", "plugin", "names", "to", "disable"] + enabled = ["list", "of", "plugin", "names", "to", "enable"] dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins" ``` -There will be three parameters for configuring the plugin system: `plugins.on`, `plugins.disabled` and `plugins.dir`. +There will be three parameters for configuring the plugin system: `plugins.on`, `plugins.enabled` and `plugins.dir`. `plugins.on` is a bool that turns on or off the plugin system at large, `plugins.dir` directs the system to a directory -to load plugins from, and `plugins.disabled` is a list of names for the plugins we want to disable (useful for disabling preloaded plugins). +to load plugins from, and `plugins.enabled` provides `opt-in` semantics to plugin names to enable (including preloaded plugins). Configuration of a given plugin is ultimately specific to the plugin, but we will introduce some standards here: Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`). -For streaming plugins a parameter `plugins.streaming.global_ack_wait_limit` is used to configure the maximum amount of time -the BaseApp will wait for positive acknowledgement of receipt by the external streaming services before it considers -the message relay to be a failure. Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`). It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys -(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.ack`) -that signifies whether the service operates in a fire-and-forget capacity or the BaseApp should require positive -acknowledgement of message receipt by the service. In the case of "ack" mode, the service may also need to be -configured with an acknowledgement wait limit specific to that individual service (e.g. `plugins.streaming.kafka.ack_wait_limit`). -The file `StreamingService` does not have an individual `ack_wait_limit` since it operates synchronously with the App. +(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. 
`plugins.streaming.file.halt_app_on_delivery_error`)
+that signifies whether the service operates in a fire-and-forget capacity, or whether the BaseApp should be halted when an error occurs in
+any of `ListenBeginBlock`, `ListenEndBlock` and `ListenDeliverTx`.
 
 e.g.
 
 ```toml
 [plugins]
     on = false # turn the plugin system, as a whole, on or off
-    disabled = ["list", "of", "plugin", "names", "to", "disable"]
+    enabled = ["list", "of", "plugin", "names", "to", "enable"]
    dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins"
    [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their plugin name
-        # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
-        # in milliseconds
-        global_ack_wait_limit = 500
        [plugins.streaming.file] # the specific parameters for the file streaming service plugin
            keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"]
            write_dir = "path to the write directory"
            prefix = "optional prefix to prepend to the generated file names"
-            ack = "false" # false == fire-and-forget; true == sends a message receipt success/fail signal
+            halt_app_on_delivery_error = false # false == fire-and-forget; true == halt the application
        [plugins.streaming.kafka]
-            ...
-    [plugins.modules]
+            keys = []
+            topic_prefix = "block" # Optional prefix for topic names where data will be stored.
+            flush_timeout_ms = 5000 # Flush and wait for outstanding messages and requests to complete delivery when calling `StreamingService.Close()`. (milliseconds)
+            halt_app_on_delivery_error = true # Whether or not to halt the application when plugin fails to deliver message(s).
            ...
 ```
diff --git a/plugin/example_config.toml b/plugin/example_config.toml
index eaffc06dcf48..8d36d8efb76a 100644
--- a/plugin/example_config.toml
+++ b/plugin/example_config.toml
@@ -16,10 +16,6 @@ dir = ""
 # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
 [plugins.streaming]
 
-# maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
-# in milliseconds
-global_ack_wait_limit = 2000
-
 ###############################################################################
 ###                        File plugin configuration                        ###
 ###############################################################################
@@ -37,9 +33,8 @@ write_dir = ""
 # Optional prefix to prepend to the generated file names
 prefix = ""
 
-# whether to operate in fire-and-forget or success/failure acknowledgement mode
-# false == fire-and-forget; true == sends a message receipt success/fail signal
-ack = "true"
+# Whether or not to halt the application when plugin fails to deliver message(s).
+halt_app_on_delivery_error = true
 
 ###############################################################################
 ###                        Trace Plugin configuration                       ###
@@ -54,9 +49,8 @@ keys = []
 # In addition to block event info, print the data to stdout as well.
 print_data_to_stdout = false
 
-# whether to operate in fire-and-forget or success/failure acknowledgement mode
-# false == fire-and-forget; true == sends a message receipt success/fail signal
-ack = "true"
+# Whether or not to halt the application when plugin fails to deliver message(s).
+halt_app_on_delivery_error = true ############################################################################### ### Kafka Plugin configuration ### @@ -74,14 +68,8 @@ topic_prefix = "block" # Flush and wait for outstanding messages and requests to complete delivery. (milliseconds) flush_timeout_ms = 1500 -# whether to operate in fire-and-forget or success/failure acknowledgement mode -# false == fire-and-forget; true == sends a message receipt success/fail signal -ack = "true" - -# The amount of time to wait for acknowledgment of success/failure of message -# delivery of the current block before considering the delivery of messages failed. (In -# milliseconds) -delivered_block_wait_limit = 1000 +# Whether or not to halt the application when plugin fails to deliver message(s). +halt_app_on_delivery_error = true # Producer configuration properties. # The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka. diff --git a/plugin/plugins/file/file.go b/plugin/plugins/file/file.go index c898e8e0029b..db754ff7568f 100644 --- a/plugin/plugins/file/file.go +++ b/plugin/plugins/file/file.go @@ -35,8 +35,8 @@ const ( // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service KEYS_PARAM = "keys" - // ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode - ACK_MODE = "ack" + // HALT_APP_ON_DELIVERY_ERROR whether or not to halt the application when plugin fails to deliver message(s) + HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error" ) const minWaitDuration = time.Millisecond * 10 @@ -91,9 +91,9 @@ func (ssp *streamingServicePlugin) Register(bApp *baseapp.BaseApp, marshaller co exposeStoreKeys = append(exposeStoreKeys, storeKey) } } - ack := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE))) + haltAppOnDeliveryError := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR))) var err error - ssp.fss, err = service.NewFileStreamingService(fileDir, filePrefix, exposeStoreKeys, marshaller, ack) + ssp.fss, err = service.NewFileStreamingService(fileDir, filePrefix, exposeStoreKeys, marshaller, haltAppOnDeliveryError) if err != nil { return err } diff --git a/plugin/plugins/file/service/service.go b/plugin/plugins/file/service/service.go index b417188279ae..a09feaed1d96 100644 --- a/plugin/plugins/file/service/service.go +++ b/plugin/plugins/file/service/service.go @@ -38,20 +38,17 @@ var _ baseapp.StreamingService = (*FileStreamingService)(nil) // FileStreamingService is a concrete implementation of streaming.Service that writes state changes out to files type FileStreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to - filePrefix string // optional prefix for each of the generated files - writeDir string // directory to write files into - codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel used for synchronize closure - - ack bool // true == fire-and-forget; false == sends success/failure signal - ackStatus 
bool // success/failure status, to be sent to ackChan - ackChan chan bool // the channel used to send the success/failure signal + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + filePrefix string // optional prefix for each of the generated files + writeDir string // directory to write files into + codec codec.BinaryCodec // marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + haltAppOnDeliveryError bool // true if the app should be halted on streaming errors, false otherwise } // IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener @@ -74,8 +71,13 @@ func (iw *IntermediateWriter) Write(b []byte) (int, error) { } // NewFileStreamingService creates a new FileStreamingService for the provided writeDir, (optional) filePrefix, and storeKeys -func NewFileStreamingService(writeDir, filePrefix string, storeKeys []types.StoreKey, c codec.BinaryCodec, - ack bool) (*FileStreamingService, error) { +func NewFileStreamingService( + writeDir, + filePrefix string, + storeKeys []types.StoreKey, + c codec.BinaryCodec, + haltAppOnDeliveryError bool, +) (*FileStreamingService, error) { listenChan := make(chan []byte) iw := NewIntermediateWriter(listenChan) listener := types.NewStoreKVPairWriteListener(iw, c) @@ -90,15 +92,14 @@ func NewFileStreamingService(writeDir, filePrefix string, storeKeys []types.Stor return nil, err } return &FileStreamingService{ - listeners: listeners, - srcChan: listenChan, - filePrefix: filePrefix, - writeDir: writeDir, - codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), - ack: ack, - ackChan: make(chan bool, 1), + listeners: listeners, + srcChan: listenChan, + filePrefix: filePrefix, + writeDir: writeDir, + codec: c, + stateCache: make([][]byte, 0), + stateCacheLock: new(sync.Mutex), + haltAppOnDeliveryError: haltAppOnDeliveryError, }, nil } @@ -111,22 +112,17 @@ func (fss *FileStreamingService) Listeners() map[types.StoreKey][]types.WriteLis // It writes out the received BeginBlock request and response and the resulting state changes out to a file as described // in the above the naming schema func (fss *FileStreamingService) ListenBeginBlock(ctx sdk.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error { - // reset the ack status - fss.ackStatus = true // generate the new file dstFile, err := fss.openBeginBlockFile(req) if err != nil { - fss.ackStatus = false return err } // write req to file lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) if err != nil { - fss.ackStatus = false return err } if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - fss.ackStatus = false return err } // write all state changes cached for this stage to file @@ -135,7 +131,6 @@ func (fss *FileStreamingService) ListenBeginBlock(ctx sdk.Context, req abci.Requ if _, err = dstFile.Write(stateChange); err != nil { fss.stateCache = nil fss.stateCacheLock.Unlock() - fss.ackStatus = false return err } } @@ -145,16 +140,13 @@ func (fss 
*FileStreamingService) ListenBeginBlock(ctx sdk.Context, req abci.Requ // write res to file lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) if err != nil { - fss.ackStatus = false return err } if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - fss.ackStatus = false return err } // close file if err := dstFile.Close(); err != nil { - fss.ackStatus = false return err } return nil @@ -177,17 +169,14 @@ func (fss *FileStreamingService) ListenDeliverTx(ctx sdk.Context, req abci.Reque // generate the new file dstFile, err := fss.openDeliverTxFile() if err != nil { - fss.ackStatus = false return err } // write req to file lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) if err != nil { - fss.ackStatus = false return err } if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - fss.ackStatus = false return err } // write all state changes cached for this stage to file @@ -196,7 +185,6 @@ func (fss *FileStreamingService) ListenDeliverTx(ctx sdk.Context, req abci.Reque if _, err = dstFile.Write(stateChange); err != nil { fss.stateCache = nil fss.stateCacheLock.Unlock() - fss.ackStatus = false return err } } @@ -206,16 +194,13 @@ func (fss *FileStreamingService) ListenDeliverTx(ctx sdk.Context, req abci.Reque // write res to file lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) if err != nil { - fss.ackStatus = false return err } if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - fss.ackStatus = false return err } // close file if err := dstFile.Close(); err != nil { - fss.ackStatus = false return err } return nil @@ -237,17 +222,14 @@ func (fss *FileStreamingService) ListenEndBlock(ctx sdk.Context, req abci.Reques // generate the new file dstFile, err := fss.openEndBlockFile() if err != nil { - fss.ackStatus = false return err } // write req to file lengthPrefixedReqBytes, err := fss.codec.MarshalLengthPrefixed(&req) if err != nil { - fss.ackStatus = false return err } if _, err = dstFile.Write(lengthPrefixedReqBytes); err != nil { - fss.ackStatus = false return err } // write all state changes cached for this stage to file @@ -256,7 +238,6 @@ func (fss *FileStreamingService) ListenEndBlock(ctx sdk.Context, req abci.Reques if _, err = dstFile.Write(stateChange); err != nil { fss.stateCache = nil fss.stateCacheLock.Unlock() - fss.ackStatus = false return err } } @@ -266,16 +247,13 @@ func (fss *FileStreamingService) ListenEndBlock(ctx sdk.Context, req abci.Reques // write res to file lengthPrefixedResBytes, err := fss.codec.MarshalLengthPrefixed(&res) if err != nil { - fss.ackStatus = false return err } if _, err = dstFile.Write(lengthPrefixedResBytes); err != nil { - fss.ackStatus = false return err } // close file if err := dstFile.Close(); err != nil { - fss.ackStatus = false return err } return nil @@ -320,32 +298,10 @@ func (fss *FileStreamingService) Close() error { return nil } -// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service -// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt -func (fss *FileStreamingService) ListenSuccess() <-chan bool { - // if we are operating in fire-and-forget mode, immediately send a "success" signal - if !fss.ack { - go func() { - fss.ackChan <- true - }() - } else { - go func() { - // the FileStreamingService operating synchronously, but this will signify whether an error occurred - // during it's processing cycle - fss.ackChan <- 
fss.ackStatus
-		}()
-	}
-	return fss.ackChan
-}
-
-// SetAckMode is used to set the ack mode for testing purposes
-func (fss *FileStreamingService) SetAckMode(on bool) {
-	fss.ack = on
-}
-
-// SetAckStatus is used to set the ack status for testing purposes
-func (fss *FileStreamingService) SetAckStatus(status bool) {
-	fss.ackStatus = status
+// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
+// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+func (fss *FileStreamingService) HaltAppOnDeliveryError() bool {
+	return fss.haltAppOnDeliveryError
 }
 
 // isDirWriteable checks if dir is writable by writing and removing a file
diff --git a/plugin/plugins/file/service/service_test.go b/plugin/plugins/file/service/service_test.go
index c7deab81791f..81b081a45354 100644
--- a/plugin/plugins/file/service/service_test.go
+++ b/plugin/plugins/file/service/service_test.go
@@ -133,7 +133,7 @@ func TestFileStreamingService(t *testing.T) {
 	defer os.RemoveAll(testDir)
 
 	testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2}
-	testStreamingService, err = NewFileStreamingService(testDir, testPrefix, testKeys, testMarshaller, false)
+	testStreamingService, err = NewFileStreamingService(testDir, testPrefix, testKeys, testMarshaller, true)
 	require.Nil(t, err)
 	require.IsType(t, &FileStreamingService{}, testStreamingService)
 	require.Equal(t, testPrefix, testStreamingService.filePrefix)
@@ -147,26 +147,6 @@ func TestFileStreamingService(t *testing.T) {
 	testListenDeliverTx1(t)
 	testListenDeliverTx2(t)
 	testListenEndBlock(t)
-
-	// status is success but not operating in ack mode
-	success := <-testStreamingService.ListenSuccess()
-	require.Equal(t, success, true)
-
-	// status is failure but not operating in ack mode
-	testStreamingService.SetAckStatus(false)
-	success = <-testStreamingService.ListenSuccess()
-	require.Equal(t, success, true)
-
-	// status is failure and operating in ack mode
-	testStreamingService.SetAckMode(true)
-	success = <-testStreamingService.ListenSuccess()
-	require.Equal(t, success, false)
-
-	// status is success and operating in ack mode
-	testStreamingService.SetAckStatus(true)
-	success = <-testStreamingService.ListenSuccess()
-	require.Equal(t, success, true)
-
 	testStreamingService.Close()
 	wg.Wait()
 }
diff --git a/plugin/plugins/kafka/README.md b/plugin/plugins/kafka/README.md
index 017cb96b5e02..f88d3426ff8b 100644
--- a/plugin/plugins/kafka/README.md
+++ b/plugin/plugins/kafka/README.md
@@ -76,14 +76,8 @@ The plugin has been hooked up to run with `test-sim-nondeterminism` task. For a
     # Flush and wait for outstanding messages and requests to complete delivery. (milliseconds)
     flush_timeout_ms = 1500
 
-    # whether to operate in fire-and-forget or success/failure acknowledgement mode
-    # false == fire-and-forget; true == sends a message receipt success/fail signal
-    ack = "false"
-
-    # The amount of time to wait for acknowledgment of success/failure of message
-    # delivery of the current block before considering the delivery of messages failed. (In
-    # milliseconds)
-    delivered_block_wait_limit = 1000
+    # Whether or not to halt the application when plugin fails to deliver message(s).
+    halt_app_on_delivery_error = true
 
     # Producer configuration properties.
     # The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka.
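Both hunks above replace the ack/wait-limit knobs with the single `halt_app_on_delivery_error` key. As a reference for plugin authors, here is a minimal sketch of the corresponding lookup; the plugin name `myplugin` is hypothetical, while the `plugin` constants, `cast`, and the `AppOptions` interface are the helpers this series already uses.

```go
package myplugin

import (
	"fmt"

	"github.com/cosmos/cosmos-sdk/plugin"
	servertypes "github.com/cosmos/cosmos-sdk/server/types"
	"github.com/spf13/cast"
)

// HALT_APP_ON_DELIVERY_ERROR mirrors the key name introduced in this patch.
const HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error"

// readHaltFlag resolves plugins.streaming.myplugin.halt_app_on_delivery_error
// from app.toml. cast.ToBool returns false for a missing key, so a plugin
// that omits the key defaults to fire-and-forget semantics.
func readHaltFlag(opts servertypes.AppOptions) bool {
	tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, "myplugin")
	return cast.ToBool(opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR)))
}
```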
diff --git a/plugin/plugins/kafka/kafka.go b/plugin/plugins/kafka/kafka.go index a42554b9eca3..a3fd92c66e41 100644 --- a/plugin/plugins/kafka/kafka.go +++ b/plugin/plugins/kafka/kafka.go @@ -40,12 +40,8 @@ const ( // KEYS_PARAM is a list of the StoreKeys we want to expose for this streaming service KEYS_PARAM = "keys" - // ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode - ACK_MODE = "ack" - - // DELIVERED_BLOCK_WAIT_LIMIT the amount of time to wait for acknowledgment of success/failure of - // message delivery of the current block before considering the delivery of messages failed. - DELIVERED_BLOCK_WAIT_LIMIT = "delivered_block_wait_limit" + // HALT_APP_ON_DELIVERY_ERROR whether or not to halt the application when plugin fails to deliver message(s) + HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error" ) // Plugins is the exported symbol for loading this plugin @@ -86,8 +82,7 @@ func (ssp *streamingServicePlugin) Register( tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME) topicPrefix := cast.ToString(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, TOPIC_PREFIX_PARAM))) flushTimeoutMs := cast.ToInt(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, FLUSH_TIMEOUT_MS_PARAM))) - ack := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE))) - deliveredBlockWaitLimit := cast.ToDuration(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, DELIVERED_BLOCK_WAIT_LIMIT))) + haltAppOnDeliveryError := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR))) producerConfig := cast.ToStringMap(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM))) // get the store keys allowed to be exposed for this streaming service exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, KEYS_PARAM))) @@ -139,14 +134,12 @@ func (ssp *streamingServicePlugin) Register( var err error ssp.kss, err = service.NewKafkaStreamingService( - bApp.Logger(), producerConfigMap, topicPrefix, flushTimeoutMs, exposeStoreKeys, marshaller, - ack, - deliveredBlockWaitLimit, + haltAppOnDeliveryError, ) if err != nil { return err diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 48da13f37771..a84475f634fa 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -5,16 +5,12 @@ import ( "fmt" "github.com/confluentinc/confluent-kafka-go/kafka" "github.com/cosmos/cosmos-sdk/baseapp" - "github.com/gogo/protobuf/proto" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/log" - "strings" - "sync" - "time" - "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/gogo/protobuf/proto" + abci "github.com/tendermint/tendermint/abci/types" + "sync" ) /* @@ -73,22 +69,19 @@ var _ baseapp.StreamingService = (*KafkaStreamingService)(nil) // KafkaStreamingService is a concrete implementation of streaming.Service that writes state changes out to Kafka type KafkaStreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to - topicPrefix string // topicPrefix prefix name - producer *kafka.Producer // the producer instance that will be used to send 
messages to Kafka - flushTimeoutMs int // the time to wait for outstanding messages and requests to complete delivery (milliseconds) - codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel used for synchronize closure - ack bool // true == fire-and-forget; false == sends success/failure signal - ackStatus bool // success/failure status to be sent to ackChan - ackChan chan bool // channel used to send a success/failure signal - deliveredBlockChan chan struct{} // channel used for signaling the delivery of all messages for the current block to Kafka. - deliveredBlockWaitLimit time.Duration // the time to wait for Kafka service to deliver current block messages before timing out. + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + topicPrefix string // topicPrefix prefix name + producer *kafka.Producer // the producer instance that will be used to send messages to Kafka + flushTimeoutMs int // the time to wait for outstanding messages and requests to complete delivery (milliseconds) + codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + deliveryChan chan kafka.Event // Kafka producer delivery report channel + haltAppOnDeliveryError bool // true if the app should be halted on streaming errors, false otherwise } // IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener @@ -112,14 +105,12 @@ func (iw *IntermediateWriter) Write(b []byte) (int, error) { // NewKafkaStreamingService creates a new KafkaStreamingService func NewKafkaStreamingService( - logger log.Logger, - producerConfig kafka.ConfigMap, - topicPrefix string, - flushTimeoutMs int, - storeKeys []types.StoreKey, - c codec.BinaryCodec, - ack bool, - deliveredBlockWaitLimit time.Duration, + producerConfig kafka.ConfigMap, + topicPrefix string, + flushTimeoutMs int, + storeKeys []types.StoreKey, + c codec.BinaryCodec, + haltAppOnDeliveryError bool, ) (*KafkaStreamingService, error) { listenChan := make(chan []byte) iw := NewIntermediateWriter(listenChan) @@ -135,49 +126,19 @@ func NewKafkaStreamingService( return nil, err } - logger.Debug("Created Producer: ", "producer", p) - kss := &KafkaStreamingService{ - listeners: listeners, - srcChan: listenChan, - topicPrefix: topicPrefix, - producer: p, - flushTimeoutMs: flushTimeoutMs, - codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), - ack: ack, - ackChan: make (chan bool, 1), - deliveredBlockWaitLimit: deliveredBlockWaitLimit, + listeners: listeners, + srcChan: listenChan, + topicPrefix: topicPrefix, + producer: p, + flushTimeoutMs: flushTimeoutMs, + codec: c, + 
stateCache:             make([][]byte, 0),
+		stateCacheLock:         new(sync.Mutex),
+		deliveryChan:           make(chan kafka.Event),
+		haltAppOnDeliveryError: haltAppOnDeliveryError,
 	}
 
-	var endBlockResTopic string
-	if len(kss.topicPrefix) > 0 {
-		endBlockResTopic = fmt.Sprintf("%s-%s", kss.topicPrefix, EndBlockResTopic)
-	}
-	go func() {
-		for e := range p.Events() {
-			switch ev := e.(type) {
-			case *kafka.Message:
-				pTopic := ev.TopicPartition.Topic
-				partition := ev.TopicPartition.Partition
-				//offset := ev.TopicPartition.Offset
-				key := string(ev.Key)
-				if err := ev.TopicPartition.Error; err != nil {
-					logger.Error("Delivery failed: ", "topic", pTopic, "partition", partition, "key", key, "err", err)
-					kss.ackStatus = false
-				} else {
-					//logger.Debug("Delivered message:", "topic", pTopic, "partition", partition, "offset", offset, "key", key)
-					// signal delivery of the block's messages
-					if strings.Compare(endBlockResTopic, *pTopic) == 0 {
-						logger.Debug("====== EndBlock Delivered ======")
-						close(kss.deliveredBlockChan)
-					}
-				}
-			}
-		}
-	}()
-
 	return kss, nil
 }
@@ -221,8 +182,6 @@ func (kss *KafkaStreamingService) ListenBeginBlock(
 func (kss *KafkaStreamingService) setBeginBlock(req abci.RequestBeginBlock) {
 	kss.currentBlockNumber = req.GetHeader().Height
 	kss.currentTxIndex = 0
-	kss.ackStatus = true
-	kss.deliveredBlockChan = make(chan struct{})
 }
 
 // ListenDeliverTx satisfies the Hook interface
@@ -292,48 +251,10 @@ func (kss *KafkaStreamingService) ListenEndBlock(
 	return nil
 }
 
-// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external service
-// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt
-func (kss *KafkaStreamingService) ListenSuccess() <-chan bool {
-	// if we are operating in fire-and-forget mode, immediately send a "success" signal
-	if !kss.ack {
-		go func() {
-			fmt.Printf("%s", "\r------ inside listenSuccess ------\n")
-			kss.ackChan <- true
-		}()
-	} else {
-		go func() {
-			// Synchronize the work deliver of all block's messages.
-			// Force call to ListenSuccess() from within app.Commit()
-			// to wait {n} milliseconds before failing.
-			var deliveredBlock = false
-			maxWait := time.NewTicker(kss.deliveredBlockWaitLimit * time.Millisecond)
-			defer maxWait.Stop()
-		loop:
-			for {
-				// No reason to wait for block data to finish writing
-				// if any of the block's messages failed to be delivered
-				if !kss.ackStatus {
-					break loop
-				}
-				select {
-				case <-kss.deliveredBlockChan:
-					deliveredBlock = true
-					break loop
-				case <-maxWait.C:
-					break loop
-				}
-			}
-
-			if deliveredBlock == false {
-				kss.ackChan <- false
-			} else {
-				kss.ackChan <- kss.ackStatus
-			}
-		}()
-	}
-
-	return kss.ackChan
+// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
+// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+func (kss *KafkaStreamingService) HaltAppOnDeliveryError() bool { + return kss.haltAppOnDeliveryError } // Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received @@ -363,8 +284,9 @@ func (kss *KafkaStreamingService) Stream(wg *sync.WaitGroup) error { // Close satisfies the io.Closer interface func (kss *KafkaStreamingService) Close() error { - close(kss.quitChan) kss.producer.Flush(kss.flushTimeoutMs) + close(kss.quitChan) + close(kss.deliveryChan) kss.producer.Close() return nil } @@ -377,11 +299,9 @@ func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event string for i, stateChange := range kss.stateCache { key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, StateChangeEventType, i+1) if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { - kss.ackStatus = false return err } if err := kss.writeAsJsonToKafka(ctx, StateChangeTopic, key, kvPair); err != nil { - kss.ackStatus = false return err } } @@ -393,20 +313,49 @@ func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event string return nil } -func (kss *KafkaStreamingService) writeAsJsonToKafka(ctx sdk.Context, topic string, key string, data proto.Message) error { +func (kss *KafkaStreamingService) writeAsJsonToKafka( + ctx sdk.Context, + topic string, + key string, + data proto.Message, +) error { kodec := kss.codec.(*codec.ProtoCodec) json, err := kodec.MarshalJSON(data) if err != nil { - kss.ackStatus = false return err } if len(kss.topicPrefix) > 0 { topic = fmt.Sprintf("%s-%s", kss.topicPrefix, topic) } - kss.producer.ProduceChannel() <- &kafka.Message{ + + // produce message + if err := kss.producer.Produce(&kafka.Message{ TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, Value: json, Key: []byte(key), + }, kss.deliveryChan); err != nil { + return err + } + + return kss.checkDeliveryReport(ctx) +} + +// checkDeliveryReport checks kafka.Producer delivery report for successful or failed messages +func (kss *KafkaStreamingService) checkDeliveryReport(ctx sdk.Context) error { + e := <-kss.deliveryChan + m := e.(*kafka.Message) + topic := *m.TopicPartition.Topic + partition := m.TopicPartition.Partition + offset := m.TopicPartition.Offset + key := string(m.Key) + topicErr := m.TopicPartition.Error + logger := ctx.Logger() + + if topicErr != nil { + logger.Error("Delivery failed: ", "topic", topic, "partition", partition, "key", key, "err", topicErr) + return topicErr + } else { + logger.Debug("Delivered message:", "topic", topic, "partition", partition, "offset", offset, "key", key) } return nil diff --git a/plugin/plugins/kafka/service/service_test.go b/plugin/plugins/kafka/service/service_test.go index 012959a0698f..2305c9bc0c90 100644 --- a/plugin/plugins/kafka/service/service_test.go +++ b/plugin/plugins/kafka/service/service_test.go @@ -27,8 +27,7 @@ var ( testMarshaller = codec.NewProtoCodec(interfaceRegistry) testStreamingService *KafkaStreamingService testListener1, testListener2 types.WriteListener - emptyContext = sdk.Context{} - logger log.Logger + testingCtx sdk.Context // test abci message types mockHash = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9} @@ -126,9 +125,6 @@ var ( mockValue2 = []byte{4, 3, 2} mockKey3 = []byte{3, 4, 5} mockValue3 = []byte{5, 4, 3} - - // false == fire-and-forget; true == sends a message receipt success/fail signal - ack = false ) func TestIntermediateWriter(t *testing.T) { @@ -152,9 +148,9 @@ func 
TestIntermediateWriter(t *testing.T) { // change this to write to in-memory io.Writer (e.g. bytes.Buffer) func TestKafkaStreamingService(t *testing.T) { + testingCtx = sdk.NewContext(nil, types1.Header{}, false, log.TestingLogger()) testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2} - logger = log.TestingLogger() - kss, err := NewKafkaStreamingService(logger, producerConfig, topicPrefix, flushTimeoutMs, testKeys, testMarshaller, ack) + kss, err := NewKafkaStreamingService(producerConfig, topicPrefix, flushTimeoutMs, testKeys, testMarshaller, true) testStreamingService = kss require.Nil(t, err) require.IsType(t, &KafkaStreamingService{}, testStreamingService) @@ -209,7 +205,7 @@ func testListenBeginBlock(t *testing.T) { require.Nil(t, err) // send the ABCI messages - err = testStreamingService.ListenBeginBlock(emptyContext, testBeginBlockReq, testBeginBlockRes) + err = testStreamingService.ListenBeginBlock(testingCtx, testBeginBlockReq, testBeginBlockRes) require.Nil(t, err) // consume stored messages @@ -260,7 +256,7 @@ func testListenDeliverTx1(t *testing.T) { require.Nil(t, err) // send the ABCI messages - err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq1, testDeliverTxRes1) + err = testStreamingService.ListenDeliverTx(testingCtx, testDeliverTxReq1, testDeliverTxRes1) require.Nil(t, err) // consume stored messages @@ -311,7 +307,7 @@ func testListenDeliverTx2(t *testing.T) { require.Nil(t, err) // send the ABCI messages - err = testStreamingService.ListenDeliverTx(emptyContext, testDeliverTxReq2, testDeliverTxRes2) + err = testStreamingService.ListenDeliverTx(testingCtx, testDeliverTxReq2, testDeliverTxRes2) require.Nil(t, err) // consume stored messages @@ -362,7 +358,7 @@ func testListenEndBlock(t *testing.T) { require.Nil(t, err) // send the ABCI messages - err = testStreamingService.ListenEndBlock(emptyContext, testEndBlockReq, testEndBlockRes) + err = testStreamingService.ListenEndBlock(testingCtx, testEndBlockReq, testEndBlockRes) require.Nil(t, err) // consume stored messages diff --git a/plugin/plugins/trace/README.md b/plugin/plugins/trace/README.md index 6b98a83d3f42..88ebd650fe38 100644 --- a/plugin/plugins/trace/README.md +++ b/plugin/plugins/trace/README.md @@ -50,10 +50,9 @@ The plugin is setup to run as the `default` plugin. See `./plugin/loader/preload # In addition to block event info, print the data to stdout as well. print_data_to_stdout = false - - # whether to operate in fire-and-forget or success/failure acknowledgement mode - # false == fire-and-forget; true == sends a message receipt success/fail signal - ack = "false" + + # Whether or not to halt the application when plugin fails to deliver message(s). + halt_app_on_delivery_error = true ``` 2. Run `make test-sim-nondeterminism` and wait for the tests to finish. diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go index 32798443c330..caf60d5ca173 100644 --- a/plugin/plugins/trace/service/service.go +++ b/plugin/plugins/trace/service/service.go @@ -38,18 +38,16 @@ const ( // TraceStreamingService is a concrete implementation of streaming.Service that writes state changes to log file. 
type TraceStreamingService struct { - listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp - srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to - codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files - stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received - stateCacheLock *sync.Mutex // mutex for the state cache - currentBlockNumber int64 // the current block number - currentTxIndex int64 // the index of the current tx - quitChan chan struct{} // channel used for synchronize closure - printDataToStdout bool // Print types.StoreKVPair data stored in each event to stdout. - ack bool // true == fire-and-forget; false == sends success/failure signal - ackStatus bool // success/failure status to be sent to ackChan - ackChan chan bool // channel used to send a success/failure signal + listeners map[types.StoreKey][]types.WriteListener // the listeners that will be initialized with BaseApp + srcChan <-chan []byte // the channel that all of the WriteListeners write their data out to + codec codec.BinaryCodec // binary marshaller used for re-marshalling the ABCI messages to write them out to the destination files + stateCache [][]byte // cache the protobuf binary encoded StoreKVPairs in the order they are received + stateCacheLock *sync.Mutex // mutex for the state cache + currentBlockNumber int64 // the current block number + currentTxIndex int64 // the index of the current tx + quitChan chan struct{} // channel used for synchronize closure + printDataToStdout bool // Print types.StoreKVPair data stored in each event to stdout. + haltAppOnDeliveryError bool // true if the app should be halted on streaming errors, false otherwise } // IntermediateWriter is used so that we do not need to update the underlying io.Writer inside the StoreKVPairWriteListener @@ -74,10 +72,10 @@ func (iw *IntermediateWriter) Write(b []byte) (int, error) { // NewTraceStreamingService creates a new TraceStreamingService for the provided // storeKeys, BinaryCodec and deliverBlockWaitLimit (in milliseconds) func NewTraceStreamingService( - storeKeys []types.StoreKey, - c codec.BinaryCodec, - printDataToStdout bool, - ack bool, + storeKeys []types.StoreKey, + c codec.BinaryCodec, + printDataToStdout bool, + haltAppOnDeliveryError bool, ) (*TraceStreamingService, error) { listenChan := make(chan []byte) iw := NewIntermediateWriter(listenChan) @@ -89,14 +87,13 @@ func NewTraceStreamingService( } tss := &TraceStreamingService{ - listeners: listeners, - srcChan: listenChan, - codec: c, - stateCache: make([][]byte, 0), - stateCacheLock: new(sync.Mutex), - printDataToStdout: printDataToStdout, - ack: ack, - ackChan: make(chan bool, 1), + listeners: listeners, + srcChan: listenChan, + codec: c, + stateCache: make([][]byte, 0), + stateCacheLock: new(sync.Mutex), + printDataToStdout: printDataToStdout, + haltAppOnDeliveryError: haltAppOnDeliveryError, } return tss, nil @@ -142,7 +139,6 @@ func (tss *TraceStreamingService) setBeginBlock(req abci.RequestBeginBlock) { tss.currentBlockNumber = req.GetHeader().Height // reset on new block tss.currentTxIndex = 0 - tss.ackStatus = true } // ListenDeliverTx satisfies the Hook interface @@ -212,23 +208,10 @@ func (tss *TraceStreamingService) ListenEndBlock( return nil } -// ListenSuccess returns a chan that is used to acknowledge successful receipt of messages by the external 
service
-// after some configurable delay, `false` is sent to this channel from the service to signify failure of receipt.
-// For fire-and-forget model, set the chan to always be `true`:
-func (tss *TraceStreamingService) ListenSuccess() <-chan bool {
-	// if we are operating in fire-and-forget mode, immediately send a "success" signal
-	if !tss.ack {
-		go func() {
-			tss.ackChan <- true
-		}()
-	} else {
-		go func() {
-			// the TraceStreamingService operating synchronously, but this will signify whether an error occurred
-			// during it's processing cycle
-			tss.ackChan <- tss.ackStatus
-		}()
-	}
-	return tss.ackChan
+// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
+// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+func (tss *TraceStreamingService) HaltAppOnDeliveryError() bool {
+	return tss.haltAppOnDeliveryError
 }
 
 // Stream spins up a goroutine select loop which awaits length-prefixed binary encoded KV pairs and caches them in the order they were received
@@ -270,11 +253,9 @@ func (tss *TraceStreamingService) writeStateChange(ctx sdk.Context, event string
 	for i, stateChange := range tss.stateCache {
 		key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, event, eventId, StateChangeEventType, i+1)
 		if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil {
-			tss.ackStatus = false
 			return err
 		}
 		if err := tss.writeEventReqRes(ctx, key, kvPair); err != nil {
-			tss.ackStatus = false
 			return err
 		}
 	}
diff --git a/plugin/plugins/trace/service/service_test.go b/plugin/plugins/trace/service/service_test.go
index 86f6e31881e2..6f8243557f06 100644
--- a/plugin/plugins/trace/service/service_test.go
+++ b/plugin/plugins/trace/service/service_test.go
@@ -95,13 +95,6 @@ var (
 	mockValue2 = []byte{4, 3, 2}
 	mockKey3   = []byte{3, 4, 5}
 	mockValue3 = []byte{5, 4, 3}
-
-	// print event data in stdout
-	printDataToStdout = true
-
-	// false == fire-and-forget; true == sends a message receipt success/fail signal
-	ack = false
-
 )
 
 func TestIntermediateWriter(t *testing.T) {
@@ -126,7 +119,7 @@ func TestIntermediateWriter(t *testing.T) {
 func TestKafkaStreamingService(t *testing.T) {
 	loggerContext = emptyContext.WithLogger(log.TestingLogger())
 	testKeys := []types.StoreKey{mockStoreKey1, mockStoreKey2}
-	tss, err := NewTraceStreamingService(testKeys, testMarshaller, printDataToStdout, ack)
+	tss, err := NewTraceStreamingService(testKeys, testMarshaller, true, false)
 	testStreamingService = tss
 	require.Nil(t, err)
 	require.IsType(t, &TraceStreamingService{}, testStreamingService)
diff --git a/simapp/app.go b/simapp/app.go
index c29f7ce0cdce..6d6f13cf0233 100644
--- a/simapp/app.go
+++ b/simapp/app.go
@@ -236,14 +236,8 @@ func NewSimApp(
 
 	pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY)
 	if cast.ToBool(appOpts.Get(pluginsOnKey)) {
-		// set the global wait limit for state streaming plugin message receipt acknowledgement
-		globalWaitLimitKey := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, plugin.GLOBAL_ACK_WAIT_LIMIT_TOML_KEY)
-		globalWaitLimit := cast.ToDuration(appOpts.Get(globalWaitLimitKey))
-		if globalWaitLimit > 0 {
-			bApp.SetGlobalWaitLimit(globalWaitLimit)
-		}
-
 		// this loads the preloaded and any plugins found in `plugins.dir`
+		// if their names match those in the `plugins.enabled` list.
pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
 		if err != nil {
 			tmos.Exit(err.Error())

From 7fd78064f03e59297ae2f6596199c4f5dfbe975b Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Fri, 11 Feb 2022 13:37:02 -0600
Subject: [PATCH 21/43] formatting

---
 docs/architecture/adr-038-state-listening.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md
index ab4e308cb0ac..dfe188f89e76 100644
--- a/docs/architecture/adr-038-state-listening.md
+++ b/docs/architecture/adr-038-state-listening.md
@@ -287,7 +287,7 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
 	// Call the streaming service hooks with the EndBlock messages
 	for _, listener := range app.abciListeners {
 		if err := listener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
-		    app.logger.Error("ListenEndBlock listening hook failed", "err", err)
+			app.logger.Error("ListenEndBlock listening hook failed", "err", err)
 			if listener.HaltAppOnDeliveryError() {
 				app.halt()
 			}

From 2680478936d78ad8d7f6f6cf15e4b831adc5d717 Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Fri, 18 Feb 2022 11:30:54 -0600
Subject: [PATCH 22/43] concurrent listener calls

---
 baseapp/abci.go | 83 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 66 insertions(+), 17 deletions(-)

diff --git a/baseapp/abci.go b/baseapp/abci.go
index b794ff890276..dd72db7ae26f 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -7,6 +7,7 @@ import (
 	"os"
 	"sort"
 	"strings"
+	"sync"
 	"syscall"
 
 	"github.com/gogo/protobuf/proto"
@@ -190,13 +191,29 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
 	app.voteInfos = req.LastCommitInfo.GetVotes()
 
 	// call the hooks with the BeginBlock messages
+	wg := new(sync.WaitGroup)
+	var halt = false
 	for _, streamingListener := range app.abciListeners {
-		if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
-			app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
-			if streamingListener.HaltAppOnDeliveryError() {
-				app.halt()
+		// increment the wait group counter
+		wg.Add(1)
+		streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
+		go func() {
+			// decrement the counter when the go routine completes
+			defer wg.Done()
+			if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
+				app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
+				if streamingListener.HaltAppOnDeliveryError() {
+					halt = true
+				}
 			}
-		}
+		}()
+	}
+
+	// wait for all the listener calls to finish
+	wg.Wait()
+
+	if halt {
+		app.halt()
 	}
 
 	return res
@@ -218,14 +235,30 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
 		res.ConsensusParamUpdates = cp
 	}
 
-	// call the streaming service hooks with the EndBlock messages
+	// call the hooks with the EndBlock messages
+	wg := new(sync.WaitGroup)
+	var halt = false
 	for _, streamingListener := range app.abciListeners {
-		if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
-			app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
-			if streamingListener.HaltAppOnDeliveryError() {
-				app.halt()
+		// increment the wait group counter
+		wg.Add(1)
+		streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
+		go func() {
+			// decrement the
counter when the go routine completes
+			defer wg.Done()
+			if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+				app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
+				if streamingListener.HaltAppOnDeliveryError() {
+					halt = true
+				}
 			}
-		}
+		}()
+	}
+
+	// wait for all the listener calls to finish
+	wg.Wait()
+
+	if halt {
+		app.halt()
 	}
 
 	return res
@@ -275,13 +308,30 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
 	var abciRes abci.ResponseDeliverTx
 	defer func() {
+		// call the hooks with the DeliverTx messages
+		wg := new(sync.WaitGroup)
+		var halt = false
 		for _, streamingListener := range app.abciListeners {
-			if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
-				app.logger.Error("DeliverTx listening hook failed", "err", err)
-				if streamingListener.HaltAppOnDeliveryError() {
-					app.halt()
+			// increment the wait group counter
+			wg.Add(1)
+			streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
+			go func() {
+				// decrement the counter when the go routine completes
+				defer wg.Done()
+				if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
+					app.logger.Error("DeliverTx listening hook failed", "err", err)
+					if streamingListener.HaltAppOnDeliveryError() {
+						halt = true
+					}
 				}
-			}
+			}()
+		}
+
+		// wait for all the listener calls to finish
+		wg.Wait()
+
+		if halt {
+			app.halt()
 		}
 	}()
 
 	ctx := app.getContextForTx(runTxModeDeliver, req.Tx)
@@ -298,7 +348,6 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
 	}
 
 	return abciRes
-
 }
 
 // Commit implements the ABCI interface. It will commit all state that exists in

From d86ce53969f7fb44368aa26d740e10f02150d3c8 Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Wed, 2 Mar 2022 17:44:31 -0600
Subject: [PATCH 23/43] async fire-and-forget when halt_app_on_delivery_error
 = false

---
 baseapp/abci.go | 102 ++++++++++++++++++++++++------------------------
 1 file changed, 51 insertions(+), 51 deletions(-)

diff --git a/baseapp/abci.go b/baseapp/abci.go
index dd72db7ae26f..cb628a962ad6 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -192,30 +192,30 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
 
 	// call the hooks with the BeginBlock messages
 	wg := new(sync.WaitGroup)
-	var halt = false
 	for _, streamingListener := range app.abciListeners {
-		// increment the wait group counter
-		wg.Add(1)
 		streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
-		go func() {
-			// decrement the counter when the go routine completes
-			defer wg.Done()
-			if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
-				app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
-				if streamingListener.HaltAppOnDeliveryError() {
-					halt = true
+		if streamingListener.HaltAppOnDeliveryError() {
+			// increment the wait group counter
+			wg.Add(1)
+			go func() {
+				// decrement the counter when the go routine completes
+				defer wg.Done()
+				if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
+					app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
+					app.halt()
 				}
-			}
-		}()
+			}()
+		} else {
+			go func() {
+				if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
+					app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
+				}
+			}()
+		}
}
 
-	// wait for all the listener calls to finish
 	wg.Wait()
 
-	if halt {
-		app.halt()
-	}
-
 	return res
 }
@@ -237,30 +237,30 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
 
 	// call the hooks with the EndBlock messages
 	wg := new(sync.WaitGroup)
-	var halt = false
 	for _, streamingListener := range app.abciListeners {
-		// increment the wait group counter
-		wg.Add(1)
 		streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
-		go func() {
-			// decrement the counter when the go routine completes
-			defer wg.Done()
-			if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
-				app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
-				if streamingListener.HaltAppOnDeliveryError() {
-					halt = true
+		if streamingListener.HaltAppOnDeliveryError() {
+			// increment the wait group counter
+			wg.Add(1)
+			go func() {
+				// decrement the counter when the go routine completes
+				defer wg.Done()
+				if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+					app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
+					app.halt()
 				}
-			}
-		}()
+			}()
+		} else {
+			go func() {
+				if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+					app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
+				}
+			}()
+		}
 	}
 
-	// wait for all the listener calls to finish
 	wg.Wait()
 
-	if halt {
-		app.halt()
-	}
-
 	return res
 }
@@ -310,29 +310,29 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
 	defer func() {
 		// call the hooks with the DeliverTx messages
 		wg := new(sync.WaitGroup)
-		var halt = false
 		for _, streamingListener := range app.abciListeners {
-			// increment the wait group counter
-			wg.Add(1)
 			streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
-			go func() {
-				// decrement the counter when the go routine completes
-				defer wg.Done()
-				if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
-					app.logger.Error("DeliverTx listening hook failed", "err", err)
-					if streamingListener.HaltAppOnDeliveryError() {
-						halt = true
+			if streamingListener.HaltAppOnDeliveryError() {
+				// increment the wait group counter
+				wg.Add(1)
+				go func() {
+					// decrement the counter when the go routine completes
+					defer wg.Done()
+					if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
+						app.logger.Error("DeliverTx listening hook failed", "err", err)
+						app.halt()
 					}
-				}
-			}()
+				}()
+			} else {
+				go func() {
+					if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
+						app.logger.Error("DeliverTx listening hook failed", "err", err)
+					}
+				}()
+			}
 		}
 
-		// wait for all the listener calls to finish
 		wg.Wait()
-
-		if halt {
-			app.halt()
-		}
 	}()
 
 	ctx := app.getContextForTx(runTxModeDeliver, req.Tx)

From 7f6173a72f1e6ab229f9a6c37e612d1d45e3850d Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Thu, 3 Mar 2022 11:06:29 -0600
Subject: [PATCH 24/43] updated comments for HaltAppOnDeliveryError

---
 baseapp/streaming.go                    | 3 +++
 plugin/plugins/file/service/service.go  | 3 +++
 plugin/plugins/kafka/service/service.go | 3 +++
 plugin/plugins/trace/service/service.go | 3 +++
 4 files changed, 12 insertions(+)

diff --git a/baseapp/streaming.go b/baseapp/streaming.go
index 2d3b9d036da5..67c31555730d 100644
--- a/baseapp/streaming.go
+++ b/baseapp/streaming.go
@@ -20,6
+20,9 @@ type ABCIListener interface {
 	ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
 	// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
 	// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+	// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will
+	// be replayed to all listeners when the node restarts and all successful listeners that received data
+	// prior to the halt will receive duplicate data.
 	HaltAppOnDeliveryError() bool
 }
 
diff --git a/plugin/plugins/file/service/service.go b/plugin/plugins/file/service/service.go
index a09feaed1d96..104796707a56 100644
--- a/plugin/plugins/file/service/service.go
+++ b/plugin/plugins/file/service/service.go
@@ -300,6 +300,9 @@ func (fss *FileStreamingService) Close() error {
 
 // HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
 // in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will
+// be replayed to all listeners when the node restarts and all successful listeners that received data
+// prior to the halt will receive duplicate data.
 func (fss *FileStreamingService) HaltAppOnDeliveryError() bool {
 	return fss.haltAppOnDeliveryError
 }
diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go
index a84475f634fa..046b84ae70a7 100644
--- a/plugin/plugins/kafka/service/service.go
+++ b/plugin/plugins/kafka/service/service.go
@@ -253,6 +253,9 @@ func (kss *KafkaStreamingService) ListenEndBlock(
 
 // HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
 // in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will
+// be replayed to all listeners when the node restarts and all successful listeners that received data
+// prior to the halt will receive duplicate data.
 func (kss *KafkaStreamingService) HaltAppOnDeliveryError() bool {
 	return kss.haltAppOnDeliveryError
 }
diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go
index caf60d5ca173..b59d1b4a7c3d 100644
--- a/plugin/plugins/trace/service/service.go
+++ b/plugin/plugins/trace/service/service.go
@@ -210,6 +210,9 @@ func (tss *TraceStreamingService) ListenEndBlock(
 
 // HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
 // in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
+// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will
+// be replayed to all listeners when the node restarts and all successful listeners that received data
+// prior to the halt will receive duplicate data.
func (tss *TraceStreamingService) HaltAppOnDeliveryError() bool { return tss.haltAppOnDeliveryError } From cdfe1b0bfee6dc76e07661fa39b08fd24bae7415 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Fri, 4 Mar 2022 16:31:51 -0600 Subject: [PATCH 25/43] improve non-determinism tests for state listening --- Makefile | 3 + plugin/plugins/kafka/README.md | 200 ++++++++++++------------ plugin/plugins/kafka/docker-compose.yml | 19 ++- sim-state-listening.mk | 42 +++++ simapp/sim_test.go | 145 +++++++++++++++-- 5 files changed, 284 insertions(+), 125 deletions(-) create mode 100644 sim-state-listening.mk diff --git a/Makefile b/Makefile index 0c598af97cde..07b58037202f 100644 --- a/Makefile +++ b/Makefile @@ -259,6 +259,9 @@ endif .PHONY: run-tests test test-all $(TEST_TARGETS) +# Sim tests with state listening plugins enabled +include sim-state-listening.mk + test-sim-nondeterminism: @echo "Running non-determinism test..." @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminism -Enabled=true \ diff --git a/plugin/plugins/kafka/README.md b/plugin/plugins/kafka/README.md index f88d3426ff8b..81674afef7b8 100644 --- a/plugin/plugins/kafka/README.md +++ b/plugin/plugins/kafka/README.md @@ -5,106 +5,17 @@ This plugin demonstrates how to listen to state changes of individual `KVStores` - - [Dependencies](#dependencies) - - [Running the plugin](#running-the-plugin) - [Plugin design](#plugin-design) - [Channel-Based producer](#channel-based-producer) - [Delivery Report handler](#delivery-report-handler) - [Message serde](#message-serde) + - [Example configuration](#example-configuration) + - [Testing the plugin](#testing-the-plugin) - [Confluent Platform](#confluent-platform) - [Docker](#docker) - [Schema Registry](#schema-registry) - [KSQL examples](#ksql-examples) - - - -## Dependencies - -To test and run the examples, you must have `docker` and `docker-compose` installed on your system. Use the links below for installation instructions. - -* [Docker](https://www.docker.com/get-started) -* [Docker Compose] - - -## Running the plugin - -The plugin has been hooked up to run with `test-sim-nondeterminism` task. For a lighter test you can run `./plugin/plugins/kafka/service/service_test.go`. The [KSQ examples](#ksql-examples) below will work with both test scenarios. - -1. Spin up the docker images of the Confluent Platform following the instructions in the [Confluent Platform](#confluent-platform) section. Once the docker images are up and running you'll be able to access the platform on [localhost:9021](localhost:9021). -2. Copy the content below to `~/app.toml`. - - ``` - # app.toml - - ... 
- - ############################################################################### - ### Plugin system configuration ### - ############################################################################### - - [plugins] - - # turn the plugin system, as a whole, on or off - on = true - - # List of plugin names to enable from the plugin/plugins/* - enabled = ["kafka"] - - # The directory to load non-preloaded plugins from; defaults to ./plugin/plugins - dir = "" - - # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName - [plugins.streaming] - - # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services - # in milliseconds - global_ack_wait_limit = 2000 - - ############################################################################### - ### Kafka Plugin configuration ### - ############################################################################### - - # The specific parameters for the Kafka streaming service plugin - [plugins.streaming.kafka] - # List of store keys we want to expose for this streaming service. - keys = [] - - # Optional topic prefix for the topic(s) where data will be stored - topic_prefix = "block" - - # Flush and wait for outstanding messages and requests to complete delivery. (milliseconds) - flush_timeout_ms = 1500 - - # Whether or not to halt the application when plugin fails to deliver message(s). - halt_app_on_delivery_error = true - - # Producer configuration properties. - # The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka. - # For a full list of producer configuration properties - # see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md - [plugins.streaming.kafka.producer] - - # Initial list of brokers as a comma seperated list of broker host or host:port[, host:port[,...]] - bootstrap_servers = "localhost:9092" - - # Client identifier - client_id = "my-app-id" - - # This field indicates the number of acknowledgements the leader - # broker must receive from ISR brokers before responding to the request - acks = "all" - - # When set to true, the producer will ensure that messages - # are successfully produced exactly once and in the original produce order. - # The following configuration properties are adjusted automatically (if not modified by the user) - # when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5), - # retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo. - # Producer instantation will fail if user-supplied configuration is incompatible. - enable_idempotence = true - ``` -3. Run `make test-sim-nondeterminism` and wait for the tests to finish. -4. Go to the [KSQ examples](#ksql-examples) section and go through the examples. ## Plugin design @@ -191,17 +102,104 @@ Example: } ``` +### Example configuration + +Below is an example of how to configure the Kafka plugin. +``` +# app.toml + +... 
+
+###############################################################################
+### Plugin system configuration ###
+###############################################################################
+
+[plugins]
+
+# turn the plugin system, as a whole, on or off
+on = true
+
+# List of plugin names to enable from the plugin/plugins/*
+enabled = ["kafka"]
+
+# The directory to load non-preloaded plugins from; defaults to ./plugin/plugins
+dir = ""
+
+# a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
+[plugins.streaming]
+
+# maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
+# in milliseconds
+global_ack_wait_limit = 2000
+
+###############################################################################
+### Kafka Plugin configuration ###
+###############################################################################
+
+# The specific parameters for the Kafka streaming service plugin
+[plugins.streaming.kafka]
+
+# List of store keys we want to expose for this streaming service.
+keys = []
+
+# Optional topic prefix for the topic(s) where data will be stored
+topic_prefix = "block"
+
+# Flush and wait for outstanding messages and requests to complete delivery. (milliseconds)
+flush_timeout_ms = 1500
+
+# Whether or not to halt the application when plugin fails to deliver message(s).
+halt_app_on_delivery_error = true
+
+# Producer configuration properties.
+# The plugin uses confluent-kafka-go which is a lightweight wrapper around librdkafka.
+# For a full list of producer configuration properties
+# see https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md
+[plugins.streaming.kafka.producer]
+
+# Initial list of brokers as a comma separated list of broker host or host:port[, host:port[,...]]
+bootstrap_servers = "localhost:9092"
+
+# Client identifier
+client_id = "my-app-id"
+
+# This field indicates the number of acknowledgements the leader
+# broker must receive from ISR brokers before responding to the request
+acks = "all"
+
+# When set to true, the producer will ensure that messages
+# are successfully produced exactly once and in the original produce order.
+# The following configuration properties are adjusted automatically (if not modified by the user)
+# when idempotence is enabled: max.in.flight.requests.per.connection=5 (must be less than or equal to 5),
+# retries=INT32_MAX (must be greater than 0), acks=all, queuing.strategy=fifo.
+# Producer instantiation will fail if user-supplied configuration is incompatible.
+enable_idempotence = true
+```
+
+## Testing the plugin
+
+Non-determinism testing has been set up to run with the Kafka plugin.
+
+To execute the tests, run:
+```
+make test-sim-nondeterminism-state-listening-kafka
+```
+
 ## Confluent Platform
 
+If you're interested in viewing or querying events stored in Kafka, you can stand up the Confluent Platform stack with Docker.
+
+*Visit the Confluent Platform [docs](https://docs.confluent.io/platform/current/quickstart/ce-docker-quickstart.html) for up-to-date Docker instructions.*
+
 ### Docker
 
 Spin up Confluent Platform.
 
 ```
-cd .../cosmos-sdk/plugin/plugins/kafka/docker-compose.yml
+docker-compose -f plugin/plugins/kafka/docker-compose.yml up -d
 ```
 
+You should see something like this:
 ```
-docker-compose up -d
 Creating network "kafka_default" with the default driver
 Creating zookeeper ... done
 Creating broker ... done
@@ -217,6 +215,10 @@ Creating control-center ...
done
 Check status
 ```
 docker-compose ps
+```
+
+You should see something like this:
+```
 Name Command State Ports
 ---------------------------------------------------------------------------------------------------------
 broker /etc/confluent/docker/run Up 0.0.0.0:9092->9092/tcp, 0.0.0.0:9101->9101/tcp
 connect /etc/confluent/docker/run Up 0.0.0.0:8083->8083/tcp, 9092/tcp
 control-center /etc/confluent/docker/run Up 0.0.0.0:9021->9021/tcp
 ksql-datagen bash -c echo Waiting for K ... Up
 ksqldb-cli /bin/sh Up
 ksqldb-server /etc/confluent/docker/run Up 0.0.0.0:8088->8088/tcp
 rest-proxy /etc/confluent/docker/run Up 0.0.0.0:8082->8082/tcp
 schema-registry /etc/confluent/docker/run Up 0.0.0.0:8081->8081/tcp
 zookeeper /etc/confluent/docker/run Up 0.0.0.0:2181->2181/tcp, 2888/tcp, 3888/tcp
 ```
-
-
-### Schema Registry
-
-Because `golang` lacks support to be able to register Protobuf messages with the schema registry, one needs to generate the Java code from the proto messages and use the [KafkaProtobufSerializer.java](https://github.com/confluentinc/schema-registry/blob/master/protobuf-serializer/src/main/java/io/confluent/kafka/serializers/protobuf/KafkaProtobufSerializer.java) to automatically register them. The Java libraries make this process exctreamly easy. Take a look [here](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html) fro an example of how this is achived.
-
-
 ### KSQL examples
 
 One huge advantage of using Kafka with the Confluent Platform is the KSQL streaming engine. KSQL allows us to be able to write queries and create streams or tables from one or multiple Kafka topics (through joins) without having to write any code.
@@ -313,4 +308,9 @@ Result:
 }
 ```
 
+### Schema Registry
+
+Because `golang` lacks support to be able to register Protobuf messages with the schema registry, one needs to generate the Java code from the proto messages and use the [KafkaProtobufSerializer.java](https://github.com/confluentinc/schema-registry/blob/master/protobuf-serializer/src/main/java/io/confluent/kafka/serializers/protobuf/KafkaProtobufSerializer.java) to automatically register them. The Java libraries make this process extremely easy. Take a look [here](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html) for an example of how this is achieved.
+
+
 Check out the [docs](https://docs.ksqldb.io/en/latest/) and this [post](https://www.confluent.io/blog/ksqldb-0-15-reads-more-message-keys-supports-more-data-types/) for more complex examples and a deeper understanding of KSQL.
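For a quick end-to-end check that messages actually reach the broker, a short consumer can be pointed at one of the plugin's topics. This sketch is not part of the plugin: the topic name `block-state-change` is an assumption based on `topic_prefix = "block"` plus a state-change topic, so adjust it to the topics your configuration actually produces.

```go
package main

import (
	"fmt"
	"time"

	"github.com/confluentinc/confluent-kafka-go/kafka"
)

func main() {
	// Mirror the README's broker address; use a fresh group.id so the
	// consumer starts from the earliest retained offset.
	c, err := kafka.NewConsumer(&kafka.ConfigMap{
		"bootstrap.servers": "localhost:9092",
		"group.id":          "adr038-smoke-test",
		"auto.offset.reset": "earliest",
	})
	if err != nil {
		panic(err)
	}
	defer c.Close()

	// "block-state-change" is an assumed topic name; see the note above.
	if err := c.SubscribeTopics([]string{"block-state-change"}, nil); err != nil {
		panic(err)
	}

	// Read a handful of messages and print their keys; a timeout error
	// here usually means nothing was produced to the topic.
	for i := 0; i < 10; i++ {
		msg, err := c.ReadMessage(10 * time.Second)
		if err != nil {
			fmt.Printf("read error: %v\n", err)
			return
		}
		fmt.Printf("topic=%s key=%s\n", *msg.TopicPartition.Topic, string(msg.Key))
	}
}
```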
diff --git a/plugin/plugins/kafka/docker-compose.yml b/plugin/plugins/kafka/docker-compose.yml index 017fe9a4d621..d8ee19bb7d38 100644 --- a/plugin/plugins/kafka/docker-compose.yml +++ b/plugin/plugins/kafka/docker-compose.yml @@ -2,7 +2,7 @@ version: '2' services: zookeeper: - image: confluentinc/cp-zookeeper:7.0.0 + image: confluentinc/cp-zookeeper:7.0.1 hostname: zookeeper container_name: zookeeper ports: @@ -12,7 +12,7 @@ services: ZOOKEEPER_TICK_TIME: 2000 broker: - image: confluentinc/cp-server:7.0.0 + image: confluentinc/cp-server:7.0.1 hostname: broker container_name: broker depends_on: @@ -41,7 +41,7 @@ services: CONFLUENT_SUPPORT_CUSTOMER_ID: 'anonymous' schema-registry: - image: confluentinc/cp-schema-registry:7.0.0 + image: confluentinc/cp-schema-registry:7.0.1 hostname: schema-registry container_name: schema-registry depends_on: @@ -65,7 +65,6 @@ services: environment: CONNECT_BOOTSTRAP_SERVERS: 'broker:29092' CONNECT_REST_ADVERTISED_HOST_NAME: connect - CONNECT_REST_PORT: 8083 CONNECT_GROUP_ID: compose-connect-group CONNECT_CONFIG_STORAGE_TOPIC: docker-connect-configs CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: 1 @@ -78,14 +77,14 @@ services: CONNECT_VALUE_CONVERTER: io.confluent.connect.avro.AvroConverter CONNECT_VALUE_CONVERTER_SCHEMA_REGISTRY_URL: http://schema-registry:8081 # CLASSPATH required due to CC-2422 - CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.0.0.jar + CLASSPATH: /usr/share/java/monitoring-interceptors/monitoring-interceptors-7.0.1.jar CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor" CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor" CONNECT_PLUGIN_PATH: "/usr/share/java,/usr/share/confluent-hub-components" CONNECT_LOG4J_LOGGERS: org.apache.zookeeper=ERROR,org.I0Itec.zkclient=ERROR,org.reflections=ERROR control-center: - image: confluentinc/cp-enterprise-control-center:7.0.0 + image: confluentinc/cp-enterprise-control-center:7.0.1 hostname: control-center container_name: control-center depends_on: @@ -108,7 +107,7 @@ services: PORT: 9021 ksqldb-server: - image: confluentinc/cp-ksqldb-server:7.0.0 + image: confluentinc/cp-ksqldb-server:7.0.1 hostname: ksqldb-server container_name: ksqldb-server depends_on: @@ -131,7 +130,7 @@ services: KSQL_KSQL_LOGGING_PROCESSING_STREAM_AUTO_CREATE: 'true' ksqldb-cli: - image: confluentinc/cp-ksqldb-cli:7.0.0 + image: confluentinc/cp-ksqldb-cli:7.0.1 container_name: ksqldb-cli depends_on: - broker @@ -141,7 +140,7 @@ services: tty: true ksql-datagen: - image: confluentinc/ksqldb-examples:7.0.0 + image: confluentinc/ksqldb-examples:7.0.1 hostname: ksql-datagen container_name: ksql-datagen depends_on: @@ -163,7 +162,7 @@ services: STREAMS_SCHEMA_REGISTRY_PORT: 8081 rest-proxy: - image: confluentinc/cp-kafka-rest:7.0.0 + image: confluentinc/cp-kafka-rest:7.0.1 depends_on: - broker - schema-registry diff --git a/sim-state-listening.mk b/sim-state-listening.mk new file mode 100644 index 000000000000..72ae516849cd --- /dev/null +++ b/sim-state-listening.mk @@ -0,0 +1,42 @@ +#!/usr/bin/make -f + +################################################ +# Simulation tests with State Listening plugins +# +# This file is an extension for sims.mk +################################################ + +test-sim-nondeterminism-state-listening-file: + @echo "Running non-determinism-state-listening-file test..." 
+ @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ + -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ + -StateListeningPlugin=file -HaltAppOnDeliveryError=true + +test-sim-nondeterminism-state-listening-trace: + @echo "Running non-determinism-state-listening-trace test..." + @go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ + -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ + -StateListeningPlugin=trace -HaltAppOnDeliveryError=true + +test-sim-nondeterminism-state-listening-kafka: + @echo "Running non-determinism-state-listening-kafka test..." + @echo "Starting Kafka..." + docker-compose -f plugin/plugins/kafka/docker-compose.yml up -d zookeeper broker + + @-go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ + -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ + -StateListeningPlugin=kafka -HaltAppOnDeliveryError=true + + @echo "Stopping Kafka..." + docker-compose -f plugin/plugins/kafka/docker-compose.yml down + +test-sim-nondeterminism-state-listening-all: \ + test-sim-nondeterminism-state-listening-file \ + test-sim-nondeterminism-state-listening-trace \ + test-sim-nondeterminism-state-listening-kafka + +.PHONY: \ +test-sim-nondeterminism-state-listening-all \ +test-sim-nondeterminism-state-listening-file \ +test-sim-nondeterminism-state-listening-trace \ +test-sim-nondeterminism-state-listening-kafka diff --git a/simapp/sim_test.go b/simapp/sim_test.go index 4c34ae627b67..7a724c393ca6 100644 --- a/simapp/sim_test.go +++ b/simapp/sim_test.go @@ -3,6 +3,7 @@ package simapp import ( "context" "encoding/json" + "flag" "fmt" "github.com/confluentinc/confluent-kafka-go/kafka" "github.com/cosmos/cosmos-sdk/plugin" @@ -12,7 +13,6 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" "math/rand" "os" - "path/filepath" "runtime/debug" "strings" "testing" @@ -47,9 +47,17 @@ import ( kafkaservice "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service" ) +var ( + StateListeningPlugin string + HaltAppOnDeliveryError bool +) + // Get flags every time the simulator is run func init() { GetSimulatorFlags() + // State listening flags + flag.StringVar(&StateListeningPlugin, "StateListeningPlugin", "", "State listening plugin name") + flag.BoolVar(&HaltAppOnDeliveryError, "HaltAppOnDeliveryError", true, "Halt app when state listeners fail") } type StoreKeysPrefixes struct { @@ -322,17 +330,102 @@ func TestAppStateDeterminism(t *testing.T) { logger = log.NewNopLogger() } - appOpts := loadAppOptions() - enabledPlugins := cast.ToStringSlice(appOpts.Get(fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ENABLED_TOML_KEY))) - for _, p := range enabledPlugins { - if kafkaplugin.PLUGIN_NAME == p { - prepKafkaTopics(appOpts) - break - } + db := dbm.NewMemDB() + app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), EmptyAppOptions{}, interBlockCacheOpt()) + + fmt.Printf( + "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n", + config.Seed, i+1, numSeeds, j+1, numTimesToRunPerSeed, + ) + + _, _, err := simulation.SimulateFromSeed( + t, + os.Stdout, + app.BaseApp, + AppStateFn(app.AppCodec(), app.SimulationManager()), + simtypes.RandomAccounts, // Replace with own random account function if using keys other than secp256k1 + SimulationOperations(app, app.AppCodec(), config), + app.ModuleAccountAddrs(), + 
config, + app.AppCodec(), + ) + require.NoError(t, err) + + if config.Commit { + PrintStats(db) + } + + appHash := app.LastCommitID().Hash + appHashList[j] = appHash + + if j != 0 { + require.Equal( + t, string(appHashList[0]), string(appHashList[j]), + "non-determinism in seed %d: %d/%d, attempt: %d/%d\n", config.Seed, i+1, numSeeds, j+1, numTimesToRunPerSeed, + ) + } + } + } +} + +// TODO: Make another test for the fuzzer itself, which just has noOp txs +// and doesn't depend on the application. +func TestAppStateDeterminismWithStateListening(t *testing.T) { + if !FlagEnabledValue { + t.Skip("skipping application simulation") + } + + if StateListeningPlugin == "" { + t.Skip("state listening plugin flag not provided: -StateListeningPlugin=name") + } + + config := NewConfigFromFlags() + config.InitialBlockHeight = 1 + config.ExportParamsPath = "" + config.OnOperation = false + config.AllInvariants = false + config.ChainID = helpers.SimAppChainID + + numSeeds := 3 + numTimesToRunPerSeed := 5 + appHashList := make([]json.RawMessage, numTimesToRunPerSeed) + + // State listening plugin config + appOpts := loadAppOptions() + key := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ENABLED_TOML_KEY) + enabledPlugins := cast.ToStringSlice(appOpts.Get(key)) + for _, p := range enabledPlugins { + // Kafka plugin topic configuration + if kafkaplugin.PLUGIN_NAME == p { + prepKafkaTopics(appOpts) + break + } + } + + for i := 0; i < numSeeds; i++ { + config.Seed = rand.Int63() + + for j := 0; j < numTimesToRunPerSeed; j++ { + var logger log.Logger + if FlagVerboseValue { + logger = log.TestingLogger() + } else { + logger = log.NewNopLogger() } db := dbm.NewMemDB() - app := NewSimApp(logger, db, nil, true, map[int64]bool{}, DefaultNodeHome, FlagPeriodValue, MakeTestEncodingConfig(), appOpts, interBlockCacheOpt()) + app := NewSimApp( + logger, + db, + nil, + true, + map[int64]bool{}, + DefaultNodeHome, + FlagPeriodValue, + MakeTestEncodingConfig(), + appOpts, + interBlockCacheOpt(), + ) fmt.Printf( "running non-determinism simulation; seed %d: %d/%d, attempt: %d/%d\n", @@ -371,14 +464,36 @@ func TestAppStateDeterminism(t *testing.T) { func loadAppOptions() types.AppOptions { // load plugin config - usrHomeDir, _ := os.UserHomeDir() - confFile := filepath.Join(usrHomeDir, "app.toml") + keys := make([]string, 0) // leave empty to listen to all store keys + m := make(map[string]interface{}) + m["plugins.on"] = true + m["plugins.enabled"] = []string{StateListeningPlugin} + m["plugins.dir"] = "" + // file plugin + m["plugins.streaming.file.keys"] = keys + m["plugins.streaming.file.write_dir"] = "" + m["plugins.streaming.file.prefix"] = "" + m["plugins.streaming.file.halt_app_on_delivery_error"] = HaltAppOnDeliveryError + // trace plugin + m["plugins.streaming.trace.keys"] = keys + m["plugins.streaming.trace.print_data_to_stdout"] = false + m["plugins.streaming.trace.halt_app_on_delivery_error"] = HaltAppOnDeliveryError + // kafka plugin + m["plugins.streaming.kafka.keys"] = keys + m["plugins.streaming.kafka.topic_prefix"] = "sim" + m["plugins.streaming.kafka.flush_timeout_ms"] = 5000 + m["plugins.streaming.kafka.halt_app_on_delivery_error"] = HaltAppOnDeliveryError + // Kafka plugin producer + m["plugins.streaming.kafka.producer.bootstrap_servers"] = "localhost:9092" + m["plugins.streaming.kafka.producer.client_id"] = "may-app-id" + m["plugins.streaming.kafka.producer.acks"] = "all" + m["plugins.streaming.kafka.producer.enable_idempotence"] = true + vpr := viper.New() - vpr.SetConfigFile(confFile) 
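+	// Every entry of m is registered as a viper default below, so the returned
+	// AppOptions resolves these keys without reading an app.toml file from disk.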
-	err := vpr.ReadInConfig()
-	if err != nil {
-		tmos.Exit(err.Error())
+	for key, value := range m {
+		vpr.SetDefault(key, value)
 	}
+
+	return vpr
 }

From 8ed57c4bc32e930b4b3803aaf45528188548d58a Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Wed, 9 Mar 2022 19:00:11 -0600
Subject: [PATCH 26/43] continue with testing when docker-compose returns error

---
 sim-state-listening.mk | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/sim-state-listening.mk b/sim-state-listening.mk
index 72ae516849cd..f6313b17e6c8 100644
--- a/sim-state-listening.mk
+++ b/sim-state-listening.mk
@@ -21,14 +21,14 @@ test-sim-nondeterminism-state-listening-trace:
 test-sim-nondeterminism-state-listening-kafka:
 	@echo "Running non-determinism-state-listening-kafka test..."
 	@echo "Starting Kafka..."
-	docker-compose -f plugin/plugins/kafka/docker-compose.yml up -d zookeeper broker
+	@-docker-compose -f plugin/plugins/kafka/docker-compose.yml up -d zookeeper broker

 	@-go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \
 		-NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \
 		-StateListeningPlugin=kafka -HaltAppOnDeliveryError=true

 	@echo "Stopping Kafka..."
-	docker-compose -f plugin/plugins/kafka/docker-compose.yml down
+	@-docker-compose -f plugin/plugins/kafka/docker-compose.yml down

From a20a6f9d1f804d93d3f20b624e809e8204a8375e Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Tue, 15 Mar 2022 18:22:42 -0500
Subject: [PATCH 27/43] add fallback timer to kill indefinitely running listener goroutines

---
 baseapp/abci.go     | 134 ++++++++++++++++++++++++++++++++++++++------
 baseapp/baseapp.go  |   5 ++
 baseapp/options.go  |   7 +++
 plugin/streaming.go |   4 +-
 simapp/app.go       |   7 +++
 simapp/sim_test.go  |   1 +
 6 files changed, 138 insertions(+), 20 deletions(-)

diff --git a/baseapp/abci.go b/baseapp/abci.go
index cb628a962ad6..1228fbeeddf9 100644
--- a/baseapp/abci.go
+++ b/baseapp/abci.go
@@ -1,6 +1,7 @@
 package baseapp

 import (
+	"context"
 	"crypto/sha256"
 	"errors"
 	"fmt"
@@ -9,6 +10,7 @@ import (
 	"strings"
 	"sync"
 	"syscall"
+	"time"

 	"github.com/gogo/protobuf/proto"
 	abci "github.com/tendermint/tendermint/abci/types"
@@ -198,12 +200,7 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
 			// increment the wait group counter
 			wg.Add(1)
 			go func() {
-				// decrement the counter when the go routine completes
-				defer wg.Done()
-				if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
-					app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err)
-					app.halt()
-				}
+				app.listenBeginBlock(req, res, streamingListener, wg)
 			}()
 		} else {
 			go func() {
@@ -219,6 +216,43 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg
 	return res
 }

+// listenBeginBlock asynchronously processes BeginBlock state change events.
+// The listener must complete its work before the global threshold is reached.
+// Otherwise, all work will be abandoned and resources released.
+func (app *BaseApp) listenBeginBlock(
+	req abci.RequestBeginBlock,
+	res abci.ResponseBeginBlock,
+	streamingListener ABCIListener,
+	wg *sync.WaitGroup,
+) {
+	defer wg.Done()
+
+	// Set timer so goroutines don't block indefinitely
+	ctx, cancel := context.WithTimeout(context.Background(), app.globalWaitLimit*time.Second)
+	defer cancel()
+
+	var listenErr error
+	ch := make(chan struct{})
+
+	go func(ch chan struct{}) {
+		if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil {
+			listenErr = err
+		}
+		ch <- struct{}{}
+	}(ch)
+
+	select {
+	case <-ch:
+	case <-ctx.Done():
+		listenErr = ctx.Err()
+	}
+
+	if listenErr != nil {
+		app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", listenErr)
+		app.halt()
+	}
+}
+
 // EndBlock implements the ABCI interface.
 func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBlock) {
@@ -243,12 +277,7 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
 			// increment the wait group counter
 			wg.Add(1)
 			go func() {
-				// decrement the counter when the go routine completes
-				defer wg.Done()
-				if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
-					app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
-					app.halt()
-				}
+				app.listenEndBlock(req, res, streamingListener, wg)
 			}()
 		} else {
 			go func() {
@@ -264,6 +293,43 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc
 	return res
 }

+// listenEndBlock asynchronously processes EndBlock state change events.
+// The listener must complete its work before the global threshold is reached.
+// Otherwise, all work will be abandoned and resources released.
+func (app *BaseApp) listenEndBlock(
+	req abci.RequestEndBlock,
+	res abci.ResponseEndBlock,
+	streamingListener ABCIListener,
+	wg *sync.WaitGroup,
+) {
+	defer wg.Done()
+
+	// Set timer so goroutines don't block indefinitely
+	ctx, cancel := context.WithTimeout(context.Background(), app.globalWaitLimit*time.Second)
+	defer cancel()
+
+	var listenErr error
+	ch := make(chan struct{})
+
+	go func(ch chan struct{}) {
+		if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+			listenErr = err
+		}
+		ch <- struct{}{}
+	}(ch)
+
+	select {
+	case <-ch:
+	case <-ctx.Done():
+		listenErr = ctx.Err()
+	}
+
+	if listenErr != nil {
+		app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", listenErr)
+		app.halt()
+	}
+}
+
 // CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In
 // CheckTx mode, messages are not executed. This means messages are only validated
 // and only the AnteHandler is executed. State is persisted to the BaseApp's
@@ -316,12 +382,7 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
 			// increment the wait group counter
 			wg.Add(1)
 			go func() {
-				// decrement the counter when the go routine completes
-				defer wg.Done()
-				if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
-					app.logger.Error("DeliverTx listening hook failed", "err", err)
-					app.halt()
-				}
+				app.listenDeliverTx(req, abciRes, streamingListener, wg)
 			}()
 		} else {
 			go func() {
@@ -350,6 +411,43 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
 	return abciRes
 }

+// listenDeliverTx asynchronously processes DeliverTx state change events.
+// The listener must complete its work before the global threshold is reached. +// Otherwise, all work will be abandoned and resources released. +func (app *BaseApp) listenDeliverTx( + req abci.RequestDeliverTx, + res abci.ResponseDeliverTx, + streamingListener ABCIListener, + wg *sync.WaitGroup, +) { + defer wg.Done() + + // Set timer so goroutines don't block indefinitely + ctx, cancel := context.WithTimeout(context.Background(), app.globalWaitLimit*time.Second) + defer cancel() + + var listenErr error + ch := make(chan struct{}) + + go func(ch chan struct{}) { + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil { + listenErr = err + } + ch <- struct{}{} + }(ch) + + select { + case <-ch: + case <-ctx.Done(): + listenErr = ctx.Err() + } + + if listenErr != nil { + app.logger.Error("DeliverTx listening hook failed", "err", listenErr) + app.halt() + } +} + // Commit implements the ABCI interface. It will commit all state that exists in // the deliver state's multi-store and includes the resulting commit ID in the // returned abci.ResponseCommit. Commit will set the check state based on the diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go index ba60a614b684..c7e98e79d2ab 100644 --- a/baseapp/baseapp.go +++ b/baseapp/baseapp.go @@ -8,6 +8,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" dbm "github.com/tendermint/tm-db" + "time" "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/snapshots" @@ -127,6 +128,10 @@ type BaseApp struct { // nolint: maligned // abciListeners for hooking into the ABCI message processing of the BaseApp // and exposing the requests and responses to external consumers abciListeners []ABCIListener + + // globalWaitLimit is the maximum amount of time the BaseApp will wait + // for ABCIListeners to complete their work. + globalWaitLimit time.Duration } // NewBaseApp returns a reference to an initialized BaseApp. 
It accepts a diff --git a/baseapp/options.go b/baseapp/options.go index e477171e5486..2fa2cbe1c404 100644 --- a/baseapp/options.go +++ b/baseapp/options.go @@ -4,6 +4,7 @@ import ( "fmt" dbm "github.com/tendermint/tm-db" "io" + "time" "github.com/cosmos/cosmos-sdk/codec/types" "github.com/cosmos/cosmos-sdk/snapshots" @@ -244,3 +245,9 @@ func (app *BaseApp) SetStreamingService(s StreamingService) { // BaseApp will pass BeginBlock, DeliverTx, and EndBlock requests and responses to the streaming services to update their ABCI context app.abciListeners = append(app.abciListeners, s) } + +// SetGlobalWaitLimit is used to set the maximum amount of time the BaseApp will wait for ABCIListeners +// to finish their work before halting +func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) { + app.globalWaitLimit = t +} \ No newline at end of file diff --git a/plugin/streaming.go b/plugin/streaming.go index e230aa524bde..b0c3b7fc6652 100644 --- a/plugin/streaming.go +++ b/plugin/streaming.go @@ -11,8 +11,8 @@ import ( // STREAMING_TOML_KEY is the top-level TOML key for configuring streaming service plugins const STREAMING_TOML_KEY = "streaming" -// GLOBAL_ACK_WAIT_LIMIT_TOML_KEY is the TOML key for configuring the global ack wait limit -const GLOBAL_ACK_WAIT_LIMIT_TOML_KEY = "global_ack_wait_limit" +// GLOBAL_WAIT_LIMIT_TOML_KEY is the TOML key for configuring the global wait limit +const GLOBAL_WAIT_LIMIT_TOML_KEY = "global_wait_limit" // StateStreamingPlugin interface for plugins that load a baseapp.StreamingService implementation from a plugin onto a baseapp.BaseApp type StateStreamingPlugin interface { diff --git a/simapp/app.go b/simapp/app.go index 6d6f13cf0233..a797ebab7ae1 100644 --- a/simapp/app.go +++ b/simapp/app.go @@ -236,6 +236,13 @@ func NewSimApp( pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY) if cast.ToBool(appOpts.Get(pluginsOnKey)) { + // set the global wait limit for state streaming plugins + pluginsStreamingKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY) + globalWaitLimitKey := fmt.Sprintf("%s.%s", pluginsStreamingKey, plugin.GLOBAL_WAIT_LIMIT_TOML_KEY) + globalWaitLimit := cast.ToDuration(appOpts.Get(globalWaitLimitKey)) + if globalWaitLimit > 0 { + bApp.SetGlobalWaitLimit(globalWaitLimit) + } // this loads the preloaded and any plugins found in `plugins.dir` // if their names match those in the `plugins.enabled` list. 
pluginLoader, err := loader.NewPluginLoader(appOpts, logger)

diff --git a/simapp/sim_test.go b/simapp/sim_test.go
index 7a724c393ca6..af649b70c39b 100644
--- a/simapp/sim_test.go
+++ b/simapp/sim_test.go
@@ -469,6 +469,7 @@ func loadAppOptions() types.AppOptions {
 	m["plugins.on"] = true
 	m["plugins.enabled"] = []string{StateListeningPlugin}
 	m["plugins.dir"] = ""
+	m["plugins.streaming.global_wait_limit"] = 120
 	// file plugin
 	m["plugins.streaming.file.keys"] = keys
 	m["plugins.streaming.file.write_dir"] = ""

From 0ad44b790dc377f7c2b81eb96ecd74a77d4a9d5a Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Wed, 16 Mar 2022 13:56:39 -0500
Subject: [PATCH 28/43] update comment

---
 baseapp/streaming.go | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/baseapp/streaming.go b/baseapp/streaming.go
index 67c31555730d..113b26acaf6c 100644
--- a/baseapp/streaming.go
+++ b/baseapp/streaming.go
@@ -18,11 +18,12 @@ type ABCIListener interface {
 	ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
 	// ListenDeliverTx updates the streaming service with the latest DeliverTx messages
 	ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
-	// HaltAppOnDeliveryError whether or not to halt the application when delivery of massages fails
-	// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
-	// When `true`, the app will gracefully halt and stop the running node. Uncommitted blocks will
-	// be replayed to all listeners when the node restarts and all successful listeners that received data
-	// prior to the halt will receive duplicate data.
+	// HaltAppOnDeliveryError returns true if the application has been configured to halt when
+	// ListenBeginBlock, ListenEndBlock, ListenDeliverTx fail to process messages and false when
+	// the application has been configured to send messages to ListenBeginBlock, ListenEndBlock, ListenDeliverTx
+	// in fire-and-forget fashion.
+	//
+	// This Behavior is controlled by a corresponding app config setting.
 	HaltAppOnDeliveryError() bool
 }

From 9570199e8f2d3a29e69ed6d948b3554adde25afa Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Wed, 16 Mar 2022 14:01:02 -0500
Subject: [PATCH 29/43] fix typo

---
 baseapp/streaming.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/baseapp/streaming.go b/baseapp/streaming.go
index 113b26acaf6c..940ff43aee15 100644
--- a/baseapp/streaming.go
+++ b/baseapp/streaming.go
@@ -23,7 +23,7 @@ type ABCIListener interface {
 	// the application has been configured to send messages to ListenBeginBlock, ListenEndBlock, ListenDeliverTx
 	// in fire-and-forget fashion.
 	//
-	// This Behavior is controlled by a corresponding app config setting.
+	// This behavior is controlled by a corresponding app config setting.
HaltAppOnDeliveryError() bool } From 3c19fdbf55d09ac66d5657e6e3a63a02b3966168 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Wed, 23 Mar 2022 16:57:18 -0500 Subject: [PATCH 30/43] disable delivery report when in fire-and-forget mode --- plugin/plugins/kafka/kafka.go | 6 ++++++ plugin/plugins/kafka/service/service.go | 19 ++++++++++++++++--- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/plugin/plugins/kafka/kafka.go b/plugin/plugins/kafka/kafka.go index a3fd92c66e41..4a4be3e5b22f 100644 --- a/plugin/plugins/kafka/kafka.go +++ b/plugin/plugins/kafka/kafka.go @@ -130,6 +130,12 @@ func (ssp *streamingServicePlugin) Register( if err := producerConfigMap.SetKey(key, element); err != nil { return err } + if !haltAppOnDeliveryError { + // disable delivery reports when operating in fire-and-forget fashion + if err := producerConfigMap.SetKey("go.delivery.reports", false); err != nil { + return err + } + } } var err error diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 046b84ae70a7..90237a8beef8 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -331,12 +331,25 @@ func (kss *KafkaStreamingService) writeAsJsonToKafka( topic = fmt.Sprintf("%s-%s", kss.topicPrefix, topic) } - // produce message - if err := kss.producer.Produce(&kafka.Message{ + // prepare message + message := &kafka.Message{ TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, Value: json, Key: []byte(key), - }, kss.deliveryChan); err != nil { + } + + // produce message in fire-and-forget fashion + if !kss.haltAppOnDeliveryError { + // the producer has been configured with `go.delivery.reports: false` + // pass `nil` for private delivery reports chan + if err := kss.producer.Produce(message, nil); err != nil { + return err + } + return nil + } + + // produce message and check delivery report + if err := kss.producer.Produce(message, kss.deliveryChan); err != nil { return err } From 6c0530f9eb3fff95cc8f3e6480ceed5703dcc892 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Wed, 23 Mar 2022 18:11:50 -0500 Subject: [PATCH 31/43] code improvement --- plugin/plugins/kafka/service/service.go | 35 +++++++++++-------------- 1 file changed, 16 insertions(+), 19 deletions(-) diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 90237a8beef8..52858603b102 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -135,10 +135,14 @@ func NewKafkaStreamingService( codec: c, stateCache: make([][]byte, 0), stateCacheLock: new(sync.Mutex), - deliveryChan: make(chan kafka.Event), haltAppOnDeliveryError: haltAppOnDeliveryError, } + // setup private delivery channel to listen for delivery errors. 
+ if haltAppOnDeliveryError { + kss.deliveryChan = make(chan kafka.Event) + } + return kss, nil } @@ -331,25 +335,15 @@ func (kss *KafkaStreamingService) writeAsJsonToKafka( topic = fmt.Sprintf("%s-%s", kss.topicPrefix, topic) } - // prepare message - message := &kafka.Message{ + // produce message + // when `halt_app_on_delivery_error = false`, kss.deliveryChan is `nil` + // and the producer is configured with `go.delivery.reports: false` + // this means that the producer operates in a fire-and-forget mode + if err := kss.producer.Produce(&kafka.Message{ TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, Value: json, Key: []byte(key), - } - - // produce message in fire-and-forget fashion - if !kss.haltAppOnDeliveryError { - // the producer has been configured with `go.delivery.reports: false` - // pass `nil` for private delivery reports chan - if err := kss.producer.Produce(message, nil); err != nil { - return err - } - return nil - } - - // produce message and check delivery report - if err := kss.producer.Produce(message, kss.deliveryChan); err != nil { + }, kss.deliveryChan); err != nil { return err } @@ -358,6 +352,10 @@ func (kss *KafkaStreamingService) writeAsJsonToKafka( // checkDeliveryReport checks kafka.Producer delivery report for successful or failed messages func (kss *KafkaStreamingService) checkDeliveryReport(ctx sdk.Context) error { + if kss.deliveryChan == nil { + return nil + } + e := <-kss.deliveryChan m := e.(*kafka.Message) topic := *m.TopicPartition.Topic @@ -369,10 +367,9 @@ func (kss *KafkaStreamingService) checkDeliveryReport(ctx sdk.Context) error { if topicErr != nil { logger.Error("Delivery failed: ", "topic", topic, "partition", partition, "key", key, "err", topicErr) - return topicErr } else { logger.Debug("Delivered message:", "topic", topic, "partition", partition, "offset", offset, "key", key) } - return nil + return topicErr } \ No newline at end of file From 63ea30ba5bd46b05f580b17403ca0b173190159b Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Mon, 28 Mar 2022 19:05:52 -0500 Subject: [PATCH 32/43] fix config param --- plugin/plugins/trace/trace.go | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/plugin/plugins/trace/trace.go b/plugin/plugins/trace/trace.go index 2c55ca1aa20e..36d63305b886 100644 --- a/plugin/plugins/trace/trace.go +++ b/plugin/plugins/trace/trace.go @@ -30,8 +30,8 @@ const ( PRINT_DATA_TO_STDOUT_PARAM = "print_data_to_stdout" - // ACK_MODE configures whether to operate in fire-and-forget or success/failure acknowledgement mode - ACK_MODE = "ack" + // HALT_APP_ON_DELIVERY_ERROR whether or not to halt the application when plugin fails to deliver message(s) + HALT_APP_ON_DELIVERY_ERROR = "halt_app_on_delivery_error" ) // Plugins is the exported symbol for loading this plugin @@ -71,7 +71,8 @@ func (ssp *streamingServicePlugin) Register( // load all the params required for this plugin from the provided AppOptions tomlKeyPrefix := fmt.Sprintf("%s.%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY, PLUGIN_NAME) printDataToStdout := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, PRINT_DATA_TO_STDOUT_PARAM))) - ack := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, ACK_MODE))) + haltAppOnDeliveryError := cast.ToBool(ssp.opts.Get(fmt.Sprintf("%s.%s", tomlKeyPrefix, HALT_APP_ON_DELIVERY_ERROR))) + // get the store keys allowed to be exposed for this streaming service exposeKeyStrings := cast.ToStringSlice(ssp.opts.Get(fmt.Sprintf("%s.%s", 
tomlKeyPrefix, KEYS_PARAM))) @@ -92,7 +93,7 @@ func (ssp *streamingServicePlugin) Register( } var err error - ssp.tss, err = service.NewTraceStreamingService(exposeStoreKeys, marshaller, printDataToStdout, ack) + ssp.tss, err = service.NewTraceStreamingService(exposeStoreKeys, marshaller, printDataToStdout, haltAppOnDeliveryError) if err != nil { return err } From 7f4ed75455b95fba158e345079987bc492e3f130 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Mon, 28 Mar 2022 22:09:43 -0500 Subject: [PATCH 33/43] remove fallback timer, long upgrades may trigger it --- baseapp/abci.go | 134 +++++++----------------------------------------- 1 file changed, 18 insertions(+), 116 deletions(-) diff --git a/baseapp/abci.go b/baseapp/abci.go index 1228fbeeddf9..cb628a962ad6 100644 --- a/baseapp/abci.go +++ b/baseapp/abci.go @@ -1,7 +1,6 @@ package baseapp import ( - "context" "crypto/sha256" "errors" "fmt" @@ -10,7 +9,6 @@ import ( "strings" "sync" "syscall" - "time" "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" @@ -200,7 +198,12 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg // increment the wait group counter wg.Add(1) go func() { - app.listenBeginBlock(req, res, streamingListener, wg) + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + app.halt() + } }() } else { go func() { @@ -216,43 +219,6 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg return res } -// listenBeginBlock asynchronously processes BeginBlock state change events. -// The listener must complete its work before the global threshold is reached. -// Otherwise, all work will be abandoned and resources released. -func (app *BaseApp) listenBeginBlock( - req abci.RequestBeginBlock, - res abci.ResponseBeginBlock, - streamingListener ABCIListener, - wg *sync.WaitGroup, -) { - defer wg.Done() - - // Set timer so goroutines don't block indefinitely - ctx, cancel := context.WithTimeout(context.Background(), app.globalWaitLimit*time.Second) - defer cancel() - - var listenErr error - ch := make(chan struct{}) - - go func(ch chan struct{}) { - if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { - listenErr = err - } - ch <- struct{}{} - }(ch) - - select { - case <-ch: - case <-ctx.Done(): - listenErr = ctx.Err() - } - - if listenErr != nil { - app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", listenErr) - app.halt() - } -} - // EndBlock implements the ABCI interface. func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBlock) { @@ -277,7 +243,12 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc // increment the wait group counter wg.Add(1) go func() { - app.listenEndBlock(req, res, streamingListener, wg) + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err) + app.halt() + } }() } else { go func() { @@ -293,43 +264,6 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc return res } -// listenEndBlock asynchronously processes BeginBlock state change events. 
-// The listener must complete its work before the global threshold is reached. -// Otherwise, all work will be abandoned and resources released. -func (app *BaseApp) listenEndBlock( - req abci.RequestEndBlock, - res abci.ResponseEndBlock, - streamingListener ABCIListener, - wg *sync.WaitGroup, -) { - defer wg.Done() - - // Set timer so goroutines don't block indefinitely - ctx, cancel := context.WithTimeout(context.Background(), app.globalWaitLimit*time.Second) - defer cancel() - - var listenErr error - ch := make(chan struct{}) - - go func(ch chan struct{}) { - if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil { - listenErr = err - } - ch <- struct{}{} - }(ch) - - select { - case <-ch: - case <-ctx.Done(): - listenErr = ctx.Err() - } - - if listenErr != nil { - app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", listenErr) - app.halt() - } -} - // CheckTx implements the ABCI interface and executes a tx in CheckTx mode. In // CheckTx mode, messages are not executed. This means messages are only validated // and only the AnteHandler is executed. State is persisted to the BaseApp's @@ -382,7 +316,12 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx // increment the wait group counter wg.Add(1) go func() { - app.listenDeliverTx(req, abciRes, streamingListener, wg) + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil { + app.logger.Error("DeliverTx listening hook failed", "err", err) + app.halt() + } }() } else { go func() { @@ -411,43 +350,6 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx return abciRes } -// listenEndBlock asynchronously processes BeginBlock state change events. -// The listener must complete its work before the global threshold is reached. -// Otherwise, all work will be abandoned and resources released. -func (app *BaseApp) listenDeliverTx( - req abci.RequestDeliverTx, - res abci.ResponseDeliverTx, - streamingListener ABCIListener, - wg *sync.WaitGroup, -) { - defer wg.Done() - - // Set timer so goroutines don't block indefinitely - ctx, cancel := context.WithTimeout(context.Background(), app.globalWaitLimit*time.Second) - defer cancel() - - var listenErr error - ch := make(chan struct{}) - - go func(ch chan struct{}) { - if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, res); err != nil { - listenErr = err - } - ch <- struct{}{} - }(ch) - - select { - case <-ch: - case <-ctx.Done(): - listenErr = ctx.Err() - } - - if listenErr != nil { - app.logger.Error("DeliverTx listening hook failed", "err", listenErr) - app.halt() - } -} - // Commit implements the ABCI interface. It will commit all state that exists in // the deliver state's multi-store and includes the resulting commit ID in the // returned abci.ResponseCommit. 
Commit will set the check state based on the From e08fbf72d6c8c0d21d9e31d5db3f04b9557f00dd Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Wed, 30 Mar 2022 13:56:06 -0500 Subject: [PATCH 34/43] use golang enum type for message key --- plugin/plugins/kafka/service/service.go | 43 +++++++++++++------------ 1 file changed, 23 insertions(+), 20 deletions(-) diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 52858603b102..8291ad32b0ec 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -33,19 +33,19 @@ the length-prefixed protobuf encoded `EndBlockEvent` request is written, and the */ // Event Kafka message key enum types for listen events. -type Event string +type Event int64 const ( - BeginBlockEvent Event = "BEGIN_BLOCK" - EndBlockEvent = "END_BLOCK" - DeliverTxEvent = "DELIVER_TX" + BEGIN_BLOCK Event = iota + END_BLOCK + DELIVER_TX ) // EventType Kafka message key enum types for the event types. -type EventType string +type EventType int64 const ( - RequestEventType EventType = "REQUEST" - ResponseEventType = "RESPONSE" - StateChangeEventType = "STATE_CHANGE" + REQUEST EventType = iota + RESPONSE + STATE_CHANGE ) // EventTypeValueTypeTopic Kafka topic name enum types @@ -62,7 +62,7 @@ const ( // MsgKeyFtm Kafka message composite key format enum types const ( - MsgKeyFtm = `{"block_height":%d,"event":"%s","event_id":%d,"event_type":"%s","event_type_id":%d}` + MsgKeyFtm = `{"block_height":%d,"event":%d,"event_id":%d,"event_type":%d,"event_type_id":%d}` ) var _ baseapp.StreamingService = (*KafkaStreamingService)(nil) @@ -160,22 +160,23 @@ func (kss *KafkaStreamingService) ListenBeginBlock( res abci.ResponseBeginBlock, ) error { kss.setBeginBlock(req) + event := int64(BEGIN_BLOCK) eventId := int64(1) eventTypeId := 1 // write req - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, BeginBlockEvent, eventId, RequestEventType, eventTypeId) + key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, REQUEST, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, string(BeginBlockReqTopic), key, &req); err != nil { return err } // write state changes - if err := kss.writeStateChange(ctx, string(BeginBlockEvent), eventId); err != nil { + if err := kss.writeStateChange(ctx, event, eventId); err != nil { return err } // write res - key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, EndBlockEvent, 1, ResponseEventType, 1) + key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, 1, RESPONSE, 1) if err := kss.writeAsJsonToKafka(ctx, BeginBlockResTopic, key, &res); err != nil { return err } @@ -196,22 +197,23 @@ func (kss *KafkaStreamingService) ListenDeliverTx( req abci.RequestDeliverTx, res abci.ResponseDeliverTx, ) error { + event := int64(DELIVER_TX) eventId := kss.getDeliverTxId() eventTypeId := 1 // write req - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, DeliverTxEvent, eventId, RequestEventType, eventTypeId) + key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, REQUEST, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, DeliverTxReqTopic, key, &req); err != nil { return err } // write state changes - if err := kss.writeStateChange(ctx, DeliverTxEvent, eventId); err != nil { + if err := kss.writeStateChange(ctx, event, eventId); err != nil { return err } // write res - key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, DeliverTxEvent, eventId, ResponseEventType, 1) + key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, RESPONSE, 1) if 
err := kss.writeAsJsonToKafka(ctx, DeliverTxResTopic, key, &res); err != nil { return err } @@ -232,22 +234,23 @@ func (kss *KafkaStreamingService) ListenEndBlock( req abci.RequestEndBlock, res abci.ResponseEndBlock, ) error { + event := int64(END_BLOCK) eventId := int64(1) eventTypeId := 1 // write req - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, EndBlockEvent, eventId, RequestEventType, eventTypeId) + key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, REQUEST, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, EndBlockReqTopic, key, &req); err != nil { return err } // write state changes - if err := kss.writeStateChange(ctx, EndBlockEvent, eventId); err != nil { + if err := kss.writeStateChange(ctx, event, eventId); err != nil { return err } // write res - key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, EndBlockEvent, eventId, ResponseEventType, eventTypeId) + key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, RESPONSE, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, EndBlockResTopic, key, &res); err != nil { return err } @@ -298,13 +301,13 @@ func (kss *KafkaStreamingService) Close() error { return nil } -func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event string, eventId int64) error { +func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event int64, eventId int64) error { // write all state changes cached for this stage to Kafka kss.stateCacheLock.Lock() kodec := kss.codec.(*codec.ProtoCodec) kvPair := new(types.StoreKVPair) for i, stateChange := range kss.stateCache { - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, StateChangeEventType, i+1) + key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, STATE_CHANGE, i+1) if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { return err } From 603dca40ccc2049ce0bb1b510a1bc9fddfde8e49 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Wed, 30 Mar 2022 13:56:46 -0500 Subject: [PATCH 35/43] default to fire-and-forget for sim testing --- sim-state-listening.mk | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sim-state-listening.mk b/sim-state-listening.mk index f6313b17e6c8..e01d67e91605 100644 --- a/sim-state-listening.mk +++ b/sim-state-listening.mk @@ -25,7 +25,7 @@ test-sim-nondeterminism-state-listening-kafka: @-go test -mod=readonly $(SIMAPP) -run TestAppStateDeterminismWithStateListening -Enabled=true \ -NumBlocks=50 -BlockSize=100 -Commit=true -Period=0 -v -timeout 24h \ - -StateListeningPlugin=kafka -HaltAppOnDeliveryError=true + -StateListeningPlugin=kafka -HaltAppOnDeliveryError=false @echo "Stopping Kafka..." 
@-docker-compose -f plugin/plugins/kafka/docker-compose.yml down From 08d6de921efa58cfb888ad4ba75bc391902bb0fc Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Mon, 4 Apr 2022 22:15:30 -0500 Subject: [PATCH 36/43] serialize to protobuf binary --- plugin/plugins/kafka/proto/msg_key.proto | 24 ++ plugin/plugins/kafka/service/msg_key.pb.go | 300 +++++++++++++++++++++ plugin/plugins/kafka/service/service.go | 65 +++-- 3 files changed, 370 insertions(+), 19 deletions(-) create mode 100644 plugin/plugins/kafka/proto/msg_key.proto create mode 100644 plugin/plugins/kafka/service/msg_key.pb.go diff --git a/plugin/plugins/kafka/proto/msg_key.proto b/plugin/plugins/kafka/proto/msg_key.proto new file mode 100644 index 000000000000..bf91edb4e4a6 --- /dev/null +++ b/plugin/plugins/kafka/proto/msg_key.proto @@ -0,0 +1,24 @@ +syntax = "proto3"; + +option go_package = "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service"; + +option java_multiple_files = true; +option java_package = "network.cosmos.listening.plugins.kafka.service"; + +message MsgKey { + int64 block_height = 1 [json_name = "block_height"]; + enum Event { + BEGIN_BLOCK = 0; + END_BLOCK = 1; + DELIVER_TX = 2; + } + Event event = 2; + int64 event_id = 3 [json_name = "event_id"]; + enum EventType { + REQUEST = 0; + RESPONSE = 1; + STATE_CHANGE = 2; + } + EventType event_type = 4 [json_name = "event_type"]; + int64 event_type_id = 5 [json_name = "event_type_id"]; +} \ No newline at end of file diff --git a/plugin/plugins/kafka/service/msg_key.pb.go b/plugin/plugins/kafka/service/msg_key.pb.go new file mode 100644 index 000000000000..dddfe588aefd --- /dev/null +++ b/plugin/plugins/kafka/service/msg_key.pb.go @@ -0,0 +1,300 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.27.1 +// protoc v3.17.3 +// source: msg_key.proto + +package service + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type MsgKey_Event int32 + +const ( + MsgKey_BEGIN_BLOCK MsgKey_Event = 0 + MsgKey_END_BLOCK MsgKey_Event = 1 + MsgKey_DELIVER_TX MsgKey_Event = 2 +) + +// Enum value maps for MsgKey_Event. +var ( + MsgKey_Event_name = map[int32]string{ + 0: "BEGIN_BLOCK", + 1: "END_BLOCK", + 2: "DELIVER_TX", + } + MsgKey_Event_value = map[string]int32{ + "BEGIN_BLOCK": 0, + "END_BLOCK": 1, + "DELIVER_TX": 2, + } +) + +func (x MsgKey_Event) Enum() *MsgKey_Event { + p := new(MsgKey_Event) + *p = x + return p +} + +func (x MsgKey_Event) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MsgKey_Event) Descriptor() protoreflect.EnumDescriptor { + return file_msg_key_proto_enumTypes[0].Descriptor() +} + +func (MsgKey_Event) Type() protoreflect.EnumType { + return &file_msg_key_proto_enumTypes[0] +} + +func (x MsgKey_Event) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MsgKey_Event.Descriptor instead. 
+func (MsgKey_Event) EnumDescriptor() ([]byte, []int) { + return file_msg_key_proto_rawDescGZIP(), []int{0, 0} +} + +type MsgKey_EventType int32 + +const ( + MsgKey_REQUEST MsgKey_EventType = 0 + MsgKey_RESPONSE MsgKey_EventType = 1 + MsgKey_STATE_CHANGE MsgKey_EventType = 2 +) + +// Enum value maps for MsgKey_EventType. +var ( + MsgKey_EventType_name = map[int32]string{ + 0: "REQUEST", + 1: "RESPONSE", + 2: "STATE_CHANGE", + } + MsgKey_EventType_value = map[string]int32{ + "REQUEST": 0, + "RESPONSE": 1, + "STATE_CHANGE": 2, + } +) + +func (x MsgKey_EventType) Enum() *MsgKey_EventType { + p := new(MsgKey_EventType) + *p = x + return p +} + +func (x MsgKey_EventType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (MsgKey_EventType) Descriptor() protoreflect.EnumDescriptor { + return file_msg_key_proto_enumTypes[1].Descriptor() +} + +func (MsgKey_EventType) Type() protoreflect.EnumType { + return &file_msg_key_proto_enumTypes[1] +} + +func (x MsgKey_EventType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use MsgKey_EventType.Descriptor instead. +func (MsgKey_EventType) EnumDescriptor() ([]byte, []int) { + return file_msg_key_proto_rawDescGZIP(), []int{0, 1} +} + +type MsgKey struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BlockHeight int64 `protobuf:"varint,1,opt,name=block_height,proto3" json:"block_height,omitempty"` + Event MsgKey_Event `protobuf:"varint,2,opt,name=event,proto3,enum=MsgKey_Event" json:"event,omitempty"` + EventId int64 `protobuf:"varint,3,opt,name=event_id,proto3" json:"event_id,omitempty"` + EventType MsgKey_EventType `protobuf:"varint,4,opt,name=event_type,proto3,enum=MsgKey_EventType" json:"event_type,omitempty"` + EventTypeId int64 `protobuf:"varint,5,opt,name=event_type_id,proto3" json:"event_type_id,omitempty"` +} + +func (x *MsgKey) Reset() { + *x = MsgKey{} + if protoimpl.UnsafeEnabled { + mi := &file_msg_key_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MsgKey) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MsgKey) ProtoMessage() {} + +func (x *MsgKey) ProtoReflect() protoreflect.Message { + mi := &file_msg_key_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MsgKey.ProtoReflect.Descriptor instead. 
+func (*MsgKey) Descriptor() ([]byte, []int) { + return file_msg_key_proto_rawDescGZIP(), []int{0} +} + +func (x *MsgKey) GetBlockHeight() int64 { + if x != nil { + return x.BlockHeight + } + return 0 +} + +func (x *MsgKey) GetEvent() MsgKey_Event { + if x != nil { + return x.Event + } + return MsgKey_BEGIN_BLOCK +} + +func (x *MsgKey) GetEventId() int64 { + if x != nil { + return x.EventId + } + return 0 +} + +func (x *MsgKey) GetEventType() MsgKey_EventType { + if x != nil { + return x.EventType + } + return MsgKey_REQUEST +} + +func (x *MsgKey) GetEventTypeId() int64 { + if x != nil { + return x.EventTypeId + } + return 0 +} + +var File_msg_key_proto protoreflect.FileDescriptor + +var file_msg_key_proto_rawDesc = []byte{ + 0x0a, 0x0d, 0x6d, 0x73, 0x67, 0x5f, 0x6b, 0x65, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xb9, 0x02, 0x0a, 0x06, 0x4d, 0x73, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x22, 0x0a, 0x0c, 0x62, 0x6c, + 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x65, 0x69, 0x67, 0x68, 0x74, 0x12, 0x23, + 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, + 0x4d, 0x73, 0x67, 0x4b, 0x65, 0x79, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x12, + 0x31, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x11, 0x2e, 0x4d, 0x73, 0x67, 0x4b, 0x65, 0x79, 0x2e, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x69, 0x64, 0x22, 0x37, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x12, 0x0f, 0x0a, 0x0b, 0x42, 0x45, 0x47, 0x49, 0x4e, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, + 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x45, 0x4e, 0x44, 0x5f, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, + 0x01, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x4c, 0x49, 0x56, 0x45, 0x52, 0x5f, 0x54, 0x58, 0x10, + 0x02, 0x22, 0x38, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, + 0x0a, 0x07, 0x52, 0x45, 0x51, 0x55, 0x45, 0x53, 0x54, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x52, + 0x45, 0x53, 0x50, 0x4f, 0x4e, 0x53, 0x45, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x53, 0x54, 0x41, + 0x54, 0x45, 0x5f, 0x43, 0x48, 0x41, 0x4e, 0x47, 0x45, 0x10, 0x02, 0x42, 0x65, 0x0a, 0x26, 0x6e, + 0x65, 0x74, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2e, 0x6c, 0x69, + 0x73, 0x74, 0x65, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x2e, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, 0x73, 0x2f, 0x63, 0x6f, 0x73, 0x6d, 0x6f, + 0x73, 0x2d, 0x73, 0x64, 0x6b, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x61, 0x66, 0x6b, 0x61, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_msg_key_proto_rawDescOnce sync.Once + file_msg_key_proto_rawDescData = 
file_msg_key_proto_rawDesc +) + +func file_msg_key_proto_rawDescGZIP() []byte { + file_msg_key_proto_rawDescOnce.Do(func() { + file_msg_key_proto_rawDescData = protoimpl.X.CompressGZIP(file_msg_key_proto_rawDescData) + }) + return file_msg_key_proto_rawDescData +} + +var file_msg_key_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_msg_key_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_msg_key_proto_goTypes = []interface{}{ + (MsgKey_Event)(0), // 0: MsgKey.Event + (MsgKey_EventType)(0), // 1: MsgKey.EventType + (*MsgKey)(nil), // 2: MsgKey +} +var file_msg_key_proto_depIdxs = []int32{ + 0, // 0: MsgKey.event:type_name -> MsgKey.Event + 1, // 1: MsgKey.event_type:type_name -> MsgKey.EventType + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_msg_key_proto_init() } +func file_msg_key_proto_init() { + if File_msg_key_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_msg_key_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MsgKey); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_msg_key_proto_rawDesc, + NumEnums: 2, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_msg_key_proto_goTypes, + DependencyIndexes: file_msg_key_proto_depIdxs, + EnumInfos: file_msg_key_proto_enumTypes, + MessageInfos: file_msg_key_proto_msgTypes, + }.Build() + File_msg_key_proto = out.File + file_msg_key_proto_rawDesc = nil + file_msg_key_proto_goTypes = nil + file_msg_key_proto_depIdxs = nil +} diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 8291ad32b0ec..978690befb1e 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -8,8 +8,8 @@ import ( "github.com/cosmos/cosmos-sdk/codec" "github.com/cosmos/cosmos-sdk/store/types" sdk "github.com/cosmos/cosmos-sdk/types" - "github.com/gogo/protobuf/proto" abci "github.com/tendermint/tendermint/abci/types" + "google.golang.org/protobuf/proto" "sync" ) @@ -162,10 +162,16 @@ func (kss *KafkaStreamingService) ListenBeginBlock( kss.setBeginBlock(req) event := int64(BEGIN_BLOCK) eventId := int64(1) - eventTypeId := 1 + eventTypeId := int64(1) + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(REQUEST), + EventTypeId: eventTypeId, + } // write req - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, REQUEST, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, string(BeginBlockReqTopic), key, &req); err != nil { return err } @@ -176,7 +182,7 @@ func (kss *KafkaStreamingService) ListenBeginBlock( } // write res - key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, 1, RESPONSE, 1) + key.EventType = MsgKey_EventType(RESPONSE) if err := kss.writeAsJsonToKafka(ctx, BeginBlockResTopic, key, &res); err != nil { return err } @@ -199,10 +205,16 @@ func (kss *KafkaStreamingService) ListenDeliverTx( ) error { event := int64(DELIVER_TX) eventId := kss.getDeliverTxId() - eventTypeId := 1 + eventTypeId 
:= int64(1) + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(REQUEST), + EventTypeId: eventTypeId, + } // write req - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, REQUEST, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, DeliverTxReqTopic, key, &req); err != nil { return err } @@ -213,7 +225,7 @@ func (kss *KafkaStreamingService) ListenDeliverTx( } // write res - key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, RESPONSE, 1) + key.EventType = MsgKey_EventType(RESPONSE) if err := kss.writeAsJsonToKafka(ctx, DeliverTxResTopic, key, &res); err != nil { return err } @@ -236,10 +248,16 @@ func (kss *KafkaStreamingService) ListenEndBlock( ) error { event := int64(END_BLOCK) eventId := int64(1) - eventTypeId := 1 + eventTypeId := int64(1) + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(REQUEST), + EventTypeId: eventTypeId, + } // write req - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, REQUEST, eventTypeId) if err := kss.writeAsJsonToKafka(ctx, EndBlockReqTopic, key, &req); err != nil { return err } @@ -250,7 +268,7 @@ func (kss *KafkaStreamingService) ListenEndBlock( } // write res - key = fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, RESPONSE, eventTypeId) + key.EventType = MsgKey_EventType(RESPONSE) if err := kss.writeAsJsonToKafka(ctx, EndBlockResTopic, key, &res); err != nil { return err } @@ -304,11 +322,16 @@ func (kss *KafkaStreamingService) Close() error { func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event int64, eventId int64) error { // write all state changes cached for this stage to Kafka kss.stateCacheLock.Lock() - kodec := kss.codec.(*codec.ProtoCodec) kvPair := new(types.StoreKVPair) for i, stateChange := range kss.stateCache { - key := fmt.Sprintf(MsgKeyFtm, kss.currentBlockNumber, event, eventId, STATE_CHANGE, i+1) - if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { + key := &MsgKey{ + BlockHeight: kss.currentBlockNumber, + Event: MsgKey_Event(event), + EventId: eventId, + EventType: MsgKey_EventType(STATE_CHANGE), + EventTypeId: int64(i + 1), + } + if err := kss.codec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { return err } if err := kss.writeAsJsonToKafka(ctx, StateChangeTopic, key, kvPair); err != nil { @@ -326,14 +349,18 @@ func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event int64, func (kss *KafkaStreamingService) writeAsJsonToKafka( ctx sdk.Context, topic string, - key string, - data proto.Message, + msgKey *MsgKey, + msgValue codec.ProtoMarshaler, ) error { - kodec := kss.codec.(*codec.ProtoCodec) - json, err := kodec.MarshalJSON(data) + key, err := proto.Marshal(msgKey) if err != nil { return err } + value, err := kss.codec.Marshal(msgValue) + if err != nil { + return err + } + if len(kss.topicPrefix) > 0 { topic = fmt.Sprintf("%s-%s", kss.topicPrefix, topic) } @@ -344,8 +371,8 @@ func (kss *KafkaStreamingService) writeAsJsonToKafka( // this means that the producer operates in a fire-and-forget mode if err := kss.producer.Produce(&kafka.Message{ TopicPartition: kafka.TopicPartition{Topic: &topic, Partition: kafka.PartitionAny}, - Value: json, - Key: []byte(key), + Key: key, + Value: value, }, kss.deliveryChan); err != nil { return err } From 1f2ed498eea83f865de973bc9bd3b148c74f2ef5 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj 
Date: Mon, 11 Apr 2022 18:46:40 -0500
Subject: [PATCH 37/43] remove global wait timeout

---
 baseapp/baseapp.go | 5 -----
 baseapp/options.go | 7 -------
 simapp/app.go      | 7 -------
 simapp/sim_test.go | 1 -
 4 files changed, 20 deletions(-)

diff --git a/baseapp/baseapp.go b/baseapp/baseapp.go
index c7e98e79d2ab..ba60a614b684 100644
--- a/baseapp/baseapp.go
+++ b/baseapp/baseapp.go
@@ -8,7 +8,6 @@ import (
 	"github.com/tendermint/tendermint/libs/log"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	dbm "github.com/tendermint/tm-db"
-	"time"

 	"github.com/cosmos/cosmos-sdk/codec/types"
 	"github.com/cosmos/cosmos-sdk/snapshots"
@@ -128,10 +127,6 @@ type BaseApp struct { // nolint: maligned
 	// abciListeners for hooking into the ABCI message processing of the BaseApp
 	// and exposing the requests and responses to external consumers
 	abciListeners []ABCIListener
-
-	// globalWaitLimit is the maximum amount of time the BaseApp will wait
-	// for ABCIListeners to complete their work.
-	globalWaitLimit time.Duration
 }

 // NewBaseApp returns a reference to an initialized BaseApp. It accepts a
diff --git a/baseapp/options.go b/baseapp/options.go
index 2fa2cbe1c404..e477171e5486 100644
--- a/baseapp/options.go
+++ b/baseapp/options.go
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	dbm "github.com/tendermint/tm-db"
 	"io"
-	"time"

 	"github.com/cosmos/cosmos-sdk/codec/types"
 	"github.com/cosmos/cosmos-sdk/snapshots"
@@ -245,9 +244,3 @@ func (app *BaseApp) SetStreamingService(s StreamingService) {
 	// BaseApp will pass BeginBlock, DeliverTx, and EndBlock requests and responses to the streaming services to update their ABCI context
 	app.abciListeners = append(app.abciListeners, s)
 }
-
-// SetGlobalWaitLimit is used to set the maximum amount of time the BaseApp will wait for ABCIListeners
-// to finish their work before halting
-func (app *BaseApp) SetGlobalWaitLimit(t time.Duration) {
-	app.globalWaitLimit = t
-}
\ No newline at end of file
diff --git a/simapp/app.go b/simapp/app.go
index a797ebab7ae1..6d6f13cf0233 100644
--- a/simapp/app.go
+++ b/simapp/app.go
@@ -236,13 +236,6 @@ func NewSimApp(
 	pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY)
 	if cast.ToBool(appOpts.Get(pluginsOnKey)) {
-		// set the global wait limit for state streaming plugins
-		pluginsStreamingKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.STREAMING_TOML_KEY)
-		globalWaitLimitKey := fmt.Sprintf("%s.%s", pluginsStreamingKey, plugin.GLOBAL_WAIT_LIMIT_TOML_KEY)
-		globalWaitLimit := cast.ToDuration(appOpts.Get(globalWaitLimitKey))
-		if globalWaitLimit > 0 {
-			bApp.SetGlobalWaitLimit(globalWaitLimit)
-		}
 		// this loads the preloaded and any plugins found in `plugins.dir`
 		// if their names match those in the `plugins.enabled` list.
		pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
diff --git a/simapp/sim_test.go b/simapp/sim_test.go
index af649b70c39b..7a724c393ca6 100644
--- a/simapp/sim_test.go
+++ b/simapp/sim_test.go
@@ -469,7 +469,6 @@ func loadAppOptions() types.AppOptions {
 	m["plugins.on"] = true
 	m["plugins.enabled"] = []string{StateListeningPlugin}
 	m["plugins.dir"] = ""
-	m["plugins.streaming.global_wait_limit"] = 120
 	// file plugin
 	m["plugins.streaming.file.keys"] = keys
 	m["plugins.streaming.file.write_dir"] = ""

From 2bfc803a5732dee06d244a635b41d3701b368580 Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Tue, 12 Apr 2022 12:10:49 -0500
Subject: [PATCH 38/43] update plugin docs

---
 plugin/README.md               |  36 +++--
 plugin/plugins/kafka/README.md | 253 +++++++--------------------------
 plugin/plugins/trace/README.md |  35 +++--
 3 files changed, 85 insertions(+), 239 deletions(-)

diff --git a/plugin/README.md b/plugin/README.md
index 9069ea7ab2ef..dc5a94bfaeed 100644
--- a/plugin/README.md
+++ b/plugin/README.md
@@ -28,20 +28,22 @@ type Plugin interface {
 Specific plugin types extend this interface, enabling them to work with the loader tooling defined in the
 [loader sub-directory](./loader).
 
-The plugin system itself is configured using the `plugins` TOML mapping in the App's app.toml file. There are three
-parameters for configuring the plugins: `plugins.on`, `plugins.disabled` and `plugins.dir`. `plugins.on` is a bool that
+The plugin system itself is configured using the `plugins` TOML mapping in the App's `app.toml` file. There are three
+parameters for configuring the plugins: `plugins.on`, `plugins.enabled` and `plugins.dir`. `plugins.on` is a bool that
 turns on or off the plugin system at large, `plugins.dir` directs the system to a directory to load plugins from, and
-`plugins.disabled` is a list of names for the plugins we want to disable (useful for disabling preloaded plugins).
+`plugins.enabled` is a list of enabled plugin names.
 
 ```toml
 [plugins]
     on = false # turn the plugin system, as a whole, on or off
-    disabled = ["list", "of", "plugin", "names", "to", "disable"]
+    enabled = ["list", "of", "plugin", "names", "to", "enable"]
    dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins"
 ```
 
 As mentioned above, some plugins can be preloaded. This means they do not need to be loaded from the specified `plugins.dir` and instead
-are loaded by default. At this time the only preloaded plugin is the [file streaming service plugin](./plugins/file).
+are loaded by default. Note, both preloaded and non-preloaded plugins must appear in the `plugins.enabled` list for the app to send events to them.
+This provides node operators with the ability to opt in and enable only plugins of interest. At this time the only preloaded plugins are:
+the [file streaming service plugin](./plugins/file), the [trace streaming service plugin](./plugins/trace) and the [kafka streaming service plugin](./plugins/kafka).
 Plugins can be added to the preloaded set by adding the plugin to the [plugins dir](../../plugin/plugin.go) and modifying the [preload_list](../../plugin/loader/preload_list).
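+
+For example, a node operator who wants only the preloaded file plugin enabled might use something like the
+following (a sketch; plugin names match the directory names under `plugin/plugins/*`):
+
+```toml
+[plugins]
+    on = true
+    enabled = ["file"] # opt in to the preloaded file plugin only
+    dir = ""           # empty: fall back to the default plugins directory
+```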
In your application, if `plugins.on` is set to `true`, use this to direct the invocation of `NewPluginLoader` and walk through
@@ -68,6 +70,7 @@ func NewSimApp(
 	pluginsOnKey := fmt.Sprintf("%s.%s", plugin.PLUGINS_TOML_KEY, plugin.PLUGINS_ON_TOML_KEY)
 	if cast.ToBool(appOpts.Get(pluginsOnKey)) {
 		// this loads the preloaded and any plugins found in `plugins.dir`
+		// if their names match those in the `plugins.enabled` list.
 		pluginLoader, err := loader.NewPluginLoader(appOpts, logger)
 		if err != nil {
 			// handle error
@@ -118,38 +121,31 @@ type StateStreamingPlugin interface {
 }
 ```
 
-A `StateStreamingPlugin` is configured from within an App using the `AppOptions` loaded from the app.toml file.
+A `StateStreamingPlugin` is configured from within an App using the `AppOptions` loaded from the `app.toml` file.
 Every `StateStreamingPlugin` will be configured within the `plugins.streaming` TOML mapping. The exact keys/parameters
 present in this mapping will be dependent on the specific `StateStreamingPlugin`, but we will introduce some standards
 here using the file `StateStreamingPlugin`:
 
 Plugin TOML configuration should be split into separate sub-tables for each kind of plugin (e.g. `plugins.streaming`).
-For streaming plugins a parameter `plugins.streaming.global_ack_wait_limit` is used to configure the maximum amount of time
-the BaseApp will wait for positive acknowledgement of receipt by the external streaming services before it considers
-the message relay to be a failure.
 
 Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`).
 It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys
-(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.ack`)
-that signifies whether the service operates in a fire-and-forget capacity or the BaseApp should require positive
-acknowledgement of message receipt by the service. In the case of "ack" mode, the service may also need to be
-configured with an acknowledgement wait limit specific to that individual service (e.g. `plugins.streaming.kafka.ack_wait_limit`).
-The file `StreamingService` does not have an individual `ack_wait_limit` since it operates synchronously with the App.
+(e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.halt_app_on_delivery_error`)
+that signifies whether the service operates in a fire-and-forget capacity, or whether the BaseApp should halt in case of a delivery error by the plugin service.
+The file `StreamingService` does not have an individual `halt_app_on_delivery_error` since it operates synchronously with the App.
 
 e.g.
 ```toml
 [plugins]
     on = false # turn the plugin system, as a whole, on or off
-    disabled = ["list", "of", "plugin", "names", "to", "disable"]
+    enabled = ["list", "of", "plugin", "names", "to", "enable"]
     dir = "the directory to load non-preloaded plugins from; defaults to cosmos-sdk/plugin/plugins"
     [plugins.streaming] # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
-        # maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
-        # in milliseconds
-        global_ack_wait_limit = 500
         [plugins.streaming.file] # the specific parameters for the file streaming service plugin
             keys = ["list", "of", "store", "keys", "we", "want", "to", "expose", "for", "this", "streaming", "service"]
             write_dir = "path to the write directory"
-            prefix = "optional prefix to prepend to the generated file names"
-            ack = "false" # false == fire-and-forget; true == sends a message receipt success/fail signal
+            prefix = "optional prefix to prepend to the generated file names"
+            # Whether or not to halt the application when the plugin fails to deliver message(s).
+            halt_app_on_delivery_error = false # false = fire-and-forget
 ```
diff --git a/plugin/plugins/kafka/README.md b/plugin/plugins/kafka/README.md
index 81674afef7b8..6265f7861881 100644
--- a/plugin/plugins/kafka/README.md
+++ b/plugin/plugins/kafka/README.md
@@ -6,99 +6,83 @@ This plugin demonstrates how to listen to state changes of individual `KVStores`
 
 - [Plugin design](#plugin-design)
-  - [Channel-Based producer](#channel-based-producer)
+  - [Function-Based producer](#function-based-producer)
   - [Delivery Report handler](#delivery-report-handler)
   - [Message serde](#message-serde)
+  - [Message key](#message-key)
 - [Example configuration](#example-configuration)
 - [Testing the plugin](#testing-the-plugin)
-  - [Confluent Platform](#confluent-platform)
-  - [Docker](#docker)
-  - [Schema Registry](#schema-registry)
-  - [KSQL examples](#ksql-examples)
+  - [Confluent Platform](#confluent-platform)
 
 ## Plugin design
 
-The plugin was build using [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go); a lightwieght wrapper around [librdkafka](https://github.com/edenhill/librdkafka).
+The plugin was built using [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go), a lightweight wrapper around [librdkafka](https://github.com/edenhill/librdkafka).
 
-This particular implmentation uses:
+This particular implementation uses:
-* `Channel-Based producer` - Faster than the function-based `producer.Produce()`.
+* `Function-Based producer` - Delivers messages to Kafka with `producer.Produce()`.
 * `Delivery reports handler` - Notifies the application of success or failure to deliver messages to Kafka.
 
-### Channel-Based producer
-The plugin uses the `producer.Producerchannel()` to deliver messages to Kafka.
-
+### Function-Based producer
+The plugin uses `producer.Produce()` to deliver messages to Kafka. Delivery reports are emitted on the `producer.Events()` channel or a dedicated private channel.
+Any errors that occur during delivery propagate up the stack and `halt` the app when `plugins.streaming.kafka.halt_app_on_delivery_error = true`.
 
 Pros:
-* Proper channel backpressure if `librdkafka`'s internal queue is full. The queue size can be controlled by setting.
-* Message order is preserved (guaranteed by the producer API).
-* Faster than the `function-based` async producer.
+* More idiomatic Go than the channel-based API.
 
 Cons:
-* Double queueing: messages are first queued in the channel and the inside librdkafka. the Size of the channel is configurable via `queue.buffering.max.messages`.
+* `Produce()` is a non-blocking call; if the internal librdkafka queue is full, the call will fail.
+
+*The producer's queue size is configurable with the `queue.buffering.max.messages` property (default: 100000). See the [configuration docs](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md) for further details.*
 
 ### Delivery Report handler
-Producing is an asynchronous operation. Therefore, the client notifies the application (per-message) of success or failure through delivery reports. Deliver reports are by default emmitted on the `producer.Events()` channel as `*kafka.Message`. One needs to check `msg.TopicPartition.Error` for `nil` to find out if the message was successfully delivered or not.
+Producing is an asynchronous operation. Therefore, the client notifies the application (per-message) of success or failure through delivery reports.
+Delivery reports are emitted by default on the `producer.Events()` channel as `*kafka.Message`. One needs to check that `msg.TopicPartition.Error` is `nil` to confirm successful delivery.
+The plugin implementation passes a private delivery channel to `Produce(msg *Message, deliveryChan chan Event)` and checks the delivery report of each message.
+When `plugins.streaming.kafka.halt_app_on_delivery_error = true`, the app will `halt` if delivery of any message fails. This helps keep state in sync between the node and Kafka.
 
 Pros:
-* Can be used to propagate success or failures to the application.
-* Can be used to track the messages produced.
-* Can be turned off by setting `"go.delivery.reports": false` for a fire-and-forget scenario.
+* Used to propagate success or failure to the application.
+* Used to track the messages produced.
+* Can be turned off by setting `"go.delivery.reports": false` for a fire-and-forget scenario.
 
 Cons:
-* Must be handled in a go routine which makes it difficult to propagate errors to the `WriterListner.onWrite()`.
+* Slower than fire-and-forget mode (`plugins.streaming.kafka.halt_app_on_delivery_error = false`), as each message must be checked for successful delivery.
 
 ### Message serde
-As of this writing there is no `golang` support for `serialization/deserialization` of proto messages for the Confluent Schema Registry. Because of this limitiation, the Marshalled JSON data is saved instead.
+As of this writing there is no Go support for serialization/deserialization of proto message schemas with the Confluent Schema Registry. Therefore, the Kafka plugin produces messages in proto binary format without a registered schema.
 
-Note, you can register the proto messages with the schema registry by generating the `Java` code and using the supported [Java](https://github.com/confluentinc/schema-registry/blob/master/protobuf-serializer/src/main/java/io/confluent/kafka/serializers/protobuf/KafkaProtobufSerializer.java) client library for the schema registry to automatically register the proto messages.
+Note, proto message schemas can be registered with the Confluent Schema Registry by [generating the Java code](https://developers.google.com/protocol-buffers/docs/javatutorial) of the Cosmos SDK proto files and then using the supported Java libraries. See the Confluent [docs](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html) for how to do this.
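+
+Downstream consumers can therefore decode the raw bytes directly with the generated Go types. Below is a minimal
+consumer-side sketch (not part of the plugin); it assumes the generated type for [msg_key.proto](./proto/msg_key.proto)
+shown in the next section, the SDK's `StoreKVPair` type, and a `*kafka.Message` returned by a consumer poll:
+
+```go
+// decode the proto-encoded key and value of a message from the state change topic
+// assumes imports: github.com/gogo/protobuf/proto, the SDK store types package,
+// and the plugin's generated `service` package
+key := new(service.MsgKey)
+if err := proto.Unmarshal(msg.Key, key); err != nil {
+    // handle error
+}
+kvPair := new(types.StoreKVPair)
+if err := proto.Unmarshal(msg.Value, kvPair); err != nil {
+    // handle error
+}
+// e.g. inspect which block and store the write came from
+fmt.Printf("height=%d store=%s delete=%t\n", key.BlockHeight, kvPair.StoreKey, kvPair.Delete)
+```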
 #### Message `key`
-To be able to identify an track messages in Kafka the `key` is made up of the following properties:
-* `block height` - BIGINT
-* `event` - BEGIN_BLOCK, END_BLOCK, DELIVER_TX
-* `event_id` - BIGINT (increments for DELIVER_TX)
-* `event_type` - REQUEST, RESPONSE, STATE_CHANGE
-* `event_type_id` - BIGINT (increments for STATE_CHANGE)
-
-Example:
-```
-// first tx
-{
-    "block_height": 1,
-    "event": "DELIVER_TX",
-    "event_id": 1,
-    "event_type": "REQUEST",
-    "event_type_id ": 1
-}
-// second tx
-{
-    "block_height": 1,
-    "event": "DELIVER_TX",
-    "event_id": 2, // incrementing
-    "event_type": "REQUEST",
-    "event_type_id ": 1
-}
+To be able to identify and track messages in Kafka, a [msg_key.proto](./proto/msg_key.proto) was introduced to the plugin.
+
 ```
+syntax = "proto3";
 
-#### Message `value`
+option go_package = "github.com/cosmos/cosmos-sdk/plugin/plugins/kafka/service";
 
-The `value` structure is the Marshalled JSON structure of the request, response or the state change for begin block, end block, and deliver tx events.
+option java_multiple_files = true;
+option java_package = "network.cosmos.listening.plugins.kafka.service";
 
-Example:
-```
-{
-  "BLOCK_HEIGHT": 1,
-  "EVENT": "BEGIN_BLOCK",
-  "EVENT_ID": 1,
-  "EVENT_TYPE": "STATE_CHANGE",
-  "EVENT_TYPE_ID": 1,
-  "STORE_KEY": "mockStore1",
-  "DELETE": false,
-  "KEY": "AQID",
-  "VALUE": "AwIB"
+message MsgKey {
+  int64 block_height = 1 [json_name = "block_height"];
+  enum Event {
+    BEGIN_BLOCK = 0;
+    END_BLOCK = 1;
+    DELIVER_TX = 2;
+  }
+  Event event = 2;
+  int64 event_id = 3 [json_name = "event_id"];
+  enum EventType {
+    REQUEST = 0;
+    RESPONSE = 1;
+    STATE_CHANGE = 2;
+  }
+  EventType event_type = 4 [json_name = "event_type"];
+  int64 event_type_id = 5 [json_name = "event_type_id"];
 }
 ```
 
@@ -108,7 +92,7 @@ Below is an example of how to configure the Kafka plugin.
 
 ```
 # app.toml
-...
+. . .
 
 ###############################################################################
 ###                        Plugin system configuration                      ###
 ###############################################################################
 
 [plugins]
 
 # turn the plugin system, as a whole, on or off
 on = true
 
 # List of plugin names to enable from the plugin/plugins/*
 enabled = ["kafka"]
 
-# The directory to load non-preloaded plugins from; defaults to ./plugin/plugins
+# The directory to load non-preloaded plugins from; defaults to $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins
 dir = ""
 
-# a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
-[plugins.streaming]
-
-# maximum amount of time the BaseApp will await positive acknowledgement of message receipt from all streaming services
-# in milliseconds
-global_ack_wait_limit = 2000
 
 ###############################################################################
 ###                        Kafka Plugin configuration                       ###
 ###############################################################################
 
-# The specific parameters for the Kafka streaming service plugin
+# The specific parameters for the kafka streaming service plugin
 [plugins.streaming.kafka]
 
 # List of store keys we want to expose for this streaming service.
 keys = []
 
-# Optional topic prefix for the topic(s) where data will be stored
-topic_prefix = "block"
+# Optional prefix for topic names where data will be stored.
+topic_prefix = "pio"
 
 # Flush and wait for outstanding messages and requests to complete delivery. (milliseconds)
-flush_timeout_ms = 1500
+flush_timeout_ms = 5000
 
 # Whether or not to halt the application when the plugin fails to deliver message(s).
halt_app_on_delivery_error = true @@ -161,7 +139,7 @@ halt_app_on_delivery_error = true bootstrap_servers = "localhost:9092" # Client identifier -client_id = "my-app-id" +client_id = "pio-state-listening" # This field indicates the number of acknowledgements the leader # broker must receive from ISR brokers before responding to the request @@ -185,132 +163,7 @@ To execute the tests, run: make test-sim-nondeterminism-state-listening-kafka ``` -## Confluent Platform +### Confluent Platform If you're interested in viewing or querying events stored in kafka you can stand up the Confluent Platform stack with docker. - -*Visit the Confluent Platform [docs](https://docs.confluent.io/platform/current/quickstart/ce-docker-quickstart.html) for up to date docker instructions.* - -### Docker - -Spin up Confluent Platform. -``` -docker-compose -f plugin/plugins/kafka/docker-compose.yml up -d -``` - -You should see something like this: -``` -Creating network "kafka_default" with the default driver -Creating zookeeper ... done -Creating broker ... done -Creating schema-registry ... done -Creating rest-proxy ... done -Creating connect ... done -Creating ksqldb-server ... done -Creating ksql-datagen ... done -Creating ksqldb-cli ... done -Creating control-center ... done -``` - -Check status -``` -docker-compose ps -``` - -You should see something like this: -``` - Name Command State Ports ---------------------------------------------------------------------------------------------------------- -broker /etc/confluent/docker/run Up 0.0.0.0:9092->9092/tcp, 0.0.0.0:9101->9101/tcp -connect /etc/confluent/docker/run Up 0.0.0.0:8083->8083/tcp, 9092/tcp -control-center /etc/confluent/docker/run Up 0.0.0.0:9021->9021/tcp -ksql-datagen bash -c echo Waiting for K ... Up -ksqldb-cli /bin/sh Up -ksqldb-server /etc/confluent/docker/run Up 0.0.0.0:8088->8088/tcp -rest-proxy /etc/confluent/docker/run Up 0.0.0.0:8082->8082/tcp -schema-registry /etc/confluent/docker/run Up 0.0.0.0:8081->8081/tcp -zookeeper /etc/confluent/docker/run Up 0.0.0.0:2181->2181/tcp, 2888/tcp, 3888/tcp -``` - -### KSQL examples - -One huge advante of using Kafka with the Confluent Platform is the KSQL streaming engine. KSQL allows us to be able to write queries and create streams or tables from one or multiple Kafka topics (through joins) without having to write any code. - -Examples: - -Create a structured stream from the `block-state-change` topic containig raw data. This will make it easier to be able to fitler out specific events. -``` -CREATE OR REPLACE STREAM state_change_stream ( - block_height BIGINT KEY, /* k1 */ - event STRING KEY, /* k2 */ - event_id BIGINT KEY, /* k3 */ - event_type STRING KEY, /* k4 */ - event_type_id BIGINT KEY, /* k5 */ - store_key STRING, - `delete` BOOLEAN, - key STRING, - value STRING /* this may be a STRUC depending on the store type */ -) WITH (KAFKA_TOPIC='block-state-change', KEY_FORMAT='JSON', VALUE_FORMAT='JSON'); -``` - -Run the below query to see the messages in of this new stream. - -``` -SELECT * FROM state_change_stream EMIT CHANGES LIMIT 1; -``` - -Result: -``` -{ - "BLOCK_HEIGHT": 1, - "EVENT": "BEGIN_BLOCK", - "EVENT_ID": 1, - "EVENT_TYPE": "STATE_CHANGE", - "EVENT_TYPE_ID": 1, - "STORE_KEY": "mockStore1", - "delete": false, - "KEY": "AQID", - "VALUE": "AwIB" -} -``` - -Lets take it one step further and create a stream that contains only `DELIVER_TX` events. 
-
-```
-SET 'processing.guarantee' = 'exactly_once';
-
-CREATE OR REPLACE STREAM deliver_tx_state_change_stream
-    AS SELECT *
-    FROM STATE_CHANGE_STREAM
-    WHERE event = 'DELIVER_TX'
-    EMIT CHANGES;
-```
-
-Lets take a look at what the data looks like.
-
-```
-SELECT * FROM deliver_tx_state_change_stream EMIT CHANGES LIMIT 1;
-```
-
-Result:
-
-```
-{
-  "BLOCK_HEIGHT": 2,
-  "EVENT": "BEGIN_BLOCK",
-  "EVENT_ID": 1,
-  "EVENT_TYPE": "STATE_CHANGE",
-  "EVENT_TYPE_ID": 1,
-  "STORE_KEY": "acc",
-  "delete": false,
-  "KEY": "AQBhNv4khMI7PylvV6i1lSlSCleL",
-  "VALUE": "CiAvY29zbW9zLmF1dGgudjFiZXRhMS5CYXNlQWNjb3VudBJ8Ci1jb3Ntb3MxcXBzbmRsM3lzbnByazBlZmRhdDYzZHY0OTlmcTU0dXR0eWdncGsSRgofL2Nvc21vcy5jcnlwdG8uc2VjcDI1NmsxLlB1YktleRIjCiECcyIkZHE6G+gkK2TJEjko3LjNFgZ4Fmfu90jDkjlbojcYygEgAQ=="
-}
-```
-
-### Schema Registry
-
-Because `golang` lacks support to be able to register Protobuf messages with the schema registry, one needs to generate the Java code from the proto messages and use the [KafkaProtobufSerializer.java](https://github.com/confluentinc/schema-registry/blob/master/protobuf-serializer/src/main/java/io/confluent/kafka/serializers/protobuf/KafkaProtobufSerializer.java) to automatically register them. The Java libraries make this process exctreamly easy. Take a look [here](https://docs.confluent.io/platform/current/schema-registry/serdes-develop/serdes-protobuf.html) fro an example of how this is achived.
-
-
-Check out the [docs](https://docs.ksqldb.io/en/latest/) and this [post](https://www.confluent.io/blog/ksqldb-0-15-reads-more-message-keys-supports-more-data-types/) for more complex examples and a deeper understanding of KSQL.
+Visit the Confluent Platform [docs](https://docs.confluent.io/platform/current/quickstart/ce-docker-quickstart.html) for up-to-date Docker instructions.
diff --git a/plugin/plugins/trace/README.md b/plugin/plugins/trace/README.md
index 88ebd650fe38..aec40cfe1450 100644
--- a/plugin/plugins/trace/README.md
+++ b/plugin/plugins/trace/README.md
@@ -18,44 +18,41 @@ The plugin is setup to run as the `default` plugin. See `./plugin/loader/preload
 ```
 # app.toml
- ...
-
+ . . .
+
 ###############################################################################
 ###                          Plugin system configuration                    ###
 ###############################################################################
-
+
 [plugins]
-
+
   # turn the plugin system, as a whole, on or off
   on = true
-
+
   # List of plugin names to enable from the plugin/plugins/*
-  enabled = ["trace"]
-
-  # The directory to load non-preloaded plugins from; defaults to
+  enabled = ["trace"]
+
+  # The directory to load non-preloaded plugins from; defaults to $GOPATH/src/github.com/cosmos/cosmos-sdk/plugin/plugins
   dir = ""
-
-  # a mapping of plugin-specific streaming service parameters, mapped to their pluginFileName
-  [plugins.streaming]
-
+
 ###############################################################################
 ###                          Trace Plugin configuration                     ###
 ###############################################################################
-
-  # The specific parameters for the Kafka streaming service plugin
+
+  # The specific parameters for the trace streaming service plugin
   [plugins.streaming.trace]
-
+
   # List of store keys we want to expose for this streaming service.
   keys = []
-
+
   # In addition to block event info, print the data to stdout as well.
   print_data_to_stdout = false
 
   # Whether or not to halt the application when the plugin fails to deliver message(s).
-  halt_app_on_delivery_error = true
+  halt_app_on_delivery_error = false
 ```
-
-2. Run `make test-sim-nondeterminism` and wait for the tests to finish.
+
+2. Run `make test-sim-nondeterminism-state-listening-trace` and wait for the tests to finish.
 
 ## Plugin design

From 2625529519d2bf2c60f222918e26a48be2f5df4b Mon Sep 17 00:00:00 2001
From: Ergels Gaxhaj
Date: Tue, 12 Apr 2022 13:00:45 -0500
Subject: [PATCH 39/43] update ADR to reflect latest proposal

---
 docs/architecture/adr-038-state-listening.md | 252 +++++++++++--------
 1 file changed, 154 insertions(+), 98 deletions(-)

diff --git a/docs/architecture/adr-038-state-listening.md b/docs/architecture/adr-038-state-listening.md
index dfe188f89e76..835386f6500b 100644
--- a/docs/architecture/adr-038-state-listening.md
+++ b/docs/architecture/adr-038-state-listening.md
@@ -30,10 +30,10 @@ In a new file, `store/types/listening.go`, we will create a `WriteListener` inte
 ```go
 // WriteListener interface for streaming data out from a listenkv.Store
 type WriteListener interface {
-    // if value is nil then it was deleted
-    // storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores
-    // delete bool indicates if it was a delete; true: delete, false: set
-    OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error
+	// if value is nil then it was deleted
+	// storeKey indicates the source KVStore, to facilitate using the same WriteListener across separate KVStores
+	// delete bool indicates if it was a delete; true: delete, false: set
+	OnWrite(storeKey StoreKey, key []byte, value []byte, delete bool) error
 }
 ```
@@ -59,33 +59,33 @@ message StoreKVPair {
 // StoreKVPairWriteListener is used to configure listening to a KVStore by writing out length-prefixed
 // protobuf encoded StoreKVPairs to an underlying io.Writer
 type StoreKVPairWriteListener struct {
-    writer io.Writer
-    marshaller codec.BinaryCodec
+	writer     io.Writer
+	marshaller codec.BinaryCodec
 }
 
 // NewStoreKVPairWriteListener creates a StoreKVPairWriteListener with a provided io.Writer and codec.BinaryCodec
 func NewStoreKVPairWriteListener(w io.Writer, m codec.BinaryCodec) *StoreKVPairWriteListener {
-    return &StoreKVPairWriteListener{
-        writer: w,
-        marshaller: m,
-    }
+	return &StoreKVPairWriteListener{
+		writer:     w,
+		marshaller: m,
+	}
 }
 
 // OnWrite satisfies the WriteListener interface by writing length-prefixed protobuf encoded StoreKVPairs
 func (wl *StoreKVPairWriteListener) OnWrite(storeKey types.StoreKey, key []byte, value []byte, delete bool) error {
-    kvPair := new(types.StoreKVPair)
-    kvPair.StoreKey = storeKey.Name()
-    kvPair.Delete = Delete
-    kvPair.Key = key
-    kvPair.Value = value
-    by, err := wl.marshaller.MarshalBinaryLengthPrefixed(kvPair)
-    if err != nil {
-        return err
-    }
-    if _, err := wl.writer.Write(by); err != nil {
-        return err
-    }
-    return nil
+	kvPair := new(types.StoreKVPair)
+	kvPair.StoreKey = storeKey.Name()
+	kvPair.Delete = delete
+	kvPair.Key = key
+	kvPair.Value = value
+	by, err := wl.marshaller.MarshalBinaryLengthPrefixed(kvPair)
+	if err != nil {
+		return err
+	}
+	if _, err := wl.writer.Write(by); err != nil {
+		return err
+	}
+	return nil
 }
 ```
 
@@ -99,39 +99,39 @@ We can configure the `Store` with a set of `WriteListener`s which stream the out
 // Operations are traced on each core KVStore call and written to any of the
 // underlying listeners with the proper key and operation permissions
 type Store struct {
-    parent types.KVStore
-    listeners []types.WriteListener
-    parentStoreKey types.StoreKey
+	parent         types.KVStore
+	listeners      []types.WriteListener
+	parentStoreKey types.StoreKey
 }
 
 // NewStore returns a reference to a new listenkv.Store given a parent
 // KVStore implementation and a set of WriteListeners.
 func NewStore(parent types.KVStore, psk types.StoreKey, listeners []types.WriteListener) *Store {
-    return &Store{parent: parent, listeners: listeners, parentStoreKey: psk}
+	return &Store{parent: parent, listeners: listeners, parentStoreKey: psk}
 }
 
 // Set implements the KVStore interface. It traces a write operation and
 // delegates the Set call to the parent KVStore.
 func (s *Store) Set(key []byte, value []byte) {
-    types.AssertValidKey(key)
-    s.parent.Set(key, value)
-    s.onWrite(false, key, value)
+	types.AssertValidKey(key)
+	s.parent.Set(key, value)
+	s.onWrite(false, key, value)
 }
 
 // Delete implements the KVStore interface. It traces a write operation and
 // delegates the Delete call to the parent KVStore.
 func (s *Store) Delete(key []byte) {
-    s.parent.Delete(key)
-    s.onWrite(true, key, nil)
+	s.parent.Delete(key)
+	s.onWrite(true, key, nil)
 }
 
 // onWrite writes a KVStore operation to all the WriteListeners
 func (s *Store) onWrite(delete bool, key, value []byte) {
-    for _, l := range s.listeners {
-        if err := l.OnWrite(s.parentStoreKey, key, value, delete); err != nil {
-            // log error
-        }
-    }
+	for _, l := range s.listeners {
+		if err := l.OnWrite(s.parentStoreKey, key, value, delete); err != nil {
+			// log error
+		}
+	}
 }
 ```
 
@@ -142,30 +142,30 @@ Additionally, we will update the `CacheWrap` and `CacheWrapper` interfaces to en
 
 ```go
 type MultiStore interface {
-    ...
+	...
 
-    // ListeningEnabled returns if listening is enabled for the KVStore belonging the provided StoreKey
-    ListeningEnabled(key StoreKey) bool
+	// ListeningEnabled returns if listening is enabled for the KVStore belonging to the provided StoreKey
+	ListeningEnabled(key StoreKey) bool
 
-    // AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey
-    // It appends the listeners to a current set, if one already exists
-    AddListeners(key StoreKey, listeners []WriteListener)
+	// AddListeners adds WriteListeners for the KVStore belonging to the provided StoreKey
+	// It appends the listeners to a current set, if one already exists
+	AddListeners(key StoreKey, listeners []WriteListener)
 }
 ```
 
 ```go
 type CacheWrap interface {
-    ...
+	...
 
-    // CacheWrapWithListeners recursively wraps again with listening enabled
-    CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap
+	// CacheWrapWithListeners recursively wraps again with listening enabled
+	CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap
 }
 
 type CacheWrapper interface {
-    ...
 
-    // CacheWrapWithListeners recursively wraps again with listening enabled
-    CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap
+	...
 
+	// CacheWrapWithListeners recursively wraps again with listening enabled
+	CacheWrapWithListeners(storeKey types.StoreKey, listeners []WriteListener) CacheWrap
 }
 ```
 
@@ -176,16 +176,16 @@ to wrap the returned `KVStore` with a `listenkv.Store` if listening is turned on
 
 ```go
 func (rs *Store) GetKVStore(key types.StoreKey) types.KVStore {
-    store := rs.stores[key].(types.KVStore)
+	store := rs.stores[key].(types.KVStore)
 
-    if rs.TracingEnabled() {
-        store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext)
-    }
-    if rs.ListeningEnabled(key) {
-        store = listenkv.NewStore(key, store, rs.listeners[key])
-    }
+	if rs.TracingEnabled() {
+		store = tracekv.NewStore(store, rs.traceWriter, rs.traceContext)
+	}
+	if rs.ListeningEnabled(key) {
+		store = listenkv.NewStore(key, store, rs.listeners[key])
+	}
 
-    return store
+	return store
 }
 ```
 
@@ -194,11 +194,11 @@ to and enable listening in the cache layer.
 
 ```go
 func (rs *Store) CacheMultiStore() types.CacheMultiStore {
-    stores := make(map[types.StoreKey]types.CacheWrapper)
-    for k, v := range rs.stores {
-        stores[k] = v
-    }
-    return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners)
+	stores := make(map[types.StoreKey]types.CacheWrapper)
+	for k, v := range rs.stores {
+		stores[k] = v
+	}
+	return cachemulti.NewStore(rs.db, stores, rs.keysByName, rs.traceWriter, rs.traceContext, rs.listeners)
 }
 ```
 
@@ -216,27 +216,31 @@ receipt from the `StreamingService`.
 
 ```go
 // ABCIListener interface used to hook into the ABCI message processing of the BaseApp
 type ABCIListener interface {
-    // ListenBeginBlock updates the streaming service with the latest BeginBlock messages
-    ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error
-    // ListenEndBlock updates the steaming service with the latest EndBlock messages
-    ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
-    // ListenDeliverTx updates the steaming service with the latest DeliverTx messages
-    ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
-    // HaltAppOnDeliveryError whether or not to halt the application when delivery of massages fails
-    // in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. Setting this to `false` will give fire-and-forget semantics.
-    HaltAppOnDeliveryError() bool
+	// ListenBeginBlock updates the streaming service with the latest BeginBlock messages
+	ListenBeginBlock(ctx types.Context, req abci.RequestBeginBlock, res abci.ResponseBeginBlock) error
+	// ListenEndBlock updates the streaming service with the latest EndBlock messages
+	ListenEndBlock(ctx types.Context, req abci.RequestEndBlock, res abci.ResponseEndBlock) error
+	// ListenDeliverTx updates the streaming service with the latest DeliverTx messages
+	ListenDeliverTx(ctx types.Context, req abci.RequestDeliverTx, res abci.ResponseDeliverTx) error
+	// HaltAppOnDeliveryError whether or not to halt the application when delivery of messages fails
+	// in ListenBeginBlock, ListenEndBlock, ListenDeliverTx. When `false`, the app will operate in fire-and-forget mode.
+	// When `true`, the app will gracefully halt and stop the running node. 
Uncommitted blocks will + // be replayed to all listeners when the node restarts and all successful listeners that received data + // prior to the halt will receive duplicate data. Whether or not a listener operates in a fire-and-forget mode + // is determined by the listener's configuration property `halt_app_on_delivery_error = true|false`. + HaltAppOnDeliveryError() bool } // StreamingService interface for registering WriteListeners with the BaseApp and updating the service with the ABCI messages using the hooks type StreamingService interface { - // Stream is the streaming service loop, awaits kv pairs and writes them to a destination stream or file - Stream(wg *sync.WaitGroup) error - // Listeners returns the streaming service's listeners for the BaseApp to register - Listeners() map[types.StoreKey][]store.WriteListener - // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp - ABCIListener - // Closer interface - io.Closer + // Stream is the streaming service loop, awaits kv pairs and writes them to a destination stream or file + Stream(wg *sync.WaitGroup) error + // Listeners returns the streaming service's listeners for the BaseApp to register + Listeners() map[types.StoreKey][]store.WriteListener + // ABCIListener interface for hooking into the ABCI messages from inside the BaseApp + ABCIListener + // Closer interface + io.Closer } ``` @@ -265,15 +269,32 @@ func (app *BaseApp) BeginBlock(req abci.RequestBeginBlock) (res abci.ResponseBeg ... - // Call the streaming service hooks with the BeginBlock messages - for _, listener := range app.abciListeners { - if err := listener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { - app.logger.Error("ListenBeginBlock listening hook failed", "err", err) - if listener.HaltAppOnDeliveryError() { - app.halt() - } + // call the hooks with the BeginBlock messages + wg := new(sync.WaitGroup) + for _, streamingListener := range app.abciListeners { + streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines + if streamingListener.HaltAppOnDeliveryError() { + // increment the wait group counter + wg.Add(1) + go func() { + // decrement the counter when the go routine completes + defer wg.Done() + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + app.halt() + } + }() + } else { + // fire and forget semantics + go func() { + if err := streamingListener.ListenBeginBlock(app.deliverState.ctx, req, res); err != nil { + app.logger.Error("BeginBlock listening hook failed", "height", req.Header.Height, "err", err) + } + }() } } + // wait for all the listener calls to finish + wg.Wait() return res } @@ -285,14 +306,31 @@ func (app *BaseApp) EndBlock(req abci.RequestEndBlock) (res abci.ResponseEndBloc ... 
// Call the streaming service hooks with the EndBlock messages
-	for _, listener := range app.abciListeners {
-		if err := listener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
-			app.logger.Error("ListenEndBlock listening hook failed", "err", err)
-			if listener.HaltAppOnDeliveryError() {
-				app.halt()
-			}
+	wg := new(sync.WaitGroup)
+	for _, streamingListener := range app.abciListeners {
+		streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
+		if streamingListener.HaltAppOnDeliveryError() {
+			// increment the wait group counter
+			wg.Add(1)
+			go func() {
+				// decrement the counter when the go routine completes
+				defer wg.Done()
+				if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+					app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
+					app.halt()
+				}
+			}()
+		} else {
+			// fire and forget semantics
+			go func() {
+				if err := streamingListener.ListenEndBlock(app.deliverState.ctx, req, res); err != nil {
+					app.logger.Error("EndBlock listening hook failed", "height", req.Height, "err", err)
+				}
+			}()
 		}
 	}
+	// wait for all the listener calls to finish
+	wg.Wait()
 
 	return res
 }
@@ -303,14 +341,32 @@ func (app *BaseApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx
 
 	var abciRes abci.ResponseDeliverTx
 	defer func() {
+		// call the hooks with the DeliverTx messages
+		wg := new(sync.WaitGroup)
 		for _, streamingListener := range app.abciListeners {
-			if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
-				app.logger.Error("DeliverTx listening hook failed", "err", err)
-				if streamingListener.HaltAppOnDeliveryError() {
-					app.halt()
-				}
+			streamingListener := streamingListener // https://go.dev/doc/faq#closures_and_goroutines
+			if streamingListener.HaltAppOnDeliveryError() {
+				// increment the wait group counter
+				wg.Add(1)
+				go func() {
+					// decrement the counter when the go routine completes
+					defer wg.Done()
+					if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
+						app.logger.Error("DeliverTx listening hook failed", "err", err)
+						app.halt()
+					}
+				}()
+			} else {
+				// fire and forget semantics
+				go func() {
+					if err := streamingListener.ListenDeliverTx(app.deliverState.ctx, req, abciRes); err != nil {
+						app.logger.Error("DeliverTx listening hook failed", "err", err)
+					}
+				}()
 			}
 		}
+		// wait for all the listener calls to finish
+		wg.Wait()
 	}()
 
 	...
 
@@ -442,7 +498,7 @@ Plugin TOML configuration should be split into separate sub-tables for each kind
 Within these sub-tables, the parameters for a specific plugin of that kind are included in another sub-table (e.g. `plugins.streaming.file`).
 It is generally expected, but not required, that a streaming service plugin can be configured with a set of store keys
 (e.g. `plugins.streaming.file.keys`) for the stores it listens to and a flag (e.g. `plugins.streaming.file.halt_app_on_delivery_error`)
-that signifies whether the service operates in a fire-and-forget capacity, or stop the BaseApp when an error occurs in
+that signifies whether the service operates in a fire-and-forget capacity, or stops the BaseApp when an error occurs in any of `ListenBeginBlock`, `ListenEndBlock` and `ListenDeliverTx`.
 
 e.g.
From af1f9fca45fcf1e3da3f96ef295df3c3dd089a14 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Tue, 19 Apr 2022 12:32:05 -0500 Subject: [PATCH 40/43] tidy up --- go.sum | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git a/go.sum b/go.sum index 109483b982fd..a9f06617c414 100644 --- a/go.sum +++ b/go.sum @@ -224,6 +224,7 @@ github.com/confio/ics23/go v0.7.0/go.mod h1:E45NqnlpxGnpfTWL/xauN7MRwEE28T4Dd4ur github.com/confluentinc/confluent-kafka-go v1.8.2 h1:PBdbvYpyOdFLehj8j+9ba7FL4c4Moxn79gy9cYKxG5E= github.com/confluentinc/confluent-kafka-go v1.8.2/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg= github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/continuity v0.2.1 h1:/EeEo2EtN3umhbbgCveyjifoMYg0pS+nMMEemaYw634= github.com/containerd/continuity v0.2.1/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -290,7 +291,9 @@ github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WA github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -317,9 +320,12 @@ github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E github.com/esimonov/ifshort v1.0.4/go.mod h1:Pe8zjlRrJ80+q2CxHLfEOfTwxCZ4O+MuhcHcfgNWTk0= github.com/ettle/strcase v0.1.1/go.mod h1:hzDLsPC7/lwKyBOywSHEP89nt2pDgdy+No1NBA9o9VY= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c h1:8ISkoahWXwZR41ois5lSJBSVw4D0OV19Ht/JSTzvSv0= github.com/facebookgo/ensure v0.0.0-20200202191622-63f1cf65ac4c/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= +github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= github.com/facebookgo/stack v0.0.0-20160209184415-751773369052/go.mod h1:UbMTZqLaRiH3MsBH8va0n7s1pQYcu3uTb8G4tygF4Zg= github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= +github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4 h1:7HZCaLC5+BZpmbhCOZJ293Lz68O7PYrF2EzeiFMwCLk= github.com/facebookgo/subset v0.0.0-20200203212716-c811ad88dec4/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod 
h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= @@ -330,6 +336,7 @@ github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4 github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= @@ -343,7 +350,9 @@ github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5 github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fzipp/gocyclo v0.4.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.0 h1:jGB9xAJQ12AIGNB4HguylppmDK1Am9ppF7XnGXXJuoU= github.com/gin-gonic/gin v1.7.0/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= github.com/go-critic/go-critic v0.6.2/go.mod h1:td1s27kfmLpe5G/DPjlnFI7o1UCzePptwU7Az0V5iCM= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -362,8 +371,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -383,8 +395,11 @@ github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2 github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM= github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= +github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= 
github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2 h1:ZpnhV/YsD2/4cESfV5+Hoeu/iUR3ruzNvZ+yQfO03a0= github.com/godbus/dbus v0.0.0-20190726142602-4481cbc300e2/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= @@ -470,12 +485,16 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o= github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us= github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20= @@ -590,6 +609,7 @@ github.com/hashicorp/go-safetemp v1.0.0/go.mod h1:oaerMy3BhqiTbVye6QuFhFtIceqFoD github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= @@ -650,6 +670,7 @@ github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= +github.com/jmespath/go-jmespath/internal/testify v1.5.1 
h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmhodges/levigo v1.0.0 h1:q5EC36kV79HWeTBWsod3mG11EgStG3qArTKcvlksN1U= github.com/jmhodges/levigo v1.0.0/go.mod h1:Q6Qx+uH3RAqyK4rFQroq9RL7mdkABMcfhEI+nNuzMJQ= @@ -665,6 +686,7 @@ github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/u github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -700,6 +722,7 @@ github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfn github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kulti/thelper v0.5.1/go.mod h1:vMu2Cizjy/grP+jmsvOFDx1kYP6+PD1lqg4Yu5exl2U= github.com/kunwardeep/paralleltest v1.0.3/go.mod h1:vLydzomDFpk7yu5UX02RmP0H8QfRPOV/oFhWN85Mjb4= @@ -709,6 +732,7 @@ github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554 h1:nDOkLO7klmnEw1 github.com/lazyledger/smt v0.2.1-0.20210709230900-03ea40719554/go.mod h1:9+Pb2/tg1PvEgW7aFx4bFhDE4bvbI03zuJ8kb7nJ9Jc= github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0= github.com/ldez/tagliatelle v0.3.1/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= github.com/leonklingele/grouper v1.1.0/go.mod h1:uk3I3uDfi9B6PeUjsCKi6ndcf63Uy7snXgR4yDYQVDY= github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= @@ -788,9 +812,11 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 
v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3PzxT8aQXRPkAt8xlV/e7d7w8GM5g0fa5F0D8= github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= @@ -802,6 +828,7 @@ github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2 github.com/mtibben/percent v0.2.1 h1:5gssi8Nqo8QU/r2pynCm+hBQHpkB/uNK7BJCFogWdzs= github.com/mtibben/percent v0.2.1/go.mod h1:KG9uO+SZkUp+VkRHsCdYQV3XSZrrSpR3O9ibNBTZrns= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= @@ -821,11 +848,13 @@ github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1t github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/nishanths/exhaustive v0.7.11/go.mod h1:gX+MP7DWMKJmNa1HfMozK+u04hQd3na9i0hyqf3/dOI= github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b h1:MKwruh+HeCSKWphkxuzvRzU4QzDkg7yiPkDVV0cDFgI= github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b/go.mod h1:TLJifjWF6eotcfzDjKZsDqWJ+73Uvj/N85MvVyrvynM= @@ -842,6 +871,7 @@ github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= @@ -851,10 +881,14 @@ github.com/onsi/gomega v1.7.1/go.mod 
h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7J github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE= github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/runc v1.0.3 h1:1hbqejyQWCJBvtKAfdO0b1FmaEf2z/bxnjqbARass5k= github.com/opencontainers/runc v1.0.3/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0= github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8= @@ -868,6 +902,7 @@ github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJ github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE= +github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA= github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= @@ -876,6 +911,7 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= +github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= @@ -1008,6 +1044,7 @@ github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPx github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= 
github.com/sivchari/containedctx v1.0.2/go.mod h1:PwZOeqm4/DLoJOqMSIJs3aKqXRX4YO+uXww087KZ7Bw= github.com/sivchari/tenv v1.4.7/go.mod h1:5nF+bITvkebQVanjU6IuMbvIot/7ReNsUV7I5NbprB0= @@ -1019,6 +1056,7 @@ github.com/sonatard/noctx v0.0.1/go.mod h1:9D2D/EoULe8Yy2joDHJj7bv3sZoq9AaSb8B4l github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/go-diff v0.6.1/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= @@ -1057,6 +1095,7 @@ github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5J github.com/streadway/handy v0.0.0-20200128134331-0f66f006fb2e/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1105,8 +1144,10 @@ github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+l github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ulikunitz/xz v0.5.8 h1:ERv8V6GKqVi23rgu5cj9pVfVzJbOqAY2Ntl88O6c2nQ= github.com/ulikunitz/xz v0.5.8/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= @@ -1808,6 +1849,7 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b h1:QRR6H1YWRnHb4Y/HeNFCTJLFVxaq6wH4YuVdsUOr75U= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= 
gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= @@ -1821,6 +1863,7 @@ gopkg.in/ini.v1 v1.66.3/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= From ad670f23f5035a87febc65227f070a119d658986 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Tue, 19 Apr 2022 13:07:32 -0500 Subject: [PATCH 41/43] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index dc1dffdacd0d..abe40f074a08 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,7 @@ Ref: https://keepachangelog.com/en/1.0.0/ * [\#10962](https://github.com/cosmos/cosmos-sdk/pull/10962) ADR-040: Add state migration from iavl (v1Store) to smt (v2Store) * (types) [\#10948](https://github.com/cosmos/cosmos-sdk/issues/10948) Add `app-db-backend` to the `app.toml` config to replace the compile-time `types.DBbackend` variable. * (authz)[\#11060](https://github.com/cosmos/cosmos-sdk/pull/11060) Support grant with no expire time. +* [#11691](https://github.com/cosmos/cosmos-sdk/pull/11691) Plugin architecture for ADR-038 ### API Breaking Changes From 11bc48b67abbd215d030d385f65e4379d1f24eac Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 9 Jun 2022 12:27:54 -0500 Subject: [PATCH 42/43] unlock stateCacheLock to prevent deadlock on error --- plugin/plugins/kafka/service/service.go | 2 ++ plugin/plugins/trace/service/service.go | 2 ++ 2 files changed, 4 insertions(+) diff --git a/plugin/plugins/kafka/service/service.go b/plugin/plugins/kafka/service/service.go index 978690befb1e..94994cd22284 100644 --- a/plugin/plugins/kafka/service/service.go +++ b/plugin/plugins/kafka/service/service.go @@ -332,9 +332,11 @@ func (kss *KafkaStreamingService) writeStateChange(ctx sdk.Context, event int64, EventTypeId: int64(i + 1), } if err := kss.codec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { + kss.stateCacheLock.Unlock() return err } if err := kss.writeAsJsonToKafka(ctx, StateChangeTopic, key, kvPair); err != nil { + kss.stateCacheLock.Unlock() return err } } diff --git a/plugin/plugins/trace/service/service.go b/plugin/plugins/trace/service/service.go index b59d1b4a7c3d..a299e71cd338 100644 --- a/plugin/plugins/trace/service/service.go +++ b/plugin/plugins/trace/service/service.go @@ -256,9 +256,11 @@ func (tss *TraceStreamingService) writeStateChange(ctx sdk.Context, event string for i, stateChange := range tss.stateCache { key := fmt.Sprintf(LogMsgFmt, tss.currentBlockNumber, event, eventId, StateChangeEventType, i+1) if err := kodec.UnmarshalLengthPrefixed(stateChange, kvPair); err != nil { + tss.stateCacheLock.Unlock() return err } if err := tss.writeEventReqRes(ctx, key, kvPair); err != nil { + tss.stateCacheLock.Unlock() return err } } From 331aa94a922c3319397cbcefb91e6199417e62e3 Mon Sep 17 00:00:00 2001 From: Ergels Gaxhaj Date: Thu, 21 Jul 2022 11:39:07 -0500 Subject: [PATCH 43/43] fix format 
input count --- plugin/plugins/kafka/kafka.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/plugins/kafka/kafka.go b/plugin/plugins/kafka/kafka.go index 4a4be3e5b22f..a5911b03d782 100644 --- a/plugin/plugins/kafka/kafka.go +++ b/plugin/plugins/kafka/kafka.go @@ -103,21 +103,21 @@ func (ssp *streamingServicePlugin) Register( } // Validate minimum producer config properties - producerConfigKey := fmt.Sprintf("%s.%s.%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM) + producerConfigKey := fmt.Sprintf("%s.%s", tomlKeyPrefix, PRODUCER_CONFIG_PARAM) if len(producerConfig) == 0 { - m := fmt.Sprintf("Failed to register plugin. Empty properties for '%s': " + + m := fmt.Sprintf("Failed to register plugin. Empty properties for '%s': "+ "client will not be able to connect to Kafka cluster", producerConfigKey) return errors.New(m) } else { bootstrapServers := cast.ToString(producerConfig["bootstrap_servers"]) if len(bootstrapServers) == 0 { - m := fmt.Sprintf("Failed to register plugin. No \"%s.%s\" configured:" + + m := fmt.Sprintf("Failed to register plugin. No \"%s.%s\" configured:"+ " client will not be able to connect to Kafka cluster", producerConfigKey, "bootstrap_servers") return errors.New(m) } if strings.TrimSpace(bootstrapServers) == "" { - m := fmt.Sprintf("Failed to register plugin. Empty \"%s.%s\" configured:" + + m := fmt.Sprintf("Failed to register plugin. Empty \"%s.%s\" configured:"+ " client will not be able to connect to Kafka cluster", producerConfigKey, "bootstrap_servers") return errors.New(m) }
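
PATCH 42/43 above fixes a deadlock in the Kafka and trace streaming services: `writeStateChange` could return early on an unmarshal or write error while still holding `stateCacheLock`, so the next call into the service would block forever on `Lock()`. The patch releases the mutex explicitly before each error return. The sketch below is a minimal, self-contained reproduction of the same bug class — the `service` type, `process` callback, and cache contents are illustrative stand-ins, not the SDK's actual types — showing the `defer`-based idiom that makes the unlock impossible to miss when the lock is held for the whole function body:

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// service is a simplified stand-in for the streaming service: a cache of
// pending state changes guarded by a mutex.
type service struct {
	stateCacheLock sync.Mutex
	stateCache     [][]byte
}

// writeStateChange drains the cache under the lock. Deferring the unlock
// guarantees the mutex is released on every return path, including the
// early error returns that caused the original deadlock.
func (s *service) writeStateChange(process func([]byte) error) error {
	s.stateCacheLock.Lock()
	defer s.stateCacheLock.Unlock()

	for i, sc := range s.stateCache {
		if err := process(sc); err != nil {
			return fmt.Errorf("state change %d: %w", i, err) // lock still released by the defer
		}
	}
	return nil
}

func main() {
	s := &service{stateCache: [][]byte{{0x01}, {0x02}}}

	// The first call fails partway through; without unlock-on-error
	// (or the defer above), the second call would hang forever.
	fmt.Println("first call:", s.writeStateChange(func(b []byte) error {
		if b[0] == 0x02 {
			return errors.New("write failed")
		}
		return nil
	}))
	fmt.Println("second call:", s.writeStateChange(func([]byte) error { return nil }))
}
```

The patch itself adds explicit `Unlock()` calls at each error return rather than restructuring around `defer`, which keeps the diff minimal; the defer form is the one to reach for when the critical section genuinely spans the whole function, which the real methods may not.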
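
PATCH 43/43 above fixes a `fmt.Sprintf` call whose format string carried four `%s` verbs but only two arguments — `go vet` flags the mismatch, and at runtime the unmatched verbs render as `%!s(MISSING)` in the produced key — and applies gofmt's spacing to the multi-line string concatenations. A small before/after demonstration (the literal values are placeholders, not the plugin's real TOML keys):

```go
package main

import "fmt"

func main() {
	// Placeholder values; the real code derives these from the plugin's
	// TOML key prefix and PRODUCER_CONFIG_PARAM constant.
	tomlKeyPrefix := "plugins.streaming.kafka"
	producerConfigParam := "producer"

	// Before the fix: four %s verbs but only two arguments. go vet reports
	// the mismatch, and the missing operands print as %!s(MISSING).
	broken := fmt.Sprintf("%s.%s.%s.%s", tomlKeyPrefix, producerConfigParam)
	fmt.Println(broken) // plugins.streaming.kafka.producer.%!s(MISSING).%!s(MISSING)

	// After the fix: the verb count matches the argument count.
	fixed := fmt.Sprintf("%s.%s", tomlKeyPrefix, producerConfigParam)
	fmt.Println(fixed) // plugins.streaming.kafka.producer
}
```

Running `go vet ./...` in CI would catch this class of mistake before review, since printf-style argument checking is part of vet's default analyzer set.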