diff --git a/.circleci/config.yml b/.circleci/config.yml index 664e35dc2..960fdbe37 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -220,6 +220,7 @@ release_filter: &release_filter filters: tags: only: + - /.*test/ - /^v[0-9]+(\.[0-9]+){2}(-(rc|beta)[0-9]+)?/ branches: ignore: /.*/ diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 763e84dc4..000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,1159 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - digest = "1:9a7a1f8cd0991a7c3549700b57c4ea1c213dadf7eb94d69e59ad3600490f81bc" - name = "cloud.google.com/go" - packages = [ - "compute/metadata", - "internal", - ] - pruneopts = "NUT" - revision = "2e6a95edb1071d750f6d7db777bf66cd2997af6c" - version = "v0.7.0" - -[[projects]] - digest = "1:253580c3e9e495538385a96f276a0e06f3ba90c73421df1258de8a8375ff4b09" - name = "github.com/Azure/azure-sdk-for-go" - packages = [ - "arm/compute", - "arm/network", - ] - pruneopts = "NUT" - revision = "bd73d950fa4440dae889bd9917bff7cef539f86e" - -[[projects]] - digest = "1:401dd46323a9f30c7cc9adef35f4961714caf74f61f8e8666f956bc158de9bba" - name = "github.com/Azure/go-autorest" - packages = [ - "autorest", - "autorest/azure", - "autorest/date", - "autorest/to", - "autorest/validation", - ] - pruneopts = "NUT" - revision = "a2fdd780c9a50455cecd249b00bdc3eb73a78e31" - -[[projects]] - digest = "1:be3ccd9f881604e4dd6d15cccfa126aa309232f0ba075ae5f92d3ef729a62758" - name = "github.com/BurntSushi/toml" - packages = ["."] - pruneopts = "NUT" - revision = "a368813c5e648fee92e5f6c30e3944ff9d5e8895" - -[[projects]] - digest = "1:d8ebbd207f3d3266d4423ce4860c9f3794956306ded6c7ba312ecc69cdfbf04c" - name = "github.com/PuerkitoBio/purell" - packages = ["."] - pruneopts = "NUT" - revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4" - version = "v1.1.0" - -[[projects]] - branch = "master" - digest = "1:8098cd40cd09879efbf12e33bcd51ead4a66006ac802cd563a66c4f3373b9727" - name = "github.com/PuerkitoBio/urlesc" - packages = ["."] - pruneopts = "NUT" - revision = "bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5" - -[[projects]] - digest = "1:44c7344434890241dde2d0d70d2e47355a1e012549e28ccf06f925aa6f8271ff" - name = "github.com/Sirupsen/logrus" - packages = ["."] - pruneopts = "NUT" - revision = "ba1b36c82c5e05c4f912a88eab0dcd91a171688f" - version = "v0.11.5" - -[[projects]] - digest = "1:e3c230a3e97de27d6b74712ae4745be2c5400f0a6c4b9e348f5800be26f30a76" - name = "github.com/aws/aws-sdk-go" - packages = [ - "aws", - "aws/awserr", - "aws/awsutil", - "aws/client", - "aws/client/metadata", - "aws/corehandlers", - "aws/credentials", - "aws/credentials/ec2rolecreds", - "aws/credentials/endpointcreds", - "aws/credentials/stscreds", - "aws/defaults", - "aws/ec2metadata", - "aws/endpoints", - "aws/request", - "aws/session", - "aws/signer/v4", - "private/protocol", - "private/protocol/ec2query", - "private/protocol/query", - "private/protocol/query/queryutil", - "private/protocol/rest", - "private/protocol/xml/xmlutil", - "service/autoscaling", - "service/ec2", - "service/sts", - ] - pruneopts = "NUT" - revision = "f6ea558f30e0a983d529b32c741e4caed17c7df0" - version = "v1.8.16" - -[[projects]] - branch = "master" - digest = "1:bfe817c134f8681840c8d6c02606982ea29a19fd48086e89aaa8dff7c5d837dc" - name = "github.com/benbjohnson/tmpl" - packages = ["."] - pruneopts = "NUT" - revision = "8e77bc5fc07968736bb74f4b40b4c577028a61b6" - -[[projects]] - branch = "master" - digest = 
"1:cb0535f5823b47df7dcb9768ebb6c000b79ad115472910c70efe93c9ed9b2315" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "NUT" - revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" - -[[projects]] - digest = "1:aba270497eb2d49f5cba6f4162d524b9a1195a24cbce8be20bf56a0051f47deb" - name = "github.com/blang/semver" - packages = ["."] - pruneopts = "NUT" - revision = "b38d23b8782a487059e8fc8773e9a5b228a77cb6" - version = "v3.5.0" - -[[projects]] - digest = "1:4c7a379d06f493d3cf4301a08667d9b83011a92b1d9defc7ec58a10ade19e796" - name = "github.com/boltdb/bolt" - packages = ["."] - pruneopts = "NUT" - revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9" - version = "v1.3.0" - -[[projects]] - digest = "1:60488563e453e4bbf9f5387fe94c9dd3d28372dc993c5f89104b8396c3593b21" - name = "github.com/cenkalti/backoff" - packages = ["."] - pruneopts = "NUT" - revision = "32cd0c5b3aef12c76ed64aaf678f6c79736be7dc" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:4c313de62fe9184560948eaa94714403a73d3026777a0da276550b7379cf6c80" - name = "github.com/coreos/go-oidc" - packages = [ - "http", - "jose", - "key", - "oauth2", - "oidc", - ] - pruneopts = "NUT" - revision = "be73733bb8cc830d0205609b95d125215f8e9c70" - -[[projects]] - digest = "1:e2c6ad2e212c0c9a8b13e5d1d52a6cd3a42411b636fcea10d286f7b6fc13fea0" - name = "github.com/coreos/pkg" - packages = [ - "health", - "httputil", - "timeutil", - ] - pruneopts = "NUT" - revision = "3ac0863d7acf3bc44daf49afef8919af12f704ef" - version = "v3" - -[[projects]] - branch = "master" - digest = "1:a2c1d0e43bd3baaa071d1b9ed72c27d78169b2b269f71c105ac4ba34b1be4a39" - name = "github.com/davecgh/go-spew" - packages = ["spew"] - pruneopts = "NUT" - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - -[[projects]] - digest = "1:bfebf84ecf0c67e060f1e92cfcf3d4cbf4428a26a8b3cc258a64d7330f4de31b" - name = "github.com/dgrijalva/jwt-go" - packages = ["."] - pruneopts = "NUT" - revision = "d2709f9f1f31ebcda9651b03077758c1f3a0018c" - version = "v3.0.0" - -[[projects]] - digest = "1:516dc8a4003a0ad8aaf2b07d41bafb3c9772557ee8c819b36c28536cc30d0ac2" - name = "github.com/docker/distribution" - packages = [ - "digest", - "reference", - ] - pruneopts = "NUT" - revision = "a25b9ef0c9fe242ac04bb20d3a028442b7d266b6" - version = "v2.6.1" - -[[projects]] - digest = "1:740dc3cccfcdb302c323d17da5f4f2dfa65f7b8c666e0a9ac8bc64f560fb2974" - name = "github.com/docker/docker" - packages = [ - "api/types", - "api/types/blkiodev", - "api/types/container", - "api/types/filters", - "api/types/mount", - "api/types/network", - "api/types/registry", - "api/types/strslice", - "api/types/swarm", - "api/types/versions", - ] - pruneopts = "NUT" - revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363" - version = "v1.13.1" - -[[projects]] - digest = "1:be0211ba62c6b44e36a747047b55d45ad23288920799edda7e8b59da852cbc2c" - name = "github.com/docker/go-connections" - packages = ["nat"] - pruneopts = "NUT" - revision = "990a1a1a70b0da4c4cb70e117971a4f0babfbf1a" - version = "v0.2.1" - -[[projects]] - digest = "1:cb9111a5f582f106b83203d29e92e28a02d1eba0e6ea20f61d67ae54b6a8dbfe" - name = "github.com/docker/go-units" - packages = ["."] - pruneopts = "NUT" - revision = "f2d77a61e3c169b43402a0a1e84f06daf29b8190" - version = "v0.3.1" - -[[projects]] - branch = "master" - digest = "1:3f451047834b63dcc35c5dc54d707440606cfd5b53c730a5f03bebd04d8e6af5" - name = "github.com/dustin/go-humanize" - packages = ["."] - pruneopts = "NUT" - revision = 
"259d2a102b871d17f30e3cd9881a642961a1e486" - -[[projects]] - digest = "1:492d2263bad08c906b12d9b7bb31213698d801dd01d033081139bd60d9009ca4" - name = "github.com/eclipse/paho.mqtt.golang" - packages = [ - ".", - "packets", - ] - pruneopts = "NUT" - revision = "45f9b18f4864c81d49c3ed01e5faec9eeb05de31" - version = "v1.0.0" - -[[projects]] - digest = "1:11652d24c6b9574c5f4a94370c0e23cbb46aec620f13f0c4f4bdc4635c2989e8" - name = "github.com/emicklei/go-restful" - packages = [ - ".", - "log", - "swagger", - ] - pruneopts = "NUT" - revision = "777bb3f19bcafe2575ffb2a3e46af92509ae9594" - version = "v1.2" - -[[projects]] - branch = "master" - digest = "1:df767c6ddf21aad6abd0da659e246cad6843e78ced90fb019a1625a12e571465" - name = "github.com/evanphx/json-patch" - packages = ["."] - pruneopts = "NUT" - revision = "30afec6a1650c11c861dc1fb58e100cd5219e490" - -[[projects]] - branch = "master" - digest = "1:a546bd0e6ce4ebdcf79507110d9498f697e154e5624e5e84dd2ca2efc776ae32" - name = "github.com/geoffgarside/ber" - packages = ["."] - pruneopts = "NUT" - revision = "854377f11dfb81f04121879829bc53487e377739" - -[[projects]] - branch = "master" - digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756" - name = "github.com/ghodss/yaml" - packages = ["."] - pruneopts = "NUT" - revision = "04f313413ffd65ce25f2541bfd2b2ceec5c0908c" - -[[projects]] - digest = "1:27b47997dc05f1af7b96d113754bd86ab1e7983d544cfd3bc7f48ec3e31ea3f5" - name = "github.com/go-ini/ini" - packages = ["."] - pruneopts = "NUT" - revision = "e7fea39b01aea8d5671f6858f0532f56e8bff3a5" - version = "v1.27.0" - -[[projects]] - branch = "master" - digest = "1:260f7ebefc63024c8dfe2c9f1a2935a89fa4213637a1f522f592f80c001cc441" - name = "github.com/go-openapi/jsonpointer" - packages = ["."] - pruneopts = "NUT" - revision = "779f45308c19820f1a69e9a4cd965f496e0da10f" - -[[projects]] - branch = "master" - digest = "1:98abd61947ff5c7c6fcfec5473d02a4821ed3a2dd99a4fbfdb7925b0dd745546" - name = "github.com/go-openapi/jsonreference" - packages = ["."] - pruneopts = "NUT" - revision = "36d33bfe519efae5632669801b180bf1a245da3b" - -[[projects]] - branch = "master" - digest = "1:ec09a363ecf22580db64d27f9d2c2604cea79e69bace2e50d66af15a492f82c1" - name = "github.com/go-openapi/spec" - packages = ["."] - pruneopts = "NUT" - revision = "e51c28f07047ad90caff03f6450908720d337e0c" - -[[projects]] - branch = "master" - digest = "1:dd9842008e1f630db0d091aa3774103b16bc972cf989492b71f734a03ebd6b5c" - name = "github.com/go-openapi/swag" - packages = ["."] - pruneopts = "NUT" - revision = "24ebf76d720bab64f62824d76bced3184a65490d" - -[[projects]] - digest = "1:a6afc27b2a73a5506832f3c5a1c19a30772cb69e7bd1ced4639eb36a55db224f" - name = "github.com/gogo/protobuf" - packages = [ - "proto", - "sortkeys", - ] - pruneopts = "NUT" - revision = "100ba4e885062801d56799d78530b73b178a78f3" - version = "v0.4" - -[[projects]] - branch = "master" - digest = "1:e2b86e41f3d669fc36b50d31d32d22c8ac656c75aa5ea89717ce7177e134ff2a" - name = "github.com/golang/glog" - packages = ["."] - pruneopts = "NUT" - revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" - -[[projects]] - branch = "master" - digest = "1:065785c3265dc118dda15e31fb57e6ceface395a94b09cce8cd2c8fa8ce7b974" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "protoc-gen-go", - "protoc-gen-go/descriptor", - "protoc-gen-go/generator", - "protoc-gen-go/grpc", - "protoc-gen-go/plugin", - ] - pruneopts = "NUT" - revision = "2bba0603135d7d7f5cb73b2125beeda19c09f4ef" - 
-[[projects]] - branch = "master" - digest = "1:9413ddbde906f91f062fda0dfa9a7cff43458cd1b2282c0fa25c61d89300b116" - name = "github.com/golang/snappy" - packages = ["."] - pruneopts = "NUT" - revision = "553a641470496b2327abcac10b36396bd98e45c9" - -[[projects]] - digest = "1:c32382738658c8f0e5c8e488967cc4cf1c795481ec8c62505b8976d2a8ad0c42" - name = "github.com/google/go-cmp" - packages = [ - "cmp", - "cmp/cmpopts", - "cmp/internal/diff", - "cmp/internal/function", - "cmp/internal/value", - ] - pruneopts = "NUT" - revision = "8099a9787ce5dc5984ed879a3bda47dc730a8e97" - version = "v0.1.0" - -[[projects]] - branch = "master" - digest = "1:f9425215dccf1c63f659ec781ca46bc81804341821d0cd8d2459c5b58f8bd067" - name = "github.com/google/gofuzz" - packages = ["."] - pruneopts = "NUT" - revision = "44d81051d367757e1c7c6a5a86423ece9afcf63c" - -[[projects]] - branch = "master" - digest = "1:1ab18cf8c2084968d6dca0dd46fbda9efba08664ecd7957b63c7ca57bb2455df" - name = "github.com/google/uuid" - packages = ["."] - pruneopts = "NUT" - revision = "6a5e28554805e78ea6141142aba763936c4761c0" - -[[projects]] - branch = "master" - digest = "1:b394d36f2403ca6e55d6de105ad73522c58919d19e31b0505f05c5afaa302ca7" - name = "github.com/googleapis/gax-go" - packages = ["."] - pruneopts = "NUT" - revision = "9af46dd5a1713e8b5cd71106287eba3cefdde50b" - -[[projects]] - branch = "master" - digest = "1:b60e505ed8574c018837ba28d824a9c2172f8a2837c79dda6c32e70e2e596d1c" - name = "github.com/gorhill/cronexpr" - packages = ["."] - pruneopts = "NUT" - revision = "88b0669f7d75f171bd612b874e52b95c190218df" - -[[projects]] - digest = "1:064c7f0ccdb4036791092fb93ec214a6f09119711801b9e587b6d1e76acc55de" - name = "github.com/hashicorp/consul" - packages = ["api"] - pruneopts = "NUT" - revision = "e9ca44d0a1757ac9aecc6785904a701936c10e4a" - version = "v0.8.1" - -[[projects]] - branch = "master" - digest = "1:7b699584752575e81e3f4e8b00cfb3e5d6fa5419d5d212ef925e02c798847464" - name = "github.com/hashicorp/go-cleanhttp" - packages = ["."] - pruneopts = "NUT" - revision = "3573b8b52aa7b37b9358d966a898feb387f62437" - -[[projects]] - branch = "master" - digest = "1:cdb5ce76cd7af19e3d2d5ba9b6458a2ee804f0d376711215dd3df5f51100d423" - name = "github.com/hashicorp/go-rootcerts" - packages = ["."] - pruneopts = "NUT" - revision = "6bb64b370b90e7ef1fa532be9e591a81c3493e00" - -[[projects]] - digest = "1:0dd7b7b01769f9df356dc99f9e4144bdbabf6c79041ea7c0892379c5737f3c44" - name = "github.com/hashicorp/serf" - packages = ["coordinate"] - pruneopts = "NUT" - revision = "d6574a5bb1226678d7010325fb6c985db20ee458" - version = "v0.8.1" - -[[projects]] - digest = "1:4b32a332c32e1626280df4aceecc6b9eeaa8aa11e5083926ed41b89409ec40b6" - name = "github.com/influxdata/influxdb" - packages = [ - ".", - "client", - "client/v2", - "influxql", - "influxql/internal", - "influxql/neldermead", - "models", - "monitor/diagnostics", - "pkg/escape", - "pkg/limiter", - "services/collectd", - "services/graphite", - "services/meta", - "services/meta/internal", - "services/opentsdb", - "services/udp", - "toml", - "tsdb", - "tsdb/internal", - "uuid", - ] - pruneopts = "NUT" - revision = "e4628bb69266dbd624dc27d674b52705ce0dcbf2" - version = "v1.1.4" - -[[projects]] - branch = "master" - digest = "1:a6411d501f20aa4325c2cef806205a4b4802aec94b296f495db662c6ef46c787" - name = "github.com/influxdata/usage-client" - packages = ["v1"] - pruneopts = "NUT" - revision = "6d3895376368aa52a3a81d2a16e90f0f52371967" - -[[projects]] - branch = "master" - digest = 
"1:61187cdf67a554f26a184342401d2d7f40cb7064a77de4b52b099a4bc8368290" - name = "github.com/influxdata/wlog" - packages = ["."] - pruneopts = "NUT" - revision = "7c63b0a71ef8300adc255344d275e10e5c3a71ec" - -[[projects]] - digest = "1:7c818eb119c69fc685573449e8f799596ba81827074b006ff0cb71052424f254" - name = "github.com/jmespath/go-jmespath" - packages = ["."] - pruneopts = "NUT" - revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9" - version = "0.2.2" - -[[projects]] - digest = "1:6b1eae4bb93e5ccd23cb09d1e005ecb391316d27701b7a5264f8555a6e2f3d87" - name = "github.com/jonboulle/clockwork" - packages = ["."] - pruneopts = "NUT" - revision = "2eee05ed794112d45db504eb05aa693efd2b8b09" - version = "v0.1.0" - -[[projects]] - branch = "master" - digest = "1:57719b5d47a6adf438ed209107c10d0da22e993916b326df3a41fcb6c9bad533" - name = "github.com/juju/ratelimit" - packages = ["."] - pruneopts = "NUT" - revision = "acf38b000a03e4ab89e40f20f1e548f4e6ac7f72" - -[[projects]] - digest = "1:3a32e9d6c50f433ce5a8a65106c882ec0734a39863bdde6efa1f33c4d3e66acf" - name = "github.com/k-sone/snmpgo" - packages = ["."] - pruneopts = "NUT" - revision = "de09377ff34857b08afdc16ea8c7c2929eb1fc6e" - version = "v3.2.0" - -[[projects]] - digest = "1:805127e5bf73d46bf81aeb8eab8a28697880045761fa28d483308247c2898290" - name = "github.com/kimor79/gollectd" - packages = ["."] - pruneopts = "NUT" - revision = "b5dddb1667dcc1e6355b9305e2c1608a2db6983c" - version = "v1.0.0" - -[[projects]] - digest = "1:9ab29968625f38dd39ab1dbd50797549313de7881782a32be6f2ca664c99dd08" - name = "github.com/mailru/easyjson" - packages = [ - ".", - "bootstrap", - "buffer", - "easyjson", - "gen", - "jlexer", - "jwriter", - "parser", - ] - pruneopts = "NUT" - revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" - -[[projects]] - digest = "1:cb591533458f6eb6e2c1065ff3eac6b50263d7847deb23fc9f79b25bc608970e" - name = "github.com/mattn/go-runewidth" - packages = ["."] - pruneopts = "NUT" - revision = "9e777a8366cce605130a531d2cd6363d07ad7317" - version = "v0.0.2" - -[[projects]] - digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "NUT" - revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" - version = "v1.0.0" - -[[projects]] - branch = "master" - digest = "1:bdff96d801b156a076211f2218e9a3d6ce942e087a8a5fb345b7a088bd65473f" - name = "github.com/miekg/dns" - packages = ["."] - pruneopts = "NUT" - revision = "6ebcb714d36901126ee2807031543b38c56de963" - -[[projects]] - branch = "master" - digest = "1:c16945365aa2772ae7347e8d944ff38abd385bf217a75852f4b490e1af06b1aa" - name = "github.com/mitchellh/copystructure" - packages = ["."] - pruneopts = "NUT" - revision = "f81071c9d77b7931f78c90b416a074ecdc50e959" - -[[projects]] - branch = "master" - digest = "1:b62c4f18ad6eb454ac5253e7791ded3d7867330015ca4b37b6336e57f514585e" - name = "github.com/mitchellh/go-homedir" - packages = ["."] - pruneopts = "NUT" - revision = "b8bc1bf767474819792c23f32d8286a45736f1c6" - -[[projects]] - branch = "master" - digest = "1:77ae0dd3bf0743d1baf1918b01858d423cf578826d70c8b59092ab52e4b9dfb9" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - pruneopts = "NUT" - revision = "5a0325d7fafaac12dda6e7fb8bd222ec1b69875e" - -[[projects]] - branch = "master" - digest = "1:08893d896360bc28ab00692d16a1df1bea0bc8f52da93a8bea7b4d46e141c5c6" - name = "github.com/mitchellh/reflectwalk" - packages = ["."] - pruneopts = 
"NUT" - revision = "417edcfd99a4d472c262e58f22b4bfe97580f03e" - -[[projects]] - digest = "1:c7754aaef62e30fefcc59727dcd71aea29e3c3868df0bd316bfe62e05131fafb" - name = "github.com/pborman/uuid" - packages = ["."] - pruneopts = "NUT" - revision = "a97ce2ca70fa5a848076093f05e639a89ca34d06" - version = "v1.0" - -[[projects]] - digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121" - name = "github.com/pkg/errors" - packages = ["."] - pruneopts = "NUT" - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - pruneopts = "NUT" - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - digest = "1:3e5fd795ebf6a9e13e67d644da76130af7a6003286531f9573f8074c228b66a3" - name = "github.com/prometheus/client_golang" - packages = ["prometheus"] - pruneopts = "NUT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:32d10bdfa8f09ecf13598324dba86ab891f11db3c538b6a34d1c3b5b99d7c36b" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "NUT" - revision = "6f3806018612930941127f2a7c6c453ba2c527d2" - -[[projects]] - branch = "master" - digest = "1:4f7ffe5f45a74be4c6b116a0ff2e435edce68d639becebc1ecaaa22487e177bb" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "log", - "model", - "version", - ] - pruneopts = "NUT" - revision = "9e0844febd9e2856f839c9cb974fbd676d1755a8" - -[[projects]] - branch = "master" - digest = "1:2a011811c4dae8274c3ced565b10f30ab9f43cb799e5a425607464c0c585c1c3" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "xfs", - ] - pruneopts = "NUT" - revision = "6ac8c5d890d415025dd5aae7595bcb2a6e7e2fad" - -[[projects]] - branch = "logger-targetmanager-wait" - digest = "1:2aa5c16ddde403c807c8e4ddba9b3e31faac62ec86e2df6afafe032419f7d055" - name = "github.com/prometheus/prometheus" - packages = [ - "config", - "discovery", - "discovery/azure", - "discovery/consul", - "discovery/dns", - "discovery/ec2", - "discovery/file", - "discovery/gce", - "discovery/kubernetes", - "discovery/marathon", - "discovery/triton", - "discovery/zookeeper", - "relabel", - "retrieval", - "storage", - "storage/local", - "storage/local/chunk", - "storage/local/codable", - "storage/local/index", - "storage/metric", - "util/flock", - "util/httputil", - "util/strutil", - "util/testutil", - "util/treecache", - ] - pruneopts = "NUT" - revision = "58298e738211f46cdab48c404e5514a544774579" - source = "github.com/goller/prometheus" - -[[projects]] - digest = "1:118f00f400c10c1dd21a267fd04697c758dbe9a38cf5fa3fab3bb3625af9efe7" - name = "github.com/russross/blackfriday" - packages = ["."] - pruneopts = "NUT" - revision = "0b647d0506a698cca42caca173e55559b12a69f2" - version = "v1.4" - -[[projects]] - branch = "master" - digest = "1:142520cf3c9bb85449dd0000f820b8c604531587ee654793c54909be7dabadac" - name = "github.com/samuel/go-zookeeper" - packages = ["zk"] - pruneopts = "NUT" - revision = "1d7be4effb13d2d908342d349d71a284a7542693" - -[[projects]] - digest = "1:b1184e4b8e474f452b201392428a78f93471ec90a7ef72fd00bf2b11a2511b30" - name = "github.com/segmentio/kafka-go" - packages = [ - ".", - "sasl", - ] - pruneopts = "NUT" - revision = "9a956db8bd00245835f16007fbfe8ec58b31b8b9" - version = 
"v0.3.5" - -[[projects]] - branch = "master" - digest = "1:e700de914d366e75d5711582669407619dd26746cb80b0f6bd2cb0e8f0ec18c1" - name = "github.com/serenize/snaker" - packages = ["."] - pruneopts = "NUT" - revision = "543781d2b79bd95c51ffe70e70a55c946ca211ff" - -[[projects]] - branch = "master" - digest = "1:da6203ec5679b8d7af6fe2dbcd694694b9af883a6bae7f702ec5a11815301e94" - name = "github.com/shurcooL/go" - packages = ["indentwriter"] - pruneopts = "NUT" - revision = "20b4b0a352116a106a505a8c528b6513e7e0d5c2" - -[[projects]] - branch = "master" - digest = "1:81fc235daf173fbcabfda91e1d5d1d918a057f33e30b57dc72fa6091047ac718" - name = "github.com/shurcooL/markdownfmt" - packages = ["markdown"] - pruneopts = "NUT" - revision = "10aae0a270abfb5d929ae6ca59c4b0ac0fa8f237" - -[[projects]] - branch = "master" - digest = "1:400359f0b394fb168f4aee9621d42cc005810c6e462009d5fc76055d5e96dcf3" - name = "github.com/shurcooL/sanitized_anchor_name" - packages = ["."] - pruneopts = "NUT" - revision = "1dba4b3954bc059efc3991ec364f9f9a35f597d2" - -[[projects]] - branch = "master" - digest = "1:75d8ef7fd5eb922263b8c8f30aae17ddd6876ed3bc52c17d8406553d123f8770" - name = "github.com/spf13/pflag" - packages = ["."] - pruneopts = "NUT" - revision = "2300d0f8576fe575f71aaa5b9bbe4e1b0dc2eb51" - -[[projects]] - digest = "1:b5c8b4a0ad5f65a85eb2a9f89e30c638ef8b99f8a3f078467cea778869757666" - name = "github.com/stretchr/testify" - packages = ["assert"] - pruneopts = "NUT" - revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0" - version = "v1.1.4" - -[[projects]] - branch = "master" - digest = "1:9137e62a44c92cd9fe7d563b8480a6fbb2cd97815ef194a37696d9635a6a4056" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util", - ] - pruneopts = "NUT" - revision = "8c81ea47d4c41a385645e133e15510fc6a2a74b4" - -[[projects]] - branch = "master" - digest = "1:af13b22439d69b477caf532439fdcbb3e2a18bde35d3135003f8be46bb8d8967" - name = "github.com/ugorji/go" - packages = ["codec"] - pruneopts = "NUT" - revision = "708a42d246822952f38190a8d8c4e6b16a0e600c" - -[[projects]] - branch = "master" - digest = "1:95100f4ac20072567aa10607f61c66ff28965659f77f6ba807bd875a685710d9" - name = "golang.org/x/crypto" - packages = [ - "bcrypt", - "blowfish", - ] - pruneopts = "NUT" - revision = "0242f07995e684be54f2a2776327141acf1cef91" - -[[projects]] - branch = "master" - digest = "1:9c7239e01cf1289afb7460a80b820bcaa5638add06e5cf55e676950b414c817d" - name = "golang.org/x/net" - packages = [ - "context", - "context/ctxhttp", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "lex/httplex", - "trace", - "websocket", - ] - pruneopts = "NUT" - revision = "d212a1ef2de2f5d441c327b8f26cf3ea3ea9f265" - -[[projects]] - branch = "master" - digest = "1:da311e132160fec8dfc9e659915b8f942e5563c27bbf3c45d2c9e67a1434ef65" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt", - ] - pruneopts = "NUT" - revision = "a6bd8cefa1811bd24b86f8902872e4e8225f74c4" - -[[projects]] - branch = "master" - digest = "1:49763f4a63eaef31cde11bc013e9d250e294fb5fc8b02b9c93807fc430106cf9" - name = "golang.org/x/sys" - packages = [ - "unix", - "windows", - "windows/registry", - "windows/svc/eventlog", - ] - pruneopts = "NUT" - revision = "f3918c30c5c2cb527c0b071a27c35120a6c0719a" - 
-[[projects]] - branch = "master" - digest = "1:35e546e3d2af2735e23a5698e0d5c87f7b0ca5683b3fbc25a10bcf397e032d19" - name = "golang.org/x/text" - packages = [ - "internal/gen", - "internal/triegen", - "internal/ucd", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - "width", - ] - pruneopts = "NUT" - revision = "a9a820217f98f7c8a207ec1e45a874e1fe12c478" - -[[projects]] - branch = "master" - digest = "1:9f32afa47f2da74cef7fd3ace0c5b8bf7476f432e4a1c7163ecdd22cf17154b8" - name = "google.golang.org/api" - packages = [ - "compute/v1", - "gensupport", - "googleapi", - "googleapi/internal/uritemplates", - ] - pruneopts = "NUT" - revision = "fbbaff1827317122a8a0e1b24de25df8417ce87b" - -[[projects]] - digest = "1:7206d98ec77c90c72ec2c405181a1dcf86965803b6dbc4f98ceab7a5047c37a9" - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/urlfetch", - "urlfetch", - ] - pruneopts = "NUT" - revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" - version = "v1.0.0" - -[[projects]] - digest = "1:73ac483b9160d55bbdaa4ca261234e391bd14ac69cfb172bead53e421197a0f0" - name = "google.golang.org/grpc" - packages = [ - ".", - "codes", - "credentials", - "grpclog", - "internal", - "keepalive", - "metadata", - "naming", - "peer", - "stats", - "tap", - "transport", - ] - pruneopts = "NUT" - revision = "8050b9cbc271307e5a716a9d782803d09b0d6f2d" - version = "v1.2.1" - -[[projects]] - branch = "v3" - digest = "1:1244a9b3856f70d5ffb74bbfd780fc9d47f93f2049fa265c6fb602878f507bf8" - name = "gopkg.in/alexcesaro/quotedprintable.v3" - packages = ["."] - pruneopts = "NUT" - revision = "2caba252f4dc53eaf6b553000885530023f54623" - -[[projects]] - digest = "1:c970218a20933dd0a2eb2006de922217fa9276f57d25009b2a934eb1c50031cc" - name = "gopkg.in/fsnotify.v1" - packages = ["."] - pruneopts = "NUT" - revision = "629574ca2a5df945712d3079857300b5e4da0236" - source = "git@github.com:fsnotify/fsnotify" - -[[projects]] - digest = "1:d852dd703c644c976246382fe1539e8585cc20d642d3e68d3dff8de952237497" - name = "gopkg.in/gomail.v2" - packages = ["."] - pruneopts = "NUT" - revision = "41f3572897373c5538c50a2402db15db079fa4fd" - version = "2.0.0" - -[[projects]] - digest = "1:ef72505cf098abdd34efeea032103377bec06abb61d8a06f002d5d296a4b1185" - name = "gopkg.in/inf.v0" - packages = ["."] - pruneopts = "NUT" - revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" - version = "v0.9.0" - -[[projects]] - branch = "v2" - digest = "1:ad6f94355d292690137613735965bd3688844880fdab90eccf66321910344942" - name = "gopkg.in/yaml.v2" - packages = ["."] - pruneopts = "NUT" - revision = "a5b47d31c556af34a302ce5d659e6fea44d90de0" - -[[projects]] - digest = "1:250d6f7f059d3ebceea5e6deed3561878b0c9807d5c17d58f7ca1890e4193f88" - name = "k8s.io/client-go" - packages = [ - "1.5/discovery", - "1.5/kubernetes", - "1.5/kubernetes/typed/apps/v1alpha1", - "1.5/kubernetes/typed/authentication/v1beta1", - "1.5/kubernetes/typed/authorization/v1beta1", - "1.5/kubernetes/typed/autoscaling/v1", - "1.5/kubernetes/typed/batch/v1", - "1.5/kubernetes/typed/certificates/v1alpha1", - "1.5/kubernetes/typed/core/v1", - "1.5/kubernetes/typed/extensions/v1beta1", - "1.5/kubernetes/typed/policy/v1alpha1", - "1.5/kubernetes/typed/rbac/v1alpha1", - "1.5/kubernetes/typed/storage/v1beta1", - "1.5/pkg/api", - "1.5/pkg/api/errors", - "1.5/pkg/api/install", - 
"1.5/pkg/api/meta", - "1.5/pkg/api/meta/metatypes", - "1.5/pkg/api/resource", - "1.5/pkg/api/unversioned", - "1.5/pkg/api/v1", - "1.5/pkg/api/validation/path", - "1.5/pkg/apimachinery", - "1.5/pkg/apimachinery/announced", - "1.5/pkg/apimachinery/registered", - "1.5/pkg/apis/apps", - "1.5/pkg/apis/apps/install", - "1.5/pkg/apis/apps/v1alpha1", - "1.5/pkg/apis/authentication", - "1.5/pkg/apis/authentication/install", - "1.5/pkg/apis/authentication/v1beta1", - "1.5/pkg/apis/authorization", - "1.5/pkg/apis/authorization/install", - "1.5/pkg/apis/authorization/v1beta1", - "1.5/pkg/apis/autoscaling", - "1.5/pkg/apis/autoscaling/install", - "1.5/pkg/apis/autoscaling/v1", - "1.5/pkg/apis/batch", - "1.5/pkg/apis/batch/install", - "1.5/pkg/apis/batch/v1", - "1.5/pkg/apis/batch/v2alpha1", - "1.5/pkg/apis/certificates", - "1.5/pkg/apis/certificates/install", - "1.5/pkg/apis/certificates/v1alpha1", - "1.5/pkg/apis/extensions", - "1.5/pkg/apis/extensions/install", - "1.5/pkg/apis/extensions/v1beta1", - "1.5/pkg/apis/policy", - "1.5/pkg/apis/policy/install", - "1.5/pkg/apis/policy/v1alpha1", - "1.5/pkg/apis/rbac", - "1.5/pkg/apis/rbac/install", - "1.5/pkg/apis/rbac/v1alpha1", - "1.5/pkg/apis/storage", - "1.5/pkg/apis/storage/install", - "1.5/pkg/apis/storage/v1beta1", - "1.5/pkg/auth/user", - "1.5/pkg/conversion", - "1.5/pkg/conversion/queryparams", - "1.5/pkg/fields", - "1.5/pkg/genericapiserver/openapi/common", - "1.5/pkg/labels", - "1.5/pkg/runtime", - "1.5/pkg/runtime/serializer", - "1.5/pkg/runtime/serializer/json", - "1.5/pkg/runtime/serializer/protobuf", - "1.5/pkg/runtime/serializer/recognizer", - "1.5/pkg/runtime/serializer/streaming", - "1.5/pkg/runtime/serializer/versioning", - "1.5/pkg/selection", - "1.5/pkg/third_party/forked/golang/reflect", - "1.5/pkg/types", - "1.5/pkg/util", - "1.5/pkg/util/cert", - "1.5/pkg/util/clock", - "1.5/pkg/util/errors", - "1.5/pkg/util/flowcontrol", - "1.5/pkg/util/framer", - "1.5/pkg/util/integer", - "1.5/pkg/util/intstr", - "1.5/pkg/util/json", - "1.5/pkg/util/labels", - "1.5/pkg/util/net", - "1.5/pkg/util/parsers", - "1.5/pkg/util/rand", - "1.5/pkg/util/runtime", - "1.5/pkg/util/sets", - "1.5/pkg/util/uuid", - "1.5/pkg/util/validation", - "1.5/pkg/util/validation/field", - "1.5/pkg/util/wait", - "1.5/pkg/util/yaml", - "1.5/pkg/version", - "1.5/pkg/watch", - "1.5/pkg/watch/versioned", - "1.5/plugin/pkg/client/auth", - "1.5/plugin/pkg/client/auth/gcp", - "1.5/plugin/pkg/client/auth/oidc", - "1.5/rest", - "1.5/tools/cache", - "1.5/tools/clientcmd/api", - "1.5/tools/metrics", - "1.5/transport", - ] - pruneopts = "NUT" - revision = "1195e3a8ee1a529d53eed7c624527a68555ddf1f" - version = "v1.5.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "github.com/BurntSushi/toml", - "github.com/aws/aws-sdk-go/aws", - "github.com/aws/aws-sdk-go/aws/awserr", - "github.com/aws/aws-sdk-go/aws/credentials", - "github.com/aws/aws-sdk-go/aws/session", - "github.com/aws/aws-sdk-go/service/autoscaling", - "github.com/benbjohnson/tmpl", - "github.com/boltdb/bolt", - "github.com/cenkalti/backoff", - "github.com/davecgh/go-spew/spew", - "github.com/dgrijalva/jwt-go", - "github.com/docker/docker/api/types", - "github.com/docker/docker/api/types/swarm", - "github.com/dustin/go-humanize", - "github.com/eclipse/paho.mqtt.golang", - "github.com/evanphx/json-patch", - "github.com/ghodss/yaml", - "github.com/golang/protobuf/proto", - "github.com/golang/protobuf/protoc-gen-go", - 
"github.com/google/go-cmp/cmp", - "github.com/google/go-cmp/cmp/cmpopts", - "github.com/google/uuid", - "github.com/gorhill/cronexpr", - "github.com/influxdata/influxdb", - "github.com/influxdata/influxdb/client", - "github.com/influxdata/influxdb/client/v2", - "github.com/influxdata/influxql", - "github.com/influxdata/influxdb/models", - "github.com/influxdata/influxdb/services/collectd", - "github.com/influxdata/influxdb/services/graphite", - "github.com/influxdata/influxdb/services/meta", - "github.com/influxdata/influxdb/services/opentsdb", - "github.com/influxdata/influxdb/services/udp", - "github.com/influxdata/influxdb/toml", - "github.com/influxdata/influxdb/uuid", - "github.com/influxdata/usage-client/v1", - "github.com/influxdata/wlog", - "github.com/k-sone/snmpgo", - "github.com/mailru/easyjson", - "github.com/mailru/easyjson/easyjson", - "github.com/mailru/easyjson/jlexer", - "github.com/mailru/easyjson/jwriter", - "github.com/mitchellh/copystructure", - "github.com/mitchellh/mapstructure", - "github.com/mitchellh/reflectwalk", - "github.com/pkg/errors", - "github.com/prometheus/common/log", - "github.com/prometheus/common/model", - "github.com/prometheus/prometheus/config", - "github.com/prometheus/prometheus/discovery", - "github.com/prometheus/prometheus/discovery/azure", - "github.com/prometheus/prometheus/discovery/consul", - "github.com/prometheus/prometheus/discovery/dns", - "github.com/prometheus/prometheus/discovery/ec2", - "github.com/prometheus/prometheus/discovery/file", - "github.com/prometheus/prometheus/discovery/gce", - "github.com/prometheus/prometheus/discovery/marathon", - "github.com/prometheus/prometheus/discovery/triton", - "github.com/prometheus/prometheus/discovery/zookeeper", - "github.com/prometheus/prometheus/retrieval", - "github.com/prometheus/prometheus/storage", - "github.com/segmentio/kafka-go", - "github.com/serenize/snaker", - "github.com/shurcooL/markdownfmt/markdown", - "github.com/stretchr/testify/assert", - "gopkg.in/gomail.v2", - ] - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index da62fa112..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,79 +0,0 @@ -required = [ - "github.com/benbjohnson/tmpl", - "github.com/golang/protobuf/protoc-gen-go", - "github.com/mailru/easyjson/easyjson", -] - -[prune] - unused-packages = true - go-tests = true - non-go = true - -[[constraint]] - branch = "master" - name = "github.com/davecgh/go-spew" - -[[constraint]] - branch = "master" - name = "github.com/mailru/easyjson" - -[[constraint]] - branch = "master" - name = "github.com/evanphx/json-patch" - -[[constraint]] - branch = "master" - name = "github.com/ghodss/yaml" - -[[constraint]] - branch = "master" - name = "github.com/google/uuid" - -[[constraint]] - name = "github.com/influxdata/influxdb" - version = "~1.1.0" - -[[constraint]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - -[[constraint]] - branch = "logger-targetmanager-wait" - name = "github.com/prometheus/prometheus" - source = "github.com/goller/prometheus" - -[[constraint]] - branch = "master" - name = "github.com/shurcooL/markdownfmt" - -[[constraint]] - name = "github.com/eclipse/paho.mqtt.golang" - version = "~1.0.0" - -[[constraint]] - name = "github.com/gorhill/cronexpr" - branch = "master" - -# Pin BurntSushi/toml to the same 
version used in influxdb -# This also avoids using a version with the WTFPL license -[[constraint]] - name= "github.com/BurntSushi/toml" - revision = "a368813c5e648fee92e5f6c30e3944ff9d5e8895" - -# Force the Azure projects to be a specific older version that Prometheus needs -[[override]] - name = "github.com/Azure/azure-sdk-for-go" - revision = "bd73d950fa4440dae889bd9917bff7cef539f86e" - -[[override]] - name = "github.com/Azure/go-autorest" - revision = "a2fdd780c9a50455cecd249b00bdc3eb73a78e31" - -[[override]] - name= "gopkg.in/fsnotify.v1" - revision = "629574ca2a5df945712d3079857300b5e4da0236" - source = "git@github.com:fsnotify/fsnotify" - -[[override]] - name= "github.com/mailru/easyjson" - revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" diff --git a/alert/topics.go b/alert/topics.go index b34b0618e..e416e1dbb 100644 --- a/alert/topics.go +++ b/alert/topics.go @@ -48,6 +48,7 @@ func (s *Topics) Close() error { return nil } +// Topic returns the topic with the given id, and if it exists or not func (s *Topics) Topic(id string) (*Topic, bool) { s.mu.RLock() t, ok := s.topics[id] @@ -55,24 +56,29 @@ func (s *Topics) Topic(id string) (*Topic, bool) { return t, ok } -func (s *Topics) RestoreTopic(id string, eventStates map[string]EventState) { +func (s *Topics) RestoreTopicNoCopy(topic string, eventStates map[string]*EventState) { s.mu.Lock() defer s.mu.Unlock() - t, ok := s.topics[id] + t := s.ensureTopic(topic) + t.restoreEventStatesNoCopy(eventStates) +} + +func (s *Topics) ensureTopic(topic string) *Topic { + t, ok := s.topics[topic] if !ok { - t = s.newTopic(id) - s.topics[id] = t + t = s.newTopic(topic) + s.topics[topic] = t } - t.restoreEventStates(eventStates) + return t } -func (s *Topics) UpdateEvent(id string, event EventState) { +func (s *Topics) UpdateEvent(topicID string, event EventState) { + s.mu.Lock() defer s.mu.Unlock() - t, ok := s.topics[id] + t, ok := s.topics[topicID] if !ok { - t = s.newTopic(id) - s.topics[id] = t + s.topics[topicID] = s.newTopic(topicID) } t.updateEvent(event) } @@ -104,7 +110,6 @@ func (s *Topics) Collect(event Event) error { } s.mu.Unlock() } - return topic.collect(event) } @@ -266,16 +271,14 @@ func (t *Topic) removeHandler(h Handler) { } } -func (t *Topic) restoreEventStates(eventStates map[string]EventState) { +func (t *Topic) restoreEventStatesNoCopy(eventStates map[string]*EventState) { t.mu.Lock() defer t.mu.Unlock() t.events = make(map[string]*EventState, len(eventStates)) t.sorted = make([]*EventState, 0, len(eventStates)) for id, state := range eventStates { - e := new(EventState) - *e = state - t.events[id] = e - t.sorted = append(t.sorted, e) + t.events[id] = state + t.sorted = append(t.sorted, state) } sort.Sort(sortedStates(t.sorted)) } @@ -315,16 +318,19 @@ func (t *Topic) close() { } func (t *Topic) collect(event Event) error { + prev, ok := t.updateEvent(event.State) if ok { event.previousState = prev } t.collected.Add(1) + return t.handleEvent(event) } func (t *Topic) handleEvent(event Event) error { + t.mu.RLock() defer t.mu.RUnlock() @@ -374,6 +380,7 @@ func (t *Topic) updateEvent(state EventState) (EventState, bool) { type sortedStates []*EventState +// TODO(docmerlin): replaced sortedStates with a heap or something similar func (e sortedStates) Len() int { return len(e) } func (e sortedStates) Swap(i int, j int) { e[i], e[j] = e[j], e[i] } func (e sortedStates) Less(i int, j int) bool { diff --git a/build.sh b/build.sh index bb5e01227..ca310b3b3 100755 --- a/build.sh +++ b/build.sh @@ -13,9 +13,10 @@ 
BUILD_NUM=${BUILD_NUM-$RANDOM} HOME_DIR=/root imagename=kapacitor-builder-img-$BUILD_NUM +PROTO_VERSION=3.18.3 # Build new docker image -docker build -f Dockerfile_build_ubuntu64 -t $imagename $DIR +docker build -f Dockerfile_build_ubuntu64 --build-arg PROTO_VERSION=$PROTO_VERSION -t $imagename $DIR echo "Running build.py" # Run docker diff --git a/cmd/kapacitord/downgrade/downgrade.go b/cmd/kapacitord/downgrade/downgrade.go new file mode 100644 index 000000000..ba43145ac --- /dev/null +++ b/cmd/kapacitord/downgrade/downgrade.go @@ -0,0 +1,87 @@ +package downgrade + +import ( + "flag" + "fmt" + "io" + "os" + + "github.com/influxdata/influxdb/pkg/errors" + "github.com/influxdata/kapacitor/cmd/kapacitord/run" + "github.com/influxdata/kapacitor/keyvalue" + "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/diagnostic" + "github.com/influxdata/kapacitor/services/httpd" + "github.com/influxdata/kapacitor/services/storage" +) + +const downgradeUsage = `usage: downgrade + + downgrade reverts a topic store format upgrade` + +type Diagnostic interface { + Error(msg string, err error) + KapacitorStarting(version, branch, commit string) + GoVersion() + Info(msg string, ctx ...keyvalue.T) +} + +type HTTPDService interface { + AddRoutes([]httpd.Route) error + DelRoutes([]httpd.Route) +} + +// Command represents the command executed by "kapacitord downgrade". +type Command struct { + Stdout io.Writer + Stderr io.Writer + + storageService *storage.Service + diagService *diagnostic.Service +} + +func NewCommand() *Command { + return &Command{ + Stdout: os.Stdout, + Stderr: os.Stderr, + } +} + +func (cmd *Command) Run(args ...string) (rErr error) { + fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, downgradeUsage) } + + pcc := run.NewPrintConfigCommand() + config, err := pcc.PrepareConfig(args, fs) + if err != nil { + return err + } + + // Initialize Logging Services + cmd.diagService = diagnostic.NewService(config.Logging, cmd.Stdout, cmd.Stderr) + if err = cmd.diagService.Open(); err != nil { + return fmt.Errorf("failed to open diagnostic service: %v", err) + } + defer errors.Capture(&rErr, cmd.diagService.Close) + + d := cmd.diagService.NewStorageHandler() + cmd.storageService = storage.NewService(config.Storage, d) + cmd.storageService.HTTPDService = &NoOpHTTPDService{} + + if err = cmd.storageService.Open(); err != nil { + return fmt.Errorf("open service %T: %s", cmd.storageService, err) + } + defer errors.Capture(&rErr, cmd.storageService.Close)() + cmd.diagService.Logger.Info("Starting downgrade of topic store") + return alert.MigrateTopicStoreV2V1(cmd.storageService) +} + +type NoOpHTTPDService struct { +} + +func (s *NoOpHTTPDService) AddRoutes([]httpd.Route) error { + return nil +} + +func (s *NoOpHTTPDService) DelRoutes([]httpd.Route) { +} diff --git a/cmd/kapacitord/help/help.go b/cmd/kapacitord/help/help.go index de1a850c0..bb2026044 100644 --- a/cmd/kapacitord/help/help.go +++ b/cmd/kapacitord/help/help.go @@ -37,6 +37,7 @@ The commands are: config display the default configuration run run node with existing configuration version displays the Kapacitor version + downgrade reverts a topic store format upgrade "run" is the default command. 
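The new downgrade command above declares a named return value, rErr, and defers each service shutdown through an error-capture helper, so a failure in Close still reaches the caller instead of being dropped by a bare deferred call. Below is a minimal, self-contained sketch of that pattern; the capture helper here is a stand-in written for illustration, not the errors.Capture implementation imported in the diff.

```go
package main

import "fmt"

// capture returns a func suitable for defer: it runs fn and, if fn fails
// while no earlier error has been recorded, stores that failure in *rErr.
// It stands in for the error-capture helper used by the downgrade command.
func capture(rErr *error, fn func() error) func() {
	return func() {
		if err := fn(); err != nil && *rErr == nil {
			*rErr = err
		}
	}
}

// runWithCleanup opens a resource, defers its close through capture, and
// reports either the work error or the close error via the named return rErr.
func runWithCleanup(open, closeFn func() error) (rErr error) {
	if err := open(); err != nil {
		return fmt.Errorf("open: %w", err)
	}
	// Note the trailing (): capture runs now and returns the closure that
	// defer invokes on the way out.
	defer capture(&rErr, closeFn)()
	// ... real work would happen here ...
	return nil
}

func main() {
	err := runWithCleanup(
		func() error { return nil },
		func() error { return fmt.Errorf("close failed") },
	)
	fmt.Println(err) // prints "close failed"
}
```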
diff --git a/cmd/kapacitord/main.go b/cmd/kapacitord/main.go index 5750002c0..a9886da45 100644 --- a/cmd/kapacitord/main.go +++ b/cmd/kapacitord/main.go @@ -11,6 +11,7 @@ import ( "syscall" "time" + "github.com/influxdata/kapacitor/cmd/kapacitord/downgrade" "github.com/influxdata/kapacitor/cmd/kapacitord/help" "github.com/influxdata/kapacitor/cmd/kapacitord/run" "github.com/influxdata/kapacitor/services/diagnostic" @@ -142,6 +143,10 @@ func (m *Main) Run(args ...string) error { if err := help.NewCommand().Run(args...); err != nil { return fmt.Errorf("help: %s", err) } + case "downgrade": + if err := downgrade.NewCommand().Run(args...); err != nil { + return fmt.Errorf("downgrade: %w", err) + } default: return fmt.Errorf(`unknown command "%s"`+"\n"+`Run 'kapacitord help' for usage`+"\n\n", name) } diff --git a/cmd/kapacitord/run/config_command.go b/cmd/kapacitord/run/config_command.go index eb8d9dd35..eb2ee23f9 100644 --- a/cmd/kapacitord/run/config_command.go +++ b/cmd/kapacitord/run/config_command.go @@ -31,22 +31,37 @@ func NewPrintConfigCommand() *PrintConfigCommand { func (cmd *PrintConfigCommand) Run(args ...string) error { // Parse command flags. fs := flag.NewFlagSet("", flag.ContinueOnError) + fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } + + config, err := cmd.PrepareConfig(args, fs) + if err != nil { + return err + } + + if err = toml.NewEncoder(cmd.Stdout).Encode(config); err != nil { + return err + } + _, err = fmt.Fprint(cmd.Stdout, "\n") + + return err +} + +func (cmd *PrintConfigCommand) PrepareConfig(args []string, fs *flag.FlagSet) (*server.Config, error) { configPath := fs.String("config", "", "") hostname := fs.String("hostname", "", "") - fs.Usage = func() { fmt.Fprintln(cmd.Stderr, printConfigUsage) } if err := fs.Parse(args); err != nil { - return err + return nil, err } // Parse config from path. config, err := cmd.parseConfig(FindConfigPath(*configPath)) if err != nil { - return fmt.Errorf("parse config: %s", err) + return nil, fmt.Errorf("parse config: %s", err) } // Apply any environment variables on top of the parsed config if err := config.ApplyEnvOverrides(); err != nil { - return fmt.Errorf("apply env config: %v", err) + return nil, fmt.Errorf("apply env config: %v", err) } // Override config properties. @@ -56,13 +71,9 @@ func (cmd *PrintConfigCommand) Run(args ...string) error { // Validate the configuration. if err := config.Validate(); err != nil { - return fmt.Errorf("%s. To generate a valid configuration file run `kapacitord config > kapacitor.generated.conf`.", err) + return nil, fmt.Errorf("%s. To generate a valid configuration file run `kapacitord config > kapacitor.generated.conf`.", err) } - - toml.NewEncoder(cmd.Stdout).Encode(config) - fmt.Fprint(cmd.Stdout, "\n") - - return nil + return config, nil } // FindConfigPath returns the config path specified or searches for a valid config path. 
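One detail in the main.go hunk above: the new downgrade case wraps its failure with %w, while the older cases use %s. %w preserves the wrapped error so callers can still match it with errors.Is or errors.As, whereas %s flattens it to text. A short, self-contained illustration follows; the sentinel error name is invented for the example.

```go
package main

import (
	"errors"
	"fmt"
)

// errStoreLocked is a made-up sentinel error used only for this illustration.
var errStoreLocked = errors.New("topic store is locked")

func main() {
	wrapped := fmt.Errorf("downgrade: %w", errStoreLocked)   // keeps the error chain
	flattened := fmt.Errorf("downgrade: %s", errStoreLocked) // text only

	fmt.Println(errors.Is(wrapped, errStoreLocked))   // true
	fmt.Println(errors.Is(flattened, errStoreLocked)) // false
}
```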
diff --git a/go.mod b/go.mod index ceea33967..b556378f0 100644 --- a/go.mod +++ b/go.mod @@ -45,12 +45,12 @@ require ( github.com/serenize/snaker v0.0.0-20161123064335-543781d2b79b github.com/shurcooL/markdownfmt v0.0.0-20170214213350-10aae0a270ab github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 - github.com/stretchr/testify v1.7.0 + github.com/stretchr/testify v1.8.1 github.com/uber/jaeger-client-go v2.28.0+incompatible github.com/urfave/cli/v2 v2.3.0 github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c github.com/zeebo/mwc v0.0.4 - go.etcd.io/bbolt v1.3.5 + go.etcd.io/bbolt v1.3.7 go.uber.org/zap v1.16.0 golang.org/x/crypto v0.0.0-20220214200702-86341886e292 golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f @@ -230,7 +230,7 @@ require ( golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 // indirect golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c // indirect golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect - golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect + golang.org/x/sys v0.4.0 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect diff --git a/go.sum b/go.sum index 1d139fe8a..420b63d8d 100644 --- a/go.sum +++ b/go.sum @@ -1256,8 +1256,10 @@ github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3 github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= @@ -1265,8 +1267,11 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod 
h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tcnksm/go-input v0.0.0-20180404061846-548a7d7a8ee8/go.mod h1:IlWNj9v/13q7xFbaK4mbyzMNwrZLaWSHx/aibKIZuIg= @@ -1343,8 +1348,9 @@ github.com/zeebo/mwc v0.0.4/go.mod h1:qNHfgp/ZCpQNcJHwKcO5EP3VgaBrW6DPohsK4Qfyxx github.com/zeebo/xxh3 v0.13.0/go.mod h1:AQY73TOrhF3jNsdiM9zZOb8MThrYbZONHj7ryDBaLpg= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= @@ -1674,8 +1680,9 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211117180635-dee7805ff2e1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= diff --git a/integrations/batcher_test.go b/integrations/batcher_test.go index a1cf93c36..747b4fb76 100644 --- a/integrations/batcher_test.go +++ b/integrations/batcher_test.go @@ -70,7 +70,7 @@ func TestBatch_InvalidQuery(t *testing.T) { tm.TaskStore = taskStore{} tm.DeadmanService = deadman{} tm.Open() - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() testCases := []struct { script string @@ -1630,7 +1630,7 @@ batch .post('` + ts.URL + `') ` clock, et, replayErr, tm := testBatcher(t, "TestBatch_AlertStateChangesOnly", script) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, 40*time.Second) if err != nil { @@ -1710,7 +1710,7 @@ batch .post('` + ts.URL + `') ` clock, et, replayErr, tm := testBatcher(t, "TestBatch_AlertStateChangesOnly", script) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, 40*time.Second) if err != nil { @@ -3740,8 +3740,7 @@ batch c := make(chan bool, 1) go func() { clock, et, replayErr, tm := testBatcher(t, "TestBatch_AlertPostTimeout", script) - defer tm.Close() - + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, 40*time.Second) if err != nil { t.Error(err) @@ -3765,7 +3764,7 @@ func testBatcher(t *testing.T, name, script 
string) (clock.Setter, *kapacitor.Ex } // Create a new execution env - tm, err := createTaskMaster("testBatcher") + tm, _, err := createTaskMaster(t, "testBatcher", false) if err != nil { t.Fatal(err) } @@ -3817,7 +3816,7 @@ func testBatcherWithOutput( ignoreOrder bool, ) { clock, et, replayErr, tm := testBatcher(t, name, script) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, duration) if err != nil { @@ -3850,3 +3849,13 @@ func testBatcherWithOutput( } } } + +func checkDeferredErrors(t *testing.T, cleanup func() error) func() { + t.Helper() + return func() { + t.Helper() + if err := cleanup(); err != nil { + t.Error(err) + } + } +} diff --git a/integrations/benchmark_test.go b/integrations/benchmark_test.go index 3e1e302ff..94cf5dcd8 100644 --- a/integrations/benchmark_test.go +++ b/integrations/benchmark_test.go @@ -187,7 +187,7 @@ func Bench(b *testing.B, tasksCount, pointCount, expectedProcessedCount int, tic for i := 0; i < b.N; i++ { // Do not time setup b.StopTimer() - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(b, "testStreamer", false) if err != nil { b.Fatal(err) } diff --git a/integrations/streamer_test.go b/integrations/streamer_test.go index 4d908654e..426101cd1 100644 --- a/integrations/streamer_test.go +++ b/integrations/streamer_test.go @@ -17,6 +17,7 @@ import ( "path" "path/filepath" "reflect" + "strings" "sync/atomic" "testing" "text/template" @@ -104,7 +105,9 @@ func init() { out = os.Stderr } diagService = diagnostic.NewService(diagnostic.NewConfig(), out, out) - diagService.Open() + if err := diagService.Open(); err != nil { + panic(err) + } } type testCtxStr string @@ -1620,7 +1623,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle_No_Data", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle_No_Data", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -1729,7 +1733,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -1837,7 +1842,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle_No_Idle", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle_No_Idle", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -1944,7 +1950,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Idle", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -2030,7 +2037,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period_No_Data", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, 
"TestStream_Barrier_Period_No_Data", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -2140,7 +2148,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -2248,7 +2257,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period_No_Idle", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period_No_Idle", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -2355,7 +2365,8 @@ stream ` dataChannel := make(chan edge.PointMessage) - cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period", script, dataChannel, clock, nil) + tm, _, cleanupTest := testStreamerWithInputChannel(t, "TestStream_Barrier_Period", script, dataChannel, clock, nil, nil, false) + defer checkDeferredErrors(t, tm.Close)() defer func() { cleanupTest() @@ -11975,7 +11986,7 @@ stream ` // Create a new execution env - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(t, "testStreamer", false) if err != nil { t.Fatal(err) } @@ -12031,12 +12042,12 @@ stream }, } // Create a new execution env - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(t, "testStreamer", false) if err != nil { t.Fatal(err) } tm.Open() - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() // Create the loopback task taskLoop, err := tm.NewTask("KapacitorLoopback-Loop", scriptLoop, kapacitor.StreamTask, dbrps, 0, nil) @@ -12163,12 +12174,12 @@ stream }, } // Create a new execution env - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(t, "testStreamer", false) if err != nil { t.Fatal(err) } tm.Open() - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() // Create the loopback task taskLoop, err := tm.NewTask("KapacitorLoopback-Loop", scriptLoop, kapacitor.StreamTask, dbrps, 0, nil) @@ -12590,7 +12601,7 @@ stream name := "TestStream_InfluxDBOut" // Create a new execution env - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(t, "testStreamer", false) if err != nil { t.Fatal(err) } @@ -12610,7 +12621,7 @@ stream } t.Log(string(et.Task.Dot())) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() // Wait till we received a request if e := <-done; e != nil { @@ -12650,7 +12661,7 @@ stream name := "TestStream_InfluxDBOut" // Create a new execution env - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(t, "testStreamer", false) if err != nil { t.Fatal(err) } @@ -12670,7 +12681,7 @@ stream } t.Log(string(et.Task.Dot())) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() // Wait till we received a request if e := <-done; e != nil { @@ -13525,7 +13536,7 @@ func testStreamerCardinality( tmInit func(tm *kapacitor.TaskMaster), ) { clock, et, replayErr, tm := testStreamer(t, name, script, tmInit) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, 20*time.Second) if err != nil { @@ -13681,10 +13692,114 @@ data testStreamerWithOutput(t, 
"TestStream_StateTracking", script, 4*time.Second, er, false, nil) } +func TestStream_AlertReset(t *testing.T) { + requestCount := int32(0) + + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + result := models.Result{} + dec := json.NewDecoder(r.Body) + err := dec.Decode(&result) + if err != nil { + t.Fatal(err) + } + atomic.AddInt32(&requestCount, 1) + })) + defer ts.Close() + + var script = ` +var critThreshold = 80.0 + +var critResetThreshold = 70.0 + +stream + |from() + .measurement('cpu') + .groupBy('host') + |alert() + .id('kapacitor/{{ .Name }}/{{ index .Tags "host" }}') + .details('details') + .idField('id') + .idTag('id') + .levelField('level') + .levelTag('level') + .crit(lambda: "value" > critThreshold) + .critReset(lambda: "value" < critResetThreshold) + .topic('cpu') + |httpPost('` + ts.URL + `') +` + const count = 5 + const alertName = "TestStream_Alert" + + // Create a clock with a Zero point twice the number of + clck := clock.New(time.Now().UTC().Add(time.Duration(count*-2) * time.Second)) + clck.Set(time.Now().UTC()) + + dataChannel, fillFunc := makeAlertResetTestChannel(clck, count, 85.0, 100.0) + go fillFunc() + tm, store, cleanup := testStreamerWithInputChannel(t, alertName, script, dataChannel, clck, nil, nil, true) + defer checkDeferredErrors(t, tm.Close)() + cleanup() + const alertID = "kapacitor/cpu/serverA" + keys1, alertExists1, err := store.BucketEntries("cpu", alertID) + if err != nil { + t.Fatal(err) + } + if len(keys1) != 2 { + t.Fatalf("wrong number of keys, expected 2, got %d: %s", len(keys1), strings.Join(keys1, ", ")) + } else if !alertExists1 { + t.Fatalf("missing alert history for %q", alertID) + } + + dataChannel, fillFunc = makeAlertResetTestChannel(clck, 1, 23.0, 36.0) + go fillFunc() + _, _, cleanup = testStreamerWithInputChannel(t, alertName, script, dataChannel, clck, tm, nil, true) + cleanup() + + keys2, alertExists2, err := store.BucketEntries("cpu", alertID) + if err != nil { + t.Fatal(err) + } + if len(keys2) != 1 { + t.Fatalf("wrong number of keys, expected 1, got %d: %s", len(keys2), strings.Join(keys2, ", ")) + } else if alertExists2 { + t.Fatalf("alert history for %q not deleted", alertID) + } + if rc := atomic.LoadInt32(&requestCount); rc != count+1 { + t.Errorf("got %v exp %v", rc, count+1) + } +} + +func makeAlertResetTestChannel(c clock.Clock, n int, low float64, high float64) (<-chan edge.PointMessage, func()) { + dataChannel := make(chan edge.PointMessage) + + f := func() { + for i := 0; i < n; i++ { + var host string + if i%2 == 0 { + host = "serverA" + } else { + host = "serverB" + } + dataChannel <- edge.NewPointMessage( + "cpu", + "dbname", + "rpname", + models.Dimensions{}, + models.Fields{"value": rand.Float64()*(high-low) + low}, + models.Tags{"host": host, "type": "idle"}, + c.Zero().Add(time.Duration(i)*time.Second), + ) + } + time.Sleep(5 * time.Second) + close(dataChannel) + } + return dataChannel, f +} + // Helper test function for streamer func testStreamer( t *testing.T, - name, + name string, script string, tmInit func(tm *kapacitor.TaskMaster), ) ( @@ -13701,15 +13816,16 @@ func testStreamer( } // Create a new execution env - tm, err := createTaskMaster("testStreamer") + tm, _, err := createTaskMaster(t, "testStreamer", false) if err != nil { t.Fatal(err) } if tmInit != nil { tmInit(tm) } - tm.Open() - + if err = tm.Open(); err != nil { + t.Fatal(err) + } //Create the task task, err := tm.NewTask(name, script, kapacitor.StreamTask, dbrps, 0, nil) if err != nil { @@ -13773,24 
+13889,35 @@ func testStreamerWithInputChannel( script string, points <-chan edge.PointMessage, clck clock.Clock, + tm *kapacitor.TaskMaster, tmInit func(tm *kapacitor.TaskMaster), -) (cleanup func()) { + persistTopic bool, +) ( + taskMaster *kapacitor.TaskMaster, + store *storagetest.TestStore, + cleanup func(), +) { if testing.Verbose() { wlog.SetLevel(wlog.DEBUG) } else { wlog.SetLevel(wlog.OFF) } - // Create a new execution env - tm, err := createTaskMaster("testStreamer") - if err != nil { - t.Fatal(err) - } - if tmInit != nil { - tmInit(tm) - } - tm.Open() + var err error + if tm == nil { + // Create a new execution env + tm, store, err = createTaskMaster(t, "testStreamer", persistTopic) + if err != nil { + t.Fatal(err) + } + if tmInit != nil { + tmInit(tm) + } + if err = tm.Open(); err != nil { + t.Fatal(err) + } + } //Create the task task, err := tm.NewTask(name, script, kapacitor.StreamTask, dbrps, 0, nil) if err != nil { @@ -13822,10 +13949,9 @@ func testStreamerWithInputChannel( if err := et.Wait(); err != nil { t.Error(err) } - t.Log(string(et.Task.Dot())) } - return + return tm, store, cleanup } func testStreamerNoOutput( @@ -13837,7 +13963,7 @@ func testStreamerNoOutput( ) { t.Helper() clock, et, replayErr, tm := testStreamer(t, name, script, tmInit) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, duration) if err != nil { t.Error(err) @@ -13855,7 +13981,7 @@ func testStreamerWithOutput( ) { t.Helper() clock, et, replayErr, tm := testStreamer(t, name, script, tmInit) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() err := fastForwardTask(clock, et, replayErr, tm, duration) if err != nil { @@ -13911,7 +14037,7 @@ func testStreamerWithSteppedOutput( ) { t.Skip("Test is not deterministic, need a mechanisim to safely step task execution.") clock, et, replayErr, tm := testStreamer(t, name, script, tmInit) - defer tm.Close() + defer checkDeferredErrors(t, tm.Close)() for s, step := range steps { // Move time forward @@ -14018,7 +14144,7 @@ func compareListIgnoreOrder(got, exp []interface{}, cmpF func(got, exp interface return nil } -func createTaskMaster(name string) (*kapacitor.TaskMaster, error) { +func createTaskMaster(t storagetest.CleanedTest, name string, persistTopics bool) (*kapacitor.TaskMaster, *storagetest.TestStore, error) { d := diagService.NewKapacitorHandler() tm := kapacitor.NewTaskMaster(name, newServerInfo(), d) httpdService := newHTTPDService() @@ -14027,11 +14153,14 @@ func createTaskMaster(name string) (*kapacitor.TaskMaster, error) { tm.DeadmanService = deadman{} tm.HTTPPostService, _ = httppost.NewService(nil, diagService.NewHTTPPostHandler()) as := alertservice.NewService(diagService.NewAlertServiceHandler(), nil, 0) - as.StorageService = storagetest.New() + as.PersistTopics = persistTopics + store := storagetest.New(t, diagService.NewStorageHandler()) + tm.TestCloser = store + as.StorageService = store as.HTTPDService = httpdService if err := as.Open(); err != nil { - return nil, err + return nil, nil, err } tm.AlertService = as - return tm, nil + return tm, store, nil } diff --git a/server/migrate_topic_store_test.go b/server/migrate_topic_store_test.go new file mode 100644 index 000000000..591674280 --- /dev/null +++ b/server/migrate_topic_store_test.go @@ -0,0 +1,274 @@ +package server_test + +import ( + "bytes" + "crypto/sha256" + "errors" + "fmt" + "io" + "os" + "testing" + + "github.com/influxdata/kapacitor/services/alert" + 
"github.com/influxdata/kapacitor/services/alert/alerttest" + "github.com/influxdata/kapacitor/services/storage" +) + +type testData struct { + name string + topicEventStatesMap map[string]map[string]alert.EventState +} + +var tests []testData = []testData{ + { + name: "one topic", + topicEventStatesMap: map[string]map[string]alert.EventState{ + "t1": alerttest.MakeEventStates(alerttest.EventStateSpec{N: 300, Mwc: 6, Dwc: 20}), + }, + }, + { + name: "three topics", + topicEventStatesMap: map[string]map[string]alert.EventState{ + "t1": alerttest.MakeEventStates(alerttest.EventStateSpec{N: 100, Mwc: 5, Dwc: 15}), + "t2": alerttest.MakeEventStates(alerttest.EventStateSpec{N: 130, Mwc: 6, Dwc: 12}), + "t3": alerttest.MakeEventStates(alerttest.EventStateSpec{N: 50, Mwc: 6, Dwc: 17}), + }, + }, + { + name: "two topics", + topicEventStatesMap: map[string]map[string]alert.EventState{ + "t1": alerttest.MakeEventStates(alerttest.EventStateSpec{N: 100, Mwc: 5, Dwc: 15}), + "t2": alerttest.MakeEventStates(alerttest.EventStateSpec{N: 130, Mwc: 6, Dwc: 12}), + }, + }, +} + +func Test_Migrate_TopicStore(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create default config + c := NewConfig(t) + // Force this for the test. + c.Alert.PersistTopics = true + s := OpenServer(c) + Client(s) + defer s.Close() + + // Create V1 topic store. + TopicStatesDAO, err := alert.NewTopicStateKV(s.AlertService.StorageService.Store(alert.AlertNameSpace)) + if err != nil { + t.Fatalf("cannot create version one topic store: %v", err) + } + // Put the test data in the V1 topic store + for topic, es := range tt.topicEventStatesMap { + if err = TopicStatesDAO.Put(alert.TopicState{Topic: topic, EventStates: es}); err != nil { + t.Fatalf("cannot save version one topic store test data for topic %q: %v", topic, err) + } + } + err = alert.DeleteV2TopicStore(s.AlertService.StorageService.Store(alert.TopicStatesNameSpace)) + if err != nil { + t.Fatalf("cannot delete version two topic store: %v", err) + } + err = s.StorageService.Versions().Set(alert.TopicStoreVersionKey, "") + if err != nil { + t.Fatalf("cannot reset version in topic store: %v", err) + } + // Convert the V1 topic Store to a V2 topic store + err = s.AlertService.MigrateTopicStoreV1V2() + if err != nil { + t.Fatalf("failure migrating topic store from version one to version two: %v", err) + } + + // Check that the topic store version was updated + version, err := s.StorageService.Versions().Get(alert.TopicStoreVersionKey) + if err != nil { + t.Fatalf("cannot retrieve version from topic store: %v", err) + } + if version != alert.TopicStoreVersion2 { + t.Fatalf("topic store version: expected: %q, got: %q", alert.TopicStoreVersion2, version) + } + + count := 0 + err = alert.WalkTopicBuckets(s.AlertService.StorageService.Store(alert.TopicStatesNameSpace), func(tx storage.ReadOnlyTx, topic string) error { + esStoredV2, err := alert.LoadTopicBucket(tx, []byte(topic)) + if err != nil { + return err + } + count++ + if esOriginal, ok := tt.topicEventStatesMap[topic]; !ok { + return fmt.Errorf("topic %q not found in version two store: %w", topic, alert.ErrNoTopicStateExists) + } else if ok, msg := eventStateMapCompare(esOriginal, esStoredV2); !ok { + return fmt.Errorf("event states for topic %q differ between original and V2 storage: %s", topic, msg) + } + return nil + }) + if err != nil { + t.Fatalf("migration V1 to V2 error: %v", err) + } else if count != len(tt.topicEventStatesMap) { + t.Fatalf("wrong number of store topics. 
Expected %d, got %d", len(tt.topicEventStatesMap), count) + } + err = alert.MigrateTopicStoreV2V1(s.StorageService) + + if err != nil { + t.Fatalf("migration V2 to V1 error: %v", err) + } + // Load all the saved topic states (plus one in case of error or duplicates in saving). + topicStates, err := TopicStatesDAO.List("", 0, len(tt.topicEventStatesMap)+1) + if err != nil { + t.Fatalf("failed to load saved topic states: %v", err) + } + count = 0 + for _, ts := range topicStates { + if esOriginal, ok := tt.topicEventStatesMap[ts.Topic]; !ok { + t.Fatalf("topic %q not found in version one store: %v", ts.Topic, alert.ErrNoTopicStateExists) + } else if ok, msg := eventStateMapCompare(esOriginal, ts.EventStates); !ok { + t.Fatalf("event states for topic %q differ between V2 storage and original: %s", ts.Topic, msg) + } else { + count++ + } + } + if count != len(tt.topicEventStatesMap) { + t.Fatalf("wrong number of store topics. Expected %d, got %d", len(tt.topicEventStatesMap), count) + } + }) + } +} + +var errVersionSetFail error = errors.New("version set failure") + +type storageTestService struct { + alert.StorageService + versions storageTestVersions +} + +func (s *storageTestService) Versions() storage.Versions { + return &s.versions +} + +type storageTestVersions struct { + storage.Versions + setCount, setFailOn int +} + +func (sv *storageTestVersions) Set(id string, version string) error { + if sv.setFailOn == sv.setCount { + return fmt.Errorf("on call %d: %w", sv.setFailOn+1, errVersionSetFail) + } else { + sv.setCount++ + return sv.Versions.Set(id, version) + } +} + +func newStorageTestService(sService alert.StorageService, setFailCount int) *storageTestService { + return &storageTestService{ + StorageService: sService, + versions: storageTestVersions{ + Versions: sService.Versions(), + setCount: 0, + setFailOn: setFailCount, + }, + } +} + +func Test_MigrateTopicStoreFail(t *testing.T) { + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // Create default config + c := NewConfig(t) + // Force this for the test. + c.Alert.PersistTopics = true + s := OpenServer(c) + Client(s) + defer s.Close() + + // Wrap the StorageService in a testing decorator + s.AlertService.StorageService = newStorageTestService(s.AlertService.StorageService, 0) + // Create V1 topic store. + TopicStatesDAO, err := alert.NewTopicStateKV(s.AlertService.StorageService.Store(alert.AlertNameSpace)) + if err != nil { + t.Fatalf("cannot create version one topic store: %v", err) + } + // Put the test data in the V1 topic store + for topic, es := range tt.topicEventStatesMap { + if err = TopicStatesDAO.Put(alert.TopicState{Topic: topic, EventStates: es}); err != nil { + t.Fatalf("cannot save version one topic store test data for topic %q: %v", topic, err) + } + } + err = alert.DeleteV2TopicStore(s.AlertService.StorageService.Store(alert.TopicStatesNameSpace)) + if err != nil { + t.Fatalf("cannot delete version two topic store: %v", err) + } + err = s.StorageService.Versions().Set(alert.TopicStoreVersionKey, "") + if err != nil { + t.Fatalf("cannot reset version in topic store: %v", err) + } + + hash, err := HashFileSHA256(s.AlertService.StorageService.Path()) + if err != nil { + t.Fatal(err) + } + // Convert the V1 topic Store to a V2 topic store and fail. + err = s.AlertService.MigrateTopicStoreV1V2() + if err == nil || !errors.Is(err, errVersionSetFail) { + t.Fatalf("wrong or missing error. 
expected %v, got %v", errVersionSetFail, err) + } + + backup := s.AlertService.StorageService.Path() + alert.TopicStoreBackupSuffix + _, err = os.Stat(backup) + if err == nil || !errors.Is(err, os.ErrNotExist) { + t.Fatalf("backup file %q should be deleted. expected %v, got %v", backup, os.ErrNotExist, err) + } + newHash, err := HashFileSHA256(s.AlertService.StorageService.Path()) + if err != nil { + t.Fatal(err) + } else if !bytes.Equal(hash, newHash) { + t.Fatalf("restored BoltDB not the same as original: %q", s.AlertService.StorageService.Path()) + } + }) + } +} + +func HashFileSHA256(name string) ([]byte, error) { + f, err := os.Open(name) + if err != nil { + return nil, fmt.Errorf("cannot open %q for hashing: %w", name, err) + } + hash := sha256.New() + if _, err := io.Copy(hash, f); err != nil { + return nil, fmt.Errorf("failed to compute hash for %q: %w", name, err) + } + return hash.Sum(nil), nil +} + +func eventStateMapCompare(em1, em2 map[string]alert.EventState) (bool, string) { + for id, es1 := range em1 { + if es2, ok := em2[id]; !ok { + return false, fmt.Sprintf("second map missing id: %q", id) + } else if match, msg := eventStateCompare(&es1, &es2); !match { + return match, msg + } + } + for id := range em2 { + if _, ok := em1[id]; !ok { + return false, fmt.Sprintf("first map missing id: %q", id) + } + } + return true, "" +} + +func eventStateCompare(es1, es2 *alert.EventState) (bool, string) { + if es1.Level != es2.Level { + return false, fmt.Sprintf("EventState.Level differs: %v != %v", es1.Level, es2.Level) + } else if es1.Message != es2.Message { + return false, fmt.Sprintf("EventState.Message differs: %q != %q", es1.Message, es2.Message) + } else if fmt.Sprintf("%v", es1.Time) != fmt.Sprintf("%v", es2.Time) { + // This is a hack to avoid JSON loss of precision causing test failures + return false, fmt.Sprintf("EventState.Time differs: %v != %v", es1.Time, es2.Time) + } else if es1.Duration != es2.Duration { + return false, fmt.Sprintf("EventState.Duration differs: %v != %v", es1.Duration, es2.Duration) + } else if es1.Details != es2.Details { + return false, fmt.Sprintf("EventState.Details differ: %q != %q", es1.Details, es2.Details) + } else { + return true, "" + } +} diff --git a/server/server.go b/server/server.go index d7923dc14..9a3df6ea6 100644 --- a/server/server.go +++ b/server/server.go @@ -1135,7 +1135,6 @@ func (s *Server) startServices() error { return fmt.Errorf("open service %T: %s", service, err) } s.Diag.Debug("opened service", keyvalue.KV("service", fmt.Sprintf("%T", service))) - // Apply config overrides after the config override service has been opened and before any dynamic services.
if service == s.ConfigOverrideService && !s.config.SkipConfigOverrides && s.config.ConfigOverride.Enabled { // Apply initial config updates diff --git a/server/server_test.go b/server/server_test.go index 6b24981b8..82ef5caec 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -268,6 +268,7 @@ func TestServer_Pprof_Index(t *testing.T) { }) } } + func TestServer_Authenticate_Fail(t *testing.T) { conf := NewConfig(t) conf.HTTP.AuthEnabled = true @@ -480,6 +481,7 @@ func TestServer_CreateUser(t *testing.T) { t.Fatalf("unexpected permissions got %s exp %s", user.Permissions, permissions) } } + func TestServer_CreateTask(t *testing.T) { s, cli := OpenDefaultServer(t) defer s.Close() @@ -2857,7 +2859,6 @@ test value=1 0000000011 func TestServer_UpdateTaskID(t *testing.T) { s, cli := OpenDefaultServer(t) defer s.Close() - id := "testTaskID" ttype := client.StreamTask dbrps := []client.DBRP{ @@ -9990,70 +9991,70 @@ func TestServer_AlertHandlers_CRUD(t *testing.T) { }, } for _, tc := range testCases { - // Create default config - c := NewConfig(t) - s := OpenServer(c) - cli := Client(s) - defer s.Close() - - h, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tc.topic), tc.create) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(h, tc.expCreate) { - t.Errorf("unexpected handler created:\ngot\n%#v\nexp\n%#v\n", h, tc.expCreate) - } + t.Run(tc.topic, func(t *testing.T) { + // Create default config + c := NewConfig(t) + s := OpenServer(c) + cli := Client(s) + defer s.Close() + h, err := cli.CreateTopicHandler(cli.TopicHandlersLink(tc.topic), tc.create) + if err != nil { + t.Fatal(err) + } - h, err = cli.PatchTopicHandler(h.Link, tc.patch) - if err != nil { - t.Fatal(err) - } + if !reflect.DeepEqual(h, tc.expCreate) { + t.Errorf("unexpected handler created:\ngot\n%#v\nexp\n%#v\n", h, tc.expCreate) + } - if !reflect.DeepEqual(h, tc.expPatch) { - t.Errorf("unexpected handler patched:\ngot\n%#v\nexp\n%#v\n", h, tc.expPatch) - } + h, err = cli.PatchTopicHandler(h.Link, tc.patch) + if err != nil { + t.Fatal(err) + } - h, err = cli.ReplaceTopicHandler(h.Link, tc.put) - if err != nil { - t.Fatal(err) - } + if !reflect.DeepEqual(h, tc.expPatch) { + t.Errorf("unexpected handler patched:\ngot\n%#v\nexp\n%#v\n", h, tc.expPatch) + } - if !reflect.DeepEqual(h, tc.expPut) { - t.Errorf("unexpected handler put:\ngot\n%#v\nexp\n%#v\n", h, tc.expPut) - } + h, err = cli.ReplaceTopicHandler(h.Link, tc.put) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(h, tc.expPut) { + t.Errorf("unexpected handler put:\ngot\n%#v\nexp\n%#v\n", h, tc.expPut) + } - // Restart server - s.Restart() + // Restart server + s.Restart() - rh, err := cli.TopicHandler(h.Link) - if err != nil { - t.Fatalf("could not find handler after restart: %v", err) - } - if got, exp := rh, h; !reflect.DeepEqual(got, exp) { - t.Errorf("unexpected handler after restart:\ngot\n%#v\nexp\n%#v\n", got, exp) - } - - err = cli.DeleteTopicHandler(h.Link) - if err != nil { - t.Fatal(err) - } + rh, err := cli.TopicHandler(h.Link) + if err != nil { + t.Fatalf("could not find handler after restart: %v", err) + } + if got, exp := rh, h; !reflect.DeepEqual(got, exp) { + t.Errorf("unexpected handler after restart:\ngot\n%#v\nexp\n%#v\n", got, exp) + } - _, err = cli.TopicHandler(h.Link) - if err == nil { - t.Errorf("expected handler to be deleted") - } + err = cli.DeleteTopicHandler(h.Link) + if err != nil { + t.Fatal(err) + } - handlers, err := cli.ListTopicHandlers(cli.TopicHandlersLink(tc.topic), nil) - if err != nil { - 
t.Fatal(err) - } - for _, h := range handlers.Handlers { - if h.ID == tc.expPut.ID { + _, err = cli.TopicHandler(h.Link) + if err == nil { t.Errorf("expected handler to be deleted") - break } - } + + handlers, err := cli.ListTopicHandlers(cli.TopicHandlersLink(tc.topic), nil) + if err != nil { + t.Fatal(err) + } + for _, h := range handlers.Handlers { + if h.ID == tc.expPut.ID { + t.Errorf("expected handler to be deleted") + break + } + } + }) } } @@ -12001,6 +12002,7 @@ stream v := url.Values{} v.Add("precision", "s") s.MustWrite("mydb", "myrp", point, v) + time.Sleep(15 * time.Second) s.Restart() @@ -12029,9 +12031,8 @@ stream exp := []alert.Data{alertData} got := ts.Data() if !reflect.DeepEqual(exp, got) { - t.Errorf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got) + t.Fatalf("unexpected tcp request:\nexp\n%+v\ngot\n%+v\n", exp, got) } - // Check event on topic l := cli.TopicEventsLink(tcpTopic) expTopicEvents := client.TopicEvents{ @@ -12055,7 +12056,7 @@ stream t.Fatal(err) } if !reflect.DeepEqual(te, expTopicEvents) { - t.Errorf("unexpected topic events for publish topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents) + t.Fatalf("unexpected topic events for publish topic:\ngot\n%+v\nexp\n%+v\n", te, expTopicEvents) } } @@ -12157,19 +12158,9 @@ alert,host=serverB value=0 0000000004 // Topic should have must recent event l := cli.TopicEventsLink(topic) expTopicEvents := client.TopicEvents{ - Link: l, - Topic: topic, - Events: []client.TopicEvent{{ - Link: client.Link{Relation: client.Self, Href: fmt.Sprintf("/kapacitor/v1/alerts/topics/%s/events/id", topic)}, - ID: "id", - State: client.EventState{ - Message: "message", - Details: "details", - Time: time.Date(1970, 1, 1, 0, 0, 4, 0, time.UTC), - Duration: client.Duration(time.Second), - Level: "OK", - }, - }}, + Link: l, + Topic: topic, + Events: []client.TopicEvent{ /* Level OK alerts are not stored any more with the V2 topic store */ }, } te, err := cli.ListTopicEvents(l, nil) diff --git a/services/alert/alerttest/alerttest.go b/services/alert/alerttest/alerttest.go index ff8e0f973..462643a21 100644 --- a/services/alert/alerttest/alerttest.go +++ b/services/alert/alerttest/alerttest.go @@ -2,15 +2,21 @@ package alerttest import ( "encoding/json" + "fmt" + "math/rand" "net" "net/http" "net/http/httptest" "os" + "strings" "sync" + "time" + "github.com/influxdata/influxdb/pkg/errors" "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/command" "github.com/influxdata/kapacitor/command/commandtest" + salert "github.com/influxdata/kapacitor/services/alert" ) type Log struct { @@ -23,12 +29,12 @@ func NewLog(p string) *Log { } } -func (l *Log) Data() ([]alert.Data, error) { +func (l *Log) Data() (d []alert.Data, rErr error) { f, err := os.Open(l.path) if err != nil { return nil, err } - defer f.Close() + defer errors.Capture(&rErr, f.Close)() dec := json.NewDecoder(f) var data []alert.Data for dec.More() { @@ -158,3 +164,124 @@ func (s *PostServer) Close() { s.closed = true s.ts.Close() } + +// helpers + +type EventStateSpec struct { + N int + Mwc int // message word count + Dwc int // details word count +} + +func makeSentence(seededRand *rand.Rand, n int) string { + s := make([]string, n) + for i := 0; i < n; i++ { + s[i] = words[seededRand.Int31n(int32(len(words)))] + } + return strings.Join(s, " ") +} + +func MakeEventStates(s EventStateSpec) map[string]salert.EventState { + // Force rand sequence to be deterministic. 
+ seededRand := rand.New(rand.NewSource(int64(s.N))) + + es := make(map[string]salert.EventState, s.N) + for i := 0; i < s.N; i++ { + es[fmt.Sprintf("event_state_id_%d", i)] = salert.EventState{ + Message: makeSentence(seededRand, s.Mwc), + Details: makeSentence(seededRand, s.Dwc), + Time: time.Unix(0, int64(i*1e9)), + Duration: time.Duration((i * int(time.Millisecond)) % 10 * int(time.Second)), + Level: alert.Level(i % 0x3), // assumes levels 0-3 + } + } + return es +} + +var words = [...]string{ + "lorem", "ipsum", "dolor", "sit", "amet", "consectetuer", "adipiscing", "elit", "integer", "in", "mi", "a", "mauris", + "ornare", "sagittis", "suspendisse", "potenti", "suspendisse", "dapibus", "dignissim", "dolor", "nam", + "sapien", "tellus", "tempus", "et", "tempus", "ac", "tincidunt", "in", "arcu", "duis", "dictum", "proin", "magna", + "nulla", "pellentesque", "non", "commodo", "et", "iaculis", "sit", "amet", "mi", "mauris", "condimentum", "massa", + "ut", "metus", "donec", "viverra", "sapien", "mattis", "rutrum", "tristique", "lacus", "eros", "semper", "tellus", + "et", "molestie", "nisi", "sapien", "eu", "massa", "vestibulum", "ante", "ipsum", "primis", "in", "faucibus", "orci", + "luctus", "et", "ultrices", "posuere", "cubilia", "curae", "fusce", "erat", "tortor", "mollis", "ut", "accumsan", + "ut", "lacinia", "gravida", "libero", "curabitur", "massa", "felis", "accumsan", "feugiat", "convallis", "sit", + "amet", "porta", "vel", "neque", "duis", "et", "ligula", "non", "elit", "ultricies", "rutrum", "suspendisse", + "tempor", "quisque", "posuere", "malesuada", "velit", "sed", "pellentesque", "mi", "a", "purus", "integer", + "imperdiet", "orci", "a", "eleifend", "mollis", "velit", "nulla", "iaculis", "arcu", "eu", "rutrum", "magna", "quam", + "sed", "elit", "nullam", "egestas", "integer", "interdum", "purus", "nec", "mauris", "vestibulum", "ac", "mi", "in", + "nunc", "suscipit", "dapibus", "duis", "consectetuer", "ipsum", "et", "pharetra", "sollicitudin", "metus", + "turpis", "facilisis", "magna", "vitae", "dictum", "ligula", "nulla", "nec", "mi", "nunc", "ante", "urna", "gravida", + "sit", "amet", "congue", "et", "accumsan", "vitae", "magna", "praesent", "luctus", "nullam", "in", "velit", + "praesent", "est", "curabitur", "turpis", "class", "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", + "per", "conubia", "nostra", "per", "inceptos", "hymenaeos", "cras", "consectetuer", "nibh", "in", "lacinia", + "ornare", "turpis", "sem", "tempor", "massa", "sagittis", "feugiat", "mauris", "nibh", "non", "tellus", + "phasellus", "mi", "fusce", "enim", "mauris", "ultrices", "turpis", "eu", "adipiscing", "viverra", "justo", + "libero", "ullamcorper", "massa", "id", "ultrices", "velit", "est", "quis", "tortor", "quisque", "condimentum", + "lacus", "volutpat", "nonummy", "accumsan", "est", "nunc", "imperdiet", "magna", "vulputate", "aliquet", "nisi", + "risus", "at", "est", "aliquam", "imperdiet", "gravida", "tortor", "praesent", "interdum", "accumsan", "ante", + "vivamus", "est", "ligula", "consequat", "sed", "pulvinar", "eu", "consequat", "vitae", "eros", "nulla", "elit", + "nunc", "congue", "eget", "scelerisque", "a", "tempor", "ac", "nisi", "morbi", "facilisis", "pellentesque", + "habitant", "morbi", "tristique", "senectus", "et", "netus", "et", "malesuada", "fames", "ac", "turpis", "egestas", + "in", "hac", "habitasse", "platea", "dictumst", "suspendisse", "vel", "lorem", "ut", "ligula", "tempor", + "consequat", "quisque", "consectetuer", "nisl", "eget", "elit", "proin", "quis", "mauris", 
"ac", "orci", + "accumsan", "suscipit", "sed", "ipsum", "sed", "vel", "libero", "nec", "elit", "feugiat", "blandit", "vestibulum", + "purus", "nulla", "accumsan", "et", "volutpat", "at", "pellentesque", "vel", "urna", "suspendisse", "nonummy", + "aliquam", "pulvinar", "libero", "donec", "vulputate", "orci", "ornare", "bibendum", "condimentum", "lorem", + "elit", "dignissim", "sapien", "ut", "aliquam", "nibh", "augue", "in", "turpis", "phasellus", "ac", "eros", + "praesent", "luctus", "lorem", "a", "mollis", "lacinia", "leo", "turpis", "commodo", "sem", "in", "lacinia", "mi", + "quam", "et", "quam", "curabitur", "a", "libero", "vel", "tellus", "mattis", "imperdiet", "in", "congue", "neque", "ut", + "scelerisque", "bibendum", "libero", "lacus", "ullamcorper", "sapien", "quis", "aliquet", "massa", "velit", + "vel", "orci", "fusce", "in", "nulla", "quis", "est", "cursus", "gravida", "in", "nibh", "lorem", "ipsum", "dolor", "sit", + "amet", "consectetuer", "adipiscing", "elit", "integer", "fermentum", "pretium", "massa", "morbi", "feugiat", + "iaculis", "nunc", "aenean", "aliquam", "pretium", "orci", "cum", "sociis", "natoque", "penatibus", "et", "magnis", + "dis", "parturient", "montes", "nascetur", "ridiculus", "mus", "vivamus", "quis", "tellus", "vel", "quam", + "varius", "bibendum", "fusce", "est", "metus", "feugiat", "at", "porttitor", "et", "cursus", "quis", "pede", "nam", "ut", + "augue", "nulla", "posuere", "phasellus", "at", "dolor", "a", "enim", "cursus", "vestibulum", "duis", "id", "nisi", + "duis", "semper", "tellus", "ac", "nulla", "vestibulum", "scelerisque", "lobortis", "dolor", "aenean", "a", + "felis", "aliquam", "erat", "volutpat", "donec", "a", "magna", "vitae", "pede", "sagittis", "lacinia", "cras", + "vestibulum", "diam", "ut", "arcu", "mauris", "a", "nunc", "duis", "sollicitudin", "erat", "sit", "amet", "turpis", + "proin", "at", "libero", "eu", "diam", "lobortis", "fermentum", "nunc", "lorem", "turpis", "imperdiet", "id", + "gravida", "eget", "aliquet", "sed", "purus", "ut", "vehicula", "laoreet", "ante", "mauris", "eu", "nunc", "sed", "sit", + "amet", "elit", "nec", "ipsum", "aliquam", "egestas", "donec", "non", "nibh", "cras", "sodales", "pretium", "massa", + "praesent", "hendrerit", "est", "et", "risus", "vivamus", "eget", "pede", "curabitur", "tristique", + "scelerisque", "dui", "nullam", "ullamcorper", "vivamus", "venenatis", "velit", "eget", "enim", "nunc", "eu", + "nunc", "eget", "felis", "malesuada", "fermentum", "quisque", "magna", "mauris", "ligula", "felis", "luctus", "a", + "aliquet", "nec", "vulputate", "eget", "magna", "quisque", "placerat", "diam", "sed", "arcu", "praesent", + "sollicitudin", "aliquam", "non", "sapien", "quisque", "id", "augue", "class", "aptent", "taciti", "sociosqu", + "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", "hymenaeos", "etiam", "lacus", "lectus", + "mollis", "quis", "mattis", "nec", "commodo", "facilisis", "nibh", "sed", "sodales", "sapien", "ac", "ante", "duis", + "eget", "lectus", "in", "nibh", "lacinia", "auctor", "fusce", "interdum", "lectus", "non", "dui", "integer", + "accumsan", "quisque", "quam", "curabitur", "scelerisque", "imperdiet", "nisl", "suspendisse", "potenti", + "nam", "massa", "leo", "iaculis", "sed", "accumsan", "id", "ultrices", "nec", "velit", "suspendisse", "potenti", + "mauris", "bibendum", "turpis", "ac", "viverra", "sollicitudin", "metus", "massa", "interdum", "orci", "non", + "imperdiet", "orci", "ante", "at", "ipsum", "etiam", "eget", "magna", "mauris", "at", "tortor", "eu", 
"lectus", + "tempor", "tincidunt", "phasellus", "justo", "purus", "pharetra", "ut", "ultricies", "nec", "consequat", "vel", + "nisi", "fusce", "vitae", "velit", "at", "libero", "sollicitudin", "sodales", "aenean", "mi", "libero", "ultrices", + "id", "suscipit", "vitae", "dapibus", "eu", "metus", "aenean", "vestibulum", "nibh", "ac", "massa", "vivamus", + "vestibulum", "libero", "vitae", "purus", "in", "hac", "habitasse", "platea", "dictumst", "curabitur", + "blandit", "nunc", "non", "arcu", "ut", "nec", "nibh", "morbi", "quis", "leo", "vel", "magna", "commodo", "rhoncus", + "donec", "congue", "leo", "eu", "lacus", "pellentesque", "at", "erat", "id", "mi", "consequat", "congue", "praesent", + "a", "nisl", "ut", "diam", "interdum", "molestie", "fusce", "suscipit", "rhoncus", "sem", "donec", "pretium", + "aliquam", "molestie", "vivamus", "et", "justo", "at", "augue", "aliquet", "dapibus", "pellentesque", "felis", + "morbi", "semper", "in", "venenatis", "imperdiet", "neque", "donec", "auctor", "molestie", "augue", "nulla", "id", + "arcu", "sit", "amet", "dui", "lacinia", "convallis", "proin", "tincidunt", "proin", "a", "ante", "nunc", "imperdiet", + "augue", "nullam", "sit", "amet", "arcu", "quisque", "laoreet", "viverra", "felis", "lorem", "ipsum", "dolor", "sit", + "amet", "consectetuer", "adipiscing", "elit", "in", "hac", "habitasse", "platea", "dictumst", "pellentesque", + "habitant", "morbi", "tristique", "senectus", "et", "netus", "et", "malesuada", "fames", "ac", "turpis", "egestas", + "class", "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", + "hymenaeos", "nullam", "nibh", "sapien", "volutpat", "ut", "placerat", "quis", "ornare", "at", "lorem", "class", + "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", + "hymenaeos", "morbi", "dictum", "massa", "id", "libero", "ut", "neque", "phasellus", "tincidunt", "nibh", "ut", + "tincidunt", "lacinia", "lacus", "nulla", "aliquam", "mi", "a", "interdum", "dui", "augue", "non", "pede", "duis", + "nunc", "magna", "vulputate", "a", "porta", "at", "tincidunt", "a", "nulla", "praesent", "facilisis", + "suspendisse", "sodales", "feugiat", "purus", "cras", "et", "justo", "a", "mauris", "mollis", "imperdiet", "morbi", + "erat", "mi", "ultrices", "eget", "aliquam", "elementum", "iaculis", "id", "velit", "in", "scelerisque", "enim", + "sit", "amet", "turpis", "sed", "aliquam", "odio", "nonummy", "ullamcorper", "mollis", "lacus", "nibh", "tempor", + "dolor", "sit", "amet", "varius", "sem", "neque", "ac", "dui", "nunc", "et", "est", "eu", "massa", "eleifend", "mollis", + "mauris", "aliquet", "orci", "quis", "tellus", "ut", "mattis", "praesent", "mollis", "consectetuer", "quam", + "nulla", "nulla", "nunc", "accumsan", "nunc", "sit", "amet", "scelerisque", "porttitor", "nibh", "pede", "lacinia", + "justo", "tristique", "mattis", "purus", "eros", "non", "velit", "aenean", "sagittis", "commodo", "erat", + "aliquam", "id", "lacus", "morbi", "vulputate", "vestibulum", "elit", +} diff --git a/services/alert/api.go b/services/alert/api.go index b08f2469d..b1986d6db 100644 --- a/services/alert/api.go +++ b/services/alert/api.go @@ -298,6 +298,7 @@ func (s *apiServer) handleListEvents(topic string, w http.ResponseWriter, r *htt httpd.HttpError(w, fmt.Sprintf("failed to get topic events: %s", err.Error()), true, http.StatusInternalServerError) return } + res := client.TopicEvents{ Link: s.topicEventsLink(topic, client.Self), Topic: topic, diff --git 
a/services/alert/dao.go b/services/alert/dao.go index ef3995435..3db31cfc3 100644 --- a/services/alert/dao.go +++ b/services/alert/dao.go @@ -9,9 +9,10 @@ import ( "regexp" "time" + "github.com/mailru/easyjson/jlexer" + "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/services/storage" - "github.com/mailru/easyjson/jlexer" "github.com/pkg/errors" ) @@ -24,7 +25,7 @@ var ( type HandlerSpecDAO interface { // Retrieve a handler Get(topic, id string) (HandlerSpec, error) - GetTx(tx storage.ReadOnlyTx, topic, id string) (HandlerSpec, error) + GetTx(tx storage.ReadOperator, topic, id string) (HandlerSpec, error) // Create a handler. // ErrHandlerSpecExists is returned if a handler already exists with the same ID. @@ -46,7 +47,7 @@ type HandlerSpecDAO interface { // Offset and limit are pagination bounds. Offset is inclusive starting at index 0. // More results may exist while the number of returned items is equal to limit. List(topic, pattern string, offset, limit int) ([]HandlerSpec, error) - ListTx(tx storage.ReadOnlyTx, topic, pattern string, offset, limit int) ([]HandlerSpec, error) + ListTx(tx storage.ReadOperator, topic, pattern string, offset, limit int) ([]HandlerSpec, error) Rebuild() error } @@ -152,7 +153,8 @@ func (kv *handlerSpecKV) error(err error) error { func (kv *handlerSpecKV) Get(topic, id string) (HandlerSpec, error) { return kv.getHelper(kv.store.Get(fullID(topic, id))) } -func (kv *handlerSpecKV) GetTx(tx storage.ReadOnlyTx, topic, id string) (HandlerSpec, error) { + +func (kv *handlerSpecKV) GetTx(tx storage.ReadOperator, topic, id string) (HandlerSpec, error) { return kv.getHelper(kv.store.GetTx(tx, fullID(topic, id))) } @@ -194,7 +196,7 @@ func (kv *handlerSpecKV) List(topic, pattern string, offset, limit int) ([]Handl } return kv.listHelper(kv.store.List(storage.DefaultIDIndex, fullID(topic, pattern), offset, limit)) } -func (kv *handlerSpecKV) ListTx(tx storage.ReadOnlyTx, topic, pattern string, offset, limit int) ([]HandlerSpec, error) { +func (kv *handlerSpecKV) ListTx(tx storage.ReadOperator, topic, pattern string, offset, limit int) ([]HandlerSpec, error) { if pattern == "" { pattern = "*" } @@ -223,27 +225,6 @@ var ( ErrNoTopicStateExists = errors.New("no topic state exists") ) -// Data access object for TopicState data. -type TopicStateDAO interface { - // Retrieve a handler - Get(id string) (TopicState, error) - - // Put a topic state, replaces any existing state. - Put(h TopicState) error - - // Delete a handler. - // It is not an error to delete an non-existent handler. - Delete(id string) error - - // List handlers matching a pattern. - // The pattern is shell/glob matching see https://golang.org/pkg/path/#Match - // Offset and limit are pagination bounds. Offset is inclusive starting at index 0. - // More results may exist while the number of returned items is equal to limit. 
- List(pattern string, offset, limit int) ([]TopicState, error) - - Rebuild() error -} - const topicStateVersion = 1 //easyjson:json @@ -254,13 +235,32 @@ type TopicState struct { //easyjson:json type EventState struct { - Message string `json:"message"` - Details string `json:"details"` - Time time.Time `json:"time"` - Duration time.Duration `json:"duration"` + Message string `json:"message,omitempty"` + Details string `json:"details,omitempty"` + Time time.Time `json:"time,omitempty"` + Duration time.Duration `json:"duration,omitempty"` Level alert.Level `json:"level"` } +func (e *EventState) Reset() { + e.Message = "" + e.Details = "" + e.Time = time.Time{} + e.Duration = 0 + e.Level = 0 +} + +func (e *EventState) AlertEventState(id string) *alert.EventState { + return &alert.EventState{ + ID: id, + Message: e.Message, + Details: e.Details, + Time: e.Time, + Duration: e.Duration, + Level: e.Level, + } +} + func (t TopicState) ObjectID() string { return t.Topic } @@ -276,13 +276,25 @@ func (t *TopicState) UnmarshalBinary(data []byte) error { }) } +type TopicStateDAO interface { + Get(id string) (TopicState, error) + Put(t TopicState) error + Replace(t TopicState) error + Delete(id string) error + List(pattern string, offset, limit int) ([]TopicState, error) + Rebuild() error + DeleteMultiple(keys []string) error +} + // Key/Value store based implementation of the TopicStateDAO type topicStateKV struct { store *storage.IndexedStore } -func newTopicStateKV(store storage.Interface) (*topicStateKV, error) { - c := storage.DefaultIndexedStoreConfig("topics", func() storage.BinaryObject { +const topicStateKVPrefix = "topics" + +func NewTopicStateKV(store storage.Interface) (*topicStateKV, error) { + c := storage.DefaultIndexedStoreConfig(topicStateKVPrefix, func() storage.BinaryObject { return new(TopicState) }) istore, err := storage.NewIndexedStore(store, c) @@ -344,3 +356,21 @@ func (kv *topicStateKV) List(pattern string, offset, limit int) ([]TopicState, e func (kv *topicStateKV) Rebuild() error { return kv.store.Rebuild() } + +func (kv *topicStateKV) DeleteMultiple(keys []string) error { + err := kv.store.Store().Update(func(tx storage.Tx) error { + for _, tk := range keys { + if err := kv.store.DeleteTx(tx, tk); err != nil { + return fmt.Errorf("cannot delete topic %q: %w", tk, err) + } + } + return nil + }) + if err != nil { + return err + } + if err = kv.Rebuild(); err != nil { + return fmt.Errorf("cannot rebuild topic store index: %w", err) + } + return nil +} diff --git a/services/alert/dao_easyjson.go b/services/alert/dao_easyjson.go index 9b13b26f3..32012c3c3 100644 --- a/services/alert/dao_easyjson.go +++ b/services/alert/dao_easyjson.go @@ -168,29 +168,50 @@ func easyjson7be57abeEncodeGithubComInfluxdataKapacitorServicesAlert1(out *jwrit out.RawByte('{') first := true _ = first - { + if in.Message != "" { const prefix string = ",\"message\":" + first = false out.RawString(prefix[1:]) out.String(string(in.Message)) } - { + if in.Details != "" { const prefix string = ",\"details\":" - out.RawString(prefix) + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } out.String(string(in.Details)) } - { + if true { const prefix string = ",\"time\":" - out.RawString(prefix) + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } out.Raw((in.Time).MarshalJSON()) } - { + if in.Duration != 0 { const prefix string = ",\"duration\":" - out.RawString(prefix) + if first { + first = false + out.RawString(prefix[1:]) + 
} else { + out.RawString(prefix) + } out.Int64(int64(in.Duration)) } { const prefix string = ",\"level\":" - out.RawString(prefix) + if first { + first = false + out.RawString(prefix[1:]) + } else { + out.RawString(prefix) + } out.RawText((in.Level).MarshalText()) } out.RawByte('}') diff --git a/services/alert/dao_test.go b/services/alert/dao_test.go index c5ec43528..b16393f83 100644 --- a/services/alert/dao_test.go +++ b/services/alert/dao_test.go @@ -2,13 +2,9 @@ package alert_test import ( "fmt" - "math/rand" - "strings" - "testing" - "time" - - kalert "github.com/influxdata/kapacitor/alert" "github.com/influxdata/kapacitor/services/alert" + "github.com/influxdata/kapacitor/services/alert/alerttest" + "testing" ) func BenchmarkTopicState_MarshalBinary(b *testing.B) { @@ -24,17 +20,28 @@ func BenchmarkTopicState_MarshalBinary(b *testing.B) { var ts alert.TopicState ts.Topic = "topics/test/default" - ts.EventStates = makeEventStates(eventStateSpec{n: bm.n, mwc: 5, dwc: 15}) - - data, _ := ts.MarshalBinary() - b.SetBytes(int64(len(data))) - data = nil + ts.EventStates = alerttest.MakeEventStates(alerttest.EventStateSpec{N: bm.n, Mwc: 5, Dwc: 15}) + + totalMarshalBytes := int64(0) + for k := range ts.EventStates { + data, err := ts.EventStates[k].MarshalJSON() + if err != nil { + panic(err) + } + totalMarshalBytes += int64(len(data)) + } + b.SetBytes(totalMarshalBytes) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - ts.MarshalBinary() + for k := range ts.EventStates { + _, err := ts.EventStates[k].MarshalJSON() + if err != nil { + panic(err) + } + } } }) } @@ -53,139 +60,32 @@ func BenchmarkTopicState_UnmarshalBinary(b *testing.B) { var ts alert.TopicState ts.Topic = "topics/test/default" - ts.EventStates = makeEventStates(eventStateSpec{n: bm.n, mwc: 5, dwc: 15}) - - data, _ := ts.MarshalBinary() - ts = alert.TopicState{} + ts.EventStates = alerttest.MakeEventStates(alerttest.EventStateSpec{N: bm.n, Mwc: 5, Dwc: 15}) + + marshaled := make([][]byte, 0, len(ts.EventStates)) + totalMarshalBytes := int64(0) + for k := range ts.EventStates { + data, err := ts.EventStates[k].MarshalJSON() + if err != nil { + panic(err) + } + totalMarshalBytes += int64(len(data)) + marshaled = append(marshaled, data) + } - b.SetBytes(int64(len(data))) + b.SetBytes(totalMarshalBytes) b.ReportAllocs() b.ResetTimer() for i := 0; i < b.N; i++ { - var ts alert.TopicState - ts.UnmarshalBinary(data) + for _, d := range marshaled { + e := alert.EventState{} + err := e.UnmarshalJSON(d) + if err != nil { + panic(err) + } + } } }) } } - -// helpers - -type eventStateSpec struct { - n int - mwc int // message word count - dwc int // details word count -} - -func makeEventStates(s eventStateSpec) map[string]alert.EventState { - rand.Seed(int64(s.n)) // force `n` to be deterministic - - es := make(map[string]alert.EventState, s.n) - for i := 0; i < s.n; i++ { - es[fmt.Sprintf("event_state_id_%d", i)] = alert.EventState{ - Message: makeSentence(s.mwc), - Details: makeSentence(s.dwc), - Time: time.Unix(0, int64(i*1e9)), - Duration: time.Duration((i * int(time.Millisecond)) % 10 * int(time.Second)), - Level: kalert.Level(i % 0x3), // assumes levels 0-3 - } - } - return es -} - -func makeSentence(n int) string { - s := make([]string, n) - for i := 0; i < n; i++ { - s[i] = words[rand.Int31n(int32(len(words)))] - } - return strings.Join(s, " ") -} - -var words = [...]string{ - "lorem", "ipsum", "dolor", "sit", "amet", "consectetuer", "adipiscing", "elit", "integer", "in", "mi", "a", "mauris", - "ornare", 
"sagittis", "suspendisse", "potenti", "suspendisse", "dapibus", "dignissim", "dolor", "nam", - "sapien", "tellus", "tempus", "et", "tempus", "ac", "tincidunt", "in", "arcu", "duis", "dictum", "proin", "magna", - "nulla", "pellentesque", "non", "commodo", "et", "iaculis", "sit", "amet", "mi", "mauris", "condimentum", "massa", - "ut", "metus", "donec", "viverra", "sapien", "mattis", "rutrum", "tristique", "lacus", "eros", "semper", "tellus", - "et", "molestie", "nisi", "sapien", "eu", "massa", "vestibulum", "ante", "ipsum", "primis", "in", "faucibus", "orci", - "luctus", "et", "ultrices", "posuere", "cubilia", "curae", "fusce", "erat", "tortor", "mollis", "ut", "accumsan", - "ut", "lacinia", "gravida", "libero", "curabitur", "massa", "felis", "accumsan", "feugiat", "convallis", "sit", - "amet", "porta", "vel", "neque", "duis", "et", "ligula", "non", "elit", "ultricies", "rutrum", "suspendisse", - "tempor", "quisque", "posuere", "malesuada", "velit", "sed", "pellentesque", "mi", "a", "purus", "integer", - "imperdiet", "orci", "a", "eleifend", "mollis", "velit", "nulla", "iaculis", "arcu", "eu", "rutrum", "magna", "quam", - "sed", "elit", "nullam", "egestas", "integer", "interdum", "purus", "nec", "mauris", "vestibulum", "ac", "mi", "in", - "nunc", "suscipit", "dapibus", "duis", "consectetuer", "ipsum", "et", "pharetra", "sollicitudin", "metus", - "turpis", "facilisis", "magna", "vitae", "dictum", "ligula", "nulla", "nec", "mi", "nunc", "ante", "urna", "gravida", - "sit", "amet", "congue", "et", "accumsan", "vitae", "magna", "praesent", "luctus", "nullam", "in", "velit", - "praesent", "est", "curabitur", "turpis", "class", "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", - "per", "conubia", "nostra", "per", "inceptos", "hymenaeos", "cras", "consectetuer", "nibh", "in", "lacinia", - "ornare", "turpis", "sem", "tempor", "massa", "sagittis", "feugiat", "mauris", "nibh", "non", "tellus", - "phasellus", "mi", "fusce", "enim", "mauris", "ultrices", "turpis", "eu", "adipiscing", "viverra", "justo", - "libero", "ullamcorper", "massa", "id", "ultrices", "velit", "est", "quis", "tortor", "quisque", "condimentum", - "lacus", "volutpat", "nonummy", "accumsan", "est", "nunc", "imperdiet", "magna", "vulputate", "aliquet", "nisi", - "risus", "at", "est", "aliquam", "imperdiet", "gravida", "tortor", "praesent", "interdum", "accumsan", "ante", - "vivamus", "est", "ligula", "consequat", "sed", "pulvinar", "eu", "consequat", "vitae", "eros", "nulla", "elit", - "nunc", "congue", "eget", "scelerisque", "a", "tempor", "ac", "nisi", "morbi", "facilisis", "pellentesque", - "habitant", "morbi", "tristique", "senectus", "et", "netus", "et", "malesuada", "fames", "ac", "turpis", "egestas", - "in", "hac", "habitasse", "platea", "dictumst", "suspendisse", "vel", "lorem", "ut", "ligula", "tempor", - "consequat", "quisque", "consectetuer", "nisl", "eget", "elit", "proin", "quis", "mauris", "ac", "orci", - "accumsan", "suscipit", "sed", "ipsum", "sed", "vel", "libero", "nec", "elit", "feugiat", "blandit", "vestibulum", - "purus", "nulla", "accumsan", "et", "volutpat", "at", "pellentesque", "vel", "urna", "suspendisse", "nonummy", - "aliquam", "pulvinar", "libero", "donec", "vulputate", "orci", "ornare", "bibendum", "condimentum", "lorem", - "elit", "dignissim", "sapien", "ut", "aliquam", "nibh", "augue", "in", "turpis", "phasellus", "ac", "eros", - "praesent", "luctus", "lorem", "a", "mollis", "lacinia", "leo", "turpis", "commodo", "sem", "in", "lacinia", "mi", - "quam", "et", "quam", "curabitur", "a", "libero", 
"vel", "tellus", "mattis", "imperdiet", "in", "congue", "neque", "ut", - "scelerisque", "bibendum", "libero", "lacus", "ullamcorper", "sapien", "quis", "aliquet", "massa", "velit", - "vel", "orci", "fusce", "in", "nulla", "quis", "est", "cursus", "gravida", "in", "nibh", "lorem", "ipsum", "dolor", "sit", - "amet", "consectetuer", "adipiscing", "elit", "integer", "fermentum", "pretium", "massa", "morbi", "feugiat", - "iaculis", "nunc", "aenean", "aliquam", "pretium", "orci", "cum", "sociis", "natoque", "penatibus", "et", "magnis", - "dis", "parturient", "montes", "nascetur", "ridiculus", "mus", "vivamus", "quis", "tellus", "vel", "quam", - "varius", "bibendum", "fusce", "est", "metus", "feugiat", "at", "porttitor", "et", "cursus", "quis", "pede", "nam", "ut", - "augue", "nulla", "posuere", "phasellus", "at", "dolor", "a", "enim", "cursus", "vestibulum", "duis", "id", "nisi", - "duis", "semper", "tellus", "ac", "nulla", "vestibulum", "scelerisque", "lobortis", "dolor", "aenean", "a", - "felis", "aliquam", "erat", "volutpat", "donec", "a", "magna", "vitae", "pede", "sagittis", "lacinia", "cras", - "vestibulum", "diam", "ut", "arcu", "mauris", "a", "nunc", "duis", "sollicitudin", "erat", "sit", "amet", "turpis", - "proin", "at", "libero", "eu", "diam", "lobortis", "fermentum", "nunc", "lorem", "turpis", "imperdiet", "id", - "gravida", "eget", "aliquet", "sed", "purus", "ut", "vehicula", "laoreet", "ante", "mauris", "eu", "nunc", "sed", "sit", - "amet", "elit", "nec", "ipsum", "aliquam", "egestas", "donec", "non", "nibh", "cras", "sodales", "pretium", "massa", - "praesent", "hendrerit", "est", "et", "risus", "vivamus", "eget", "pede", "curabitur", "tristique", - "scelerisque", "dui", "nullam", "ullamcorper", "vivamus", "venenatis", "velit", "eget", "enim", "nunc", "eu", - "nunc", "eget", "felis", "malesuada", "fermentum", "quisque", "magna", "mauris", "ligula", "felis", "luctus", "a", - "aliquet", "nec", "vulputate", "eget", "magna", "quisque", "placerat", "diam", "sed", "arcu", "praesent", - "sollicitudin", "aliquam", "non", "sapien", "quisque", "id", "augue", "class", "aptent", "taciti", "sociosqu", - "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", "hymenaeos", "etiam", "lacus", "lectus", - "mollis", "quis", "mattis", "nec", "commodo", "facilisis", "nibh", "sed", "sodales", "sapien", "ac", "ante", "duis", - "eget", "lectus", "in", "nibh", "lacinia", "auctor", "fusce", "interdum", "lectus", "non", "dui", "integer", - "accumsan", "quisque", "quam", "curabitur", "scelerisque", "imperdiet", "nisl", "suspendisse", "potenti", - "nam", "massa", "leo", "iaculis", "sed", "accumsan", "id", "ultrices", "nec", "velit", "suspendisse", "potenti", - "mauris", "bibendum", "turpis", "ac", "viverra", "sollicitudin", "metus", "massa", "interdum", "orci", "non", - "imperdiet", "orci", "ante", "at", "ipsum", "etiam", "eget", "magna", "mauris", "at", "tortor", "eu", "lectus", - "tempor", "tincidunt", "phasellus", "justo", "purus", "pharetra", "ut", "ultricies", "nec", "consequat", "vel", - "nisi", "fusce", "vitae", "velit", "at", "libero", "sollicitudin", "sodales", "aenean", "mi", "libero", "ultrices", - "id", "suscipit", "vitae", "dapibus", "eu", "metus", "aenean", "vestibulum", "nibh", "ac", "massa", "vivamus", - "vestibulum", "libero", "vitae", "purus", "in", "hac", "habitasse", "platea", "dictumst", "curabitur", - "blandit", "nunc", "non", "arcu", "ut", "nec", "nibh", "morbi", "quis", "leo", "vel", "magna", "commodo", "rhoncus", - "donec", "congue", "leo", "eu", "lacus", 
"pellentesque", "at", "erat", "id", "mi", "consequat", "congue", "praesent", - "a", "nisl", "ut", "diam", "interdum", "molestie", "fusce", "suscipit", "rhoncus", "sem", "donec", "pretium", - "aliquam", "molestie", "vivamus", "et", "justo", "at", "augue", "aliquet", "dapibus", "pellentesque", "felis", - "morbi", "semper", "in", "venenatis", "imperdiet", "neque", "donec", "auctor", "molestie", "augue", "nulla", "id", - "arcu", "sit", "amet", "dui", "lacinia", "convallis", "proin", "tincidunt", "proin", "a", "ante", "nunc", "imperdiet", - "augue", "nullam", "sit", "amet", "arcu", "quisque", "laoreet", "viverra", "felis", "lorem", "ipsum", "dolor", "sit", - "amet", "consectetuer", "adipiscing", "elit", "in", "hac", "habitasse", "platea", "dictumst", "pellentesque", - "habitant", "morbi", "tristique", "senectus", "et", "netus", "et", "malesuada", "fames", "ac", "turpis", "egestas", - "class", "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", - "hymenaeos", "nullam", "nibh", "sapien", "volutpat", "ut", "placerat", "quis", "ornare", "at", "lorem", "class", - "aptent", "taciti", "sociosqu", "ad", "litora", "torquent", "per", "conubia", "nostra", "per", "inceptos", - "hymenaeos", "morbi", "dictum", "massa", "id", "libero", "ut", "neque", "phasellus", "tincidunt", "nibh", "ut", - "tincidunt", "lacinia", "lacus", "nulla", "aliquam", "mi", "a", "interdum", "dui", "augue", "non", "pede", "duis", - "nunc", "magna", "vulputate", "a", "porta", "at", "tincidunt", "a", "nulla", "praesent", "facilisis", - "suspendisse", "sodales", "feugiat", "purus", "cras", "et", "justo", "a", "mauris", "mollis", "imperdiet", "morbi", - "erat", "mi", "ultrices", "eget", "aliquam", "elementum", "iaculis", "id", "velit", "in", "scelerisque", "enim", - "sit", "amet", "turpis", "sed", "aliquam", "odio", "nonummy", "ullamcorper", "mollis", "lacus", "nibh", "tempor", - "dolor", "sit", "amet", "varius", "sem", "neque", "ac", "dui", "nunc", "et", "est", "eu", "massa", "eleifend", "mollis", - "mauris", "aliquet", "orci", "quis", "tellus", "ut", "mattis", "praesent", "mollis", "consectetuer", "quam", - "nulla", "nulla", "nunc", "accumsan", "nunc", "sit", "amet", "scelerisque", "porttitor", "nibh", "pede", "lacinia", - "justo", "tristique", "mattis", "purus", "eros", "non", "velit", "aenean", "sagittis", "commodo", "erat", - "aliquam", "id", "lacus", "morbi", "vulputate", "vestibulum", "elit", -} diff --git a/services/alert/easyjson-bootstrap2172438621.go b/services/alert/easyjson-bootstrap2172438621.go new file mode 100644 index 000000000..cb7e08e79 --- /dev/null +++ b/services/alert/easyjson-bootstrap2172438621.go @@ -0,0 +1,27 @@ +//go:build ignore +// +build ignore + +// TEMPORARY AUTOGENERATED FILE: easyjson bootstapping code to launch +// the actual generator. 
+ +package main + +import ( + "fmt" + "os" + + "github.com/mailru/easyjson/gen" + + pkg "github.com/influxdata/kapacitor/services/alert" +) + +func main() { + g := gen.NewGenerator("dao_easyjson.go") + g.SetPkg("alert", "github.com/influxdata/kapacitor/services/alert") + g.Add(pkg.EasyJSON_exporter_EventState(nil)) + g.Add(pkg.EasyJSON_exporter_TopicState(nil)) + if err := g.Run(os.Stdout); err != nil { + fmt.Fprintln(os.Stderr, err) + os.Exit(1) + } +} diff --git a/services/alert/migrate_topic_store.go b/services/alert/migrate_topic_store.go new file mode 100644 index 000000000..2e4071cd9 --- /dev/null +++ b/services/alert/migrate_topic_store.go @@ -0,0 +1,221 @@ +package alert + +import ( + "fmt" + "io" + "os" + "strconv" + + errors2 "github.com/influxdata/influxdb/pkg/errors" + "github.com/influxdata/kapacitor/keyvalue" + "github.com/influxdata/kapacitor/services/storage" + "github.com/pkg/errors" +) + +const ( + TopicStoreVersionKey = "topic_store_version" + TopicStoreVersion2 = "2" + TopicStoreBackupSuffix = ".v1.bak" +) + +// MigrateTopicStoreV1V2 - Convert a V1 to a V2 topic store. +// Also ensures that a topic store has a V2 version number set. +func (s *Service) MigrateTopicStoreV1V2() (rErr error) { + version, err := s.StorageService.Versions().Get(TopicStoreVersionKey) + if err != nil && !errors.Is(err, storage.ErrNoKeyExists) { + return fmt.Errorf("cannot determine topic store version: %w", err) + } + if version == TopicStoreVersion2 { + s.diag.Info(fmt.Sprintf("Topic Store is already version %s. Cannot upgrade.", TopicStoreVersion2)) + return nil + } + + backup := s.StorageService.Path() + TopicStoreBackupSuffix + var n int64 + if n, err = CopyFile(s.StorageService.Path(), backup); err != nil { + return fmt.Errorf("cannot backup v1 topic store: %w", err) + } + s.diag.Info("backup file created", keyvalue.T{Key: "bytes", Value: strconv.FormatInt(n, 10)}) + + defer func() { + // Remove the backup as the last thing + // Either on failure it was restored as the BoltDB + // or on success it was unneeded. + if bErr := os.RemoveAll(backup); bErr != nil { + // Log error removing the backup file, but do not return them as failures + s.diag.Error("failed to remove backup file", bErr, keyvalue.T{Key: "backup", Value: backup}) + } + }() + + topicsDAO, err := NewTopicStateKV(s.StorageService.Store(AlertNameSpace)) + if err != nil { + return fmt.Errorf("cannot open version 1 topic store: %w", err) + } + + offset := 0 + const limit = 100 + + topicKeys := make([]string, 0, limit) + err = s.StorageService.Store(TopicStatesNameSpace).Update(func(txV2 storage.Tx) error { + for { + topicStates, err := topicsDAO.List("", offset, limit) + if err != nil { + return fmt.Errorf("cannot read version 1 topic store: %w", err) + } + for _, ts := range topicStates { + topicKeys = append(topicKeys, ts.Topic) + txBucket := txV2.Bucket([]byte(ts.Topic)) + for id, es := range ts.EventStates { + data, err := es.MarshalJSON() + if err != nil { + return fmt.Errorf("error converting event %q in topic %q to JSON: %w", id, ts.Topic, err) + } + if err = txBucket.Put(id, data); err != nil { + return fmt.Errorf("cannot store event %q in topic %q: %w", id, ts.Topic, err) + } + } + } + offset += limit + if len(topicStates) != limit { + break + } + } + return nil + }) + if err != nil { + // Okay to leave here without the backup being restored because the Bolt transaction should roll back. + return err + } + + // If the upgrade fails, restore the backup we took. 
Do not do this earlier + // because if the transaction to write the V2 data failed, the DB should be unchanged. + defer func() { + if rErr != nil { + err := s.StorageService.CloseBolt() + if err != nil { + // log restoration error, but do not overwrite the original error + s.diag.Error("failed to close kapacitor database to restore backup", err) + } + if err = os.Rename(backup, s.StorageService.Path()); err != nil { + s.diag.Error("failed to restore kapacitor backup database", err) + } + } + }() + + if err = topicsDAO.DeleteMultiple(topicKeys); err != nil { + return err + } + + if err = s.StorageService.Versions().Set(TopicStoreVersionKey, TopicStoreVersion2); err != nil { + return fmt.Errorf("cannot set topic store version to %s: %w", TopicStoreVersion2, err) + } + s.diag.Info("Topic Store updated", keyvalue.T{Key: "version", Value: TopicStoreVersion2}) + return nil +} + +func MigrateTopicStoreV2V1(storageService StorageService) error { + version, err := storageService.Versions().Get(TopicStoreVersionKey) + if err != nil && !errors.Is(err, storage.ErrNoKeyExists) { + return fmt.Errorf("cannot determine topic store version: %w", err) + } + if errors.Is(err, storage.ErrNoKeyExists) || (version != TopicStoreVersion2) { + // V1 has no version number + msg := fmt.Sprintf("Topic Store is not version %s, but version %s. Cannot downgrade.", TopicStoreVersion2, version) + storageService.Diagnostic().Error(msg, errors.New("wrong version")) + return nil + } + + topicsDAO, err := NewTopicStateKV(storageService.Store(AlertNameSpace)) + if err != nil { + return fmt.Errorf("cannot create version 1 topic store: %w", err) + } + + topicsStore := storageService.Store(TopicStatesNameSpace) + + topics := make([]TopicState, 0, 100) + err = WalkTopicBuckets(topicsStore, func(tx storage.ReadOnlyTx, topic string) error { + eventStates, err := LoadTopicBucket(tx, []byte(topic)) + if err != nil { + return fmt.Errorf("cannot load topic %q: %w", topic, err) + } + topics = append(topics, TopicState{Topic: topic, EventStates: eventStates}) + return nil + }) + if err != nil { + return err + } + + for i := range topics { + if err = topicsDAO.Put(topics[i]); err != nil { + return fmt.Errorf("cannot save topic %q: %w", topics[i].Topic, err) + } + } + + if err = DeleteV2TopicStore(topicsStore); err != nil { + return err + } + if err = storageService.Versions().Set(TopicStoreVersionKey, ""); err != nil { + return fmt.Errorf("cannot set topic store version to %s after upgrade: %w", TopicStoreVersion2, err) + } + storageService.Diagnostic().Info("Topic Store upgraded", keyvalue.T{Key: "version", Value: TopicStoreVersion2}) + return nil +} + +func DeleteV2TopicStore(topicsStore storage.Interface) error { + return topicsStore.Update(func(txV2 storage.Tx) error { + kv, err := txV2.List("") + if err != nil { + return fmt.Errorf("cannot retrieve version 2 topic list: %w", err) + } + + for _, b := range kv { + if b == nil { + continue + } + if err = txV2.Delete(b.Key); err != nil { + return fmt.Errorf("cannot delete topic %q: %w", b.Key, err) + + } + } + return nil + }) +} + +func LoadTopicBucket(tx storage.ReadOnlyTx, topic []byte) (map[string]EventState, error) { + q, err := tx.Bucket(topic).List("") + if err != nil { + return nil, fmt.Errorf("cannot load topic %q: %w", topic, err) + } + EventStates := make(map[string]EventState, len(q)) + es := &EventState{} //create a buffer to hold the unmarshalled EventState + for _, b := range q { + err = es.UnmarshalJSON(b.Value) + if err != nil { + return nil, fmt.Errorf("cannot unmarshal 
an event in topic %q: %w", topic, err) + } + EventStates[b.Key] = *es + es.Reset() + } + return EventStates, nil +} + +func CopyFile(src, dest string) (n int64, err error) { + fIn, err := os.OpenFile(src, os.O_RDONLY, 0) + if err != nil { + return 0, fmt.Errorf("cannot open %q: %w", src, err) + } + defer errors2.Capture(&err, fIn.Close)() + + fOut, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0600) + if err != nil { + return 0, fmt.Errorf("cannot create %q: %w", dest, err) + } + defer errors2.Capture(&err, fOut.Close)() + + if n, err = io.Copy(fOut, fIn); err != nil { + return 0, fmt.Errorf("cannot copy %q to %q: %w", src, dest, err) + } else { + + return n, nil + } +} diff --git a/services/alert/service.go b/services/alert/service.go index 23305d8c5..a6573fb2c 100644 --- a/services/alert/service.go +++ b/services/alert/service.go @@ -1,6 +1,7 @@ package alert import ( + "bytes" "encoding" "encoding/json" "fmt" @@ -36,6 +37,7 @@ import ( "github.com/influxdata/kapacitor/services/telegram" "github.com/influxdata/kapacitor/services/victorops" "github.com/influxdata/kapacitor/services/zenoss" + "github.com/mailru/easyjson/jlexer" "github.com/mitchellh/mapstructure" "github.com/pkg/errors" ) @@ -50,13 +52,25 @@ type Diagnostic interface { MigratingOldHandlerSpec(id string) Error(msg string, err error, ctx ...keyvalue.T) + Info(msg string, ctx ...keyvalue.T) +} + +type StorageService interface { + Store(namespace string) storage.Interface + Register(name string, store storage.StoreActioner) + Versions() storage.Versions + Diagnostic() storage.Diagnostic + Path() string + CloseBolt() error } type Service struct { - mu sync.RWMutex - disabled map[string]struct{} - specsDAO HandlerSpecDAO - topicsDAO TopicStateDAO + mu sync.RWMutex + disabled map[string]struct{} + // Handler store API + specsDAO HandlerSpecDAO + // V2 topic store + topicsStore storage.Interface PersistTopics bool APIServer *apiServer @@ -75,11 +89,7 @@ type Service struct { DelRoutes([]httpd.Route) } - StorageService interface { - Store(namespace string) storage.Interface - Register(name string, store storage.StoreActioner) - Versions() storage.Versions - } + StorageService StorageService Commander command.Commander @@ -176,30 +186,26 @@ func NewService(d Diagnostic, disabled map[string]struct{}, topicBufLen int) *Se const ( // Public name of the handler specs store. handlerSpecsAPIName = "handler-specs" - // Public name of the handler specs store. - topicStatesAPIName = "topic-states" - // The storage namespace for all task data. - alertNamespace = "alert_store" + // The storage namespace V1 topic store and task data. 
+ // In V2, still stores handlers + AlertNameSpace = "alert_store" + // TopicStatesNameSpace - The storage namespace for the V2 topic store and nothing else + TopicStatesNameSpace = "topic_states_store" ) func (s *Service) Open() error { s.mu.Lock() defer s.mu.Unlock() - // Create DAO - store := s.StorageService.Store(alertNamespace) + store := s.StorageService.Store(AlertNameSpace) specsDAO, err := newHandlerSpecKV(store) if err != nil { return err } s.specsDAO = specsDAO s.StorageService.Register(handlerSpecsAPIName, s.specsDAO) - topicsDAO, err := newTopicStateKV(store) - if err != nil { - return err - } - s.topicsDAO = topicsDAO - s.StorageService.Register(topicStatesAPIName, s.topicsDAO) + s.topicsStore = s.StorageService.Store(TopicStatesNameSpace) + // NOTE: since the topics store doesn't use the indexing store, we don't need to register the api // Migrate v1.2 handlers if err := s.migrateHandlerSpecs(store); err != nil { @@ -211,6 +217,10 @@ func (s *Service) Open() error { return err } + if err := s.MigrateTopicStoreV1V2(); err != nil { + return err + } + // Load saved topic state if err := s.loadSavedTopicStates(); err != nil { return err @@ -377,15 +387,8 @@ func (s *Service) loadSavedHandlerSpecs() error { return nil } -func (s *Service) convertEventStatesToAlert(states map[string]EventState) map[string]alert.EventState { - newStates := make(map[string]alert.EventState, len(states)) - for id, state := range states { - newStates[id] = s.convertEventStateToAlert(id, state) - } - return newStates -} -func (s *Service) convertEventStateToAlert(id string, state EventState) alert.EventState { - return alert.EventState{ +func convertEventStateToAlert(id string, state *EventState) *alert.EventState { + return &alert.EventState{ ID: id, Message: state.Message, Details: state.Details, @@ -395,16 +398,8 @@ func (s *Service) convertEventStateToAlert(id string, state EventState) alert.Ev } } -func (s *Service) convertEventStatesFromAlert(states map[string]alert.EventState) map[string]EventState { - newStates := make(map[string]EventState, len(states)) - for id, state := range states { - newStates[id] = s.convertEventStateFromAlert(state) - } - return newStates -} - -func (s *Service) convertEventStateFromAlert(state alert.EventState) EventState { - return EventState{ +func convertEventStateFromAlert(state alert.EventState) *EventState { + return &EventState{ Message: state.Message, Details: state.Details, Time: state.Time, @@ -414,24 +409,54 @@ func (s *Service) convertEventStateFromAlert(state alert.EventState) EventState } func (s *Service) loadSavedTopicStates() error { - offset := 0 - limit := 100 - for { - topicStates, err := s.topicsDAO.List("", offset, limit) + buf := bytes.Buffer{} + return WalkTopicBuckets(s.topicsStore, func(tx storage.ReadOnlyTx, topic string) error { + _, _ = buf.WriteString(topic) // WriteString error is always nil + eventStates, err := s.loadConvertTopicBucket(tx, buf.Bytes()) if err != nil { return err } + s.topics.RestoreTopicNoCopy(topic, eventStates) + buf.Reset() + return nil + }) +} - for _, ts := range topicStates { - s.topics.RestoreTopic(ts.Topic, s.convertEventStatesToAlert(ts.EventStates)) +func WalkTopicBuckets(topicsStore storage.Interface, fn func(tx storage.ReadOnlyTx, topic string) error) error { + return topicsStore.View(func(tx storage.ReadOnlyTx) error { + kv, err := tx.List("") + if err != nil { + return fmt.Errorf("cannot retrieve topic list: %w", err) } - offset += limit - if len(topicStates) != limit { - break + for _, b := range kv { + 
if b == nil { + continue + } + if err = fn(tx, b.Key); err != nil { + return err + } } + return nil + }) +} + +func (s *Service) loadConvertTopicBucket(tx storage.ReadOnlyTx, topic []byte) (map[string]*alert.EventState, error) { + q, err := tx.Bucket(topic).List("") + if err != nil { + return nil, err } - return nil + eventstates := make(map[string]*alert.EventState, len(q)) + es := &EventState{} //create a buffer to hold the unmarshalled EventState + for _, b := range q { + err = es.UnmarshalJSON(b.Value) + if err != nil { + return nil, err + } + eventstates[b.Key] = convertEventStateToAlert(b.Key, es) + es.Reset() + } + return eventstates, nil } func validatePattern(pattern string) error { @@ -462,25 +487,54 @@ func (s *Service) Collect(event alert.Event) error { if err != nil { return err } - return s.persistTopicState(event.Topic) + // Events with alert.OK status should always only be resets from other statuses. + if event.State.Level == alert.OK && s.PersistTopics { + if err := s.clearHistory(&event); err != nil { + return fmt.Errorf("failed to clear event history for topic %q: %w", event.Topic, err) + } else { + return nil + } + } else { + return s.persistEventState(event) + } } -func (s *Service) persistTopicState(topic string) error { +func (s *Service) persistEventState(event alert.Event) error { if !s.PersistTopics { return nil } - t, ok := s.topics.Topic(topic) - if !ok { + if _, ok := s.topics.Topic(event.Topic); !ok { // Topic was deleted since event was collected, nothing to do. return nil } - ts := TopicState{ - Topic: topic, - EventStates: s.convertEventStatesFromAlert(t.EventStates(alert.OK)), - } - return s.topicsDAO.Put(ts) + return s.topicsStore.Update(func(tx storage.Tx) error { + tx = tx.Bucket([]byte(event.Topic)) + if tx == nil { + return nil + } + data, err := convertEventStateFromAlert(event.State).MarshalJSON() + if err != nil { + return fmt.Errorf("cannot marshal event %q in topic %q: %w", event.State.ID, event.Topic, err) + } + return tx.Put(event.State.ID, data) + }) +} + +func (s *Service) clearHistory(event *alert.Event) error { + // clear on-disk EventStates, but leave the in-memory history + return s.topicsStore.Update(func(tx storage.Tx) error { + tx = tx.Bucket([]byte(event.Topic)) + if tx == nil { + return nil + } + // Clear previous alert on recovery reset/recovery. + if err := tx.Delete(event.State.ID); err != nil { + return fmt.Errorf("cannot delete alert %q in topic %q on reset: %w", event.State.ID, event.Topic, err) + } + return nil + }) } func (s *Service) restoreClosedTopic(topic string) error { @@ -501,13 +555,29 @@ func (s *Service) restoreClosedTopic(topic string) error { // restoreTopic restores a topic's state from the storage and registers any handlers. // Caller must have lock to call. 
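+// In the V2 layout each topic is its own bucket keyed by event ID, so restoration
+// is a single read-only transaction that decodes each stored event state with
+// easyjson and hands the resulting map to the in-memory topics without copying.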
func (s *Service) restoreTopic(topic string) error { - // Restore events state from storage - ts, err := s.topicsDAO.Get(topic) - if err != nil && err != ErrNoTopicStateExists { + err := s.topicsStore.View(func(tx storage.ReadOnlyTx) error { + q, err := tx.Bucket([]byte(topic)).List("") + if err != nil { + return err + } + eventStates := make(map[string]*alert.EventState, len(q)) + lex := jlexer.Lexer{} + es := &EventState{} //create a buffer to hold the unmarshalled EventState + for _, b := range q { + lex.Data = b.Value + es.UnmarshalEasyJSON(&lex) + if err := lex.Error(); err != nil { + return err + } + eventStates[b.Key] = es.AlertEventState(b.Key) + es.Reset() + } + s.topics.RestoreTopicNoCopy(topic, eventStates) + return nil + }) + if err != nil { return err - } else if err != ErrNoTopicStateExists { - s.topics.RestoreTopic(topic, s.convertEventStatesToAlert(ts.EventStates)) - } // else nothing to restore + } // Re-Register all handlers for _, h := range s.handlers[topic] { @@ -530,8 +600,7 @@ func (s *Service) CloseTopic(topic string) error { s.topics.DeleteTopic(topic) s.closedTopics[topic] = true - // Save the final topic state - return s.persistTopicState(topic) + return nil } func (s *Service) DeleteTopic(topic string) error { @@ -539,12 +608,17 @@ func (s *Service) DeleteTopic(topic string) error { defer s.mu.Unlock() delete(s.closedTopics, topic) s.topics.DeleteTopic(topic) - return s.topicsDAO.Delete(topic) + return s.topicsStore.Update(func(tx storage.Tx) error { + return tx.Delete(topic) + }) } func (s *Service) UpdateEvent(topic string, event alert.EventState) error { s.topics.UpdateEvent(topic, event) - return s.persistTopicState(topic) + return s.persistEventState(alert.Event{ + Topic: topic, + State: event, + }) } func (s *Service) RegisterAnonHandler(topic string, h alert.Handler) { @@ -680,6 +754,7 @@ func (s *Service) TopicStates(pattern string, minLevel alert.Level) (map[string] // EventState returns the current state of the event. 
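+// The lookup is served from the in-memory topics; a missing topic reports
+// no state rather than an error.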
func (s *Service) EventState(topic, event string) (alert.EventState, bool, error) { t, ok := s.topics.Topic(topic) + if !ok { return alert.EventState{}, false, nil } diff --git a/services/config/service_test.go b/services/config/service_test.go index 7a6093f25..36ca59d50 100644 --- a/services/config/service_test.go +++ b/services/config/service_test.go @@ -53,10 +53,10 @@ type TestConfig struct { SectionCs []SectionC `override:"section-c,element-key=name"` } -func OpenNewSerivce(testConfig interface{}, updates chan<- config.ConfigUpdate) (*config.Service, *httpdtest.Server) { +func OpenNewService(t *testing.T, testConfig interface{}, updates chan<- config.ConfigUpdate) (*config.Service, *httpdtest.Server) { c := config.NewConfig() service := config.NewService(c, testConfig, diagService.NewConfigOverrideHandler(), updates) - service.StorageService = storagetest.New() + service.StorageService = storagetest.New(t, diagService.NewStorageHandler()) server := httpdtest.NewServer(testing.Verbose()) service.HTTPDService = server if err := service.Open(); err != nil { @@ -187,7 +187,7 @@ func TestService_UpdateSection(t *testing.T) { }, } updates := make(chan config.ConfigUpdate, len(testCases)) - service, server := OpenNewSerivce(testConfig, updates) + service, server := OpenNewService(t, testConfig, updates) defer server.Close() defer service.Close() basePath := server.Server.URL + httpd.BasePath + "/config" @@ -1035,7 +1035,7 @@ func TestService_GetConfig(t *testing.T) { } for i, tc := range testCases { updates := make(chan config.ConfigUpdate, len(testCases)) - service, server := OpenNewSerivce(testConfig, updates) + service, server := OpenNewService(t, testConfig, updates) defer server.Close() defer service.Close() basePath := server.Server.URL + httpd.BasePath + "/config" diff --git a/services/diagnostic/handlers.go b/services/diagnostic/handlers.go index 8b8d72a7b..8fe606938 100644 --- a/services/diagnostic/handlers.go +++ b/services/diagnostic/handlers.go @@ -182,6 +182,10 @@ func (h *AlertServiceHandler) Error(msg string, err error, ctx ...keyvalue.T) { Err(h.L, msg, err, ctx) } +func (h *AlertServiceHandler) Info(msg string, ctx ...keyvalue.T) { + Info(h.L, msg, ctx) +} + // Kapcitor Handler type KapacitorHandler struct { @@ -665,6 +669,11 @@ func (h *StorageHandler) Error(msg string, err error) { h.l.Error(msg, Error(err)) } +func (h *StorageHandler) Info(msg string, ctx ...keyvalue.T) { + fields := logFieldsFromContext(ctx) + h.l.Info(msg, fields...) +} + // TaskStore Handler type TaskStoreHandler struct { diff --git a/services/scraper/service.go b/services/scraper/service.go index 0b0597cee..048edaabe 100644 --- a/services/scraper/service.go +++ b/services/scraper/service.go @@ -68,6 +68,8 @@ func (a *appendable) Appender(ctx context.Context) storage.Appender { return a.svc } +var scrapemanagerLock = sync.Mutex{} // promethius calling scrape.NewManager is not concurrency safe. 
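+// NewService below holds scrapemanagerLock around the scrape.NewManager call so
+// that scraper services constructed concurrently do not race inside Prometheus.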
+ // NewService creates a new scraper service func NewService(c []Config, d Diagnostic) *Service { s := &Service{ @@ -77,8 +79,9 @@ func NewService(c []Config, d Diagnostic) *Service { var ctxScrape context.Context ctxScrape, s.cancelScrape = context.WithCancel(context.Background()) s.discoveryManager = discovery.NewManager(ctxScrape, d, discovery.Name("discoveryScrapeManager")) + scrapemanagerLock.Lock() s.scrapeManager = scrape.NewManager(d, &appendable{&ServiceAppenderAdapter{s}}) - + scrapemanagerLock.Unlock() return s } diff --git a/services/storage/api.go b/services/storage/api.go index 8c39a1e42..c056f8436 100644 --- a/services/storage/api.go +++ b/services/storage/api.go @@ -24,7 +24,7 @@ const ( ) type APIServer struct { - Registrar StoreActionerRegistrar + Registrar *StoreActionerRegistrar DB *bolt.DB routes []httpd.Route diag Diagnostic diff --git a/services/storage/bolt.go b/services/storage/bolt.go index 92c16618d..862ce1112 100644 --- a/services/storage/bolt.go +++ b/services/storage/bolt.go @@ -9,13 +9,35 @@ import ( // Bolt implementation of Store type Bolt struct { db *bolt.DB - bucket []byte + bucket [][]byte } -func NewBolt(db *bolt.DB, bucket string) *Bolt { +func NewBolt(db *bolt.DB, bucket ...[]byte) *Bolt { return &Bolt{ db: db, - bucket: []byte(bucket), + bucket: bucket, + } +} + +// Bucket tells the Bolt to do following actions in a bucket. A nil bucket will return a *Bolt that is targeted to the root bucket. +func (b *Bolt) Bucket(bucket []byte) *Bolt { + if bucket == nil { + return &Bolt{ + db: b.db, + bucket: nil, + } + } + return &Bolt{ + db: b.db, + bucket: append(b.bucket, bucket), + } +} + +// Store tells the Bolt to do following actions in a bucket. A nil bucket will return a *Bolt that is targeted to the root bucket. +func (b *Bolt) Store(buckets ...[]byte) Interface { + return &Bolt{ + db: b.db, + bucket: buckets, } } @@ -28,15 +50,17 @@ func (b *Bolt) Update(f func(tx Tx) error) error { } func (b *Bolt) put(tx *bolt.Tx, key string, value []byte) error { - bucket, err := tx.CreateBucketIfNotExists(b.bucket) + bucket, err := tx.CreateBucketIfNotExists(b.bucket[0]) if err != nil { return err } - err = bucket.Put([]byte(key), value) - if err != nil { - return err + for _, buckName := range b.bucket[1:] { + bucket, err = bucket.CreateBucketIfNotExists(buckName) + if err != nil { + return err + } } - return nil + return bucket.Put([]byte(key), value) } func (b *Bolt) Put(key string, value []byte) error { @@ -46,11 +70,10 @@ func (b *Bolt) Put(key string, value []byte) error { } func (b *Bolt) get(tx *bolt.Tx, key string) (*KeyValue, error) { - bucket := tx.Bucket(b.bucket) + bucket := b.bucketHelper(tx) if bucket == nil { return nil, ErrNoKeyExists } - val := bucket.Get([]byte(key)) if val == nil { return nil, ErrNoKeyExists @@ -71,12 +94,28 @@ func (b *Bolt) Get(key string) (kv *KeyValue, err error) { return } +// Delete removes a key from a bolt. If the key is a bucket, it removes that. 
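+// A key that names a nested bucket is removed with DeleteBucket, a plain key is
+// removed with Delete, and deleting a key that does not exist is a no-op.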
func (b *Bolt) delete(tx *bolt.Tx, key string) error { - bucket := tx.Bucket(b.bucket) + bucket := b.bucketHelper(tx) if bucket == nil { return nil } - return bucket.Delete([]byte(key)) + cursor := bucket.Cursor() + if cursor == nil { + return nil + } + // handling for buckets + bkey := []byte(key) + k, v := cursor.Seek(bkey) + if key != string(k) { + return nil + } + if v == nil { + return bucket.DeleteBucket(bkey) + } + // handling for regular keys + return bucket.Delete(bkey) + } func (b *Bolt) Delete(key string) error { @@ -86,7 +125,7 @@ func (b *Bolt) Delete(key string) error { } func (b *Bolt) exists(tx *bolt.Tx, key string) (bool, error) { - bucket := tx.Bucket(b.bucket) + bucket := b.bucketHelper(tx) if bucket == nil { return false, nil } @@ -103,22 +142,53 @@ func (b *Bolt) Exists(key string) (exists bool, err error) { return } -func (b *Bolt) list(tx *bolt.Tx, prefixStr string) (kvs []*KeyValue, err error) { - bucket := tx.Bucket(b.bucket) +func (b *Bolt) bucketHelper(tx *bolt.Tx) *bolt.Bucket { + if len(b.bucket) == 0 { + return tx.Cursor().Bucket() //grab root bucket + } // get the right bucket + bucket := tx.Bucket(b.bucket[0]) if bucket == nil { - return + return nil + } + for _, buckName := range b.bucket[1:] { + bucket = bucket.Bucket(buckName) + if bucket == nil { + return nil + } } + return bucket +} - cursor := bucket.Cursor() - prefix := []byte(prefixStr) +// cursor returns a cursor at the appropriate bucket or nil if that bucket doesn't exist +func (b *Bolt) cursor(tx *bolt.Tx) *bolt.Cursor { + if len(b.bucket) == 0 { + return tx.Cursor() //grab root bucket + } // get the right bucket + bucket := tx.Bucket(b.bucket[0]) + if bucket == nil { + return nil + } + for _, buckName := range b.bucket[1:] { + bucket = bucket.Bucket(buckName) + if bucket == nil { + return nil + } + } + return bucket.Cursor() +} - for key, v := cursor.Seek(prefix); bytes.HasPrefix(key, prefix); key, v = cursor.Next() { - value := make([]byte, len(v)) - copy(value, v) +func (b *Bolt) list(tx *bolt.Tx, prefixStr string) (kvs []*KeyValue, err error) { + cursor := b.cursor(tx) + if cursor == nil { + return nil, nil // no objects returned + } + prefix := []byte(prefixStr) + for key, v := cursor.Seek(prefix); key != nil && bytes.HasPrefix(key, prefix); key, v = cursor.Next() { + // we want to be able to grab buckets AND keys here kvs = append(kvs, &KeyValue{ Key: string(key), - Value: value, + Value: append([]byte(nil), v...), }) } return @@ -137,7 +207,11 @@ func (b *Bolt) BeginTx() (Tx, error) { } func (b *Bolt) BeginReadOnlyTx() (ReadOnlyTx, error) { - return b.newTx(false) + tx, err := b.newTx(false) + if err != nil { + return nil, err + } + return &boltTXReadOnly{*tx}, nil } func (b *Bolt) newTx(write bool) (*boltTx, error) { @@ -151,12 +225,36 @@ func (b *Bolt) newTx(write bool) (*boltTx, error) { }, nil } +type boltTXReadOnly struct { + boltTx +} + +func (t *boltTXReadOnly) Bucket(name []byte) ReadOnlyTx { + return &boltTXReadOnly{ + boltTx{ + b: t.b.Bucket(name), + tx: t.tx, + }} +} + // BoltTx wraps an underlying bolt.Tx type to implement the Tx interface. type boltTx struct { b *Bolt tx *bolt.Tx } +// Cursor TODO (DSB): Is this even remotely correct? 
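+// It returns a cursor positioned at the bucket this transaction is scoped to,
+// or nil when that bucket has not been created yet.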
+func (t *boltTx) Cursor() *bolt.Cursor { + return t.b.cursor(t.tx) +} + +func (t *boltTx) Bucket(name []byte) Tx { + return &boltTx{ + b: t.b.Bucket(name), + tx: t.tx, + } +} + func (t *boltTx) Get(key string) (*KeyValue, error) { return t.b.get(t.tx, key) } diff --git a/services/storage/indexed.go b/services/storage/indexed.go index f68052d6e..324d1bc21 100644 --- a/services/storage/indexed.go +++ b/services/storage/indexed.go @@ -130,6 +130,10 @@ func NewIndexedStore(store Interface, c IndexedStoreConfig) (*IndexedStore, erro }, nil } +func (s *IndexedStore) Store() Interface { + return s.store +} + // Create a key for the object data func (s *IndexedStore) dataKey(id string) string { return s.dataPrefix + id @@ -155,7 +159,7 @@ func (s *IndexedStore) Get(id string) (o BinaryObject, err error) { return } -func (s *IndexedStore) GetTx(tx ReadOnlyTx, id string) (BinaryObject, error) { +func (s *IndexedStore) GetTx(tx ReadOperator, id string) (BinaryObject, error) { key := s.dataKey(id) if exists, err := tx.Exists(key); err != nil { return nil, err @@ -306,7 +310,7 @@ func (s *IndexedStore) List(index, pattern string, offset, limit int) (objects [ }) return } -func (s *IndexedStore) ListTx(tx ReadOnlyTx, index, pattern string, offset, limit int) ([]BinaryObject, error) { +func (s *IndexedStore) ListTx(tx ReadOperator, index, pattern string, offset, limit int) ([]BinaryObject, error) { return s.list(tx, index, pattern, offset, limit, false) } @@ -323,7 +327,7 @@ func (s *IndexedStore) ReverseListTx(tx ReadOnlyTx, index, pattern string, offse return s.list(tx, index, pattern, offset, limit, true) } -func (s *IndexedStore) list(tx ReadOnlyTx, index, pattern string, offset, limit int, reverse bool) ([]BinaryObject, error) { +func (s *IndexedStore) list(tx ReadOperator, index, pattern string, offset, limit int, reverse bool) ([]BinaryObject, error) { // List all object ids sorted by index ids, err := tx.List(s.indexKey(index, "") + "/") if err != nil { diff --git a/services/storage/indexed_test.go b/services/storage/indexed_test.go index b5cc5ef2b..17b6fe2ba 100644 --- a/services/storage/indexed_test.go +++ b/services/storage/indexed_test.go @@ -6,6 +6,8 @@ import ( "testing" "time" + "github.com/influxdata/kapacitor/services/storage/storagetest" + "github.com/davecgh/go-spew/spew" "github.com/influxdata/kapacitor/services/storage" ) @@ -29,192 +31,190 @@ func (o *object) UnmarshalBinary(data []byte) error { } func TestIndexedStore_CRUD(t *testing.T) { - for name, sc := range stores { - t.Run(name, func(t *testing.T) { - db, err := sc() - if err != nil { - t.Fatal(err) - } - defer db.Close() - - s := db.Store("crud") - c := storage.DefaultIndexedStoreConfig("crud", func() storage.BinaryObject { - return new(object) - }) - c.Indexes = append(c.Indexes, storage.Index{ - Name: "date", - ValueFunc: func(o storage.BinaryObject) (string, error) { - obj, ok := o.(*object) - if !ok { - return "", storage.ImpossibleTypeErr(obj, o) - } - return obj.Date.UTC().Format(time.RFC3339), nil - }, - }) - is, err := storage.NewIndexedStore(s, c) - if err != nil { - t.Fatal(err) - } - - // Create new object - o1 := &object{ - ID: "1", - Value: "obj1", - Date: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC), - } - if err := is.Create(o1); err != nil { - t.Fatal(err) - } - if err := is.Create(o1); err != storage.ErrObjectExists { - t.Fatal("expected ErrObjectExists creating object1 got", err) - } - // Check o1 - got1, err := is.Get("1") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got1, o1) { - 
t.Errorf("unexpected object 1 retrieved:\ngot\n%s\nexp\n%s\n", spew.Sdump(got1), spew.Sdump(o1)) - } - // Check ID list - expIDList := []storage.BinaryObject{o1} - gotIDList, err := is.List("id", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotIDList, expIDList) { - t.Errorf("unexpected object list by ID:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) - } - // Check Date list - expDateList := []storage.BinaryObject{o1} - gotDateList, err := is.List("date", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotDateList, expDateList) { - t.Errorf("unexpected object list by Date:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) - } - - // Create second object, using put - o2 := &object{ - ID: "2", - Value: "obj2", - Date: time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC), - } - if err := is.Put(o2); err != nil { - t.Fatal(err) - } - if err := is.Create(o2); err != storage.ErrObjectExists { - t.Fatal("expected ErrObjectExists creating object2 got", err) - } - // Check o2 - got2, err := is.Get("2") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got2, o2) { - t.Errorf("unexpected object 2 retrieved:\ngot\n%s\nexp\n%s\n", spew.Sdump(got2), spew.Sdump(o2)) - } - // Check ID list - expIDList = []storage.BinaryObject{o1, o2} - gotIDList, err = is.List("id", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotIDList, expIDList) { - t.Errorf("unexpected object list by ID:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) - } - // Check Date list - expDateList = []storage.BinaryObject{o2, o1} - gotDateList, err = is.List("date", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotDateList, expDateList) { - t.Errorf("unexpected object list by Date:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) - } - - // Modify objects - o1.Value = "modified obj1" - is.Replace(o1) - o2.Date = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) - is.Put(o2) - - // Check o1 - got1, err = is.Get("1") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got1, o1) { - t.Errorf("unexpected object 1 retrieved after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(got1), spew.Sdump(o1)) - } - - // Check o2 - got2, err = is.Get("2") - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(got2, o2) { - t.Errorf("unexpected object 2 retrieved after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(got2), spew.Sdump(o2)) - } - - // Check ID list - expIDList = []storage.BinaryObject{o1, o2} - gotIDList, err = is.List("id", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotIDList, expIDList) { - t.Errorf("unexpected object list by ID after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) - } - // Check Date list - expDateList = []storage.BinaryObject{o1, o2} - gotDateList, err = is.List("date", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotDateList, expDateList) { - t.Errorf("unexpected object list by Date after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) - } - - // Delete object 2 - if err := is.Delete("2"); err != nil { - t.Fatal(err) - } - - // Check o2 - if _, err := is.Get("2"); err != storage.ErrNoObjectExists { - t.Error("expected ErrNoObjectExists for delete object 2, got:", err) - } - - // Check ID list - expIDList = []storage.BinaryObject{o1} - gotIDList, err = is.List("id", "", 0, 100) - if err != nil { 
- t.Fatal(err) - } - if !reflect.DeepEqual(gotIDList, expIDList) { - t.Errorf("unexpected object list by ID after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) - } - // Check Date list - expDateList = []storage.BinaryObject{o1} - gotDateList, err = is.List("date", "", 0, 100) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(gotDateList, expDateList) { - t.Errorf("unexpected object list by Date after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) - } - - // Try to replace non existent object - o3 := &object{ - ID: "3", - Value: "obj3", - Date: time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC), - } - if err := is.Replace(o3); err != storage.ErrNoObjectExists { - t.Error("expected error replacing non existent object, got:", err) - } + t.Run("bolt", func(t *testing.T) { + db, err := storagetest.NewBolt(t) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("crud") + c := storage.DefaultIndexedStoreConfig("crud", func() storage.BinaryObject { + return new(object) + }) + c.Indexes = append(c.Indexes, storage.Index{ + Name: "date", + ValueFunc: func(o storage.BinaryObject) (string, error) { + obj, ok := o.(*object) + if !ok { + return "", storage.ImpossibleTypeErr(obj, o) + } + return obj.Date.UTC().Format(time.RFC3339), nil + }, }) - } + is, err := storage.NewIndexedStore(s, c) + if err != nil { + t.Fatal(err) + } + + // Create new object + o1 := &object{ + ID: "1", + Value: "obj1", + Date: time.Date(2017, 1, 1, 0, 0, 0, 0, time.UTC), + } + if err := is.Create(o1); err != nil { + t.Fatal(err) + } + if err := is.Create(o1); err != storage.ErrObjectExists { + t.Fatal("expected ErrObjectExists creating object1 got", err) + } + // Check o1 + got1, err := is.Get("1") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got1, o1) { + t.Errorf("unexpected object 1 retrieved:\ngot\n%s\nexp\n%s\n", spew.Sdump(got1), spew.Sdump(o1)) + } + // Check ID list + expIDList := []storage.BinaryObject{o1} + gotIDList, err := is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList := []storage.BinaryObject{o1} + gotDateList, err := is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Create second object, using put + o2 := &object{ + ID: "2", + Value: "obj2", + Date: time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC), + } + if err := is.Put(o2); err != nil { + t.Fatal(err) + } + if err := is.Create(o2); err != storage.ErrObjectExists { + t.Fatal("expected ErrObjectExists creating object2 got", err) + } + // Check o2 + got2, err := is.Get("2") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got2, o2) { + t.Errorf("unexpected object 2 retrieved:\ngot\n%s\nexp\n%s\n", spew.Sdump(got2), spew.Sdump(o2)) + } + // Check ID list + expIDList = []storage.BinaryObject{o1, o2} + gotIDList, err = is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList = []storage.BinaryObject{o2, o1} + gotDateList, err = is.List("date", "", 
0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Modify objects + o1.Value = "modified obj1" + is.Replace(o1) + o2.Date = time.Date(2018, 1, 1, 0, 0, 0, 0, time.UTC) + is.Put(o2) + + // Check o1 + got1, err = is.Get("1") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got1, o1) { + t.Errorf("unexpected object 1 retrieved after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(got1), spew.Sdump(o1)) + } + + // Check o2 + got2, err = is.Get("2") + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(got2, o2) { + t.Errorf("unexpected object 2 retrieved after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(got2), spew.Sdump(o2)) + } + + // Check ID list + expIDList = []storage.BinaryObject{o1, o2} + gotIDList, err = is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList = []storage.BinaryObject{o1, o2} + gotDateList, err = is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Delete object 2 + if err := is.Delete("2"); err != nil { + t.Fatal(err) + } + + // Check o2 + if _, err := is.Get("2"); err != storage.ErrNoObjectExists { + t.Error("expected ErrNoObjectExists for delete object 2, got:", err) + } + + // Check ID list + expIDList = []storage.BinaryObject{o1} + gotIDList, err = is.List("id", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotIDList, expIDList) { + t.Errorf("unexpected object list by ID after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotIDList), spew.Sdump(expIDList)) + } + // Check Date list + expDateList = []storage.BinaryObject{o1} + gotDateList, err = is.List("date", "", 0, 100) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(gotDateList, expDateList) { + t.Errorf("unexpected object list by Date after modification:\ngot\n%s\nexp\n%s\n", spew.Sdump(gotDateList), spew.Sdump(expDateList)) + } + + // Try to replace non-existent object + o3 := &object{ + ID: "3", + Value: "obj3", + Date: time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC), + } + if err := is.Replace(o3); err != storage.ErrNoObjectExists { + t.Error("expected error replacing non existent object, got:", err) + } + }) } diff --git a/services/storage/mem.go b/services/storage/mem.go deleted file mode 100644 index f6b751280..000000000 --- a/services/storage/mem.go +++ /dev/null @@ -1,171 +0,0 @@ -package storage - -import ( - "fmt" - "sort" - "strings" - "sync" -) - -// MemStore is an in memory only implementation of the storage.Interface. -// This is intend to be used for testing use cases only. 
-type MemStore struct { - mu sync.Mutex - Name string - store map[string][]byte -} - -func NewMemStore(name string) *MemStore { - return &MemStore{ - Name: name, - store: make(map[string][]byte), - } -} - -func (s *MemStore) View(f func(tx ReadOnlyTx) error) error { - return DoView(s, f) -} - -func (s *MemStore) Update(f func(tx Tx) error) error { - return DoUpdate(s, f) -} - -func (s *MemStore) Put(key string, value []byte) error { - s.mu.Lock() - s.store[key] = value - s.mu.Unlock() - return nil -} - -func (s *MemStore) Get(key string) (*KeyValue, error) { - s.mu.Lock() - value, ok := s.store[key] - s.mu.Unlock() - if !ok { - return nil, ErrNoKeyExists - } - return &KeyValue{ - Key: key, - Value: value, - }, nil -} - -func (s *MemStore) Delete(key string) error { - s.mu.Lock() - delete(s.store, key) - s.mu.Unlock() - return nil -} - -func (s *MemStore) Exists(key string) (bool, error) { - s.mu.Lock() - _, ok := s.store[key] - s.mu.Unlock() - return ok, nil -} - -type keySortedKVs []*KeyValue - -func (s keySortedKVs) Len() int { return len(s) } -func (s keySortedKVs) Less(i int, j int) bool { return s[i].Key < s[j].Key } -func (s keySortedKVs) Swap(i int, j int) { s[i], s[j] = s[j], s[i] } - -func (s *MemStore) List(prefix string) ([]*KeyValue, error) { - s.mu.Lock() - kvs := make([]*KeyValue, 0, len(s.store)) - for k, v := range s.store { - if strings.HasPrefix(k, prefix) { - kvs = append(kvs, &KeyValue{Key: k, Value: v}) - } - } - s.mu.Unlock() - sort.Sort(keySortedKVs(kvs)) - return kvs, nil -} - -func (s *MemStore) BeginTx() (Tx, error) { - return s.newTx() -} - -func (s *MemStore) BeginReadOnlyTx() (ReadOnlyTx, error) { - return s.newTx() -} - -func (s *MemStore) newTx() (*memTx, error) { - // A Tx carries the lock, and must be committed or rolledback before another operation can continue. 
- s.mu.Lock() - store := make(map[string][]byte, len(s.store)) - for k, v := range s.store { - store[k] = v - } - return &memTx{ - m: s, - store: store, - }, nil -} - -type memTxState int - -const ( - unCommitted memTxState = iota - committed - rolledback -) - -type memTx struct { - state memTxState - m *MemStore - store map[string][]byte -} - -func (t *memTx) Get(key string) (*KeyValue, error) { - value, ok := t.store[key] - if !ok { - return nil, ErrNoKeyExists - } - return &KeyValue{Key: key, Value: value}, nil -} - -func (t *memTx) Exists(key string) (bool, error) { - _, ok := t.store[key] - return ok, nil -} - -func (t *memTx) List(prefix string) ([]*KeyValue, error) { - kvs := make([]*KeyValue, 0, len(t.store)) - for k, v := range t.store { - if strings.HasPrefix(k, prefix) { - kvs = append(kvs, &KeyValue{Key: k, Value: v}) - } - } - sort.Sort(keySortedKVs(kvs)) - return kvs, nil -} - -func (t *memTx) Put(key string, value []byte) error { - t.store[key] = value - return nil -} - -func (t *memTx) Delete(key string) error { - delete(t.store, key) - return nil -} - -func (t *memTx) Commit() error { - if t.state == unCommitted { - t.m.store = t.store - t.state = committed - t.m.mu.Unlock() - return nil - } - return fmt.Errorf("cannot commit transaction, transaction in state %v", t.state) -} - -func (t *memTx) Rollback() error { - if t.state == unCommitted { - t.state = rolledback - t.m.mu.Unlock() - } - return nil -} diff --git a/services/storage/registrar.go b/services/storage/registrar.go index c3c99e71e..7171b4709 100644 --- a/services/storage/registrar.go +++ b/services/storage/registrar.go @@ -8,24 +8,18 @@ type StoreActioner interface { Rebuild() error } -type StoreActionerRegistrar interface { - List() []string - Register(name string, store StoreActioner) - Get(name string) (StoreActioner, bool) -} - -func NewStorageResitrar() StoreActionerRegistrar { - return &storeActionerRegistrar{ +func NewStorageRegistrar() *StoreActionerRegistrar { + return &StoreActionerRegistrar{ stores: make(map[string]StoreActioner), } } -type storeActionerRegistrar struct { +type StoreActionerRegistrar struct { mu sync.RWMutex stores map[string]StoreActioner } -func (sr *storeActionerRegistrar) List() []string { +func (sr *StoreActionerRegistrar) List() []string { sr.mu.RLock() defer sr.mu.RUnlock() list := make([]string, 0, len(sr.stores)) @@ -35,13 +29,13 @@ func (sr *storeActionerRegistrar) List() []string { return list } -func (sr *storeActionerRegistrar) Register(name string, store StoreActioner) { +func (sr *StoreActionerRegistrar) Register(name string, store StoreActioner) { sr.mu.Lock() defer sr.mu.Unlock() sr.stores[name] = store } -func (sr *storeActionerRegistrar) Get(name string) (store StoreActioner, ok bool) { +func (sr *StoreActionerRegistrar) Get(name string) (store StoreActioner, ok bool) { sr.mu.RLock() defer sr.mu.RUnlock() store, ok = sr.stores[name] diff --git a/services/storage/service.go b/services/storage/service.go index 4d652c1bd..6431ac284 100644 --- a/services/storage/service.go +++ b/services/storage/service.go @@ -1,10 +1,12 @@ package storage import ( + "fmt" "os" "path" "sync" + "github.com/influxdata/kapacitor/keyvalue" "github.com/influxdata/kapacitor/services/httpd" "github.com/pkg/errors" bolt "go.etcd.io/bbolt" @@ -12,6 +14,7 @@ import ( type Diagnostic interface { Error(msg string, err error) + Info(msg string, ctx ...keyvalue.T) } type Service struct { @@ -21,7 +24,7 @@ type Service struct { stores map[string]Interface mu sync.Mutex - registrar 
StoreActionerRegistrar + registrar *StoreActionerRegistrar apiServer *APIServer versions Versions @@ -59,7 +62,7 @@ func (s *Service) Open() error { } s.boltdb = db - s.registrar = NewStorageResitrar() + s.registrar = NewStorageRegistrar() s.apiServer = &APIServer{ DB: s.boltdb, Registrar: s.registrar, @@ -90,6 +93,17 @@ func (s *Service) Close() error { return nil } +func (s *Service) CloseBolt() error { + s.mu.Lock() + defer s.mu.Unlock() + if s.boltdb != nil { + if err := s.boltdb.Close(); err != nil { + return fmt.Errorf("cannot close BoltDB: %w", err) + } + } + return nil +} + // Return a namespaced store. // Calling Store with the same namespace returns the same Store. func (s *Service) Store(name string) Interface { @@ -102,7 +116,7 @@ func (s *Service) store(name string) Interface { if store, ok := s.stores[name]; ok { return store } else { - store = NewBolt(s.boltdb, name) + store = NewBolt(s.boltdb, []byte(name)) s.stores[name] = store return store } @@ -115,3 +129,11 @@ func (s *Service) Versions() Versions { func (s *Service) Register(name string, store StoreActioner) { s.registrar.Register(name, store) } + +func (s *Service) Diagnostic() Diagnostic { + return s.diag +} + +func (s *Service) Path() string { + return s.dbpath +} diff --git a/services/storage/storage.go b/services/storage/storage.go index aef5376d2..2b0a8fc3b 100644 --- a/services/storage/storage.go +++ b/services/storage/storage.go @@ -1,6 +1,10 @@ package storage -import "errors" +import ( + "errors" + + "go.etcd.io/bbolt" +) // Common errors that can be returned var ( @@ -9,9 +13,9 @@ var ( // ReadOperator provides an interface for performing read operations. type ReadOperator interface { - // Retrieve a value. + // Get - Retrieve a value. Get(key string) (*KeyValue, error) - // Check if a key exists> + // Exists - Check if a key exists> Exists(key string) (bool, error) // List all values with given prefix. List(prefix string) ([]*KeyValue, error) @@ -19,7 +23,8 @@ type ReadOperator interface { // WriteOperator provides an interface for performing write operations. type WriteOperator interface { - // Store a value. + + // Put - Store a value. Put(key string, value []byte) error // Delete a key. // Deleting a non-existent key is not an error. @@ -30,6 +35,9 @@ type WriteOperator interface { type ReadOnlyTx interface { ReadOperator + // Bucket returns a ReadOnlyTx for that bucket. If the bucket doesn't exist Tx should be nil. + Bucket(name []byte) ReadOnlyTx + // Rollback signals that the transaction is complete. // If the transaction was not committed, then all changes are reverted. // Rollback must always be called for every transaction. @@ -38,12 +46,23 @@ type ReadOnlyTx interface { // Tx provides an interface for performing read and write storage operations in a single transaction. type Tx interface { - ReadOnlyTx + ReadOperator WriteOperator + // Cursor - returns a cursor for that bucket + Cursor() *bbolt.Cursor + + // Bucket returns a Tx for that bucket. If the bucket doesn't exist Tx should be nil. + Bucket(name []byte) Tx + // Commit finalizes the transaction. // Once a transaction is committed, rolling back the transaction has no effect. Commit() error + + // Rollback signals that the transaction is complete. + // If the transaction was not committed, then all changes are reverted. + // Rollback must always be called for every transaction. + Rollback() error } type TxOperator interface { @@ -68,6 +87,8 @@ type Interface interface { // Update creates a new read-write transaction and always rolls it back. 
// If the function returns a nil error the transaction is committed, otherwise the error is returned. Update(func(Tx) error) error + + Store(Buckets ...[]byte) Interface } // View manages a read only transaction. diff --git a/services/storage/storage_test.go b/services/storage/storage_test.go index 336deee55..0882fd133 100644 --- a/services/storage/storage_test.go +++ b/services/storage/storage_test.go @@ -3,313 +3,237 @@ package storage_test import ( "bytes" "fmt" - "os" - "path/filepath" "testing" "github.com/influxdata/kapacitor/services/storage" + "github.com/influxdata/kapacitor/services/storage/storagetest" "github.com/pkg/errors" - bolt "go.etcd.io/bbolt" ) // Error used to specifically trigger a rollback for tests. var rollbackErr = errors.New("rollback") -type createStoreCloser func() (storeCloser, error) - -// stores is a map of all storage implementations, -// each test will be run against the stores found in this map. -var stores = map[string]createStoreCloser{ - "bolt": newBolt, - "mem": newMemStore, -} - -type storeCloser interface { - Store(namespace string) storage.Interface - Close() -} - -type boltDB struct { - db *bolt.DB - dir string -} - -func (b boltDB) Close() { - _ = b.db.Close() - os.RemoveAll(b.dir) -} - -func newBolt() (storeCloser, error) { - tmpDir, err := os.MkdirTemp("", "storage-bolt") - if err != nil { - return nil, fmt.Errorf("failed to create temp directory: %v", err) - } - db, err := bolt.Open(filepath.Join(tmpDir, "bolt.db"), 0600, nil) - if err != nil { - return boltDB{}, err - } - return boltDB{ - db: db, - dir: tmpDir, - }, nil -} - -func (b boltDB) Store(bucket string) storage.Interface { - return storage.NewBolt(b.db, bucket) -} - -type memStore struct { - stores map[string]storage.Interface -} - -func newMemStore() (storeCloser, error) { - return memStore{ - stores: make(map[string]storage.Interface), - }, nil -} - -func (s memStore) Store(name string) storage.Interface { - m, ok := s.stores[name] - if ok { - return m - } - m = storage.NewMemStore(name) - s.stores[name] = m - return m -} - -func (s memStore) Close() { -} - func TestStorage_CRUD(t *testing.T) { - for name, sc := range stores { - t.Run(name, func(t *testing.T) { - db, err := sc() - if err != nil { + t.Run("bolt", func(t *testing.T) { + db, err := storagetest.NewBolt(t) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("crud") + s.Update(func(tx storage.Tx) error { + key := "key0" + value := []byte("test value") + if exists, err := tx.Exists(key); err != nil { t.Fatal(err) + } else if exists { + t.Fatal("expected key to not exist") } - defer db.Close() - - s := db.Store("crud") - s.Update(func(tx storage.Tx) error { - key := "key0" - value := []byte("test value") - if exists, err := tx.Exists(key); err != nil { - t.Fatal(err) - } else if exists { - t.Fatal("expected key to not exist") - } - - if err := tx.Put(key, value); err != nil { - t.Fatal(err) - } - if exists, err := tx.Exists(key); err != nil { - t.Fatal(err) - } else if !exists { - t.Fatal("expected key to exist") - } - - got, err := tx.Get(key) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(got.Value, value) { - t.Fatalf("unexpected value got %q exp %q", string(got.Value), string(value)) - } - - if err := tx.Delete(key); err != nil { - t.Fatal(err) - } - - if exists, err := tx.Exists(key); err != nil { - t.Fatal(err) - } else if exists { - t.Fatal("expected key to not exist after delete") - } - return nil - }) - }) - } -} -func TestStorage_Update(t *testing.T) { - for name, sc := 
range stores { - t.Run(name, func(t *testing.T) { - db, err := sc() - if err != nil { + if err := tx.Put(key, value); err != nil { t.Fatal(err) } - defer db.Close() - - s := db.Store("commit") - value := []byte("test value") - err = s.Update(func(tx storage.Tx) error { - return tx.Put("key0", value) - }) - if err != nil { + if exists, err := tx.Exists(key); err != nil { t.Fatal(err) + } else if !exists { + t.Fatal("expected key to exist") } - var got *storage.KeyValue - err = s.View(func(tx storage.ReadOnlyTx) error { - got, err = tx.Get("key0") - return err - }) + got, err := tx.Get(key) if err != nil { t.Fatal(err) } if !bytes.Equal(got.Value, value) { - t.Errorf("unexpected value got %q exp %q", string(got.Value), string(value)) + t.Fatalf("unexpected value got %q exp %q", string(got.Value), string(value)) } - }) - } -} -func TestStorage_Update_Rollback(t *testing.T) { - for name, sc := range stores { - t.Run(name, func(t *testing.T) { - db, err := sc() - if err != nil { + if err := tx.Delete(key); err != nil { t.Fatal(err) } - defer db.Close() - - s := db.Store("rollback") - value := []byte("test value") - // Put value - err = s.Update(func(tx storage.Tx) error { - return tx.Put("key0", value) - }) - if err != nil { + if exists, err := tx.Exists(key); err != nil { t.Fatal(err) + } else if exists { + t.Fatal("expected key to not exist after delete") } + return nil + }) + }) +} - err = s.Update(func(tx storage.Tx) error { - if err := tx.Put("key0", []byte("overridden value is rolledback")); err != nil { - return err - } - return rollbackErr - }) +func TestStorage_Update(t *testing.T) { + t.Run("bolt", func(t *testing.T) { + db, err := storagetest.NewBolt(t) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("commit") + value := []byte("test value") + err = s.Update(func(tx storage.Tx) error { + return tx.Put("key0", value) + }) + if err != nil { + t.Fatal(err) + } + + var got *storage.KeyValue + err = s.View(func(tx storage.ReadOnlyTx) error { + got, err = tx.Get("key0") + return err + }) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(got.Value, value) { + t.Errorf("unexpected value got %q exp %q", string(got.Value), string(value)) + } + }) +} - if err == nil { - t.Fatal("expected error") - } else if err != rollbackErr { - t.Fatalf("unexpected error: got %v exp %v", err, rollbackErr) - } +func TestStorage_Update_Rollback(t *testing.T) { + t.Run("bolt", func(t *testing.T) { + db, err := storagetest.NewBolt(t) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + s := db.Store("rollback") + value := []byte("test value") + + // Put value + err = s.Update(func(tx storage.Tx) error { + return tx.Put("key0", value) + }) + if err != nil { + t.Fatal(err) + } - var got *storage.KeyValue - s.View(func(tx storage.ReadOnlyTx) error { - got, err = tx.Get("key0") + err = s.Update(func(tx storage.Tx) error { + if err := tx.Put("key0", []byte("overridden value is rolledback")); err != nil { return err - }) - if err != nil { - t.Fatal(err) } + return rollbackErr + }) - if !bytes.Equal(got.Value, value) { - t.Errorf("unexpected value got %q exp %q", string(got.Value), string(value)) - } + if err == nil { + t.Fatal("expected error") + } else if err != rollbackErr { + t.Fatalf("unexpected error: got %v exp %v", err, rollbackErr) + } + + var got *storage.KeyValue + s.View(func(tx storage.ReadOnlyTx) error { + got, err = tx.Get("key0") + return err }) - } + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(got.Value, value) { + t.Errorf("unexpected value got %q exp 
%q", string(got.Value), string(value)) + } + }) } func TestStorage_Update_Concurrent(t *testing.T) { - for name, sc := range stores { - t.Run(name, func(t *testing.T) { - db, err := sc() - if err != nil { - t.Fatal(err) - } - defer db.Close() - - bucketFmt := func(w int) string { - return fmt.Sprintf("bucket%d", w) - } - valueFmt := func(w, i, k int) []byte { - return []byte(fmt.Sprintf("worker %d iteration %d key %d", w, i, k)) - } - keyFmt := func(w, i, k int) string { - return fmt.Sprintf("key%d", k) - } - - putLoop := func(s storage.Interface, w, i, k int) error { - // Begin new transaction - err := s.Update(func(tx storage.Tx) error { - // Put a set of values - for x := 0; x < k; x++ { - v := valueFmt(w, i, x) - k := keyFmt(w, i, x) - if err := tx.Put(k, v); err != nil { - return err - } - } - // Do not commit every third transaction - if i%3 == 0 { - return rollbackErr + t.Run("bolt", func(t *testing.T) { + db, err := storagetest.NewBolt(t) + if err != nil { + t.Fatal(err) + } + defer db.Close() + + bucketFmt := func(w int) string { + return fmt.Sprintf("bucket%d", w) + } + valueFmt := func(w, i, k int) []byte { + return []byte(fmt.Sprintf("worker %d iteration %d key %d", w, i, k)) + } + keyFmt := func(w, i, k int) string { + return fmt.Sprintf("key%d", k) + } + + putLoop := func(s storage.Interface, w, i, k int) error { + // Begin new transaction + err := s.Update(func(tx storage.Tx) error { + // Put a set of values + for x := 0; x < k; x++ { + v := valueFmt(w, i, x) + k := keyFmt(w, i, x) + if err := tx.Put(k, v); err != nil { + return err } - return nil - }) - // Mask explicit rollback errors - if err == rollbackErr { - err = nil } - return err - } - - testF := func(s storage.Interface, w, i, k int) error { - for x := 0; x < i; x++ { - if err := putLoop(s, w, x, k); err != nil { - return errors.Wrapf(err, "worker %d", w) - } + // Do not commit every third transaction + if i%3 == 0 { + return rollbackErr } return nil + }) + // Mask explicit rollback errors + if err == rollbackErr { + err = nil } + return err + } - // Concurrency counts - w := 10 // number of workers - i := 10 // number of iterations - k := 10 // number of keys to write - - errs := make(chan error, w) - for x := 0; x < w; x++ { - s := db.Store(bucketFmt(x)) - go func(s storage.Interface, w, i, k int) { - errs <- testF(s, w, i, k) - }(s, x, i, k) - } - for x := 0; x < w; x++ { - err := <-errs - if err != nil { - t.Fatal(err) + testF := func(s storage.Interface, w, i, k int) error { + for x := 0; x < i; x++ { + if err := putLoop(s, w, x, k); err != nil { + return errors.Wrapf(err, "worker %d", w) } } + return nil + } + + // Concurrency counts + w := 10 // number of workers + i := 10 // number of iterations + k := 10 // number of keys to write + + errs := make(chan error, w) + for x := 0; x < w; x++ { + s := db.Store(bucketFmt(x)) + go func(s storage.Interface, w, i, k int) { + errs <- testF(s, w, i, k) + }(s, x, i, k) + } + for x := 0; x < w; x++ { + err := <-errs + if err != nil { + t.Fatal(err) + } + } - for x := 0; x < w; x++ { - s := db.Store(bucketFmt(x)) - for z := 0; z < k; z++ { - y := i - 1 - if y%3 == 0 { - // The last iteration was not committed, expect the previous - y-- - } - key := keyFmt(x, y, z) - value := valueFmt(x, y, z) - var kv *storage.KeyValue - err := s.View(func(tx storage.ReadOnlyTx) error { - kv, err = tx.Get(key) - return err - }) - if err != nil { - t.Fatalf("%s err:%v", key, err) - } - if !bytes.Equal(kv.Value, value) { - t.Errorf("unexpected value for key %s: got %q exp %q", key, 
-					}
+		for x := 0; x < w; x++ {
+			s := db.Store(bucketFmt(x))
+			for z := 0; z < k; z++ {
+				y := i - 1
+				if y%3 == 0 {
+					// The last iteration was not committed, expect the previous
+					y--
+				}
+				key := keyFmt(x, y, z)
+				value := valueFmt(x, y, z)
+				var kv *storage.KeyValue
+				err := s.View(func(tx storage.ReadOnlyTx) error {
+					kv, err = tx.Get(key)
+					return err
+				})
+				if err != nil {
+					t.Fatalf("%s err:%v", key, err)
+				}
+				if !bytes.Equal(kv.Value, value) {
+					t.Errorf("unexpected value for key %s: got %q exp %q", key, string(kv.Value), string(value))
 				}
 			}
-		})
-	}
+		}
+	})
 }
diff --git a/services/storage/storagetest/storage.go b/services/storage/storagetest/storage.go
index 3dfcf948d..9b72374f4 100644
--- a/services/storage/storagetest/storage.go
+++ b/services/storage/storagetest/storage.go
@@ -1,26 +1,125 @@
 package storagetest
 
-import "github.com/influxdata/kapacitor/services/storage"
+import (
+	"fmt"
+	"os"
+	"path"
+
+	"github.com/influxdata/kapacitor/services/alert"
+	"github.com/influxdata/kapacitor/services/storage"
+	bolt "go.etcd.io/bbolt"
+)
+
+type CleanedTest interface {
+	TempDir() string
+}
 
 type TestStore struct {
-	versions  storage.Versions
-	registrar storage.StoreActionerRegistrar
+	db         *BoltDB
+	versions   storage.Versions
+	registrar  *storage.StoreActionerRegistrar
+	diagnostic storage.Diagnostic
+}
+
+// BoltDB is a database that deletes itself when closed
+type BoltDB struct {
+	*bolt.DB
+}
+
+// NewBolt is an in-memory db that deletes itself when closed, do not use except for testing.
+func NewBolt(t CleanedTest) (*BoltDB, error) {
+	dir := t.TempDir()
+	f, err := os.CreateTemp(dir, "boltDB*.db")
+	if err != nil {
+		return nil, err
+	}
+	dbName := f.Name()
+	if err = f.Close(); err != nil {
+		return nil, err
+	}
+	db, err := bolt.Open(dbName, 0600, &bolt.Options{
+		Timeout:    0,
+		NoGrowSync: false,
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &BoltDB{db}, nil
 }
 
-func New() TestStore {
-	return TestStore{
-		versions:  storage.NewVersions(storage.NewMemStore("versions")),
-		registrar: storage.NewStorageResitrar(),
+func (b BoltDB) Store(bucket string) storage.Interface {
+	return storage.NewBolt(b.DB, []byte(bucket))
+}
+
+func (b BoltDB) Close() error {
+	dbPath := b.Path()
+	err := b.DB.Close()
+	if err != nil {
+		return err
 	}
+	return os.RemoveAll(path.Dir(dbPath))
 }
 
-func (s TestStore) Store(name string) storage.Interface {
-	return storage.NewMemStore(name)
+func New(t CleanedTest, diagnostic storage.Diagnostic) *TestStore {
+	db, err := NewBolt(t)
+	if err != nil {
+		panic(err)
+	}
+	return &TestStore{
+		db:         db,
+		versions:   storage.NewVersions(db.Store("versions")),
+		registrar:  storage.NewStorageRegistrar(),
+		diagnostic: diagnostic,
+	}
 }
 
-func (s TestStore) Versions() storage.Versions {
+func (s *TestStore) Store(name string) storage.Interface {
+	return s.db.Store(name)
+}
+
+func (s *TestStore) Versions() storage.Versions {
 	return s.versions
 }
-func (s TestStore) Register(name string, store storage.StoreActioner) {
+
+func (s *TestStore) Register(name string, store storage.StoreActioner) {
 	s.registrar.Register(name, store)
 }
+
+func (s *TestStore) Close() error {
+	return s.db.Close()
+}
+
+func (s *TestStore) Diagnostic() storage.Diagnostic {
+	return s.diagnostic
+}
+
+func (s *TestStore) BucketEntries(topic string, alertID string) (keys []string, exists bool, err error) {
+	store := s.db.Store(alert.TopicStatesNameSpace)
+	err = store.View(func(tx storage.ReadOnlyTx) error {
+		bucket := tx.Bucket([]byte(topic))
+		if bucket == nil {
+			return fmt.Errorf("%q: %w", topic, bolt.ErrBucketNotFound)
+		}
+		if kvs, err := bucket.List(""); err != nil {
+			return fmt.Errorf("failed to list contents of bucket %q: %w", topic, err)
+		} else {
+			keys = make([]string, 0, len(kvs))
+			for _, aID := range kvs {
+				keys = append(keys, aID.Key)
+				if aID.Key == alertID {
+					exists = true
+				}
+			}
+		}
+		return nil
+	})
+	return keys, exists, err
+}
+
+func (s *TestStore) CloseBolt() error {
+	return s.db.DB.Close()
+}
+
+func (s *TestStore) Path() string {
+	return s.db.DB.Path()
+}
diff --git a/task/backend/executor/executor_test.go b/task/backend/executor/executor_test.go
index 7652d27c3..7c0d091a8 100644
--- a/task/backend/executor/executor_test.go
+++ b/task/backend/executor/executor_test.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"io"
 	"os"
 	"strings"
 	"sync"
@@ -15,6 +16,7 @@ import (
 	"github.com/influxdata/influxdb/v2/kit/prom"
 	"github.com/influxdata/influxdb/v2/kit/prom/promtest"
 	tracetest "github.com/influxdata/influxdb/v2/kit/tracing/testing"
+	"github.com/influxdata/kapacitor/services/diagnostic"
 	"github.com/influxdata/kapacitor/services/storage/storagetest"
 	"github.com/influxdata/kapacitor/task/backend"
 	"github.com/influxdata/kapacitor/task/backend/scheduler"
@@ -27,6 +29,13 @@ import (
 	"go.uber.org/zap/zaptest"
 )
 
+var diagService *diagnostic.Service
+
+func init() {
+	diagService = diagnostic.NewService(diagnostic.NewConfig(), io.Discard, io.Discard)
+	diagService.Open()
+}
+
 func TestMain(m *testing.M) {
 	var code int
 	func() {
@@ -50,7 +59,7 @@ func taskExecutorSystem(t *testing.T) tes {
 		qs = newFakeQueryService()
 	)
 
-	taskStore := kv.New(storagetest.New())
+	taskStore := kv.New(storagetest.New(t, diagService.NewStorageHandler()))
 	require.NoError(t, taskStore.Open())
 	var (
 		tcs = &taskControlService{TaskControlService: taskStore}
diff --git a/task/kv/task.go b/task/kv/task.go
index 9df2439db..5da3825da 100644
--- a/task/kv/task.go
+++ b/task/kv/task.go
@@ -122,7 +122,7 @@ func kvToInfluxTask(k *kvTask) *taskmodel.Task {
 // FindTaskByID returns a single task
 func (s *Service) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.Task, error) {
 	var t *taskmodel.Task
-	err := s.kv.View(func(tx storage.ReadOnlyTx) error {
+	return t, s.kv.View(func(tx storage.ReadOnlyTx) error {
 		task, err := s.findTaskByID(ctx, tx, id)
 		if err != nil {
 			return err
@@ -130,11 +130,6 @@ func (s *Service) FindTaskByID(ctx context.Context, id platform.ID) (*taskmodel.
 		t = task
 		return nil
 	})
-	if err != nil {
-		return nil, err
-	}
-
-	return t, nil
 }
 
 func IsNotFound(err error) bool {
@@ -143,7 +138,7 @@ func IsNotFound(err error) bool {
 
 // findTaskByID is an internal method used to do any action with tasks internally
 // that do not require authorization.
-func (s *Service) findTaskByID(ctx context.Context, tx storage.ReadOnlyTx, id platform.ID) (*taskmodel.Task, error) {
+func (s *Service) findTaskByID(_ context.Context, tx storage.ReadOperator, id platform.ID) (*taskmodel.Task, error) {
 	b := &wrappedReadTx{
 		tx:     tx,
 		prefix: taskPrefix,
@@ -636,7 +631,7 @@ func (s *Service) FindRuns(ctx context.Context, filter taskmodel.RunFilter) ([]*
 	return runs, len(runs), nil
 }
 
-func (s *Service) findRuns(ctx context.Context, tx storage.ReadOnlyTx, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) {
+func (s *Service) findRuns(ctx context.Context, tx storage.ReadOperator, filter taskmodel.RunFilter) ([]*taskmodel.Run, int, error) {
 	if filter.Limit == 0 {
 		filter.Limit = taskmodel.TaskDefaultPageSize
 	}
@@ -710,7 +705,7 @@ func (s *Service) FindRunByID(ctx context.Context, taskID, runID platform.ID) (*
 	return run, nil
 }
 
-func (s *Service) findRunByID(ctx context.Context, tx storage.ReadOnlyTx, taskID, runID platform.ID) (*taskmodel.Run, error) {
+func (s *Service) findRunByID(ctx context.Context, tx storage.ReadOperator, taskID, runID platform.ID) (*taskmodel.Run, error) {
 	bucket := wrappedReadTx{
 		tx:     tx,
 		prefix: taskRunPrefix,
@@ -963,7 +958,7 @@ func (s *Service) CurrentlyRunning(ctx context.Context, taskID platform.ID) ([]*
 	return runs, nil
 }
 
-func (s *Service) currentlyRunning(ctx context.Context, tx storage.ReadOnlyTx, taskID platform.ID) ([]*taskmodel.Run, error) {
+func (s *Service) currentlyRunning(ctx context.Context, tx storage.ReadOperator, taskID platform.ID) ([]*taskmodel.Run, error) {
 	bucket := wrappedReadTx{
 		tx:     tx,
 		prefix: taskRunPrefix,
@@ -1006,7 +1001,7 @@ func (s *Service) ManualRuns(ctx context.Context, taskID platform.ID) ([]*taskmo
 	return runs, nil
 }
 
-func (s *Service) manualRuns(ctx context.Context, tx storage.ReadOnlyTx, taskID platform.ID) ([]*taskmodel.Run, error) {
+func (s *Service) manualRuns(ctx context.Context, tx storage.ReadOperator, taskID platform.ID) ([]*taskmodel.Run, error) {
 	b := wrappedReadTx{
 		tx:     tx,
 		prefix: taskRunPrefix,
diff --git a/task/kv/task_test.go b/task/kv/task_test.go
index a0a2fe341..c27afa191 100644
--- a/task/kv/task_test.go
+++ b/task/kv/task_test.go
@@ -2,11 +2,13 @@ package kv_test
 
 import (
 	"context"
+	"io"
 	"testing"
 	"time"
 
 	"github.com/benbjohnson/clock"
 	"github.com/google/go-cmp/cmp"
+	"github.com/influxdata/kapacitor/services/diagnostic"
 	"github.com/influxdata/kapacitor/services/storage/storagetest"
 	"github.com/influxdata/kapacitor/task/kv"
 	"github.com/influxdata/kapacitor/task/options"
@@ -16,11 +18,18 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
+var diagService *diagnostic.Service
+
+func init() {
+	diagService = diagnostic.NewService(diagnostic.NewConfig(), io.Discard, io.Discard)
+	diagService.Open()
+}
+
 func TestKvTaskService(t *testing.T) {
 	servicetest.TestTaskService(
 		t,
 		func(t *testing.T) (*servicetest.System, context.CancelFunc) {
-			service := kv.New(storagetest.New())
+			service := kv.New(storagetest.New(t, diagService.NewStorageHandler()))
 			service.Open()
 			ctx, cancelFunc := context.WithCancel(context.Background())
 
@@ -55,7 +64,7 @@ func newService(t *testing.T, ctx context.Context, c clock.Clock) *testService {
 		c = clock.New()
 	}
 
-	service := kv.New(storagetest.New(), kv.WithClock(c))
+	service := kv.New(storagetest.New(t, diagService.NewStorageHandler()), kv.WithClock(c))
 	service.Open()
 
 	return &testService{
diff --git a/task/kv/wrapper.go b/task/kv/wrapper.go
index 59aa2605c..8ddbcdcc4 100644
--- a/task/kv/wrapper.go
+++ b/task/kv/wrapper.go
@@ -30,7 +30,7 @@ func (t *wrappedTx) Delete(key string) error {
 
 // BoltTx wraps an underlying bolt.Tx type to implement the Tx interface.
 type wrappedReadTx struct {
-	tx     storage.ReadOnlyTx
+	tx     storage.ReadOperator
 	prefix string
 }
 
diff --git a/task/servicetest/servicetest.go b/task/servicetest/servicetest.go
index 9fc16276e..679afd0b6 100644
--- a/task/servicetest/servicetest.go
+++ b/task/servicetest/servicetest.go
@@ -57,7 +57,7 @@ func TestTaskService(t *testing.T, fn BackendComponentFactory, testCategory ...s
 		testTaskCRUD(t, sys)
 	})
 
-	t.Run("FindTasks paging", func(t *testing.T) {
+	t.Run("FindTasks_paging", func(t *testing.T) {
 		testTaskFindTasksPaging(t, sys)
 	})
 
diff --git a/task_master.go b/task_master.go
index 36e4297c6..ccd2bf068 100644
--- a/task_master.go
+++ b/task_master.go
@@ -3,6 +3,7 @@ package kapacitor
 import (
 	"errors"
 	"fmt"
+	"io"
 	"log"
 	"sync"
 	"time"
@@ -267,6 +268,8 @@ type TaskMaster struct {
 	drained bool
 	mu      sync.RWMutex
 	wg      sync.WaitGroup
+
+	TestCloser io.Closer
 }
 
 func (tm *TaskMaster) WritePointsPrivileged(ctx tsdb.WriteContext, database, retentionPolicy string, consistencyLevel imodels.ConsistencyLevel, points []imodels.Point) error {
@@ -294,6 +297,9 @@ func NewTaskMaster(id string, info vars.Infoer, d Diagnostic) *TaskMaster {
 		closed: true,
 
 		TimingService: noOpTimingService{},
+
+		// Any cleanup/close function for test purposes. Not to be used in production
+		TestCloser: nil,
 	}
 }
 
@@ -332,6 +338,7 @@ func (tm *TaskMaster) New(id string) *TaskMaster {
 	n.TeamsService = tm.TeamsService
 	n.ServiceNowService = tm.ServiceNowService
 	n.ZenossService = tm.ZenossService
+	n.TestCloser = tm.TestCloser
 	return n
 }
 
@@ -382,6 +389,9 @@ func (tm *TaskMaster) Close() error {
 		_ = tm.stopTask(et.Task.ID)
 	}
 	tm.diag.TaskMasterClosed()
+	if tm.TestCloser != nil {
+		return tm.TestCloser.Close()
+	}
 	return nil
 }
 
diff --git a/test.sh b/test.sh
index 568f8e066..4d2f5f225 100755
--- a/test.sh
+++ b/test.sh
@@ -36,6 +36,8 @@ NO_UNCOMMITTED=${NO_UNCOMMITTED-false}
 HOME_DIR=/root
 # GOPATH
 GOPATH=/go
+# PROTO VERSION
+PROTO_VERSION=3.18.3
 
 no_uncomitted_arg="$no_uncommitted_arg"
 if [ ! $NO_UNCOMMITTED ]
@@ -67,7 +69,7 @@ function run_test_docker {
     imagename="$imagename-$BUILD_NUM"
 
     echo "Building docker image $imagename"
-    docker build -f "$dockerfile" --build-arg -t "$imagename" .
+    docker build -f "$dockerfile" --build-arg PROTO_VERSION=$PROTO_VERSION -t "$imagename" .
 
     echo "Running test in docker $name with args $@"