From cec274dc83a7e75e5ddfb4c803ed9e621bdd8392 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Thu, 1 Jul 2021 04:08:08 +0530 Subject: [PATCH 01/29] loadConfig Signed-off-by: Namanl2001 --- pkg/store/config.go | 94 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 pkg/store/config.go diff --git a/pkg/store/config.go b/pkg/store/config.go new file mode 100644 index 0000000000..a7afd455b3 --- /dev/null +++ b/pkg/store/config.go @@ -0,0 +1,94 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package store + +import ( + "fmt" + "io/ioutil" + "path/filepath" + + "gopkg.in/yaml.v2" + + "github.com/prometheus/prometheus/discovery/file" +) + +// Config represents the configuration of a set of Store API endpoints. +type Config struct { + TlsConfig TlsConfiguration `yaml:"tls_config"` + EndPoints []string `yaml:"endpoints"` + EndPoints_sd []file.SDConfig `yaml:"endpoints_sd_files"` + Mode string `yaml:"mode"` +} + +// TlsConfiguration represents the TLS configuration for a set of Store API endpoints. +type TlsConfiguration struct { + // TLS Certificates to use to identify this client to the server. + Cert string `yaml:"cert_file"` + // TLS Key for the client's certificate. + Key string `yaml:"key_file"` + // TLS CA Certificates to use to verify gRPC servers. + CaCert string `yaml:"ca_file"` + // Server name to verify the hostname on the returned gRPC certificates. See https://tools.ietf.org/html/rfc4366#section-3.1 + ServerName string `yaml:"server_name"` +} + +func LoadConfig(yamlPath string, endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig) ([]Config, error) { + var endpointConfig []Config + + if len(yamlPath) > 0 { + filename, _ := filepath.Abs(yamlPath) + yamlFile, err := ioutil.ReadFile(filename) + if err != nil { + return []Config{}, fmt.Errorf("cannot read file at path %s", yamlPath) + } + + if err := yaml.UnmarshalStrict(yamlFile, &endpointConfig); err != nil { + return []Config{}, fmt.Errorf("yaml file not in proper format") + } + } + + // No dynamic endpoints in strict mode + for _, config := range endpointConfig { + if config.Mode == "strict" && len(config.EndPoints_sd) != 0 { + return []Config{}, fmt.Errorf("no sd-files allowed in strict mode") + } + } + + // Checking if some endpoints are inputted more than once + mp := map[string]bool{} + for _, config := range endpointConfig { + for _, ep := range config.EndPoints { + if mp[ep] { + return []Config{}, fmt.Errorf("%s endpoint provided more than once", ep) + } + mp[ep] = true + } + } + + // Adding --endpoint, --endpoint_sd_files info to []endpointConfig + cfg1 := Config{} + for _, addr := range endpointAddrs { + if mp[addr] { + return []Config{}, fmt.Errorf("%s endpoint provided more than once", addr) + } + mp[addr] = true + cfg1.EndPoints = append(cfg1.EndPoints, addr) + } + cfg1.EndPoints_sd = []file.SDConfig{*fileSDConfig} + endpointConfig = append(endpointConfig, cfg1) + + // Adding --store-strict endpoints + cfg2 := Config{} + for _, addr := range strictEndpointAddrs { + if mp[addr] { + return []Config{}, fmt.Errorf("%s endpoint provided more than once", addr) + } + mp[addr] = true + cfg2.EndPoints = append(cfg2.EndPoints, addr) + } + cfg2.Mode = "strict" + endpointConfig = append(endpointConfig, cfg2) + + return endpointConfig, nil +} From 0b352704a284c10d7530c8c5ded5c4b0f03e179a Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Thu, 1 Jul 2021 04:09:13 +0530 Subject: [PATCH 02/29] helperFunc Signed-off-by: 
Namanl2001 --- pkg/extgrpc/client.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index d6a6dc8b3d..0b1fa3cf91 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -15,10 +15,18 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/tls" "github.com/thanos-io/thanos/pkg/tracing" ) +func StoreClientGRPCOptsFromTlsConfig(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, skipVerify bool, tlsConfig *store.TlsConfiguration) ([]grpc.DialOption, error) { + if tlsConfig != nil { + return StoreClientGRPCOpts(logger, reg, tracer, true, skipVerify, tlsConfig.Cert, tlsConfig.Key, tlsConfig.CaCert, tlsConfig.ServerName) + } + return StoreClientGRPCOpts(logger, reg, tracer, false, skipVerify, "", "", "", "") +} + // StoreClientGRPCOpts creates gRPC dial options for connecting to a store client. func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, secure, skipVerify bool, cert, key, caCert, serverName string) ([]grpc.DialOption, error) { grpcMets := grpc_prometheus.NewClientMetrics() From a0561e73ffdb9450ad3a4d5e8af69e8fec1c3cef Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Thu, 1 Jul 2021 04:10:50 +0530 Subject: [PATCH 03/29] iterating endpoints Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 335 ++++++++++++++++++++++++++------------------ pkg/ui/query.go | 4 +- 2 files changed, 198 insertions(+), 141 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 373cd57913..3a14e888a8 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -33,6 +33,7 @@ import ( "github.com/thanos-io/thanos/pkg/discovery/cache" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/exemplars" + "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" "github.com/thanos-io/thanos/pkg/extgrpc" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/extprom" @@ -40,14 +41,17 @@ import ( "github.com/thanos-io/thanos/pkg/gate" "github.com/thanos-io/thanos/pkg/logging" "github.com/thanos-io/thanos/pkg/metadata" + "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/prober" "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/rules" + "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" httpserver "github.com/thanos-io/thanos/pkg/server/http" "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/targets" + "github.com/thanos-io/thanos/pkg/targets/targetspb" "github.com/thanos-io/thanos/pkg/tls" "github.com/thanos-io/thanos/pkg/ui" ) @@ -127,6 +131,9 @@ func registerQuery(app *extkingpin.App) { fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback."). Default("5m")) + fileEndpointConfig := cmd.Flag("endpoint.config", "YAML file that contains store API servers configuration."). + PlaceHolder("").String() + // TODO(bwplotka): Grab this from TTL at some point. dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions."). 
Default("30s")) @@ -210,13 +217,12 @@ func registerQuery(app *extkingpin.App) { return errors.Errorf("Address %s is duplicated for --target flag.", dup) } - var fileSD *file.Discovery + var fileSDConfig *file.SDConfig if len(*fileSDFiles) > 0 { - conf := &file.SDConfig{ + fileSDConfig = &file.SDConfig{ Files: *fileSDFiles, RefreshInterval: *fileSDInterval, } - fileSD = file.NewDiscovery(conf, logger) } if *webRoutePrefix == "" { @@ -275,7 +281,8 @@ func registerQuery(app *extkingpin.App) { *enableTargetPartialResponse, *enableMetricMetadataPartialResponse, *enableExemplarPartialResponse, - fileSD, + fileSDConfig, + *fileEndpointConfig, time.Duration(*dnsSDInterval), *dnsSDResolver, time.Duration(*unhealthyStoreTimeout), @@ -340,7 +347,8 @@ func runQuery( enableTargetPartialResponse bool, enableMetricMetadataPartialResponse bool, enableExemplarPartialResponse bool, - fileSD *file.Discovery, + fileSDConfig *file.SDConfig, + endpointConfigYAML string, dnsSDInterval time.Duration, dnsSDResolver string, unhealthyStoreTimeout time.Duration, @@ -358,56 +366,72 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, secure, skipVerify, cert, key, caCert, serverName) + var endpointsConfig []store.Config + var err error + if len(endpointConfigYAML) > 0 { + endpointsConfig, err = store.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig) + } else { + endpointsConfig, err = store.LoadConfig("", storeAddrs, strictStores, fileSDConfig) + } if err != nil { - return errors.Wrap(err, "building gRPC client") + return errors.Wrap(err, "loading store config") } - fileSDCache := cache.New() - dnsStoreProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_store_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) + var storeSets []*query.EndpointSet + for _, config := range endpointsConfig { + dialOpts, err := extgrpc.StoreClientGRPCOptsFromTlsConfig(logger, reg, tracer, skipVerify, &config.TlsConfig) + if err != nil { + return errors.Wrap(err, "building gRPC client") + } + + fileSDCache := cache.New() + dnsStoreProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_store_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) - for _, store := range strictStores { - if dns.IsDynamicNode(store) { - return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. Use --store for this", store) + if config.Mode == "strict" { + for _, store := range config.EndPoints { + if dns.IsDynamicNode(store) { + return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. 
Use --store for this", store) + } + } } - } - dnsRuleProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) + dnsRuleProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) - dnsTargetProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_target_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) + dnsTargetProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_target_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) - dnsMetadataProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_metadata_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) + dnsMetadataProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_metadata_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) - dnsExemplarProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_exemplar_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) + dnsExemplarProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_exemplar_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) - var ( - endpoints = query.NewEndpointSet( + endpoints := query.NewEndpointSet( logger, reg, func() (specs []query.EndpointSpec) { // Add strict & static nodes. - for _, addr := range strictStores { - specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) + if config.Mode == "strict" { + for _, addr := range strictStores { + specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) + } } for _, dnsProvider := range []*dns.Provider{dnsStoreProvider, dnsRuleProvider, dnsExemplarProvider, dnsMetadataProvider, dnsTargetProvider} { @@ -425,11 +449,129 @@ func runQuery( dialOpts, unhealthyStoreTimeout, ) - proxy = store.NewProxyStore(logger, reg, endpoints.GetStoreClients, component.Query, selectorLset, storeResponseTimeout) - rulesProxy = rules.NewProxy(logger, endpoints.GetRulesClients) - targetsProxy = targets.NewProxy(logger, endpoints.GetTargetsClients) - metadataProxy = metadata.NewProxy(logger, endpoints.GetMetricMetadataClients) - exemplarsProxy = exemplars.NewProxy(logger, endpoints.GetExemplarsStores, selectorLset) + storeSets = append(storeSets, endpoints) + + // Periodically update the store set with the addresses we see in our cluster. + { + ctx, cancel := context.WithCancel(context.Background()) + g.Add(func() error { + return runutil.Repeat(5*time.Second, ctx.Done(), func() error { + endpoints.Update(ctx) + return nil + }) + }, func(error) { + cancel() + endpoints.Close() + }) + } + // Run File Service Discovery and update the store set when the files are modified. + if len(config.EndPoints_sd) > 0 { + fileSDUpdates := make(chan []*targetgroup.Group) + + for _, fsdConfig := range config.EndPoints_sd { + ctxRun, cancelRun := context.WithCancel(context.Background()) + fileSD := file.NewDiscovery(&fsdConfig, logger) + g.Add(func() error { + fileSD.Run(ctxRun, fileSDUpdates) + return nil + }, func(error) { + cancelRun() + }) + } + + ctxUpdate, cancelUpdate := context.WithCancel(context.Background()) + staticAddresses := config.EndPoints + g.Add(func() error { + for { + select { + case update := <-fileSDUpdates: + // Discoverers sometimes send nil updates so need to check for it to avoid panics. 
+ if update == nil { + continue + } + fileSDCache.Update(update) + endpoints.Update(ctxUpdate) + + if err := dnsStoreProvider.Resolve(ctxUpdate, append(fileSDCache.Addresses(), staticAddresses...)); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) + } + + // Rules apis do not support file service discovery as of now. + case <-ctxUpdate.Done(): + return nil + } + } + }, func(error) { + cancelUpdate() + close(fileSDUpdates) + }) + } + // Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary. + { + ctx, cancel := context.WithCancel(context.Background()) + staticAddresses := config.EndPoints + g.Add(func() error { + return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error { + resolveCtx, resolveCancel := context.WithTimeout(ctx, dnsSDInterval) + defer resolveCancel() + if err := dnsStoreProvider.Resolve(resolveCtx, append(fileSDCache.Addresses(), staticAddresses...)); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) + } + if err := dnsRuleProvider.Resolve(resolveCtx, ruleAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for rulesAPIs", "err", err) + } + if err := dnsTargetProvider.Resolve(ctx, targetAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for targetsAPIs", "err", err) + } + if err := dnsMetadataProvider.Resolve(resolveCtx, metadataAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for metadataAPIs", "err", err) + } + if err := dnsExemplarProvider.Resolve(resolveCtx, exemplarAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for exemplarsAPI", "err", err) + } + return nil + }) + }, func(error) { + cancel() + }) + } + } + var ( + get []store.Client + getRuleClient []rulespb.RulesClient + getTargetClient []targetspb.TargetsClient + getMetadataClient []metadatapb.MetadataClient + getExemplarsStore []*exemplarspb.ExemplarStore + ) + + for _, ss := range storeSets { + get = append(get, ss.Get()...) + getRuleClient = append(getRuleClient, ss.GetRulesClients()...) + getTargetClient = append(getTargetClient, ss.GetTargetsClients()...) + getMetadataClient = append(getMetadataClient, ss.GetMetadataClients()...) + getExemplarsStore = append(getExemplarsStore, ss.GetExemplarsStores()...) + } + var ( + allClients = func() []store.Client { + return get + } + ruleClients = func() []rulespb.RulesClient { + return getRuleClient + } + targetClients = func() []targetspb.TargetsClient { + return getTargetClient + } + metadataClients = func() []metadatapb.MetadataClient { + return getMetadataClient + } + exemplarStore = func() []*exemplarspb.ExemplarStore { + return getExemplarsStore + } + proxy = store.NewProxyStore(logger, reg, allClients, component.Query, selectorLset, storeResponseTimeout) + rulesProxy = rules.NewProxy(logger, ruleClients) + targetsProxy = targets.NewProxy(logger, targetClients) + metadataProxy = metadata.NewProxy(logger, metadataClients) + exemplarsProxy = exemplars.NewProxy(logger, exemplarStore, selectorLset) queryableCreator = query.NewQueryableCreator( logger, extprom.WrapRegistererWithPrefix("thanos_query_", reg), @@ -448,98 +590,13 @@ func runQuery( return defaultEvaluationInterval.Milliseconds() }, } - ) - - // Periodically update the store set with the addresses we see in our cluster. 
- { - ctx, cancel := context.WithCancel(context.Background()) - g.Add(func() error { - return runutil.Repeat(5*time.Second, ctx.Done(), func() error { - endpoints.Update(ctx) - return nil - }) - }, func(error) { - cancel() - endpoints.Close() - }) - } - // Run File Service Discovery and update the store set when the files are modified. - if fileSD != nil { - var fileSDUpdates chan []*targetgroup.Group - ctxRun, cancelRun := context.WithCancel(context.Background()) - - fileSDUpdates = make(chan []*targetgroup.Group) - - g.Add(func() error { - fileSD.Run(ctxRun, fileSDUpdates) - return nil - }, func(error) { - cancelRun() - }) - - engineOpts.EnableAtModifier = enableAtModifier - engineOpts.EnableNegativeOffset = enableNegativeOffset - - ctxUpdate, cancelUpdate := context.WithCancel(context.Background()) - g.Add(func() error { - for { - select { - case update := <-fileSDUpdates: - // Discoverers sometimes send nil updates so need to check for it to avoid panics. - if update == nil { - continue - } - fileSDCache.Update(update) - endpoints.Update(ctxUpdate) - - if err := dnsStoreProvider.Resolve(ctxUpdate, append(fileSDCache.Addresses(), storeAddrs...)); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) - } - - // Rules apis do not support file service discovery as of now. - case <-ctxUpdate.Done(): - return nil - } - } - }, func(error) { - cancelUpdate() - }) - } - // Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary. - { - ctx, cancel := context.WithCancel(context.Background()) - g.Add(func() error { - return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error { - resolveCtx, resolveCancel := context.WithTimeout(ctx, dnsSDInterval) - defer resolveCancel() - if err := dnsStoreProvider.Resolve(resolveCtx, append(fileSDCache.Addresses(), storeAddrs...)); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) - } - if err := dnsRuleProvider.Resolve(resolveCtx, ruleAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for rulesAPIs", "err", err) - } - if err := dnsTargetProvider.Resolve(ctx, targetAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for targetsAPIs", "err", err) - } - if err := dnsMetadataProvider.Resolve(resolveCtx, metadataAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for metadataAPIs", "err", err) - } - if err := dnsExemplarProvider.Resolve(resolveCtx, exemplarAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for exemplarsAPI", "err", err) - } - return nil - }) - }, func(error) { - cancel() - }) - } - - grpcProbe := prober.NewGRPC() - httpProbe := prober.NewHTTP() - statusProber := prober.Combine( - httpProbe, - grpcProbe, - prober.NewInstrumentation(comp, logger, extprom.WrapRegistererWithPrefix("thanos_", reg)), + grpcProbe = prober.NewGRPC() + httpProbe = prober.NewHTTP() + statusProber = prober.Combine( + httpProbe, + grpcProbe, + prober.NewInstrumentation(comp, logger, extprom.WrapRegistererWithPrefix("thanos_", reg)), + ) ) // Start query API + UI HTTP server. @@ -565,7 +622,7 @@ func runQuery( ins := extpromhttp.NewInstrumentationMiddleware(reg, nil) // TODO(bplotka in PR #513 review): pass all flags, not only the flags needed by prefix rewriting. 
- ui.NewQueryUI(logger, endpoints, webExternalPrefix, webPrefixHeaderName).Register(router, ins) + ui.NewQueryUI(logger, storeSets, webExternalPrefix, webPrefixHeaderName).Register(router, ins) api := v1.NewQueryAPI( logger, diff --git a/pkg/ui/query.go b/pkg/ui/query.go index 1778dc5557..3aea68a690 100644 --- a/pkg/ui/query.go +++ b/pkg/ui/query.go @@ -22,7 +22,7 @@ import ( type Query struct { *BaseUI - endpointSet *query.EndpointSet + endpointSet []*query.EndpointSet externalPrefix, prefixHeader string @@ -32,7 +32,7 @@ type Query struct { now func() model.Time } -func NewQueryUI(logger log.Logger, endpointSet *query.EndpointSet, externalPrefix, prefixHeader string) *Query { +func NewQueryUI(logger log.Logger, endpointSet []*query.EndpointSet, externalPrefix, prefixHeader string) *Query { tmplVariables := map[string]string{ "Component": component.Query.String(), } From aba6f103c3ea2613f5fa8e6f7489bbbca63e60f1 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Tue, 6 Jul 2021 23:41:46 +0530 Subject: [PATCH 04/29] addressed comments for config.go Signed-off-by: Namanl2001 --- pkg/extgrpc/client.go | 14 ++---- pkg/store/config.go | 112 +++++++++++++++++++++++------------------- 2 files changed, 66 insertions(+), 60 deletions(-) diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index 0b1fa3cf91..bb0acdd8e7 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -20,15 +20,8 @@ import ( "github.com/thanos-io/thanos/pkg/tracing" ) -func StoreClientGRPCOptsFromTlsConfig(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, skipVerify bool, tlsConfig *store.TlsConfiguration) ([]grpc.DialOption, error) { - if tlsConfig != nil { - return StoreClientGRPCOpts(logger, reg, tracer, true, skipVerify, tlsConfig.Cert, tlsConfig.Key, tlsConfig.CaCert, tlsConfig.ServerName) - } - return StoreClientGRPCOpts(logger, reg, tracer, false, skipVerify, "", "", "", "") -} - // StoreClientGRPCOpts creates gRPC dial options for connecting to a store client. -func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, secure, skipVerify bool, cert, key, caCert, serverName string) ([]grpc.DialOption, error) { +func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, secure, skipVerify bool, tlsConfig store.TLSConfiguration) ([]grpc.DialOption, error) { grpcMets := grpc_prometheus.NewClientMetrics() grpcMets.EnableClientHandlingTimeHistogram( grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120, 240, 360, 720}), @@ -56,13 +49,14 @@ func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer ope reg.MustRegister(grpcMets) } - if !secure { + // If secure is false or no TLS config is supplied. 
+ if !secure || (tlsConfig == store.TLSConfiguration{}) { return append(dialOpts, grpc.WithInsecure()), nil } level.Info(logger).Log("msg", "enabling client to server TLS") - tlsCfg, err := tls.NewClientConfig(logger, cert, key, caCert, serverName, skipVerify) + tlsCfg, err := tls.NewClientConfig(logger, tlsConfig.CertFile, tlsConfig.KeyFile, tlsConfig.CaCertFile, tlsConfig.ServerName, skipVerify) if err != nil { return nil, err } diff --git a/pkg/store/config.go b/pkg/store/config.go index a7afd455b3..1bc02fa59b 100644 --- a/pkg/store/config.go +++ b/pkg/store/config.go @@ -4,91 +4,103 @@ package store import ( - "fmt" - "io/ioutil" - "path/filepath" - "gopkg.in/yaml.v2" + "github.com/pkg/errors" "github.com/prometheus/prometheus/discovery/file" ) // Config represents the configuration of a set of Store API endpoints. type Config struct { - TlsConfig TlsConfiguration `yaml:"tls_config"` - EndPoints []string `yaml:"endpoints"` - EndPoints_sd []file.SDConfig `yaml:"endpoints_sd_files"` - Mode string `yaml:"mode"` + TLSConfig TLSConfiguration `yaml:"tls_config"` + Endpoints []string `yaml:"endpoints"` + EndpointsSD []file.SDConfig `yaml:"endpoints_sd_files"` + Mode EndpointMode `yaml:"mode"` } // TlsConfiguration represents the TLS configuration for a set of Store API endpoints. -type TlsConfiguration struct { - // TLS Certificates to use to identify this client to the server. - Cert string `yaml:"cert_file"` - // TLS Key for the client's certificate. - Key string `yaml:"key_file"` - // TLS CA Certificates to use to verify gRPC servers. - CaCert string `yaml:"ca_file"` +type TLSConfiguration struct { + // TLS Certificates file to use to identify this client to the server. + CertFile string `yaml:"cert_file"` + // TLS Key file for the client's certificate. + KeyFile string `yaml:"key_file"` + // TLS CA Certificates file to use to verify gRPC servers. + CaCertFile string `yaml:"ca_file"` // Server name to verify the hostname on the returned gRPC certificates. See https://tools.ietf.org/html/rfc4366#section-3.1 ServerName string `yaml:"server_name"` } -func LoadConfig(yamlPath string, endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig) ([]Config, error) { +type EndpointMode string + +const ( + DefaultEndpointMode EndpointMode = "" + StrictEndpointMode EndpointMode = "strict" +) + +// LoadConfig loads and returns list of per-endpoint TLS config. +func LoadConfig(confYAML []byte, endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { var endpointConfig []Config - if len(yamlPath) > 0 { - filename, _ := filepath.Abs(yamlPath) - yamlFile, err := ioutil.ReadFile(filename) - if err != nil { - return []Config{}, fmt.Errorf("cannot read file at path %s", yamlPath) + if len(confYAML) > 0 { + if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { + return nil, err } + } - if err := yaml.UnmarshalStrict(yamlFile, &endpointConfig); err != nil { - return []Config{}, fmt.Errorf("yaml file not in proper format") + // Checking if no proper mode is provided. + for _, config := range endpointConfig { + if config.Mode != StrictEndpointMode && config.Mode != DefaultEndpointMode { + return nil, errors.Errorf("%s is not a proper mode", config.Mode) } } - // No dynamic endpoints in strict mode + // No dynamic endpoints in strict mode. 
for _, config := range endpointConfig { - if config.Mode == "strict" && len(config.EndPoints_sd) != 0 { - return []Config{}, fmt.Errorf("no sd-files allowed in strict mode") + if config.Mode == StrictEndpointMode && len(config.EndpointsSD) != 0 { + return nil, errors.Errorf("no sd-files allowed in strict mode") } } - // Checking if some endpoints are inputted more than once - mp := map[string]bool{} + // Checking if some endpoints are inputted more than once. + allEndpoints := make(map[string]struct{}) for _, config := range endpointConfig { - for _, ep := range config.EndPoints { - if mp[ep] { - return []Config{}, fmt.Errorf("%s endpoint provided more than once", ep) + for _, addr := range config.Endpoints { + if _, exists := allEndpoints[addr]; exists { + return nil, errors.Errorf("%s endpoint provided more than once", addr) } - mp[ep] = true + allEndpoints[addr] = struct{}{} } } - // Adding --endpoint, --endpoint_sd_files info to []endpointConfig - cfg1 := Config{} - for _, addr := range endpointAddrs { - if mp[addr] { - return []Config{}, fmt.Errorf("%s endpoint provided more than once", addr) + // Adding --endpoint, --endpoint.sd-files info to []endpointConfig, if provided. + if len(endpointAddrs) > 0 || fileSDConfig != nil { + cfg1 := Config{} + cfg1.TLSConfig = TLSConfig + for _, addr := range endpointAddrs { + if _, exists := allEndpoints[addr]; exists { + return []Config{}, errors.Errorf("%s endpoint provided more than once", addr) + } + allEndpoints[addr] = struct{}{} + cfg1.Endpoints = append(cfg1.Endpoints, addr) } - mp[addr] = true - cfg1.EndPoints = append(cfg1.EndPoints, addr) + cfg1.EndpointsSD = []file.SDConfig{*fileSDConfig} + endpointConfig = append(endpointConfig, cfg1) } - cfg1.EndPoints_sd = []file.SDConfig{*fileSDConfig} - endpointConfig = append(endpointConfig, cfg1) - // Adding --store-strict endpoints - cfg2 := Config{} - for _, addr := range strictEndpointAddrs { - if mp[addr] { - return []Config{}, fmt.Errorf("%s endpoint provided more than once", addr) + // Adding --endpoint-strict endpoints if provided. 
+ if len(strictEndpointAddrs) > 0 { + cfg2 := Config{} + cfg2.TLSConfig = TLSConfig + for _, addr := range strictEndpointAddrs { + if _, exists := allEndpoints[addr]; exists { + return []Config{}, errors.Errorf("%s endpoint provided more than once", addr) + } + allEndpoints[addr] = struct{}{} + cfg2.Endpoints = append(cfg2.Endpoints, addr) } - mp[addr] = true - cfg2.EndPoints = append(cfg2.EndPoints, addr) + cfg2.Mode = StrictEndpointMode + endpointConfig = append(endpointConfig, cfg2) } - cfg2.Mode = "strict" - endpointConfig = append(endpointConfig, cfg2) return endpointConfig, nil } From 91718f5a856c547330241570e37deed0a0fc31cb Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Tue, 6 Jul 2021 23:42:34 +0530 Subject: [PATCH 05/29] addressed comments for querier Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 67 ++++++++++++++++++++++++------------------- cmd/thanos/receive.go | 13 ++++++--- pkg/api/query/v1.go | 25 ++++++++++++++++ pkg/ui/query.go | 20 +++++++++++++ 4 files changed, 91 insertions(+), 34 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 3a14e888a8..b723fc5745 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -26,6 +26,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" + extflag "github.com/efficientgo/tools/extkingpin" grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" v1 "github.com/thanos-io/thanos/pkg/api/query" "github.com/thanos-io/thanos/pkg/compact/downsample" @@ -131,8 +132,7 @@ func registerQuery(app *extkingpin.App) { fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback."). Default("5m")) - fileEndpointConfig := cmd.Flag("endpoint.config", "YAML file that contains store API servers configuration."). - PlaceHolder("").String() + endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains store API servers configuration.", extflag.WithEnvSubstitution()) // TODO(bwplotka): Grab this from TTL at some point. dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions."). 
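For illustration only (not part of the patch): the YAML document that this new `endpoint.config` flag is expected to carry can be sketched against the `store.Config` and `store.TLSConfiguration` structs introduced earlier in this series. The snippet below assumes only the field names defined by those structs (`tls_config`, `endpoints`, `endpoints_sd_files`, `mode`) and mirrors the strict unmarshalling step that `LoadConfig` performs; every hostname, port and certificate path in it is an invented placeholder.

```go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/thanos-io/thanos/pkg/store"
)

// endpointYAML shows one TLS-secured group of endpoints and one strict,
// statically addressed group. Values are placeholders, not taken from the patch.
const endpointYAML = `
- tls_config:
    cert_file: /certs/client.crt
    key_file: /certs/client.key
    ca_file: /certs/ca.crt
    server_name: sidecar.example.internal
  endpoints:
    - "sidecar-a:10901"
    - "sidecar-b:10901"
- endpoints:
    - "ruler:10901"
  mode: strict
`

func main() {
	var cfgs []store.Config
	// The same strict decoding that LoadConfig applies; unknown keys fail here.
	if err := yaml.UnmarshalStrict([]byte(endpointYAML), &cfgs); err != nil {
		panic(err)
	}
	fmt.Printf("parsed %d endpoint groups, first group has %d endpoints\n", len(cfgs), len(cfgs[0].Endpoints))
}
```

Against the structs as defined in this series this should report two groups, with the strict group kept free of any file-based SD, which matches the validation `LoadConfig` enforces.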
@@ -217,6 +217,11 @@ func registerQuery(app *extkingpin.App) { return errors.Errorf("Address %s is duplicated for --target flag.", dup) } + endpointConfigYAML, err := endpointConfig.Content() + if err != nil { + return err + } + var fileSDConfig *file.SDConfig if len(*fileSDFiles) > 0 { fileSDConfig = &file.SDConfig{ @@ -282,7 +287,7 @@ func registerQuery(app *extkingpin.App) { *enableMetricMetadataPartialResponse, *enableExemplarPartialResponse, fileSDConfig, - *fileEndpointConfig, + endpointConfigYAML, time.Duration(*dnsSDInterval), *dnsSDResolver, time.Duration(*unhealthyStoreTimeout), @@ -348,7 +353,7 @@ func runQuery( enableMetricMetadataPartialResponse bool, enableExemplarPartialResponse bool, fileSDConfig *file.SDConfig, - endpointConfigYAML string, + endpointConfigYAML []byte, dnsSDInterval time.Duration, dnsSDResolver string, unhealthyStoreTimeout time.Duration, @@ -366,20 +371,23 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - var endpointsConfig []store.Config - var err error - if len(endpointConfigYAML) > 0 { - endpointsConfig, err = store.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig) - } else { - endpointsConfig, err = store.LoadConfig("", storeAddrs, strictStores, fileSDConfig) + // TLSConfig for endpoints supplied in --endpoint, --endpoint.sd-files and --endpoint-strict. + var TLSConfig store.TLSConfiguration + if secure { + TLSConfig.CertFile = cert + TLSConfig.KeyFile = key + TLSConfig.CaCertFile = caCert + TLSConfig.ServerName = serverName } + + endpointsConfig, err := store.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig, TLSConfig) if err != nil { return errors.Wrap(err, "loading store config") } var storeSets []*query.EndpointSet for _, config := range endpointsConfig { - dialOpts, err := extgrpc.StoreClientGRPCOptsFromTlsConfig(logger, reg, tracer, skipVerify, &config.TlsConfig) + dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, secure, skipVerify, config.TLSConfig) if err != nil { return errors.Wrap(err, "building gRPC client") } @@ -391,14 +399,6 @@ func runQuery( dns.ResolverType(dnsSDResolver), ) - if config.Mode == "strict" { - for _, store := range config.EndPoints { - if dns.IsDynamicNode(store) { - return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. Use --store for this", store) - } - } - } - dnsRuleProvider := dns.NewProvider( logger, extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), @@ -423,16 +423,23 @@ func runQuery( dns.ResolverType(dnsSDResolver), ) + var spec []query.EndpointSpec + // Add strict & static nodes. + if config.Mode == store.StrictEndpointMode { + for _, addr := range config.Endpoints { + if dns.IsDynamicNode(addr) { + return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. Use --store for this", addr) + } + spec = append(spec, query.NewGRPCEndpointSpec(addr, true)) + } + } + endpoints := query.NewEndpointSet( logger, reg, func() (specs []query.EndpointSpec) { - // Add strict & static nodes. 
- if config.Mode == "strict" { - for _, addr := range strictStores { - specs = append(specs, query.NewGRPCEndpointSpec(addr, true)) - } - } + + specs = spec for _, dnsProvider := range []*dns.Provider{dnsStoreProvider, dnsRuleProvider, dnsExemplarProvider, dnsMetadataProvider, dnsTargetProvider} { var tmpSpecs []query.EndpointSpec @@ -465,12 +472,12 @@ func runQuery( }) } // Run File Service Discovery and update the store set when the files are modified. - if len(config.EndPoints_sd) > 0 { + if len(config.EndpointsSD) > 0 { fileSDUpdates := make(chan []*targetgroup.Group) - for _, fsdConfig := range config.EndPoints_sd { + for _, fSDConfig := range config.EndpointsSD { ctxRun, cancelRun := context.WithCancel(context.Background()) - fileSD := file.NewDiscovery(&fsdConfig, logger) + fileSD := file.NewDiscovery(&fSDConfig, logger) g.Add(func() error { fileSD.Run(ctxRun, fileSDUpdates) return nil @@ -480,7 +487,7 @@ func runQuery( } ctxUpdate, cancelUpdate := context.WithCancel(context.Background()) - staticAddresses := config.EndPoints + staticAddresses := config.Endpoints g.Add(func() error { for { select { @@ -509,7 +516,7 @@ func runQuery( // Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary. { ctx, cancel := context.WithCancel(context.Background()) - staticAddresses := config.EndPoints + staticAddresses := config.Endpoints g.Add(func() error { return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error { resolveCtx, resolveCancel := context.WithTimeout(ctx, dnsSDInterval) diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index ed0e8794e7..9d3bb4d43e 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -120,16 +120,21 @@ func runReceive( return err } + var TLSConfig store.TLSConfiguration + TLSConfig.CertFile = conf.rwClientCert + TLSConfig.KeyFile = conf.rwClientKey + TLSConfig.CaCertFile = conf.rwClientServerCA + TLSConfig.ServerName = conf.rwClientServerName + dialOpts, err := extgrpc.StoreClientGRPCOpts( logger, reg, tracer, *conf.grpcCert != "", *conf.grpcClientCA == "", - conf.rwClientCert, - conf.rwClientKey, - conf.rwClientServerCA, - conf.rwClientServerName, + conf.rwServerCert != "", + conf.rwServerClientCA == "", + TLSConfig, ) if err != nil { return err diff --git a/pkg/api/query/v1.go b/pkg/api/query/v1.go index fd9ee99b68..34ace22a84 100644 --- a/pkg/api/query/v1.go +++ b/pkg/api/query/v1.go @@ -93,8 +93,17 @@ type QueryAPI struct { enableExemplarPartialResponse bool disableCORS bool +<<<<<<< HEAD replicaLabels []string endpointStatus func() []query.EndpointStatus +======= + replicaLabels []string +<<<<<<< HEAD + endpointSet *query.EndpointSet +======= + storeSets []*query.StoreSet +>>>>>>> addressed comments for querier +>>>>>>> addressed comments for querier defaultRangeQueryStep time.Duration defaultInstantQueryMaxSourceResolution time.Duration @@ -106,7 +115,15 @@ type QueryAPI struct { // NewQueryAPI returns an initialized QueryAPI type. 
func NewQueryAPI( logger log.Logger, +<<<<<<< HEAD endpointStatus func() []query.EndpointStatus, +======= +<<<<<<< HEAD + endpointSet *query.EndpointSet, +======= + storeSets []*query.StoreSet, +>>>>>>> addressed comments for querier +>>>>>>> addressed comments for querier qe func(int64) *promql.Engine, c query.QueryableCreator, ruleGroups rules.UnaryClient, @@ -146,7 +163,15 @@ func NewQueryAPI( enableMetricMetadataPartialResponse: enableMetricMetadataPartialResponse, enableExemplarPartialResponse: enableExemplarPartialResponse, replicaLabels: replicaLabels, +<<<<<<< HEAD endpointStatus: endpointStatus, +======= +<<<<<<< HEAD + endpointSet: endpointSet, +======= + storeSets: storeSets, +>>>>>>> addressed comments for querier +>>>>>>> addressed comments for querier defaultRangeQueryStep: defaultRangeQueryStep, defaultInstantQueryMaxSourceResolution: defaultInstantQueryMaxSourceResolution, defaultMetadataTimeRange: defaultMetadataTimeRange, diff --git a/pkg/ui/query.go b/pkg/ui/query.go index 3aea68a690..577ff34cb2 100644 --- a/pkg/ui/query.go +++ b/pkg/ui/query.go @@ -22,7 +22,11 @@ import ( type Query struct { *BaseUI +<<<<<<< HEAD endpointSet []*query.EndpointSet +======= + storeSets []*query.StoreSet +>>>>>>> addressed comments for querier externalPrefix, prefixHeader string @@ -32,7 +36,11 @@ type Query struct { now func() model.Time } +<<<<<<< HEAD func NewQueryUI(logger log.Logger, endpointSet []*query.EndpointSet, externalPrefix, prefixHeader string) *Query { +======= +func NewQueryUI(logger log.Logger, storeSets []*query.StoreSet, externalPrefix, prefixHeader string) *Query { +>>>>>>> addressed comments for querier tmplVariables := map[string]string{ "Component": component.Query.String(), } @@ -43,7 +51,11 @@ func NewQueryUI(logger log.Logger, endpointSet []*query.EndpointSet, externalPre return &Query{ BaseUI: NewBaseUI(logger, "query_menu.html", tmplFuncs, tmplVariables, externalPrefix, prefixHeader, component.Query), +<<<<<<< HEAD endpointSet: endpointSet, +======= + storeSets: storeSets, +>>>>>>> addressed comments for querier externalPrefix: externalPrefix, prefixHeader: prefixHeader, cwd: runtimeInfo().CWD, @@ -111,9 +123,17 @@ func (q *Query) status(w http.ResponseWriter, r *http.Request) { func (q *Query) stores(w http.ResponseWriter, r *http.Request) { prefix := GetWebPrefix(q.logger, q.externalPrefix, q.prefixHeader, r) +<<<<<<< HEAD statuses := make(map[component.Component][]query.EndpointStatus) for _, status := range q.endpointSet.GetEndpointStatus() { statuses[status.ComponentType] = append(statuses[status.ComponentType], status) +======= + statuses := make(map[component.StoreAPI][]query.StoreStatus) + for _, storesSet := range q.storeSets { + for _, status := range storesSet.GetStoreStatus() { + statuses[status.StoreType] = append(statuses[status.StoreType], status) + } +>>>>>>> addressed comments for querier } sources := make([]component.Component, 0, len(statuses)) From 3b07fe4cf0056569650d3091759e6c5dcf31f8ea Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Thu, 8 Jul 2021 20:12:22 +0530 Subject: [PATCH 06/29] use either old or new config option Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 124 ++++++++++++++++++++++++++---------------- cmd/thanos/receive.go | 2 - pkg/store/config.go | 65 ++++++++++------------ 3 files changed, 104 insertions(+), 87 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index b723fc5745..8770abb5bc 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -132,7 +132,7 @@ func registerQuery(app 
*extkingpin.App) { fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback."). Default("5m")) - endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains store API servers configuration.", extflag.WithEnvSubstitution()) + endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains store API servers configuration. Either use this option or seperate endpoint options (endpoint, endpoint.sd-files, endpoint.srict).", extflag.WithEnvSubstitution()) // TODO(bwplotka): Grab this from TTL at some point. dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions."). @@ -222,6 +222,10 @@ func registerQuery(app *extkingpin.App) { return err } + if (len(*fileSDFiles) != 0 || len(*stores) != 0) && len(endpointConfigYAML) != 0 { + return errors.Errorf("--sore/--store.sd-files and --endpoint.config parameters cannot be defined at the same time") + } + var fileSDConfig *file.SDConfig if len(*fileSDFiles) > 0 { fileSDConfig = &file.SDConfig{ @@ -371,7 +375,7 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - // TLSConfig for endpoints supplied in --endpoint, --endpoint.sd-files and --endpoint-strict. + // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. var TLSConfig store.TLSConfiguration if secure { TLSConfig.CertFile = cert @@ -380,11 +384,51 @@ func runQuery( TLSConfig.ServerName = serverName } - endpointsConfig, err := store.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig, TLSConfig) - if err != nil { - return errors.Wrap(err, "loading store config") + var endpointsConfig []store.Config + var err error + if len(endpointConfigYAML) > 0 { + endpointsConfig, err = store.LoadConfig(endpointConfigYAML) + if err != nil { + return errors.Wrap(err, "loading endpoint config") + } + } else { + endpointsConfig, err = store.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) + if err != nil { + return errors.Wrap(err, "initialising endpoint config from individual flags") + } } + fileSDCache := cache.New() + dnsStoreProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_store_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) + + dnsRuleProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) + + dnsTargetProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_target_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) + + dnsMetadataProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_metadata_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) + + dnsExemplarProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWithPrefix("thanos_query_exemplar_apis_", reg), + dns.ResolverType(dnsSDResolver), + ) + var storeSets []*query.EndpointSet for _, config := range endpointsConfig { dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, secure, skipVerify, config.TLSConfig) @@ -392,37 +436,6 @@ func runQuery( return errors.Wrap(err, "building gRPC client") } - fileSDCache := cache.New() - dnsStoreProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_store_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - 
dnsRuleProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - dnsTargetProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_target_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - dnsMetadataProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_metadata_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - dnsExemplarProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_exemplar_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - var spec []query.EndpointSpec // Add strict & static nodes. if config.Mode == store.StrictEndpointMode { @@ -524,18 +537,6 @@ func runQuery( if err := dnsStoreProvider.Resolve(resolveCtx, append(fileSDCache.Addresses(), staticAddresses...)); err != nil { level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) } - if err := dnsRuleProvider.Resolve(resolveCtx, ruleAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for rulesAPIs", "err", err) - } - if err := dnsTargetProvider.Resolve(ctx, targetAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for targetsAPIs", "err", err) - } - if err := dnsMetadataProvider.Resolve(resolveCtx, metadataAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for metadataAPIs", "err", err) - } - if err := dnsExemplarProvider.Resolve(resolveCtx, exemplarAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for exemplarsAPI", "err", err) - } return nil }) }, func(error) { @@ -543,6 +544,33 @@ func runQuery( }) } } + + // Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary. 
+ { + ctx, cancel := context.WithCancel(context.Background()) + g.Add(func() error { + return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error { + resolveCtx, resolveCancel := context.WithTimeout(ctx, dnsSDInterval) + defer resolveCancel() + if err := dnsRuleProvider.Resolve(resolveCtx, ruleAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for rulesAPIs", "err", err) + } + if err := dnsTargetProvider.Resolve(ctx, targetAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for targetsAPIs", "err", err) + } + if err := dnsMetadataProvider.Resolve(resolveCtx, metadataAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for metadataAPIs", "err", err) + } + if err := dnsExemplarProvider.Resolve(resolveCtx, exemplarAddrs); err != nil { + level.Error(logger).Log("msg", "failed to resolve addresses for exemplarsAPI", "err", err) + } + return nil + }) + }, func(error) { + cancel() + }) + } + var ( get []store.Client getRuleClient []rulespb.RulesClient diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 9d3bb4d43e..37d565b74b 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -132,8 +132,6 @@ func runReceive( tracer, *conf.grpcCert != "", *conf.grpcClientCA == "", - conf.rwServerCert != "", - conf.rwServerClientCA == "", TLSConfig, ) if err != nil { diff --git a/pkg/store/config.go b/pkg/store/config.go index 1bc02fa59b..79b595da62 100644 --- a/pkg/store/config.go +++ b/pkg/store/config.go @@ -37,20 +37,41 @@ const ( StrictEndpointMode EndpointMode = "strict" ) +func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { + var endpointConfig []Config + + // Adding --endpoint, --endpoint.sd-files info to []endpointConfig, if provided. + if len(endpointAddrs) > 0 || fileSDConfig != nil { + cfg1 := Config{} + cfg1.TLSConfig = TLSConfig + cfg1.Endpoints = strictEndpointAddrs + cfg1.EndpointsSD = []file.SDConfig{*fileSDConfig} + endpointConfig = append(endpointConfig, cfg1) + } + + // Adding --endpoint-strict endpoints if provided. + if len(strictEndpointAddrs) > 0 { + cfg2 := Config{} + cfg2.TLSConfig = TLSConfig + cfg2.Endpoints = strictEndpointAddrs + cfg2.Mode = StrictEndpointMode + endpointConfig = append(endpointConfig, cfg2) + } + return endpointConfig, nil +} + // LoadConfig loads and returns list of per-endpoint TLS config. -func LoadConfig(confYAML []byte, endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { +func LoadConfig(confYAML []byte) ([]Config, error) { var endpointConfig []Config - if len(confYAML) > 0 { - if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { - return nil, err - } + if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { + return nil, err } - // Checking if no proper mode is provided. + // Checking if wrong mode is provided. for _, config := range endpointConfig { if config.Mode != StrictEndpointMode && config.Mode != DefaultEndpointMode { - return nil, errors.Errorf("%s is not a proper mode", config.Mode) + return nil, errors.Errorf("%s is wrong mode", config.Mode) } } @@ -72,35 +93,5 @@ func LoadConfig(confYAML []byte, endpointAddrs []string, strictEndpointAddrs []s } } - // Adding --endpoint, --endpoint.sd-files info to []endpointConfig, if provided. 
- if len(endpointAddrs) > 0 || fileSDConfig != nil { - cfg1 := Config{} - cfg1.TLSConfig = TLSConfig - for _, addr := range endpointAddrs { - if _, exists := allEndpoints[addr]; exists { - return []Config{}, errors.Errorf("%s endpoint provided more than once", addr) - } - allEndpoints[addr] = struct{}{} - cfg1.Endpoints = append(cfg1.Endpoints, addr) - } - cfg1.EndpointsSD = []file.SDConfig{*fileSDConfig} - endpointConfig = append(endpointConfig, cfg1) - } - - // Adding --endpoint-strict endpoints if provided. - if len(strictEndpointAddrs) > 0 { - cfg2 := Config{} - cfg2.TLSConfig = TLSConfig - for _, addr := range strictEndpointAddrs { - if _, exists := allEndpoints[addr]; exists { - return []Config{}, errors.Errorf("%s endpoint provided more than once", addr) - } - allEndpoints[addr] = struct{}{} - cfg2.Endpoints = append(cfg2.Endpoints, addr) - } - cfg2.Mode = StrictEndpointMode - endpointConfig = append(endpointConfig, cfg2) - } - return endpointConfig, nil } From 6c8e643197355f95e176d1011d79a45d36302417 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Mon, 12 Jul 2021 15:42:03 +0530 Subject: [PATCH 07/29] small nits to green Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 10 ++++++---- docs/components/query.md | 12 ++++++++++++ pkg/store/config.go | 9 ++++++--- 3 files changed, 24 insertions(+), 7 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 8770abb5bc..218ca1cc11 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -132,7 +132,7 @@ func registerQuery(app *extkingpin.App) { fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback."). Default("5m")) - endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains store API servers configuration. Either use this option or seperate endpoint options (endpoint, endpoint.sd-files, endpoint.srict).", extflag.WithEnvSubstitution()) + endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains store API servers configuration. Either use this option or separate endpoint options (endpoint, endpoint.sd-files, endpoint.srict).", extflag.WithEnvSubstitution()) // TODO(bwplotka): Grab this from TTL at some point. dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions."). @@ -394,7 +394,7 @@ func runQuery( } else { endpointsConfig, err = store.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) if err != nil { - return errors.Wrap(err, "initialising endpoint config from individual flags") + return errors.Wrap(err, "initializing endpoint config from individual flags") } } @@ -572,7 +572,6 @@ func runQuery( } var ( - get []store.Client getRuleClient []rulespb.RulesClient getTargetClient []targetspb.TargetsClient getMetadataClient []metadatapb.MetadataClient @@ -580,7 +579,6 @@ func runQuery( ) for _, ss := range storeSets { - get = append(get, ss.Get()...) getRuleClient = append(getRuleClient, ss.GetRulesClients()...) getTargetClient = append(getTargetClient, ss.GetTargetsClients()...) getMetadataClient = append(getMetadataClient, ss.GetMetadataClients()...) @@ -588,6 +586,10 @@ func runQuery( } var ( allClients = func() []store.Client { + var get []store.Client + for _, ss := range storeSets { + get = append(get, ss.Get()...) 
+ } return get } ruleClients = func() []rulespb.RulesClient { diff --git a/docs/components/query.md b/docs/components/query.md index 41e2cccf89..e30dd79bd1 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -255,6 +255,18 @@ Flags: --enable-feature= ... Comma separated experimental feature names to enable.The current list of features is promql-negative-offset and promql-at-modifier. + --endpoint.config= + Alternative to 'endpoint.config-file' flag + (mutually exclusive). Content of YAML file that + contains store API servers configuration. + Either use this option or seperate endpoint + options (endpoint, endpoint.sd-files, + endpoint.srict). + --endpoint.config-file= + Path to YAML file that contains store API + servers configuration. Either use this option + or seperate endpoint options (endpoint, + endpoint.sd-files, endpoint.srict). --grpc-address="0.0.0.0:10901" Listen ip:port address for gRPC endpoints (StoreAPI). Make sure this address is routable diff --git a/pkg/store/config.go b/pkg/store/config.go index 79b595da62..d3739d47e9 100644 --- a/pkg/store/config.go +++ b/pkg/store/config.go @@ -37,6 +37,7 @@ const ( StrictEndpointMode EndpointMode = "strict" ) +// NewConfig returns list of per-endpoint TLS config from individual flags. func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { var endpointConfig []Config @@ -44,8 +45,10 @@ func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfi if len(endpointAddrs) > 0 || fileSDConfig != nil { cfg1 := Config{} cfg1.TLSConfig = TLSConfig - cfg1.Endpoints = strictEndpointAddrs - cfg1.EndpointsSD = []file.SDConfig{*fileSDConfig} + cfg1.Endpoints = endpointAddrs + if fileSDConfig != nil { + cfg1.EndpointsSD = []file.SDConfig{*fileSDConfig} + } endpointConfig = append(endpointConfig, cfg1) } @@ -60,7 +63,7 @@ func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfi return endpointConfig, nil } -// LoadConfig loads and returns list of per-endpoint TLS config. +// LoadConfig returns list of per-endpoint TLS config. func LoadConfig(confYAML []byte) ([]Config, error) { var endpointConfig []Config From 88a8084e286be1809bbc46503d4cbf740eea321c Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Mon, 12 Jul 2021 19:34:49 +0530 Subject: [PATCH 08/29] making separate loops Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 29 ++++++++++++++++------------- docs/components/query.md | 4 ++-- 2 files changed, 18 insertions(+), 15 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 218ca1cc11..3647ab7169 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -571,19 +571,6 @@ func runQuery( }) } - var ( - getRuleClient []rulespb.RulesClient - getTargetClient []targetspb.TargetsClient - getMetadataClient []metadatapb.MetadataClient - getExemplarsStore []*exemplarspb.ExemplarStore - ) - - for _, ss := range storeSets { - getRuleClient = append(getRuleClient, ss.GetRulesClients()...) - getTargetClient = append(getTargetClient, ss.GetTargetsClients()...) - getMetadataClient = append(getMetadataClient, ss.GetMetadataClients()...) - getExemplarsStore = append(getExemplarsStore, ss.GetExemplarsStores()...) 
- } var ( allClients = func() []store.Client { var get []store.Client @@ -593,15 +580,31 @@ func runQuery( return get } ruleClients = func() []rulespb.RulesClient { + var getRuleClient []rulespb.RulesClient + for _, ss := range storeSets { + getRuleClient = append(getRuleClient, ss.GetRulesClients()...) + } return getRuleClient } targetClients = func() []targetspb.TargetsClient { + var getTargetClient []targetspb.TargetsClient + for _, ss := range storeSets { + getTargetClient = append(getTargetClient, ss.GetTargetsClients()...) + } return getTargetClient } metadataClients = func() []metadatapb.MetadataClient { + var getMetadataClient []metadatapb.MetadataClient + for _, ss := range storeSets { + getMetadataClient = append(getMetadataClient, ss.GetMetadataClients()...) + } return getMetadataClient } exemplarStore = func() []*exemplarspb.ExemplarStore { + var getExemplarsStore []*exemplarspb.ExemplarStore + for _, ss := range storeSets { + getExemplarsStore = append(getExemplarsStore, ss.GetExemplarsStores()...) + } return getExemplarsStore } proxy = store.NewProxyStore(logger, reg, allClients, component.Query, selectorLset, storeResponseTimeout) diff --git a/docs/components/query.md b/docs/components/query.md index e30dd79bd1..754198cd46 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -259,13 +259,13 @@ Flags: Alternative to 'endpoint.config-file' flag (mutually exclusive). Content of YAML file that contains store API servers configuration. - Either use this option or seperate endpoint + Either use this option or separate endpoint options (endpoint, endpoint.sd-files, endpoint.srict). --endpoint.config-file= Path to YAML file that contains store API servers configuration. Either use this option - or seperate endpoint options (endpoint, + or separate endpoint options (endpoint, endpoint.sd-files, endpoint.srict). 
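The flag help above references a configuration file without showing its shape; a plausible file passed via `--endpoint.config-file` (or inlined via `--endpoint.config`) is sketched below. It follows the `store.Config` layout from this series, with `endpoints_sd_files` entries using the Prometheus file-SD fields (`files`, `refresh_interval`); every address, path and interval is an illustrative placeholder, not something mandated by the patch.

```yaml
# Illustrative endpoint configuration; all values are placeholders.
- tls_config:
    cert_file: /etc/thanos/client.crt
    key_file: /etc/thanos/client.key
    ca_file: /etc/thanos/ca.crt
    server_name: store.example.internal
  endpoints:
    - "storegw-1:10901"
    - "storegw-2:10901"
  endpoints_sd_files:
    - files:
        - "/etc/thanos/sd/*.yaml"
      refresh_interval: 5m
# Strict groups take only static addresses and, per LoadConfig, no SD files.
- endpoints:
    - "ruler-1:10901"
  mode: strict
```

Each group carries its own TLS settings, and the querier builds separate gRPC dial options per group, which is the point of grouping endpoints this way.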
--grpc-address="0.0.0.0:10901" Listen ip:port address for gRPC endpoints From 781fac57be19658f346e8cb69da95800e52753ec Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Tue, 13 Jul 2021 23:25:31 +0530 Subject: [PATCH 09/29] added e2e-test Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 6 +- test/e2e/e2ethanos/services.go | 50 +++++++++--- test/e2e/query_test.go | 137 +++++++++++++++++++++++++++++++++ 3 files changed, 178 insertions(+), 15 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 3647ab7169..cb1b5ca80f 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -384,15 +384,15 @@ func runQuery( TLSConfig.ServerName = serverName } - var endpointsConfig []store.Config + var endpointConfig []store.Config var err error if len(endpointConfigYAML) > 0 { - endpointsConfig, err = store.LoadConfig(endpointConfigYAML) + endpointConfig, err = store.LoadConfig(endpointConfigYAML) if err != nil { return errors.Wrap(err, "loading endpoint config") } } else { - endpointsConfig, err = store.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) + endpointConfig, err = store.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) if err != nil { return errors.Wrap(err, "initializing endpoint config from individual flags") } diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index b0d9b1b3ad..843aba33bf 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -17,8 +17,6 @@ import ( e2edb "github.com/efficientgo/e2e/db" "github.com/efficientgo/tools/core/pkg/backoff" "github.com/pkg/errors" - "github.com/prometheus/common/model" - "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/relabel" "github.com/thanos-io/thanos/pkg/httpconfig" "gopkg.in/yaml.v2" @@ -141,13 +139,15 @@ type QuerierBuilder struct { routePrefix string externalPrefix string image string + fileSDPath string - storeAddresses []string - fileSDStoreAddresses []string - ruleAddresses []string - metadataAddresses []string - targetAddresses []string - exemplarAddresses []string + storeAddresses []string + ruleAddresses []string + metadataAddresses []string + targetAddresses []string + exemplarAddresses []string + + endpointConfig []store.Config tracingConfig string } @@ -166,9 +166,8 @@ func (q *QuerierBuilder) WithImage(image string) *QuerierBuilder { q.image = image return q } - -func (q *QuerierBuilder) WithFileSDStoreAddresses(fileSDStoreAddresses ...string) *QuerierBuilder { - q.fileSDStoreAddresses = fileSDStoreAddresses +func (q *QuerierBuilder) WithFileSDStoreAddresses(fileSDPath string) *QuerierBuilder { + q.fileSDPath = fileSDPath return q } @@ -253,6 +252,12 @@ func (q *QuerierBuilder) Build() (*e2e.InstrumentedRunnable, error) { } func (q *QuerierBuilder) collectArgs() ([]string, error) { +func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []store.Config) *QuerierBuilder { + q.endpointConfig = endpointConfig + return q +} + +func (q *QuerierBuilder) Build() (*Service, error) { const replicaLabel = "replica" args := e2e.BuildArgs(map[string]string{ @@ -308,6 +313,8 @@ func (q *QuerierBuilder) collectArgs() ([]string, error) { } args = append(args, "--store.sd-files="+filepath.Join(container, "filesd.yaml")) + if q.fileSDPath != "" { + args = append(args, "--store.sd-files="+q.fileSDPath) } if q.routePrefix != "" { @@ -322,7 +329,26 @@ func (q *QuerierBuilder) collectArgs() ([]string, error) { args = append(args, "--tracing.config="+q.tracingConfig) } - return args, 
nil + if (len(q.storeAddresses) == 0 && q.fileSDPath == "") && len(q.endpointConfig) > 0 { + endpointCfgBytes, err := yaml.Marshal(q.endpointConfig) + if err != nil { + return nil, errors.Wrapf(err, "generate endpoint config file: %v", q.endpointConfig) + } + args = append(args, "--endpoint.config="+string(endpointCfgBytes)) + } + + querier := NewService( + fmt.Sprintf("querier-%v", q.name), + DefaultImage(), + e2e.NewCommand("query", args...), + e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + 8080, + 9091, + ) + querier.SetUser(strconv.Itoa(os.Getuid())) + querier.SetBackoff(defaultBackoffConfig) + + return querier, nil } func RemoteWriteEndpoint(addr string) string { return fmt.Sprintf("http://%s/api/v1/receive", addr) } diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 06c6208763..94fe4cb890 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -6,6 +6,7 @@ package e2e_test import ( "context" "fmt" + "io/ioutil" "net/http/httptest" "net/url" "os" @@ -21,16 +22,20 @@ import ( "github.com/go-kit/kit/log" "github.com/pkg/errors" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/discovery/file" + "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/timestamp" "github.com/thanos-io/thanos/pkg/metadata/metadatapb" "github.com/thanos-io/thanos/pkg/rules/rulespb" "github.com/thanos-io/thanos/pkg/store/labelpb" "github.com/thanos-io/thanos/pkg/targets/targetspb" + "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" "github.com/thanos-io/thanos/pkg/promclient" "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" ) @@ -94,6 +99,31 @@ func sortResults(res model.Vector) { }) } +func createSDFile(sharedDir string, name string, fileSDStoreAddresses []string) (string, error) { + if len(fileSDStoreAddresses) > 0 { + queryFileSDDir := filepath.Join(sharedDir, "data", "querier", name) + container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", name) + if err := os.MkdirAll(queryFileSDDir, 0750); err != nil { + return "", errors.Wrap(err, "create query dir failed") + } + + fileSD := []*targetgroup.Group{{}} + for _, a := range fileSDStoreAddresses { + fileSD[0].Targets = append(fileSD[0].Targets, model.LabelSet{model.AddressLabel: model.LabelValue(a)}) + } + + b, err := yaml.Marshal(fileSD) + if err != nil { + return "", err + } + + if err := ioutil.WriteFile(queryFileSDDir+"/filesd.yaml", b, 0600); err != nil { + return "", errors.Wrap(err, "creating query SD config failed") + } + return filepath.Join(container, "filesd.yaml"), nil + } + return "", nil +} func TestQuery(t *testing.T) { t.Parallel() @@ -116,9 +146,116 @@ func TestQuery(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) + fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) + testutil.Ok(t, err) + // Querier. Both fileSD and directly by flags. q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")). 
WithFileSDStoreAddresses(sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")).Build() + q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}). + WithFileSDStoreAddresses(fileSDPath).Build() + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(q)) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) + t.Cleanup(cancel) + + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(5), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + + queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + Deduplicate: false, + }, []model.Metric{ + { + "job": "myself", + "prometheus": "prom-alone", + "replica": "0", + }, + { + "job": "myself", + "prometheus": "prom-both-remote-write-and-sidecar", + "receive": "1", + "replica": "1234", + "tenant_id": "default-tenant", + }, + { + "job": "myself", + "prometheus": "prom-both-remote-write-and-sidecar", + "replica": "1234", + }, + { + "job": "myself", + "prometheus": "prom-ha", + "replica": "0", + }, + { + "job": "myself", + "prometheus": "prom-ha", + "replica": "1", + }, + }) + + // With deduplication. + queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + Deduplicate: true, + }, []model.Metric{ + { + "job": "myself", + "prometheus": "prom-alone", + }, + { + "job": "myself", + "prometheus": "prom-both-remote-write-and-sidecar", + "receive": "1", + "tenant_id": "default-tenant", + }, + { + "job": "myself", + "prometheus": "prom-both-remote-write-and-sidecar", + }, + { + "job": "myself", + "prometheus": "prom-ha", + }, + }) +} + +func TestQueryWithEndpointConfig(t *testing.T) { + t.Parallel() + + s, err := e2e.NewScenario("e2e_test_query_config") + testutil.Ok(t, err) + t.Cleanup(e2ethanos.CleanScenario(t, s)) + + receiver, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(receiver)) + + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + testutil.Ok(t, err) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) + testutil.Ok(t, err) + prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) + testutil.Ok(t, err) + prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) + + fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) + testutil.Ok(t, err) + + endpointConfig := []store.Config{ + { + Endpoints: []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), 
receiver.InternalEndpoint("grpc")}, + EndpointsSD: []file.SDConfig{ + { + Files: []string{fileSDPath}, + RefreshInterval: model.Duration(time.Minute), + }, + }, + }, + } + + q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", nil).WithEndpointConfig(endpointConfig).Build() testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(q)) From bb5a0c65ad6c946aff17556a4fa49f8fd75d4b7c Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 21 Jul 2021 04:22:48 +0530 Subject: [PATCH 10/29] more store configs Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 21 +- cmd/thanos/receive.go | 1 + pkg/extgrpc/client.go | 6 +- pkg/query/storeset.go | 743 ++++++++++++++++++++++++ pkg/query/storeset_test.go | 1095 ++++++++++++++++++++++++++++++++++++ test/e2e/query_test.go | 5 +- 6 files changed, 1859 insertions(+), 12 deletions(-) create mode 100644 pkg/query/storeset.go create mode 100644 pkg/query/storeset_test.go diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index cb1b5ca80f..015ee8bc9f 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -398,13 +398,6 @@ func runQuery( } } - fileSDCache := cache.New() - dnsStoreProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_store_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - dnsRuleProvider := dns.NewProvider( logger, extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), @@ -430,12 +423,22 @@ func runQuery( ) var storeSets []*query.EndpointSet - for _, config := range endpointsConfig { - dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, secure, skipVerify, config.TLSConfig) + for instance, config := range endpointConfig { + dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, instance, secure, skipVerify, config.TLSConfig) if err != nil { return errors.Wrap(err, "building gRPC client") } + fileSDCache := cache.New() + dnsStoreProvider := dns.NewProvider( + logger, + extprom.WrapRegistererWith( + map[string]string{"config_instance": string(rune(instance))}, + extprom.WrapRegistererWithPrefix("thanos_querier_store_apis_", reg), + ), + dns.ResolverType(dnsSDResolver), + ) + var spec []query.EndpointSpec // Add strict & static nodes. if config.Mode == store.StrictEndpointMode { diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 37d565b74b..7035d7bea5 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -130,6 +130,7 @@ func runReceive( logger, reg, tracer, + 0, *conf.grpcCert != "", *conf.grpcClientCA == "", TLSConfig, diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index bb0acdd8e7..c0a3317d74 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -21,9 +21,11 @@ import ( ) // StoreClientGRPCOpts creates gRPC dial options for connecting to a store client. 
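The change just below threads a per-configuration index into the gRPC client metrics as a `config_instance` constant label. As a rough standalone sketch of the same idea (not the exact code in this patch), the label value can be rendered with decimal formatting; `string(rune(instance))` yields the character at that code point rather than its digits, which is why `strconv.Itoa` is used here. The option constructors are the ones already called in this patch.

```go
package main

import (
	"fmt"
	"strconv"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
)

// clientMetricsForInstance builds client-side gRPC metrics carrying a
// human-readable config_instance label ("0", "1", ...).
func clientMetricsForInstance(instance int) *grpc_prometheus.ClientMetrics {
	constLabels := map[string]string{"config_instance": strconv.Itoa(instance)}
	m := grpc_prometheus.NewClientMetrics(grpc_prometheus.WithConstLabels(constLabels))
	m.EnableClientHandlingTimeHistogram(
		grpc_prometheus.WithHistogramConstLabels(constLabels),
	)
	return m
}

func main() {
	reg := prometheus.NewRegistry()
	for i := 0; i < 2; i++ {
		// One metrics instance per endpoint configuration group; the distinct
		// const label values keep the registrations from colliding.
		reg.MustRegister(clientMetricsForInstance(i))
	}
	fmt.Println("registered per-instance gRPC client metrics")
}
```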
-func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, secure, skipVerify bool, tlsConfig store.TLSConfiguration) ([]grpc.DialOption, error) { - grpcMets := grpc_prometheus.NewClientMetrics() +func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, instance int, secure, skipVerify bool, tlsConfig store.TLSConfiguration) ([]grpc.DialOption, error) { + constLabels := map[string]string{"config_instance": string(rune(instance))} + grpcMets := grpc_prometheus.NewClientMetrics(grpc_prometheus.WithConstLabels(constLabels)) grpcMets.EnableClientHandlingTimeHistogram( + grpc_prometheus.WithHistogramConstLabels(constLabels), grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120, 240, 360, 720}), ) dialOpts := []grpc.DialOption{ diff --git a/pkg/query/storeset.go b/pkg/query/storeset.go new file mode 100644 index 0000000000..89ce65c8d5 --- /dev/null +++ b/pkg/query/storeset.go @@ -0,0 +1,743 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package query + +import ( + "context" + "encoding/json" + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" + "google.golang.org/grpc" + + "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/metadata/metadatapb" + "github.com/thanos-io/thanos/pkg/rules/rulespb" + "github.com/thanos-io/thanos/pkg/runutil" + "github.com/thanos-io/thanos/pkg/store" + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/thanos-io/thanos/pkg/targets/targetspb" +) + +const ( + unhealthyStoreMessage = "removing store because it's unhealthy or does not exist" +) + +type StoreSpec interface { + // Addr returns StoreAPI Address for the store spec. It is used as ID for store. + Addr() string + // Metadata returns current labels, store type and min, max ranges for store. + // It can change for every call for this method. + // If metadata call fails we assume that store is no longer accessible and we should not use it. + // NOTE: It is implementation responsibility to retry until context timeout, but a caller responsibility to manage + // given store connection. + Metadata(ctx context.Context, client storepb.StoreClient) (labelSets []labels.Labels, mint int64, maxt int64, storeType component.StoreAPI, err error) + + // StrictStatic returns true if the StoreAPI has been statically defined and it is under a strict mode. + StrictStatic() bool +} + +type RuleSpec interface { + // Addr returns RulesAPI Address for the rules spec. It is used as its ID. + Addr() string +} + +type TargetSpec interface { + // Addr returns TargetsAPI Address for the targets spec. It is used as its ID. + Addr() string +} + +type MetadataSpec interface { + // Addr returns MetadataAPI Address for the metadata spec. It is used as its ID. + Addr() string +} + +type ExemplarSpec interface { + // Addr returns ExemplarsAPI Address for the exemplars spec. It is used as its ID. + Addr() string +} + +// stringError forces the error to be a string +// when marshaled into a JSON. +type stringError struct { + originalErr error +} + +// MarshalJSON marshals the error into a string form. 
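The methods that follow implement this wrapper. A small self-contained illustration of why it exists, re-declaring the same tiny type so the snippet compiles on its own: a plain error marshals to an empty JSON object, while the wrapped form marshals to its message.

```go
package main

import (
	"encoding/json"
	"errors"
	"fmt"
)

// stringError mirrors the wrapper added in this file: it forces the wrapped
// error to marshal as its message.
type stringError struct{ originalErr error }

func (e *stringError) MarshalJSON() ([]byte, error) { return json.Marshal(e.originalErr.Error()) }
func (e *stringError) Error() string                { return e.originalErr.Error() }

func main() {
	plain := errors.New("store unreachable")

	rawJSON, _ := json.Marshal(map[string]error{"lastError": plain})
	wrappedJSON, _ := json.Marshal(map[string]error{"lastError": &stringError{originalErr: plain}})

	fmt.Println(string(rawJSON))     // {"lastError":{}} because errors.New's type has no exported fields
	fmt.Println(string(wrappedJSON)) // {"lastError":"store unreachable"}
}
```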
+func (e *stringError) MarshalJSON() ([]byte, error) { + return json.Marshal(e.originalErr.Error()) +} + +// Error returns the original underlying error. +func (e *stringError) Error() string { + return e.originalErr.Error() +} + +type StoreStatus struct { + Name string `json:"name"` + LastCheck time.Time `json:"lastCheck"` + LastError *stringError `json:"lastError"` + LabelSets []labels.Labels `json:"labelSets"` + StoreType component.StoreAPI `json:"-"` + MinTime int64 `json:"minTime"` + MaxTime int64 `json:"maxTime"` +} + +type grpcStoreSpec struct { + addr string + strictstatic bool +} + +// NewGRPCStoreSpec creates store pure gRPC spec. +// It uses Info gRPC call to get Metadata. +func NewGRPCStoreSpec(addr string, strictstatic bool) StoreSpec { + return &grpcStoreSpec{addr: addr, strictstatic: strictstatic} +} + +// StrictStatic returns true if the StoreAPI has been statically defined and it is under a strict mode. +func (s *grpcStoreSpec) StrictStatic() bool { + return s.strictstatic +} + +func (s *grpcStoreSpec) Addr() string { + // API addr should not change between state changes. + return s.addr +} + +// Metadata method for gRPC store API tries to reach host Info method until context timeout. If we are unable to get metadata after +// that time, we assume that the host is unhealthy and return error. +func (s *grpcStoreSpec) Metadata(ctx context.Context, client storepb.StoreClient) (labelSets []labels.Labels, mint, maxt int64, Type component.StoreAPI, err error) { + resp, err := client.Info(ctx, &storepb.InfoRequest{}, grpc.WaitForReady(true)) + if err != nil { + return nil, 0, 0, nil, errors.Wrapf(err, "fetching store info from %s", s.addr) + } + if len(resp.LabelSets) == 0 && len(resp.Labels) > 0 { + resp.LabelSets = []labelpb.ZLabelSet{{Labels: resp.Labels}} + } + + labelSets = make([]labels.Labels, 0, len(resp.LabelSets)) + for _, ls := range resp.LabelSets { + labelSets = append(labelSets, ls.PromLabels()) + } + return labelSets, resp.MinTime, resp.MaxTime, component.FromProto(resp.StoreType), nil +} + +// storeSetNodeCollector is a metric collector reporting the number of available storeAPIs for Querier. +// A Collector is required as we want atomic updates for all 'thanos_store_nodes_grpc_connections' series. +type storeSetNodeCollector struct { + mtx sync.Mutex + storeNodes map[component.StoreAPI]map[string]int + storePerExtLset map[string]int + + connectionsDesc *prometheus.Desc +} + +func newStoreSetNodeCollector(configInstance string) *storeSetNodeCollector { + return &storeSetNodeCollector{ + storeNodes: map[component.StoreAPI]map[string]int{}, + connectionsDesc: prometheus.NewDesc( + "thanos_store_nodes_grpc_connections", + "Number of gRPC connection to Store APIs. 
Opened connection means healthy store APIs available for Querier.", + []string{"external_labels", "store_type"}, map[string]string{"config_instance": configInstance}, + ), + } +} + +func (c *storeSetNodeCollector) Update(nodes map[component.StoreAPI]map[string]int) { + storeNodes := make(map[component.StoreAPI]map[string]int, len(nodes)) + storePerExtLset := map[string]int{} + + for k, v := range nodes { + storeNodes[k] = make(map[string]int, len(v)) + for kk, vv := range v { + storePerExtLset[kk] += vv + storeNodes[k][kk] = vv + } + } + + c.mtx.Lock() + defer c.mtx.Unlock() + c.storeNodes = storeNodes + c.storePerExtLset = storePerExtLset +} + +func (c *storeSetNodeCollector) Describe(ch chan<- *prometheus.Desc) { + ch <- c.connectionsDesc +} + +func (c *storeSetNodeCollector) Collect(ch chan<- prometheus.Metric) { + c.mtx.Lock() + defer c.mtx.Unlock() + + for storeType, occurrencesPerExtLset := range c.storeNodes { + for externalLabels, occurrences := range occurrencesPerExtLset { + var storeTypeStr string + if storeType != nil { + storeTypeStr = storeType.String() + } + ch <- prometheus.MustNewConstMetric(c.connectionsDesc, prometheus.GaugeValue, float64(occurrences), externalLabels, storeTypeStr) + } + } +} + +// StoreSet maintains a set of active stores. It is backed up by Store Specifications that are dynamically fetched on +// every Update() call. +type StoreSet struct { + logger log.Logger + + // Store specifications can change dynamically. If some store is missing from the list, we assuming it is no longer + // accessible and we close gRPC client for it. + storeSpecs func() []StoreSpec + ruleSpecs func() []RuleSpec + targetSpecs func() []TargetSpec + metadataSpecs func() []MetadataSpec + exemplarSpecs func() []ExemplarSpec + dialOpts []grpc.DialOption + gRPCInfoCallTimeout time.Duration + + updateMtx sync.Mutex + storesMtx sync.RWMutex + storesStatusesMtx sync.RWMutex + + // Main map of stores currently used for fanout. + stores map[string]*storeRef + storesMetric *storeSetNodeCollector + + // Map of statuses used only by UI. + storeStatuses map[string]*StoreStatus + unhealthyStoreTimeout time.Duration +} + +// NewStoreSet returns a new set of store APIs and potentially Rules APIs from given specs. 
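As a hedged usage sketch for the constructor defined just below: one set is built per endpoint configuration from a spec-provider closure and the dial options derived for that configuration. The addresses are hypothetical and insecure dial options stand in for the real per-config TLS options.

```go
package main

import (
	"context"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"

	"github.com/thanos-io/thanos/pkg/query"
)

func main() {
	logger := log.NewNopLogger()
	reg := prometheus.NewRegistry()

	// Hypothetical static StoreAPI addresses for this configuration instance.
	addrs := []string{"sidecar-1:10901", "store-gateway:10901"}

	specs := func() []query.StoreSpec {
		out := make([]query.StoreSpec, 0, len(addrs))
		for _, addr := range addrs {
			out = append(out, query.NewGRPCStoreSpec(addr, false))
		}
		return out
	}

	// Insecure dial options just for the sketch; real callers would pass the
	// options built from the per-configuration TLS settings.
	dialOpts := []grpc.DialOption{grpc.WithInsecure()}

	storeSet := query.NewStoreSet(logger, reg, 0, specs, nil, nil, nil, nil, dialOpts, 5*time.Minute)
	defer storeSet.Close()

	// Refresh metadata for all configured endpoints once; the querier does
	// this periodically.
	storeSet.Update(context.Background())
}
```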
+func NewStoreSet( + logger log.Logger, + reg *prometheus.Registry, + instance int, + storeSpecs func() []StoreSpec, + ruleSpecs func() []RuleSpec, + targetSpecs func() []TargetSpec, + metadataSpecs func() []MetadataSpec, + exemplarSpecs func() []ExemplarSpec, + dialOpts []grpc.DialOption, + unhealthyStoreTimeout time.Duration, +) *StoreSet { + storesMetric := newStoreSetNodeCollector(string(rune(instance))) + if reg != nil { + reg.MustRegister(storesMetric) + } + + if logger == nil { + logger = log.NewNopLogger() + } + if storeSpecs == nil { + storeSpecs = func() []StoreSpec { return nil } + } + if ruleSpecs == nil { + ruleSpecs = func() []RuleSpec { return nil } + } + if targetSpecs == nil { + targetSpecs = func() []TargetSpec { return nil } + } + if metadataSpecs == nil { + metadataSpecs = func() []MetadataSpec { return nil } + } + if exemplarSpecs == nil { + exemplarSpecs = func() []ExemplarSpec { return nil } + } + + ss := &StoreSet{ + logger: log.With(logger, "component", "storeset"), + storeSpecs: storeSpecs, + ruleSpecs: ruleSpecs, + targetSpecs: targetSpecs, + metadataSpecs: metadataSpecs, + exemplarSpecs: exemplarSpecs, + dialOpts: dialOpts, + storesMetric: storesMetric, + gRPCInfoCallTimeout: 5 * time.Second, + stores: make(map[string]*storeRef), + storeStatuses: make(map[string]*StoreStatus), + unhealthyStoreTimeout: unhealthyStoreTimeout, + } + return ss +} + +// TODO(bwplotka): Consider moving storeRef out of this package and renaming it, as it also supports rules API. +type storeRef struct { + storepb.StoreClient + + mtx sync.RWMutex + cc *grpc.ClientConn + addr string + // If rule is not nil, then this store also supports rules API. + rule rulespb.RulesClient + metadata metadatapb.MetadataClient + + // If exemplar is not nil, then this store also support exemplars API. + exemplar exemplarspb.ExemplarsClient + + // If target is not nil, then this store also supports targets API. + target targetspb.TargetsClient + + // Meta (can change during runtime). + labelSets []labels.Labels + storeType component.StoreAPI + minTime int64 + maxTime int64 + + logger log.Logger +} + +func (s *storeRef) Update(labelSets []labels.Labels, minTime, maxTime int64, storeType component.StoreAPI, rule rulespb.RulesClient, target targetspb.TargetsClient, metadata metadatapb.MetadataClient, exemplar exemplarspb.ExemplarsClient) { + s.mtx.Lock() + defer s.mtx.Unlock() + + s.storeType = storeType + s.labelSets = labelSets + s.minTime = minTime + s.maxTime = maxTime + s.rule = rule + s.target = target + s.metadata = metadata + s.exemplar = exemplar +} + +func (s *storeRef) StoreType() component.StoreAPI { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.storeType +} + +func (s *storeRef) HasRulesAPI() bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.rule != nil +} + +func (s *storeRef) HasTargetsAPI() bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.target != nil +} + +func (s *storeRef) HasMetadataAPI() bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.metadata != nil +} + +func (s *storeRef) HasExemplarsAPI() bool { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.exemplar != nil +} + +func (s *storeRef) LabelSets() []labels.Labels { + s.mtx.RLock() + defer s.mtx.RUnlock() + + labelSet := make([]labels.Labels, 0, len(s.labelSets)) + for _, ls := range s.labelSets { + if len(ls) == 0 { + continue + } + // Compatibility label for Queriers pre 0.8.1. Filter it out now. 
+ if ls[0].Name == store.CompatibilityTypeLabelName { + continue + } + labelSet = append(labelSet, ls.Copy()) + } + return labelSet +} + +func (s *storeRef) TimeRange() (mint, maxt int64) { + s.mtx.RLock() + defer s.mtx.RUnlock() + + return s.minTime, s.maxTime +} + +func (s *storeRef) String() string { + mint, maxt := s.TimeRange() + return fmt.Sprintf("Addr: %s LabelSets: %v Mint: %d Maxt: %d", s.addr, labelpb.PromLabelSetsToString(s.LabelSets()), mint, maxt) +} + +func (s *storeRef) Addr() string { + return s.addr +} + +func (s *storeRef) Close() { + runutil.CloseWithLogOnErr(s.logger, s.cc, fmt.Sprintf("store %v connection close", s.addr)) +} + +func newStoreAPIStats() map[component.StoreAPI]map[string]int { + nodes := make(map[component.StoreAPI]map[string]int, len(storepb.StoreType_name)) + for i := range storepb.StoreType_name { + nodes[component.FromProto(storepb.StoreType(i))] = map[string]int{} + } + return nodes +} + +// Update updates the store set. It fetches current list of store specs from function and updates the fresh metadata +// from all stores. Keeps around statically defined nodes that were defined with the strict mode. +func (s *StoreSet) Update(ctx context.Context) { + s.updateMtx.Lock() + defer s.updateMtx.Unlock() + + s.storesMtx.RLock() + stores := make(map[string]*storeRef, len(s.stores)) + for addr, st := range s.stores { + stores[addr] = st + } + s.storesMtx.RUnlock() + + level.Debug(s.logger).Log("msg", "starting updating storeAPIs", "cachedStores", len(stores)) + + activeStores := s.getActiveStores(ctx, stores) + level.Debug(s.logger).Log("msg", "checked requested storeAPIs", "activeStores", len(activeStores), "cachedStores", len(stores)) + + stats := newStoreAPIStats() + + // Close stores that where not active this time (are not in active stores map). + for addr, st := range stores { + if _, ok := activeStores[addr]; ok { + stats[st.StoreType()][labelpb.PromLabelSetsToString(st.LabelSets())]++ + continue + } + + st.Close() + delete(stores, addr) + s.updateStoreStatus(st, errors.New(unhealthyStoreMessage)) + level.Info(s.logger).Log("msg", unhealthyStoreMessage, "address", addr, "extLset", labelpb.PromLabelSetsToString(st.LabelSets())) + } + + // Add stores that are not yet in stores. + for addr, st := range activeStores { + if _, ok := stores[addr]; ok { + continue + } + + extLset := labelpb.PromLabelSetsToString(st.LabelSets()) + + // All producers should have unique external labels. While this does not check only StoreAPIs connected to + // this querier this allows to notify early user about misconfiguration. Warn only. This is also detectable from metric. + if st.StoreType() != nil && + (st.StoreType() == component.Sidecar || st.StoreType() == component.Rule) && + stats[component.Sidecar][extLset]+stats[component.Rule][extLset] > 0 { + + level.Warn(s.logger).Log("msg", "found duplicate storeAPI producer (sidecar or ruler). 
This is not advices as it will malform data in in the same bucket", + "address", addr, "extLset", extLset, "duplicates", fmt.Sprintf("%v", stats[component.Sidecar][extLset]+stats[component.Rule][extLset]+1)) + } + stats[st.StoreType()][extLset]++ + + stores[addr] = st + s.updateStoreStatus(st, nil) + + if st.HasRulesAPI() { + level.Info(s.logger).Log("msg", "adding new rulesAPI to query storeset", "address", addr) + } + + if st.HasExemplarsAPI() { + level.Info(s.logger).Log("msg", "adding new exemplarsAPI to query storeset", "address", addr) + } + + if st.HasTargetsAPI() { + level.Info(s.logger).Log("msg", "adding new targetsAPI to query storeset", "address", addr) + } + + level.Info(s.logger).Log("msg", "adding new storeAPI to query storeset", "address", addr, "extLset", extLset) + } + + s.storesMetric.Update(stats) + s.storesMtx.Lock() + s.stores = stores + s.storesMtx.Unlock() + + s.cleanUpStoreStatuses(stores) +} + +func (s *StoreSet) getActiveStores(ctx context.Context, stores map[string]*storeRef) map[string]*storeRef { + var ( + // UNIQUE? + activeStores = make(map[string]*storeRef, len(stores)) + mtx sync.Mutex + wg sync.WaitGroup + + storeAddrSet = make(map[string]struct{}) + ruleAddrSet = make(map[string]struct{}) + targetAddrSet = make(map[string]struct{}) + metadataAddrSet = make(map[string]struct{}) + exemplarAddrSet = make(map[string]struct{}) + ) + + // Gather active stores map concurrently. Build new store if does not exist already. + for _, ruleSpec := range s.ruleSpecs() { + ruleAddrSet[ruleSpec.Addr()] = struct{}{} + } + + // Gather active targets map concurrently. Add a new target if it does not exist already. + for _, targetSpec := range s.targetSpecs() { + targetAddrSet[targetSpec.Addr()] = struct{}{} + } + + // Gather active stores map concurrently. Build new store if does not exist already. + for _, metadataSpec := range s.metadataSpecs() { + metadataAddrSet[metadataSpec.Addr()] = struct{}{} + } + + // Gather active stores map concurrently. Build new store if does not exist already. + for _, exemplarSpec := range s.exemplarSpecs() { + exemplarAddrSet[exemplarSpec.Addr()] = struct{}{} + } + + // Gather healthy stores map concurrently. Build new store if does not exist already. + for _, storeSpec := range s.storeSpecs() { + if _, ok := storeAddrSet[storeSpec.Addr()]; ok { + level.Warn(s.logger).Log("msg", "duplicated address in store nodes", "address", storeSpec.Addr()) + continue + } + storeAddrSet[storeSpec.Addr()] = struct{}{} + + wg.Add(1) + go func(spec StoreSpec) { + defer wg.Done() + + addr := spec.Addr() + + ctx, cancel := context.WithTimeout(ctx, s.gRPCInfoCallTimeout) + defer cancel() + + st, seenAlready := stores[addr] + if !seenAlready { + // New store or was unactive and was removed in the past - create new one. + conn, err := grpc.DialContext(ctx, addr, s.dialOpts...) 
+ if err != nil { + s.updateStoreStatus(&storeRef{addr: addr}, err) + level.Warn(s.logger).Log("msg", "update of store node failed", "err", errors.Wrap(err, "dialing connection"), "address", addr) + return + } + + st = &storeRef{StoreClient: storepb.NewStoreClient(conn), storeType: component.UnknownStoreAPI, cc: conn, addr: addr, logger: s.logger} + if spec.StrictStatic() { + st.maxTime = math.MaxInt64 + } + } + + var rule rulespb.RulesClient + if _, ok := ruleAddrSet[addr]; ok { + rule = rulespb.NewRulesClient(st.cc) + } + + var target targetspb.TargetsClient + if _, ok := targetAddrSet[addr]; ok { + target = targetspb.NewTargetsClient(st.cc) + } + + var metadata metadatapb.MetadataClient + if _, ok := metadataAddrSet[addr]; ok { + metadata = metadatapb.NewMetadataClient(st.cc) + } + + var exemplar exemplarspb.ExemplarsClient + if _, ok := exemplarAddrSet[addr]; ok { + exemplar = exemplarspb.NewExemplarsClient(st.cc) + } + + // Check existing or new store. Is it healthy? What are current metadata? + labelSets, minTime, maxTime, storeType, err := spec.Metadata(ctx, st.StoreClient) + if err != nil { + if !seenAlready && !spec.StrictStatic() { + // Close only if new and not a strict static node. + // Unactive `s.stores` will be closed later on. + st.Close() + } + s.updateStoreStatus(st, err) + level.Warn(s.logger).Log("msg", "update of store node failed", "err", errors.Wrap(err, "getting metadata"), "address", addr) + + if !spec.StrictStatic() { + return + } + + // Still keep it around if static & strict mode enabled. + mtx.Lock() + defer mtx.Unlock() + + activeStores[addr] = st + return + } + + s.updateStoreStatus(st, nil) + st.Update(labelSets, minTime, maxTime, storeType, rule, target, metadata, exemplar) + + mtx.Lock() + defer mtx.Unlock() + + activeStores[addr] = st + }(storeSpec) + } + wg.Wait() + + for ruleAddr := range ruleAddrSet { + if _, ok := storeAddrSet[ruleAddr]; !ok { + level.Warn(s.logger).Log("msg", "ignored rule store", "address", ruleAddr) + } + } + return activeStores +} + +func (s *StoreSet) updateStoreStatus(store *storeRef, err error) { + s.storesStatusesMtx.Lock() + defer s.storesStatusesMtx.Unlock() + + status := StoreStatus{Name: store.addr} + prev, ok := s.storeStatuses[store.addr] + if ok { + status = *prev + } else { + mint, maxt := store.TimeRange() + status.MinTime = mint + status.MaxTime = maxt + } + + if err == nil { + status.LastCheck = time.Now() + mint, maxt := store.TimeRange() + status.LabelSets = store.LabelSets() + status.StoreType = store.StoreType() + status.MinTime = mint + status.MaxTime = maxt + status.LastError = nil + } else { + status.LastError = &stringError{originalErr: err} + } + + s.storeStatuses[store.addr] = &status +} + +func (s *StoreSet) GetStoreStatus() []StoreStatus { + s.storesStatusesMtx.RLock() + defer s.storesStatusesMtx.RUnlock() + + statuses := make([]StoreStatus, 0, len(s.storeStatuses)) + for _, v := range s.storeStatuses { + statuses = append(statuses, *v) + } + + sort.Slice(statuses, func(i, j int) bool { + return statuses[i].Name < statuses[j].Name + }) + return statuses +} + +// Get returns a list of all active stores. +func (s *StoreSet) Get() []store.Client { + s.storesMtx.RLock() + defer s.storesMtx.RUnlock() + + stores := make([]store.Client, 0, len(s.stores)) + for _, st := range s.stores { + stores = append(stores, st) + } + return stores +} + +// GetRulesClients returns a list of all active rules clients. 
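The getters that follow expose the per-API clients of a single set. With one set per endpoint configuration, the querier-side closures shown earlier in this series simply concatenate the results across all sets; a minimal sketch of that aggregation, assuming a slice of sets:

```go
package fanout

import (
	"github.com/thanos-io/thanos/pkg/query"
	"github.com/thanos-io/thanos/pkg/rules/rulespb"
	"github.com/thanos-io/thanos/pkg/store"
)

// allStoreClients merges the active store clients of every configured set so
// the fanout layers always see the current, combined view.
func allStoreClients(sets []*query.StoreSet) []store.Client {
	var out []store.Client
	for _, ss := range sets {
		out = append(out, ss.Get()...)
	}
	return out
}

// allRulesClients does the same for the optional Rules API clients.
func allRulesClients(sets []*query.StoreSet) []rulespb.RulesClient {
	var out []rulespb.RulesClient
	for _, ss := range sets {
		out = append(out, ss.GetRulesClients()...)
	}
	return out
}
```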
+func (s *StoreSet) GetRulesClients() []rulespb.RulesClient { + s.storesMtx.RLock() + defer s.storesMtx.RUnlock() + + rules := make([]rulespb.RulesClient, 0, len(s.stores)) + for _, st := range s.stores { + if st.HasRulesAPI() { + rules = append(rules, st.rule) + } + } + return rules +} + +// GetTargetsClients returns a list of all active targets clients. +func (s *StoreSet) GetTargetsClients() []targetspb.TargetsClient { + s.storesMtx.RLock() + defer s.storesMtx.RUnlock() + + targets := make([]targetspb.TargetsClient, 0, len(s.stores)) + for _, st := range s.stores { + if st.HasTargetsAPI() { + targets = append(targets, st.target) + } + } + return targets +} + +// GetMetadataClients returns a list of all active metadata clients. +func (s *StoreSet) GetMetadataClients() []metadatapb.MetadataClient { + s.storesMtx.RLock() + defer s.storesMtx.RUnlock() + + metadataClients := make([]metadatapb.MetadataClient, 0, len(s.stores)) + for _, st := range s.stores { + if st.HasMetadataAPI() { + metadataClients = append(metadataClients, st.metadata) + } + } + return metadataClients +} + +// GetExemplarsStores returns a list of all active exemplars stores. +func (s *StoreSet) GetExemplarsStores() []*exemplarspb.ExemplarStore { + s.storesMtx.RLock() + defer s.storesMtx.RUnlock() + + exemplarStores := make([]*exemplarspb.ExemplarStore, 0, len(s.stores)) + for _, st := range s.stores { + if st.HasExemplarsAPI() { + exemplarStores = append(exemplarStores, &exemplarspb.ExemplarStore{ + ExemplarsClient: st.exemplar, + LabelSets: st.labelSets, + }) + } + } + return exemplarStores +} + +func (s *StoreSet) Close() { + s.storesMtx.Lock() + defer s.storesMtx.Unlock() + + for _, st := range s.stores { + st.Close() + } + s.stores = map[string]*storeRef{} +} + +func (s *StoreSet) cleanUpStoreStatuses(stores map[string]*storeRef) { + s.storesStatusesMtx.Lock() + defer s.storesStatusesMtx.Unlock() + + now := time.Now() + for addr, status := range s.storeStatuses { + if _, ok := stores[addr]; ok { + continue + } + + if now.Sub(status.LastCheck) >= s.unhealthyStoreTimeout { + delete(s.storeStatuses, addr) + } + } +} diff --git a/pkg/query/storeset_test.go b/pkg/query/storeset_test.go new file mode 100644 index 0000000000..60f8bc0f2c --- /dev/null +++ b/pkg/query/storeset_test.go @@ -0,0 +1,1095 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. 
+ +package query + +import ( + "context" + "encoding/json" + "fmt" + "math" + "net" + "testing" + "time" + + "github.com/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/thanos-io/thanos/pkg/component" + "github.com/thanos-io/thanos/pkg/store" + "github.com/thanos-io/thanos/pkg/store/labelpb" + "github.com/thanos-io/thanos/pkg/store/storepb" + "github.com/thanos-io/thanos/pkg/testutil" +) + +var testGRPCOpts = []grpc.DialOption{ + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), + grpc.WithInsecure(), +} + +type mockedStore struct { + infoDelay time.Duration + info storepb.InfoResponse +} + +func (s *mockedStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { + if s.infoDelay > 0 { + time.Sleep(s.infoDelay) + } + return &s.info, nil +} + +func (s *mockedStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { + return status.Error(codes.Unimplemented, "not implemented") +} + +func (s *mockedStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) ( + *storepb.LabelNamesResponse, error, +) { + return nil, status.Error(codes.Unimplemented, "not implemented") +} + +func (s *mockedStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) ( + *storepb.LabelValuesResponse, error, +) { + return nil, status.Error(codes.Unimplemented, "not implemented") +} + +type testStoreMeta struct { + extlsetFn func(addr string) []labelpb.ZLabelSet + storeType component.StoreAPI + minTime, maxTime int64 + infoDelay time.Duration +} + +type testStores struct { + srvs map[string]*grpc.Server + orderAddrs []string +} + +func startTestStores(storeMetas []testStoreMeta) (*testStores, error) { + st := &testStores{ + srvs: map[string]*grpc.Server{}, + } + + for _, meta := range storeMetas { + listener, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + // Close so far started servers. + st.Close() + return nil, err + } + + srv := grpc.NewServer() + + storeSrv := &mockedStore{ + info: storepb.InfoResponse{ + LabelSets: meta.extlsetFn(listener.Addr().String()), + MaxTime: meta.maxTime, + MinTime: meta.minTime, + }, + infoDelay: meta.infoDelay, + } + if meta.storeType != nil { + storeSrv.info.StoreType = meta.storeType.ToProto() + } + storepb.RegisterStoreServer(srv, storeSrv) + go func() { + _ = srv.Serve(listener) + }() + + st.srvs[listener.Addr().String()] = srv + st.orderAddrs = append(st.orderAddrs, listener.Addr().String()) + } + + return st, nil +} + +func (s *testStores) StoreAddresses() []string { + var stores []string + stores = append(stores, s.orderAddrs...) 
+ return stores +} + +func (s *testStores) Close() { + for _, srv := range s.srvs { + srv.Stop() + } + s.srvs = nil +} + +func (s *testStores) CloseOne(addr string) { + srv, ok := s.srvs[addr] + if !ok { + return + } + + srv.Stop() + delete(s.srvs, addr) +} + +func TestStoreSet_Update(t *testing.T) { + stores, err := startTestStores([]testStoreMeta{ + { + storeType: component.Sidecar, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "addr", Value: addr}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "a", Value: "b"}, + }, + }, + } + }, + }, + { + storeType: component.Sidecar, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "addr", Value: addr}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "a", Value: "b"}, + }, + }, + } + }, + }, + { + storeType: component.Query, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "a", Value: "broken"}, + }, + }, + } + }, + }, + }) + testutil.Ok(t, err) + defer stores.Close() + + discoveredStoreAddr := stores.StoreAddresses() + + // Testing if duplicates can cause weird results. + discoveredStoreAddr = append(discoveredStoreAddr, discoveredStoreAddr[0]) + storeSet := NewStoreSet(nil, nil, 0, + func() (specs []StoreSpec) { + for _, addr := range discoveredStoreAddr { + specs = append(specs, NewGRPCStoreSpec(addr, false)) + } + return specs + }, + func() (specs []RuleSpec) { + return nil + }, + func() (specs []TargetSpec) { + return nil + }, + func() (specs []MetadataSpec) { + return nil + }, + func() (specs []ExemplarSpec) { + return nil + }, + testGRPCOpts, time.Minute) + storeSet.gRPCInfoCallTimeout = 2 * time.Second + defer storeSet.Close() + + // Initial update. + storeSet.Update(context.Background()) + + // Start with one not available. + stores.CloseOne(discoveredStoreAddr[2]) + + // Should not matter how many of these we run. + storeSet.Update(context.Background()) + storeSet.Update(context.Background()) + testutil.Equals(t, 2, len(storeSet.stores)) + testutil.Equals(t, 3, len(storeSet.storeStatuses)) + + for addr, st := range storeSet.stores { + testutil.Equals(t, addr, st.addr) + + lset := st.LabelSets() + testutil.Equals(t, 2, len(lset)) + testutil.Equals(t, "addr", lset[0][0].Name) + testutil.Equals(t, addr, lset[0][0].Value) + testutil.Equals(t, "a", lset[1][0].Name) + testutil.Equals(t, "b", lset[1][0].Value) + } + + // Check stats. + expected := newStoreAPIStats() + expected[component.Sidecar] = map[string]int{ + fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[0]): 1, + fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[1]): 1, + } + testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) + + // Remove address from discovered and reset last check, which should ensure cleanup of status on next update. + storeSet.storeStatuses[discoveredStoreAddr[2]].LastCheck = time.Now().Add(-4 * time.Minute) + discoveredStoreAddr = discoveredStoreAddr[:len(discoveredStoreAddr)-2] + storeSet.Update(context.Background()) + testutil.Equals(t, 2, len(storeSet.storeStatuses)) + + stores.CloseOne(discoveredStoreAddr[0]) + delete(expected[component.Sidecar], fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[0])) + + // We expect Update to tear down store client for closed store server. 
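The expected map keys asserted above are produced by `labelpb.PromLabelSetsToString`. A small sketch of the format, using hypothetical label sets:

```go
package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"

	"github.com/thanos-io/thanos/pkg/store/labelpb"
)

func main() {
	// Hypothetical external label sets advertised by one StoreAPI.
	lsets := []labels.Labels{
		labels.FromStrings("a", "b"),
		labels.FromStrings("addr", "127.0.0.1:10901"),
	}

	// Prints the comma-joined form used as the per-store key in the
	// connection stats, e.g. {a="b"},{addr="127.0.0.1:10901"}.
	fmt.Println(labelpb.PromLabelSetsToString(lsets))
}
```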
+ storeSet.Update(context.Background()) + testutil.Equals(t, 1, len(storeSet.stores), "only one service should respond just fine, so we expect one client to be ready.") + testutil.Equals(t, 2, len(storeSet.storeStatuses)) + + addr := discoveredStoreAddr[1] + st, ok := storeSet.stores[addr] + testutil.Assert(t, ok, "addr exist") + testutil.Equals(t, addr, st.addr) + + lset := st.LabelSets() + testutil.Equals(t, 2, len(lset)) + testutil.Equals(t, "addr", lset[0][0].Name) + testutil.Equals(t, addr, lset[0][0].Value) + testutil.Equals(t, "a", lset[1][0].Name) + testutil.Equals(t, "b", lset[1][0].Value) + testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) + + // New big batch of storeAPIs. + stores2, err := startTestStores([]testStoreMeta{ + { + storeType: component.Query, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "l3", Value: "v4"}, + }, + }, + } + }, + }, + { + // Duplicated Querier, in previous versions it would be deduplicated. Now it should be not. + storeType: component.Query, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "l3", Value: "v4"}, + }, + }, + } + }, + }, + { + storeType: component.Sidecar, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + } + }, + }, + { + // Duplicated Sidecar, in previous versions it would be deduplicated. Now it should be not. + storeType: component.Sidecar, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + } + }, + }, + { + // Querier that duplicates with sidecar, in previous versions it would be deduplicated. Now it should be not. + storeType: component.Query, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + } + }, + }, + { + // Ruler that duplicates with sidecar, in previous versions it would be deduplicated. Now it should be not. + // Warning should be produced. + storeType: component.Rule, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + } + }, + }, + { + // Duplicated Rule, in previous versions it would be deduplicated. Now it should be not. Warning should be produced. + storeType: component.Rule, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + } + }, + }, + { + // No storeType. + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "no-store-type"}, + {Name: "l2", Value: "v3"}, + }, + }, + } + }, + }, + // Two pre v0.8.0 store gateway nodes, they don't have ext labels set. 
+ { + storeType: component.Store, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{} + }, + }, + { + storeType: component.Store, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{} + }, + }, + // Regression tests against https://github.com/thanos-io/thanos/issues/1632: From v0.8.0 stores advertise labels. + // If the object storage handled by store gateway has only one sidecar we used to hitting issue. + { + storeType: component.Store, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "l3", Value: "v4"}, + }, + }, + } + }, + }, + // Stores v0.8.1 has compatibility labels. Check if they are correctly removed. + { + storeType: component.Store, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "l3", Value: "v4"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: store.CompatibilityTypeLabelName, Value: "store"}, + }, + }, + } + }, + }, + // Duplicated store, in previous versions it would be deduplicated. Now it should be not. + { + storeType: component.Store, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + {Name: "l1", Value: "v2"}, + {Name: "l2", Value: "v3"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: "l3", Value: "v4"}, + }, + }, + { + Labels: []labelpb.ZLabel{ + {Name: store.CompatibilityTypeLabelName, Value: "store"}, + }, + }, + } + }, + }, + }) + testutil.Ok(t, err) + defer stores2.Close() + + discoveredStoreAddr = append(discoveredStoreAddr, stores2.StoreAddresses()...) + + // New stores should be loaded. + storeSet.Update(context.Background()) + testutil.Equals(t, 1+len(stores2.srvs), len(storeSet.stores)) + + // Check stats. + expected = newStoreAPIStats() + expected[component.UnknownStoreAPI] = map[string]int{ + "{l1=\"no-store-type\", l2=\"v3\"}": 1, + } + expected[component.Query] = map[string]int{ + "{l1=\"v2\", l2=\"v3\"}": 1, + "{l1=\"v2\", l2=\"v3\"},{l3=\"v4\"}": 2, + } + expected[component.Rule] = map[string]int{ + "{l1=\"v2\", l2=\"v3\"}": 2, + } + expected[component.Sidecar] = map[string]int{ + fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[1]): 1, + "{l1=\"v2\", l2=\"v3\"}": 2, + } + expected[component.Store] = map[string]int{ + "": 2, + "{l1=\"v2\", l2=\"v3\"},{l3=\"v4\"}": 3, + } + testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) + + // Check statuses. 
+ testutil.Equals(t, 2+len(stores2.srvs), len(storeSet.storeStatuses)) +} + +func TestStoreSet_Update_NoneAvailable(t *testing.T) { + st, err := startTestStores([]testStoreMeta{ + { + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + { + Name: "addr", + Value: addr, + }, + }, + }, + } + }, + storeType: component.Sidecar, + }, + { + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + { + Name: "addr", + Value: addr, + }, + }, + }, + } + }, + storeType: component.Sidecar, + }, + }) + testutil.Ok(t, err) + defer st.Close() + + initialStoreAddr := st.StoreAddresses() + st.CloseOne(initialStoreAddr[0]) + st.CloseOne(initialStoreAddr[1]) + + storeSet := NewStoreSet(nil, nil, 0, + func() (specs []StoreSpec) { + for _, addr := range initialStoreAddr { + specs = append(specs, NewGRPCStoreSpec(addr, false)) + } + return specs + }, + func() (specs []RuleSpec) { return nil }, + func() (specs []TargetSpec) { return nil }, + func() (specs []MetadataSpec) { return nil }, + func() (specs []ExemplarSpec) { return nil }, + testGRPCOpts, time.Minute) + storeSet.gRPCInfoCallTimeout = 2 * time.Second + + // Should not matter how many of these we run. + storeSet.Update(context.Background()) + storeSet.Update(context.Background()) + testutil.Equals(t, 0, len(storeSet.stores), "none of services should respond just fine, so we expect no client to be ready.") + + // Leak test will ensure that we don't keep client connection around. + + expected := newStoreAPIStats() + testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) +} + +// TestQuerierStrict tests what happens when the strict mode is enabled/disabled. +func TestQuerierStrict(t *testing.T) { + st, err := startTestStores([]testStoreMeta{ + { + minTime: 12345, + maxTime: 54321, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + { + Name: "addr", + Value: addr, + }, + }, + }, + } + }, + storeType: component.Sidecar, + }, + { + minTime: 66666, + maxTime: 77777, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + { + Name: "addr", + Value: addr, + }, + }, + }, + } + }, + storeType: component.Sidecar, + }, + // Slow store. + { + minTime: 65644, + maxTime: 77777, + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{ + { + Labels: []labelpb.ZLabel{ + { + Name: "addr", + Value: addr, + }, + }, + }, + } + }, + storeType: component.Sidecar, + infoDelay: 2 * time.Second, + }, + }) + + testutil.Ok(t, err) + defer st.Close() + + staticStoreAddr := st.StoreAddresses()[0] + slowStaticStoreAddr := st.StoreAddresses()[2] + storeSet := NewStoreSet(nil, nil, 0, func() (specs []StoreSpec) { + return []StoreSpec{ + NewGRPCStoreSpec(st.StoreAddresses()[0], true), + NewGRPCStoreSpec(st.StoreAddresses()[1], false), + NewGRPCStoreSpec(st.StoreAddresses()[2], true), + } + }, func() []RuleSpec { + return nil + }, func() []TargetSpec { + return nil + }, func() (specs []MetadataSpec) { + return nil + }, func() []ExemplarSpec { + return nil + }, testGRPCOpts, time.Minute) + defer storeSet.Close() + storeSet.gRPCInfoCallTimeout = 1 * time.Second + + // Initial update. + storeSet.Update(context.Background()) + testutil.Equals(t, 3, len(storeSet.stores), "three clients must be available for running store nodes") + + // The store has not responded to the info call and is assumed to cover everything. 
+ curMin, curMax := storeSet.stores[slowStaticStoreAddr].minTime, storeSet.stores[slowStaticStoreAddr].maxTime + testutil.Assert(t, storeSet.stores[slowStaticStoreAddr].cc.GetState().String() != "SHUTDOWN", "slow store's connection should not be closed") + testutil.Equals(t, int64(0), curMin) + testutil.Equals(t, int64(math.MaxInt64), curMax) + + // The store is statically defined + strict mode is enabled + // so its client + information must be retained. + curMin, curMax = storeSet.stores[staticStoreAddr].minTime, storeSet.stores[staticStoreAddr].maxTime + testutil.Equals(t, int64(12345), curMin, "got incorrect minimum time") + testutil.Equals(t, int64(54321), curMax, "got incorrect minimum time") + + // Successfully retrieve the information and observe minTime/maxTime updating. + storeSet.gRPCInfoCallTimeout = 3 * time.Second + storeSet.Update(context.Background()) + updatedCurMin, updatedCurMax := storeSet.stores[slowStaticStoreAddr].minTime, storeSet.stores[slowStaticStoreAddr].maxTime + testutil.Equals(t, int64(65644), updatedCurMin) + testutil.Equals(t, int64(77777), updatedCurMax) + storeSet.gRPCInfoCallTimeout = 1 * time.Second + + // Turn off the stores. + st.Close() + + // Update again many times. Should not matter WRT the static one. + storeSet.Update(context.Background()) + storeSet.Update(context.Background()) + storeSet.Update(context.Background()) + + // Check that the information is the same. + testutil.Equals(t, 2, len(storeSet.stores), "two static clients must remain available") + testutil.Equals(t, curMin, storeSet.stores[staticStoreAddr].minTime, "minimum time reported by the store node is different") + testutil.Equals(t, curMax, storeSet.stores[staticStoreAddr].maxTime, "minimum time reported by the store node is different") + testutil.NotOk(t, storeSet.storeStatuses[staticStoreAddr].LastError.originalErr) + + testutil.Equals(t, updatedCurMin, storeSet.stores[slowStaticStoreAddr].minTime, "minimum time reported by the store node is different") + testutil.Equals(t, updatedCurMax, storeSet.stores[slowStaticStoreAddr].maxTime, "minimum time reported by the store node is different") +} + +func TestStoreSet_Update_Rules(t *testing.T) { + stores, err := startTestStores([]testStoreMeta{ + { + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{} + }, + storeType: component.Sidecar, + }, + { + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{} + }, + storeType: component.Rule, + }, + }) + testutil.Ok(t, err) + defer stores.Close() + + for _, tc := range []struct { + name string + storeSpecs func() []StoreSpec + ruleSpecs func() []RuleSpec + exemplarSpecs func() []ExemplarSpec + expectedStores int + expectedRules int + }{ + { + name: "stores, no rules", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + NewGRPCStoreSpec(stores.orderAddrs[1], false), + } + }, + expectedStores: 2, + expectedRules: 0, + }, + { + name: "rules, no stores", + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedStores: 0, + expectedRules: 0, + }, + { + name: "one store, different rule", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[1], false), + } + }, + expectedStores: 1, + expectedRules: 0, + }, + { + name: "two stores, one rule", + storeSpecs: func() 
[]StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + NewGRPCStoreSpec(stores.orderAddrs[1], false), + } + }, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedStores: 2, + expectedRules: 1, + }, + { + name: "two stores, two rules", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + NewGRPCStoreSpec(stores.orderAddrs[1], false), + } + }, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + NewGRPCStoreSpec(stores.orderAddrs[1], false), + } + }, + exemplarSpecs: func() []ExemplarSpec { + return []ExemplarSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + NewGRPCStoreSpec(stores.orderAddrs[1], false), + } + }, + expectedStores: 2, + expectedRules: 2, + }, + } { + storeSet := NewStoreSet(nil, nil, 0, + tc.storeSpecs, + tc.ruleSpecs, + func() []TargetSpec { return nil }, + func() []MetadataSpec { return nil }, + tc.exemplarSpecs, + testGRPCOpts, time.Minute) + + t.Run(tc.name, func(t *testing.T) { + defer storeSet.Close() + storeSet.Update(context.Background()) + testutil.Equals(t, tc.expectedStores, len(storeSet.stores)) + + gotRules := 0 + for _, ref := range storeSet.stores { + if ref.HasRulesAPI() { + gotRules += 1 + } + } + + testutil.Equals(t, tc.expectedRules, gotRules) + }) + } +} + +func TestStoreSet_Rules_Discovery(t *testing.T) { + stores, err := startTestStores([]testStoreMeta{ + { + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{} + }, + storeType: component.Sidecar, + }, + { + extlsetFn: func(addr string) []labelpb.ZLabelSet { + return []labelpb.ZLabelSet{} + }, + storeType: component.Rule, + }, + }) + testutil.Ok(t, err) + defer stores.Close() + + type discoveryState struct { + name string + storeSpecs func() []StoreSpec + ruleSpecs func() []RuleSpec + expectedStores int + expectedRules int + } + + for _, tc := range []struct { + states []discoveryState + name string + }{ + { + name: "StoreAPI and RulesAPI concurrent discovery", + states: []discoveryState{ + { + name: "no stores", + storeSpecs: nil, + ruleSpecs: nil, + expectedRules: 0, + expectedStores: 0, + }, + { + name: "RulesAPI discovered", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedRules: 1, + expectedStores: 1, + }, + }, + }, + + { + name: "StoreAPI discovery first, eventually discovered RulesAPI", + states: []discoveryState{ + { + name: "no stores", + storeSpecs: nil, + ruleSpecs: nil, + expectedRules: 0, + expectedStores: 0, + }, + { + name: "StoreAPI discovered, no RulesAPI discovered", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedStores: 1, + expectedRules: 0, + }, + { + name: "RulesAPI discovered", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedStores: 1, + expectedRules: 1, + }, + }, + }, + + { + name: "RulesAPI discovery first, eventually discovered StoreAPI", + states: []discoveryState{ + { + name: "no stores", + storeSpecs: nil, + ruleSpecs: nil, + expectedRules: 0, + expectedStores: 0, + }, + { + name: 
"RulesAPI discovered, no StoreAPI discovered", + storeSpecs: nil, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedStores: 0, + expectedRules: 0, + }, + { + name: "StoreAPI discovered", + storeSpecs: func() []StoreSpec { + return []StoreSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + ruleSpecs: func() []RuleSpec { + return []RuleSpec{ + NewGRPCStoreSpec(stores.orderAddrs[0], false), + } + }, + expectedStores: 1, + expectedRules: 1, + }, + }, + }, + } { + t.Run(tc.name, func(t *testing.T) { + currentState := 0 + + storeSet := NewStoreSet(nil, nil, 0, + func() []StoreSpec { + if tc.states[currentState].storeSpecs == nil { + return nil + } + + return tc.states[currentState].storeSpecs() + }, + func() []RuleSpec { + if tc.states[currentState].ruleSpecs == nil { + return nil + } + + return tc.states[currentState].ruleSpecs() + }, + func() []TargetSpec { return nil }, + func() []MetadataSpec { + return nil + }, + func() []ExemplarSpec { return nil }, + testGRPCOpts, time.Minute) + + defer storeSet.Close() + + for { + storeSet.Update(context.Background()) + testutil.Equals( + t, + tc.states[currentState].expectedStores, + len(storeSet.stores), + "unexepected discovered stores in state %q", + tc.states[currentState].name, + ) + + gotRules := 0 + for _, ref := range storeSet.stores { + if ref.HasRulesAPI() { + gotRules += 1 + } + } + testutil.Equals( + t, + tc.states[currentState].expectedRules, + gotRules, + "unexpected discovered rules in state %q", + tc.states[currentState].name, + ) + + currentState = currentState + 1 + if len(tc.states) == currentState { + break + } + } + }) + } +} + +type errThatMarshalsToEmptyDict struct { + msg string +} + +// MarshalJSON marshals the error and returns and empty dict, not the error string. +func (e *errThatMarshalsToEmptyDict) MarshalJSON() ([]byte, error) { + return json.Marshal(map[string]string{}) +} + +// Error returns the original, underlying string. +func (e *errThatMarshalsToEmptyDict) Error() string { + return e.msg +} + +// Test highlights that without wrapping the error, it is marshaled to empty dict {}, not its message. +func TestStringError(t *testing.T) { + dictErr := &errThatMarshalsToEmptyDict{msg: "Error message"} + stringErr := &stringError{originalErr: dictErr} + + storestatusMock := map[string]error{} + storestatusMock["dictErr"] = dictErr + storestatusMock["stringErr"] = stringErr + + b, err := json.Marshal(storestatusMock) + + testutil.Ok(t, err) + testutil.Equals(t, []byte(`{"dictErr":{},"stringErr":"Error message"}`), b, "expected to get proper results") +} + +// Errors that usually marshal to empty dict should return the original error string. 
+func TestUpdateStoreStateLastError(t *testing.T) { + tcs := []struct { + InputError error + ExpectedLastErr string + }{ + {errors.New("normal_err"), `"normal_err"`}, + {nil, `null`}, + {&errThatMarshalsToEmptyDict{"the error message"}, `"the error message"`}, + } + + for _, tc := range tcs { + mockStoreSet := &StoreSet{ + storeStatuses: map[string]*StoreStatus{}, + } + mockStoreRef := &storeRef{ + addr: "mockedStore", + } + + mockStoreSet.updateStoreStatus(mockStoreRef, tc.InputError) + + b, err := json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) + testutil.Ok(t, err) + testutil.Equals(t, tc.ExpectedLastErr, string(b)) + } +} + +func TestUpdateStoreStateForgetsPreviousErrors(t *testing.T) { + mockStoreSet := &StoreSet{ + storeStatuses: map[string]*StoreStatus{}, + } + mockStoreRef := &storeRef{ + addr: "mockedStore", + } + + mockStoreSet.updateStoreStatus(mockStoreRef, errors.New("test err")) + + b, err := json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) + testutil.Ok(t, err) + testutil.Equals(t, `"test err"`, string(b)) + + // updating status without and error should clear the previous one. + mockStoreSet.updateStoreStatus(mockStoreRef, nil) + + b, err = json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) + testutil.Ok(t, err) + testutil.Equals(t, `null`, string(b)) +} diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 94fe4cb890..8de59c592d 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -245,7 +245,10 @@ func TestQueryWithEndpointConfig(t *testing.T) { endpointConfig := []store.Config{ { - Endpoints: []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}, + Endpoints: []string{sidecar1.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}, + }, + { + Endpoints: []string{sidecar2.InternalEndpoint("grpc")}, EndpointsSD: []file.SDConfig{ { Files: []string{fileSDPath}, From a464c53a60d91fb910a9a25b1fdfb13adb462f46 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Thu, 22 Jul 2021 03:12:10 +0530 Subject: [PATCH 11/29] test mTLS in querier (failing) Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 2 +- pkg/extgrpc/client.go | 4 +- test/e2e/certs/myclient.crt | 24 ++++++++++ test/e2e/certs/myclient.key | 27 +++++++++++ test/e2e/certs/myserver.crt | 24 ++++++++++ test/e2e/certs/myserver.key | 27 +++++++++++ test/e2e/e2ethanos/services.go | 8 ++++ test/e2e/query_test.go | 82 ++++++++++++++++++++++++++++++++-- 8 files changed, 192 insertions(+), 6 deletions(-) create mode 100644 test/e2e/certs/myclient.crt create mode 100644 test/e2e/certs/myclient.key create mode 100644 test/e2e/certs/myserver.crt create mode 100644 test/e2e/certs/myserver.key diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 015ee8bc9f..e9fbe78e11 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -377,7 +377,7 @@ func runQuery( // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. var TLSConfig store.TLSConfiguration - if secure { + if secure && len(endpointConfigYAML) == 0 { TLSConfig.CertFile = cert TLSConfig.KeyFile = key TLSConfig.CaCertFile = caCert diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index c0a3317d74..adc4b717aa 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -51,8 +51,8 @@ func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer ope reg.MustRegister(grpcMets) } - // If secure is false or no TLS config is supplied. 
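// For illustration only (separate from the hunk around it): the "||" -> "&&"
// change below means an insecure, plaintext dial is chosen only when BOTH the
// secure flag is off AND the per-config TLS configuration is empty; before the
// change, secure=false alone forced plaintext even when a per-endpoint TLS
// configuration was supplied. A self-contained sketch, with tlsCfg standing in
// for store.TLSConfiguration:
package main

import "fmt"

type tlsCfg struct{ CertFile, KeyFile, CaCertFile, ServerName string }

func useInsecure(secure bool, cfg tlsCfg) bool {
	// Plaintext only when neither the flag nor the endpoint config asks for TLS.
	return !secure && cfg == (tlsCfg{})
}

func main() {
	fmt.Println(useInsecure(false, tlsCfg{}))                       // true: nothing requested TLS
	fmt.Println(useInsecure(false, tlsCfg{CertFile: "client.crt"})) // false: per-config TLS wins
	fmt.Println(useInsecure(true, tlsCfg{}))                        // false: the flag requests TLS
}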
- if !secure || (tlsConfig == store.TLSConfiguration{}) { + // Insecure if secure is false and no TLS config is supplied. + if !secure && (tlsConfig == store.TLSConfiguration{}) { return append(dialOpts, grpc.WithInsecure()), nil } diff --git a/test/e2e/certs/myclient.crt b/test/e2e/certs/myclient.crt new file mode 100644 index 0000000000..ce815123e2 --- /dev/null +++ b/test/e2e/certs/myclient.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIUfaBBMHE+q+AytBzvHJecI2fX970wDQYJKoZIhvcNAQEL +BQAwgZAxCzAJBgNVBAYTAklOMRcwFQYDVQQIDA5NYWRoeWEgUHJhZGVzaDEQMA4G +A1UEBwwHR3dhbGlvcjENMAsGA1UECgwEQ05DRjEPMA0GA1UECwwGVGhhbm9zMQ4w +DAYDVQQDDAVOYW1hbjEmMCQGCSqGSIb3DQEJARYXbmFtYW5sYWtod2FuaUBnbWFp +bC5jb20wHhcNMjEwNzIwMTk1MDA4WhcNMzEwNzE4MTk1MDA4WjCBkDELMAkGA1UE +BhMCSU4xFzAVBgNVBAgMDk1hZGh5YSBQcmFkZXNoMRAwDgYDVQQHDAdHd2FsaW9y +MQ0wCwYDVQQKDARDTkNGMQ8wDQYDVQQLDAZUaGFub3MxDjAMBgNVBAMMBU5hbWFu +MSYwJAYJKoZIhvcNAQkBFhduYW1hbmxha2h3YW5pQGdtYWlsLmNvbTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBANrrZ3UPX0+Qg1mztix/p0JTZRmTo6wP +ij2MuKcMjIvdJcXM7JuKDdVPbm+WARoCjXZYWN0Za40n+n1E01paS586PCV5QeIq +aZNweNLbD2Hgr2OyWkWpl83EjXGgjEoJqRAqBsNquBl2krIiwd1992YeDsTwxdDk +RJmKvE4+n+OgCDF1oJhrBS0UoHyc7k/s/BpgbNQCOCVKNkVgz16Is6hI66ppf/36 +MBVuNuZMEVrK5agrYorNAhb4us3xxdKmaE+Ym7OKco6+FfpbKNPinV66FqnLruBP +yH9meyz7AAd9/YIfrhB4/85n+jgaGMSI0CSsapRT7qBnVjph9LXIg+UCAwEAAaNT +MFEwHQYDVR0OBBYEFBAFb0wjzh2IDD+t9W8kOMgr08nOMB8GA1UdIwQYMBaAFBAF +b0wjzh2IDD+t9W8kOMgr08nOMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBALMHiVUxV5WZ5paGcJqAnSgyAicqNVRcQT/RQpDbKWsDK51Etq7165lc +/C64ZRxyPP10AQpULP3Lbx3XboO7b3r2fo/AWvTvBSbzOaxxqbE/+veLNx2/7LFu +PqStDlhdRAkXyLM90Z9IBx8eRtlDjnx9/VwXpcEWVL1GEhAs0XcagqIzgo41CuxA +ne82rI30i1FM3Q6LZw0cTaeNMXfw+1n+yilryHX6Z6YrlDn2vMEXTB2A6QuAMn5u +WKlSDgLM95xxv4R4YopYnPGKbL4E9DijNlRE3Tdd/QIG7DGocAHcCBoN4/HltNq4 +V75cFtW4eFoa+I8cd6ZnEuI0Ty5TsOI= +-----END CERTIFICATE----- diff --git a/test/e2e/certs/myclient.key b/test/e2e/certs/myclient.key new file mode 100644 index 0000000000..6a71bec83c --- /dev/null +++ b/test/e2e/certs/myclient.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA2utndQ9fT5CDWbO2LH+nQlNlGZOjrA+KPYy4pwyMi90lxczs +m4oN1U9ub5YBGgKNdlhY3RlrjSf6fUTTWlpLnzo8JXlB4ippk3B40tsPYeCvY7Ja +RamXzcSNcaCMSgmpECoGw2q4GXaSsiLB3X33Zh4OxPDF0OREmYq8Tj6f46AIMXWg +mGsFLRSgfJzuT+z8GmBs1AI4JUo2RWDPXoizqEjrqml//fowFW425kwRWsrlqCti +is0CFvi6zfHF0qZoT5ibs4pyjr4V+lso0+KdXroWqcuu4E/If2Z7LPsAB339gh+u +EHj/zmf6OBoYxIjQJKxqlFPuoGdWOmH0tciD5QIDAQABAoIBAEDfs6fn50mvvIzv +iZPEPG+WQQaETuNlM4Ur12iBeoDUByaHpLIvBgxpXoOowyjPCtbANY1HxCxvZXaL +18oVqgVEf6BnP5PjF6g+kz+A9Rz/NVpo9wFf0YGDtpquhgRGupf0rE5qqKW7EpvB +WwvlEVRsodnQs/5ENuP9TfIt8rnEHV8wwxcKBJIzF8F8icC0tBY0FeK7RZ9AcI9A +cewLApJ4QB/AY0kXMxsYDsEM1rZC6RA/3CjwQWhxrp+lIX2J9bS3zQqIIesIBSW/ +wVenTL0Vpz5Rsgl63LeYtTqvBsb61Z4nSsQCJ5GWYpk5QjF9XpipDDQyIArdhi8c +pllGhj0CgYEA9iT1PXVPvdFVt3VR4Y6RG5kZ4Azv1ahCi/cww4oxWfNkjH0hO3uC +tMDRvmXdzBa9Yu2uDewlZpu/860bdaMH/r/Fdk1tlpCQGYtrs0sen7Ue3rTFqf83 +u+PNuPtreosjqemyIucDZtaLEQbWy9BfrZyitFfkeytludxZqhSuvKMCgYEA469i +hvelN1phGOw9mfd2oAxTetphe55rCzvdfM12GFHLiwF1uwGH8WhQyK8ZWDg73bgQ +rq3cXmbXgobhFLBHThh4yhKDP4ZpkIggcNYRN3w0JbNyIUHISOT5HlCPdm6pikXc +CGw9CnO7Kd3WoD0MLHuto97yclmUwaZSkFWp/dcCgYEAgn/RrcXWgzLLGTsQMF/f +uxwOxeTV+xb8QSzLI8DOrXwHYRyuU9UF1UMtcJ6dWyjrO9n7n9IFekM1H8I1fsby +5DNUQ4aFhYAbhg+PCD3ZfJ8QQM7ixWBUcj8ywNI7h3rha6JFGZAVE57HmD1iACj3 +sRoMgUTgFBVMF6AlwbC5e7UCgYEAsC8yJdCSJ2AnjJqaHH9Sawy+uG0uS/NNT9cW +UBHJVY5N3BXYHUpVAKhBAtoD2bFCGhLpzdG5mc8o6tcmatTxiGwFZBCpQUnofC/q 
+MoZjsjTJQXc3VKbLriSI5T1fljyRsu7WEip3nZPqe74u67XxqqZVul217GiHZMYo +U4oGTecCgYB26v5vWVe7sSRK/nwVHMoQ2btqQ96GV3GYid48X3JN8ukXBgWzmdSl +ai8MilhjwoQ39EDBZ8FGWCh4Hcc69Bg78KrYrdzlLNe89N+nzFVD3raEwAKNdd+V +69GFJAGS41jQ27Gyqq7sNn2xtquO8OSVDQ2OhmcRs/rww2xdwxixGw== +-----END RSA PRIVATE KEY----- diff --git a/test/e2e/certs/myserver.crt b/test/e2e/certs/myserver.crt new file mode 100644 index 0000000000..f93503b852 --- /dev/null +++ b/test/e2e/certs/myserver.crt @@ -0,0 +1,24 @@ +-----BEGIN CERTIFICATE----- +MIIEAzCCAuugAwIBAgIUayp5ZpT4Cc5P5lezIAY9ThGqU+QwDQYJKoZIhvcNAQEL +BQAwgZAxCzAJBgNVBAYTAklOMRcwFQYDVQQIDA5NYWRoeWEgUHJhZGVzaDEQMA4G +A1UEBwwHR3dhbGlvcjENMAsGA1UECgwEQ05DRjEPMA0GA1UECwwGVGhhbm9zMQ4w +DAYDVQQDDAVOYW1hbjEmMCQGCSqGSIb3DQEJARYXbmFtYW5sYWtod2FuaUBnbWFp +bC5jb20wHhcNMjEwNzIwMTk0NjQ0WhcNMzEwNzE4MTk0NjQ0WjCBkDELMAkGA1UE +BhMCSU4xFzAVBgNVBAgMDk1hZGh5YSBQcmFkZXNoMRAwDgYDVQQHDAdHd2FsaW9y +MQ0wCwYDVQQKDARDTkNGMQ8wDQYDVQQLDAZUaGFub3MxDjAMBgNVBAMMBU5hbWFu +MSYwJAYJKoZIhvcNAQkBFhduYW1hbmxha2h3YW5pQGdtYWlsLmNvbTCCASIwDQYJ +KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKcx82TLwSjsUjzl23a4EkD7xLC1h3s +vjV4onjiNa9IwzmxRhLGFSuWxFM/euFe7DBhHp6Fw89NYzBPv/3pvZ8myZn3C7Nl +5Zrn1r3DHITiGG4cUXRruHA6krxEW9w5uOhGY2Uwugc1cYUGMc0HwSj5wRCpA1R3 +a989UIPWXwmqDYRJ7F35eMNw43Qq2XV/4lTT5k1isH77ZRSA7nVMPwxhk5C7Ov7x +42yoEUPlS57jfgYehIBQGx/3XONkzjx2EBzfrKzJM4R2ndFLmuY+3ZNGCUgyvCK/ +VbvE7ZR2PpoLxyTdeanWv0emVXWQQNuXqG1V3Ev0lyLgeW8Sl3iAnFsCAwEAAaNT +MFEwHQYDVR0OBBYEFBHcDA14a1mr1gnM7OnjwW6YU5muMB8GA1UdIwQYMBaAFBHc +DA14a1mr1gnM7OnjwW6YU5muMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL +BQADggEBAFHNNQm1AeAQ02iayHLmMCH4nOZH7iJlnasfwrKtNUd8a0jqAILz8NCc +8+908UufoSc2Pnaw6P/Q+F01vDbdQLBGnem0P+/gYtfZmbhtsnvy4n6uOeSYW8vH +zYMwY7Pr8Ekvo+3699xN/+0F6XGtK/nB/qSugf1wg0d40M8L2i6iR96ufjYN5o2o +kQCGdniBVw22lx61tmrrY/rlO7f0CKSWYh8GvIcE/8bRt8hVo5A/Swb7wMrQffQC +Ua+7e6ibcoW5T9ZwqZ+8gRzc+pu9H6at0/AQ54JdK5pAZzxXCirsiUiVTeiajg9+ +i7hV1xAc+rI05bb54497RRChYVTNLjk= +-----END CERTIFICATE----- diff --git a/test/e2e/certs/myserver.key b/test/e2e/certs/myserver.key new file mode 100644 index 0000000000..2bd3a8cb5f --- /dev/null +++ b/test/e2e/certs/myserver.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAwpzHzZMvBKOxSPOXbdrgSQPvEsLWHey+NXiieOI1r0jDObFG +EsYVK5bEUz964V7sMGEenoXDz01jME+//em9nybJmfcLs2XlmufWvcMchOIYbhxR +dGu4cDqSvERb3Dm46EZjZTC6BzVxhQYxzQfBKPnBEKkDVHdr3z1Qg9ZfCaoNhEns +Xfl4w3DjdCrZdX/iVNPmTWKwfvtlFIDudUw/DGGTkLs6/vHjbKgRQ+VLnuN+Bh6E +gFAbH/dc42TOPHYQHN+srMkzhHad0Uua5j7dk0YJSDK8Ir9Vu8TtlHY+mgvHJN15 +qda/R6ZVdZBA25eobVXcS/SXIuB5bxKXeICcWwIDAQABAoIBAHPhBLusyw9ZJOQK +X32DGY+Abyddqj7xS6TtKtkN4Od5TP2aR94/4jUtNcNYBiQR8FMyURgVEC3sp4YJ +xok7V7RfVSV/S5FLuDbPmzcs6VtagS93Xy0SQyVYSy0dCxGLsUVlPCPxqqNGOEz8 +DkVGO8bZSBNSf/AYl7u6d6m4yKgodXQAKjxFeLYDwJKshq4nbG3QzW0DcwgDCBTZ +3PcPcBRJWblfSgA9cN3XubR3rHARsd515hCAhSRDWh119DXyQ1G5AcCo1xgLdnnb +ZlmVjzbEpMftr4Ixkla+5R3Y7np6KJ89Dq71Q1zlQ0y4LsJZnmoQUStDv/qa78iJ +OUjoBgkCgYEA9rqGvZ2jjtEpEYB6esO3yHLRdw2F6ssu2G2PrjzNxOwyuPCWXEMX +IylCF7xSYs5MboMQU4Tp49Rr5AUhiQv4L/U/rzzFZA6+TLghFlnKeAe6DOnMSJ8h +iNxPPmQZ5brmnfJ0AXzezUP6JLnkJWv600/o9ziTDQjl4+XHElawhF8CgYEAyezo +edtHyIF2DzIJaN1rbWtXBUiAxx91ZxUaKWQ92e02uDtpW0AIo6BBKxCMj0uowQLb +W/SbCOcNSkWofYJ6eBcDJr/hwFN92TvzAqFAT0pnlgZ9cSWMdvOQtqFBZvJ+YSPW +VkdhYRxhg5G8chEi32qly/jxnRxXUSmWc8QriYUCgYEA5bCCDXX6tNMNK4jy/OTF +bCtPy0hgmlNrGfrtHqTmXuQQ2FDJebzPhmE7cUNVYzxwtRT4lvgOkZXIly8bqCzn +cSYcknO9w0dia6Oi0d7neVSgJLNnBVh3bKTEdO3VmiOj2/jBfD0WYftdnOEdfqUG +jcA/vh8B7smQ0tevuufPTnsCgYAHB/LIW3RSbP+ZY9qkBiG5e6VaD748MMI7xqZU +jqMAtZvUKm1uufoTCH98amUakD3eCqVsvEWxt6nkziwhwqtVByga70+DDOUy7T12 
+9/pvSF33AV7Y07/iXHTLhy3p0cPIyiCqfG3NndlfZXG0XxhhwyrmwVJYcCYyM0qD +2W7SqQKBgDgv7e5M4SJLUe54zO3S8aq940wVZl8PcA5G5WLbbEc05I6REYGLPlRC +tM704m6Fqkcj9Osdg+qSM+G9ViezDKvMahPJFe6wQ9Gyw166pK5dT5Y/v7r0vE4o +sxfosMl/oJKvwhE9pz5IOkxSS/sT6W0etMuaDOeSvk0pWGuk+No1 +-----END RSA PRIVATE KEY----- diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 843aba33bf..475efbdc2b 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -146,6 +146,7 @@ type QuerierBuilder struct { metadataAddresses []string targetAddresses []string exemplarAddresses []string + mutualTLSConfig []string endpointConfig []store.Config @@ -257,6 +258,11 @@ func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []store.Config) *Quer return q } +func (q *QuerierBuilder) WithMutualTLS(mutualTLSConfig []string) *QuerierBuilder { + q.mutualTLSConfig = mutualTLSConfig + return q +} + func (q *QuerierBuilder) Build() (*Service, error) { const replicaLabel = "replica" @@ -337,6 +343,8 @@ func (q *QuerierBuilder) Build() (*Service, error) { args = append(args, "--endpoint.config="+string(endpointCfgBytes)) } + args = append(args, q.mutualTLSConfig...) + querier := NewService( fmt.Sprintf("querier-%v", q.name), DefaultImage(), diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 8de59c592d..74d67fa4be 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -6,6 +6,7 @@ package e2e_test import ( "context" "fmt" + "io" "io/ioutil" "net/http/httptest" "net/url" @@ -13,6 +14,7 @@ import ( "path/filepath" "sort" "strings" + "syscall" "testing" "time" @@ -149,11 +151,23 @@ func TestQuery(t *testing.T) { fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) testutil.Ok(t, err) + queryFileSDDir := filepath.Join(s.SharedDir(), "data", "querier", "1") + container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") + testutil.Ok(t, cpyDir("./certs", queryFileSDDir)) + + args := e2e.BuildArgs(map[string]string{ + "--grpc-client-tls-cert": filepath.Join(container, "myclient.crt"), + "--grpc-client-tls-key": filepath.Join(container, "myclient.key"), + "--grpc-server-tls-cert": filepath.Join(container, "myserver.crt"), + "--grpc-server-tls-key": filepath.Join(container, "myserver.key"), + }) + + args = append(args, "--grpc-client-tls-secure") + args = append(args, "--grpc-client-tls-skip-verify") // As the certs are self-signed. + // Querier. Both fileSD and directly by flags. - q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")). - WithFileSDStoreAddresses(sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")).Build() q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}). 
- WithFileSDStoreAddresses(fileSDPath).Build() + WithFileSDStoreAddresses(fileSDPath).WithMutualTLS(args).Build() testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(q)) @@ -857,3 +871,65 @@ func queryExemplars(t *testing.T, ctx context.Context, addr, q string, start, en return nil })) } + +func cpyDir(scrDir, dest string) error { + entries, err := ioutil.ReadDir(scrDir) + if err != nil { + return err + } + for _, entry := range entries { + sourcePath := filepath.Join(scrDir, entry.Name()) + destPath := filepath.Join(dest, entry.Name()) + + fileInfo, err := os.Stat(sourcePath) + if err != nil { + return err + } + + stat, ok := fileInfo.Sys().(*syscall.Stat_t) + if !ok { + return fmt.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath) + } + + switch fileInfo.Mode() & os.ModeType { + case os.ModeDir: + if err := os.MkdirAll(destPath, 0755); err != nil { + return err + } + if err := cpyDir(sourcePath, destPath); err != nil { + return err + } + case os.ModeSymlink: + return errors.New("symlink copy is not supported") + default: + if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { + return err + } + if err := cpyFile(sourcePath, destPath); err != nil { + return err + } + } + if err := os.Lchown(destPath, int(stat.Uid), int(stat.Gid)); err != nil { + return err + } + } + return nil +} + +func cpyFile(srcFile, dstFile string) (err error) { + out, err := os.Create(dstFile) + if err != nil { + return err + } + + defer runutil.CloseWithErrCapture(&err, out, "close dst") + + in, err := os.Open(srcFile) + defer runutil.CloseWithErrCapture(&err, in, "close src") + if err != nil { + return err + } + + _, err = io.Copy(out, in) + return err +} From 593401cbda1c6c8f99e66d3ca78c80f271a536ba Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Sat, 24 Jul 2021 04:31:06 +0530 Subject: [PATCH 12/29] Added name to per-endpoint yaml config Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 9 +++++---- cmd/thanos/receive.go | 2 +- pkg/extgrpc/client.go | 7 +++++-- pkg/query/storeset.go | 7 +++++-- pkg/query/storeset_test.go | 10 +++++----- pkg/store/config.go | 1 + test/e2e/query_test.go | 2 ++ 7 files changed, 24 insertions(+), 14 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index e9fbe78e11..bcc3229340 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -423,17 +423,17 @@ func runQuery( ) var storeSets []*query.EndpointSet - for instance, config := range endpointConfig { - dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, instance, secure, skipVerify, config.TLSConfig) + fileSDCache := cache.New() + for _, config := range endpointConfig { + dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, config.Name, secure, skipVerify, config.TLSConfig) if err != nil { return errors.Wrap(err, "building gRPC client") } - fileSDCache := cache.New() dnsStoreProvider := dns.NewProvider( logger, extprom.WrapRegistererWith( - map[string]string{"config_instance": string(rune(instance))}, + map[string]string{"config_name": config.Name}, extprom.WrapRegistererWithPrefix("thanos_querier_store_apis_", reg), ), dns.ResolverType(dnsSDResolver), @@ -453,6 +453,7 @@ func runQuery( endpoints := query.NewEndpointSet( logger, reg, + config.Name, func() (specs []query.EndpointSpec) { specs = spec diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 7035d7bea5..c5b6f2023e 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -130,7 +130,7 @@ func runReceive( logger, reg, tracer, - 0, + "", *conf.grpcCert != "", 
*conf.grpcClientCA == "", TLSConfig, diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index adc4b717aa..eea2cb875a 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -21,8 +21,11 @@ import ( ) // StoreClientGRPCOpts creates gRPC dial options for connecting to a store client. -func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, instance int, secure, skipVerify bool, tlsConfig store.TLSConfiguration) ([]grpc.DialOption, error) { - constLabels := map[string]string{"config_instance": string(rune(instance))} +func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, clientInstance string, secure, skipVerify bool, tlsConfig store.TLSConfiguration) ([]grpc.DialOption, error) { + if clientInstance == "" { + clientInstance = "default" + } + constLabels := map[string]string{"config_name": clientInstance} grpcMets := grpc_prometheus.NewClientMetrics(grpc_prometheus.WithConstLabels(constLabels)) grpcMets.EnableClientHandlingTimeHistogram( grpc_prometheus.WithHistogramConstLabels(constLabels), diff --git a/pkg/query/storeset.go b/pkg/query/storeset.go index 89ce65c8d5..8d5bdc82be 100644 --- a/pkg/query/storeset.go +++ b/pkg/query/storeset.go @@ -223,7 +223,7 @@ type StoreSet struct { func NewStoreSet( logger log.Logger, reg *prometheus.Registry, - instance int, + configInstance string, storeSpecs func() []StoreSpec, ruleSpecs func() []RuleSpec, targetSpecs func() []TargetSpec, @@ -232,7 +232,10 @@ func NewStoreSet( dialOpts []grpc.DialOption, unhealthyStoreTimeout time.Duration, ) *StoreSet { - storesMetric := newStoreSetNodeCollector(string(rune(instance))) + if configInstance == "" { + configInstance = "default" + } + storesMetric := newStoreSetNodeCollector(configInstance) if reg != nil { reg.MustRegister(storesMetric) } diff --git a/pkg/query/storeset_test.go b/pkg/query/storeset_test.go index 60f8bc0f2c..a67f23fc26 100644 --- a/pkg/query/storeset_test.go +++ b/pkg/query/storeset_test.go @@ -186,7 +186,7 @@ func TestStoreSet_Update(t *testing.T) { // Testing if duplicates can cause weird results. 
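The config_name constant label added in this patch is what keeps otherwise identical metrics apart when every endpoint config registers the same collectors against one registry. A rough, self-contained illustration of that pattern with client_golang; the metric name and label values below are made up:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()

	// One wrapped registerer per endpoint config; the constant "config_name"
	// label is what prevents the two registrations from colliding, which is
	// roughly what the extprom.WrapRegistererWith calls above do.
	for _, name := range []string{"default", "withTLS"} {
		wrapped := prometheus.WrapRegistererWith(prometheus.Labels{"config_name": name}, reg)
		c := prometheus.NewCounter(prometheus.CounterOpts{
			Name: "example_endpoint_updates_total",
			Help: "Illustrative counter registered once per endpoint config.",
		})
		wrapped.MustRegister(c)
		c.Inc()
	}

	mfs, err := reg.Gather()
	if err != nil {
		panic(err)
	}
	for _, mf := range mfs {
		for _, m := range mf.GetMetric() {
			fmt.Printf("%s{config_name=%q} %v\n", mf.GetName(), m.GetLabel()[0].GetValue(), m.GetCounter().GetValue())
		}
	}
}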
discoveredStoreAddr = append(discoveredStoreAddr, discoveredStoreAddr[0]) - storeSet := NewStoreSet(nil, nil, 0, + storeSet := NewStoreSet(nil, nil, "", func() (specs []StoreSpec) { for _, addr := range discoveredStoreAddr { specs = append(specs, NewGRPCStoreSpec(addr, false)) @@ -546,7 +546,7 @@ func TestStoreSet_Update_NoneAvailable(t *testing.T) { st.CloseOne(initialStoreAddr[0]) st.CloseOne(initialStoreAddr[1]) - storeSet := NewStoreSet(nil, nil, 0, + storeSet := NewStoreSet(nil, nil, "", func() (specs []StoreSpec) { for _, addr := range initialStoreAddr { specs = append(specs, NewGRPCStoreSpec(addr, false)) @@ -634,7 +634,7 @@ func TestQuerierStrict(t *testing.T) { staticStoreAddr := st.StoreAddresses()[0] slowStaticStoreAddr := st.StoreAddresses()[2] - storeSet := NewStoreSet(nil, nil, 0, func() (specs []StoreSpec) { + storeSet := NewStoreSet(nil, nil, "", func() (specs []StoreSpec) { return []StoreSpec{ NewGRPCStoreSpec(st.StoreAddresses()[0], true), NewGRPCStoreSpec(st.StoreAddresses()[1], false), @@ -796,7 +796,7 @@ func TestStoreSet_Update_Rules(t *testing.T) { expectedRules: 2, }, } { - storeSet := NewStoreSet(nil, nil, 0, + storeSet := NewStoreSet(nil, nil, "", tc.storeSpecs, tc.ruleSpecs, func() []TargetSpec { return nil }, @@ -959,7 +959,7 @@ func TestStoreSet_Rules_Discovery(t *testing.T) { t.Run(tc.name, func(t *testing.T) { currentState := 0 - storeSet := NewStoreSet(nil, nil, 0, + storeSet := NewStoreSet(nil, nil, "", func() []StoreSpec { if tc.states[currentState].storeSpecs == nil { return nil diff --git a/pkg/store/config.go b/pkg/store/config.go index d3739d47e9..0557b570d4 100644 --- a/pkg/store/config.go +++ b/pkg/store/config.go @@ -12,6 +12,7 @@ import ( // Config represents the configuration of a set of Store API endpoints. 
type Config struct { + Name string `yaml:"name"` TLSConfig TLSConfiguration `yaml:"tls_config"` Endpoints []string `yaml:"endpoints"` EndpointsSD []file.SDConfig `yaml:"endpoints_sd_files"` diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 74d67fa4be..7bb4f09aac 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -259,9 +259,11 @@ func TestQueryWithEndpointConfig(t *testing.T) { endpointConfig := []store.Config{ { + Name: "one", Endpoints: []string{sidecar1.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}, }, { + Name: "two", Endpoints: []string{sidecar2.InternalEndpoint("grpc")}, EndpointsSD: []file.SDConfig{ { From 8b0ae4af05cfd2b6d4c66eb9333858691cf29d13 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Mon, 26 Jul 2021 04:44:13 +0530 Subject: [PATCH 13/29] tested with new certs Signed-off-by: Namanl2001 --- test/e2e/certs/create.sh | 71 +++++++++++++++++++ .../certs/e2e_test_query_config_client.crt | 23 ++++++ .../certs/e2e_test_query_config_client.csr | 16 +++++ .../certs/e2e_test_query_config_server.crt | 23 ++++++ .../certs/e2e_test_query_config_server.csr | 16 +++++ test/e2e/certs/myclient.crt | 24 ------- test/e2e/certs/myclient.key | 27 ------- test/e2e/certs/myserver.crt | 24 ------- test/e2e/certs/myserver.key | 27 ------- test/e2e/certs/testca.crt | 19 +++++ test/e2e/certs/testca.key | 28 ++++++++ test/e2e/certs/testca.srl | 1 + test/e2e/certs/testclient.key | 27 +++++++ test/e2e/certs/testserver.key | 27 +++++++ test/e2e/e2ethanos/services.go | 2 +- test/e2e/query_test.go | 41 ++++++----- 16 files changed, 274 insertions(+), 122 deletions(-) create mode 100644 test/e2e/certs/create.sh create mode 100644 test/e2e/certs/e2e_test_query_config_client.crt create mode 100644 test/e2e/certs/e2e_test_query_config_client.csr create mode 100644 test/e2e/certs/e2e_test_query_config_server.crt create mode 100644 test/e2e/certs/e2e_test_query_config_server.csr delete mode 100644 test/e2e/certs/myclient.crt delete mode 100644 test/e2e/certs/myclient.key delete mode 100644 test/e2e/certs/myserver.crt delete mode 100644 test/e2e/certs/myserver.key create mode 100644 test/e2e/certs/testca.crt create mode 100644 test/e2e/certs/testca.key create mode 100644 test/e2e/certs/testca.srl create mode 100644 test/e2e/certs/testclient.key create mode 100644 test/e2e/certs/testserver.key diff --git a/test/e2e/certs/create.sh b/test/e2e/certs/create.sh new file mode 100644 index 0000000000..6c2ee6b5c8 --- /dev/null +++ b/test/e2e/certs/create.sh @@ -0,0 +1,71 @@ +#!/bin/zsh + +openssl req \ + -new \ + -x509 \ + -nodes \ + -days 99999 \ + -subj '/CN=my-ca' \ + -keyout testca.key \ + -out testca.crt + +openssl genrsa -out testserver.key 2048 +openssl genrsa -out testclient.key 2048 + +openssl req \ + -new \ + -key testserver.key \ + -subj "/CN=e2e_test_query_config-sidecar-alone" \ + -out e2e_test_query_config_server.csr + +openssl req \ + -new \ + -key testclient.key \ + -subj "/CN=e2e_test_query_config-querier-1" \ + -out e2e_test_query_config_client.csr + +openssl x509 \ + -req \ + -in e2e_test_query_config_server.csr \ + -CA testca.crt \ + -CAkey testca.key \ + -CAcreateserial \ + -days 99999 \ + -extfile <( + cat <<-EOF +basicConstraints = CA:FALSE +nsCertType = server +nsComment = "OpenSSL Generated Server Certificate" +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer:always +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = serverAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = 
e2e_test_query_config-sidecar-alone +EOF + ) \ + -out e2e_test_query_config_server.crt + +openssl x509 \ + -req \ + -in e2e_test_query_config_client.csr \ + -CA testca.crt \ + -CAkey testca.key \ + -CAcreateserial \ + -days 99999 \ + -extfile <( + cat <<-EOF +basicConstraints = CA:FALSE +nsCertType = client +nsComment = "OpenSSL Generated Client Certificate" +subjectKeyIdentifier = hash +authorityKeyIdentifier = keyid,issuer:always +keyUsage = critical, digitalSignature, keyEncipherment +extendedKeyUsage = clientAuth +subjectAltName = @alt_names +[alt_names] +DNS.1 = e2e_test_query_config-querier-1 +EOF + ) \ + -out e2e_test_query_config_client.crt diff --git a/test/e2e/certs/e2e_test_query_config_client.crt b/test/e2e/certs/e2e_test_query_config_client.crt new file mode 100644 index 0000000000..3137facee9 --- /dev/null +++ b/test/e2e/certs/e2e_test_query_config_client.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID4DCCAsigAwIBAgIUOgft+f/l5XvnweoJJs1E1f8wEOcwDQYJKoZIhvcNAQEL +BQAwEDEOMAwGA1UEAwwFbXktY2EwIBcNMjEwNzI1MjIxOTIwWhgPMjI5NTA1MDky +MjE5MjBaMCoxKDAmBgNVBAMMH2UyZV90ZXN0X3F1ZXJ5X2NvbmZpZy1xdWVyaWVy +LTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCyxR1e50V7nVZ31Cz +3bZUePC1Jf0PkKl5exXysTGsxJIFixUWbZo040VQtR0UqfWqskad5CqppwlvopsK +h2xjskQK7VVmAZ84hKOSjRkrhO4ksPQtOu1+kK4S5uS+IcAv3lag9cdq83c9RUE4 +pbjQvdVvvTRkDWtCAY7GIYDXIkxARXZZbwZoCQGhNW+W+MhHTw6+7shkJC9BPd4o +xI/MuYMNevIHICHqFshaMvXGS6A1vQGKJEONJFVLIEIBTB6zDulhLnMlWi1De9d7 +lhSGxMkJAzu08uUil40j7s+xelZEwok+Qejkiq032osrhqE7c/N92FEShlp9D3Bt +sY/vAgMBAAGjggEUMIIBEDAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIHgDAz +BglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRpZmlj +YXRlMB0GA1UdDgQWBBSEaIDn5IKg9m4D8OIsWL+STQ42OTBLBgNVHSMERDBCgBQc +Gp7wudiNIelAlOPjStRtBAy196EUpBIwEDEOMAwGA1UEAwwFbXktY2GCFBuNAghw +4XEGc2LX51v9/p9mtYMiMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEF +BQcDAjAqBgNVHREEIzAhgh9lMmVfdGVzdF9xdWVyeV9jb25maWctcXVlcmllci0x +MA0GCSqGSIb3DQEBCwUAA4IBAQCFIZPOKWAGL3HXVEyucqw1VNy2uNEiJqm2Pp8W +sbX3HQk+abQB8UhT25cWVYaZB84czUBIzjBcNziZqhUfls4nx6N+5ksdsRa/jtPD +wbXmdNQdU05Vp8K8O22nikrLgMPveqOAZuuxC6DjMg5sqofMPf7cQlMA6ec3a+7x +2jshwyViX1wr08MzhqKwvAWeYsDRRcH9q80PAqOc461GL63j0zPfTEJdeglOAaHQ +n0ocsQ5FXIJVlntwTASuhdtevokwYYW/C5i/B2RBgSz9XN2QnJbLNADyqRLaN8+5 +YiN8Ua2IANfIcFS2GuESUJxdGl3e58gPj0URnddQQXrjCawG +-----END CERTIFICATE----- diff --git a/test/e2e/certs/e2e_test_query_config_client.csr b/test/e2e/certs/e2e_test_query_config_client.csr new file mode 100644 index 0000000000..0f8bddb756 --- /dev/null +++ b/test/e2e/certs/e2e_test_query_config_client.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICbzCCAVcCAQAwKjEoMCYGA1UEAwwfZTJlX3Rlc3RfcXVlcnlfY29uZmlnLXF1 +ZXJpZXItMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMLLFHV7nRXu +dVnfULPdtlR48LUl/Q+QqXl7FfKxMazEkgWLFRZtmjTjRVC1HRSp9aqyRp3kKqmn +CW+imwqHbGOyRArtVWYBnziEo5KNGSuE7iSw9C067X6QrhLm5L4hwC/eVqD1x2rz +dz1FQTiluNC91W+9NGQNa0IBjsYhgNciTEBFdllvBmgJAaE1b5b4yEdPDr7uyGQk +L0E93ijEj8y5gw168gcgIeoWyFoy9cZLoDW9AYokQ40kVUsgQgFMHrMO6WEucyVa +LUN713uWFIbEyQkDO7Ty5SKXjSPuz7F6VkTCiT5B6OSKrTfaiyuGoTtz833YURKG +Wn0PcG2xj+8CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQAToQ8VV1Ur5WEkFBPq +Os8ThCh428M399foOR0f7YrAxKSaNIxH0gZjNO9204zEVHl+dwdtnjI/8BNXl61L +Gb8zX0NS+SLLlCgZG0Z5jzKew9el4AoCCDYi7DjVOvGdM1KEF38gwIiUoIAaB6dT +ts/MCfx6msPLIx4FgyxYL8yx7nwlJ0AJ9hXrhlHUFrtVpOfAkf9i38Jt7YtytHTr +2MZzUlDo4zge/zphtDCPOOpLlBUl27bYo4VDt8xsBgXcOiNpJvKOVrSj3y90Qrnh +tIBauFCxKPZRptcbgVxn4XFP9IjivTODa5HOBPuxI3Mb9bxLisoISzm7ffe1nVeD +6dlT +-----END CERTIFICATE REQUEST----- diff --git 
a/test/e2e/certs/e2e_test_query_config_server.crt b/test/e2e/certs/e2e_test_query_config_server.crt new file mode 100644 index 0000000000..f9764eb484 --- /dev/null +++ b/test/e2e/certs/e2e_test_query_config_server.crt @@ -0,0 +1,23 @@ +-----BEGIN CERTIFICATE----- +MIID6DCCAtCgAwIBAgIUOgft+f/l5XvnweoJJs1E1f8wEOYwDQYJKoZIhvcNAQEL +BQAwEDEOMAwGA1UEAwwFbXktY2EwIBcNMjEwNzI1MjIxNjU5WhgPMjI5NTA1MDky +MjE2NTlaMC4xLDAqBgNVBAMMI2UyZV90ZXN0X3F1ZXJ5X2NvbmZpZy1zaWRlY2Fy +LWFsb25lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzUlmGN13bhFj +i84de+73UPDyB4NRUrdBn4WDkZDcRIagiAbOPIDqYvj4gAQEpxQfl/srs2taoPCm +M2kc5OtwCmoRO8uKMcdNnIAiLByDcpWQusOCPQiw0178y1g/XZ+GU3UkVUGXD8M2 +SI5bbEU/F7HPp/MC5ILLktnCnGUQKqB3S9tAvPbZ01KN/xqHHwo2JzxkWpFu+W90 +ix1VStJoCjZVYwlUt+VH9i7AHX/buDIBIeyDDCI3PLFP+mPfvpvKPzgwSP5VHRK3 +l0+AqFGEo5dF+nB4w/HexMzVz3A5c+HLChDH1qORmtIadzHqyq0CqNLd1Fp0Im7f +F1K4pt3SzwIDAQABo4IBGDCCARQwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMC +BkAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0 +aWZpY2F0ZTAdBgNVHQ4EFgQU88Aco+YLRzKRPKb9eQCwEITbJAQwSwYDVR0jBEQw +QoAUHBqe8LnYjSHpQJTj40rUbQQMtfehFKQSMBAxDjAMBgNVBAMMBW15LWNhghQb +jQIIcOFxBnNi1+db/f6fZrWDIjAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYI +KwYBBQUHAwEwLgYDVR0RBCcwJYIjZTJlX3Rlc3RfcXVlcnlfY29uZmlnLXNpZGVj +YXItYWxvbmUwDQYJKoZIhvcNAQELBQADggEBAAj6NcE+UfxuI+XJhbYAZ/ovMv5c +qatqmmq4MF+vrMVLVScwTS3qNjoN5npz1e+01XBpL7pAJQAKbsgxN+2QotYWxzzv +f9XcWa378+u04FGBXKA46iAyOmfVPcUuNT9aIfEIZc6UP39HXlz1qXLTN8LVtWDv +59yCGOe87x97/R9i2p/qLlXqbW8hdDU5STNBN15oc2yRSQ0b29laGbjZTTXD/74V +kqJS6EC+MKNup5XQEHk4Ngy1x6zTk6jqNY4aj12kzxRpZVFkUDD3JMnN7vVHOu4/ +HZ8bcm6Hm4SUeaXHK75a+GUb2jehEKnt2MYIJC2tLmj7h3G4A54xnh1JFjo= +-----END CERTIFICATE----- diff --git a/test/e2e/certs/e2e_test_query_config_server.csr b/test/e2e/certs/e2e_test_query_config_server.csr new file mode 100644 index 0000000000..5c879ab075 --- /dev/null +++ b/test/e2e/certs/e2e_test_query_config_server.csr @@ -0,0 +1,16 @@ +-----BEGIN CERTIFICATE REQUEST----- +MIICczCCAVsCAQAwLjEsMCoGA1UEAwwjZTJlX3Rlc3RfcXVlcnlfY29uZmlnLXNp +ZGVjYXItYWxvbmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNSWYY +3XduEWOLzh177vdQ8PIHg1FSt0GfhYORkNxEhqCIBs48gOpi+PiABASnFB+X+yuz +a1qg8KYzaRzk63AKahE7y4oxx02cgCIsHINylZC6w4I9CLDTXvzLWD9dn4ZTdSRV +QZcPwzZIjltsRT8Xsc+n8wLkgsuS2cKcZRAqoHdL20C89tnTUo3/GocfCjYnPGRa +kW75b3SLHVVK0mgKNlVjCVS35Uf2LsAdf9u4MgEh7IMMIjc8sU/6Y9++m8o/ODBI +/lUdEreXT4CoUYSjl0X6cHjD8d7EzNXPcDlz4csKEMfWo5Ga0hp3MerKrQKo0t3U +WnQibt8XUrim3dLPAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAvYhHqWREJE40 ++0FFB52LWoig0HASv1ytZCzbaZWSFcV30h2iSSU+gGTk3wdf+eM3xWysIbqPqhOm +xyAfkoGRsFbJO0sZXvB6/GGicUDRQ74pIcP/1LHLcGe+wFUYojHIyczMxX3REvJH +tqhN2xSz0veo7+jZmF64SxTdwClY5/khAPdYdde3vu3KIW4nHDCXF4sFxOB+BFLg +YxBFUQAKAFgRU89tXs2/O3VVYP2PUe6cRv3suL/C7UUW3rxUSfxCiYumG8Wm3xdp +gtqQGRhTxHw6QjZQNywnOY79QQTR/bypplcLayKqEq8bO6KsYXB/jbYi6TsaTBMl +Y1RHz5UQWw== +-----END CERTIFICATE REQUEST----- diff --git a/test/e2e/certs/myclient.crt b/test/e2e/certs/myclient.crt deleted file mode 100644 index ce815123e2..0000000000 --- a/test/e2e/certs/myclient.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEAzCCAuugAwIBAgIUfaBBMHE+q+AytBzvHJecI2fX970wDQYJKoZIhvcNAQEL -BQAwgZAxCzAJBgNVBAYTAklOMRcwFQYDVQQIDA5NYWRoeWEgUHJhZGVzaDEQMA4G -A1UEBwwHR3dhbGlvcjENMAsGA1UECgwEQ05DRjEPMA0GA1UECwwGVGhhbm9zMQ4w -DAYDVQQDDAVOYW1hbjEmMCQGCSqGSIb3DQEJARYXbmFtYW5sYWtod2FuaUBnbWFp -bC5jb20wHhcNMjEwNzIwMTk1MDA4WhcNMzEwNzE4MTk1MDA4WjCBkDELMAkGA1UE -BhMCSU4xFzAVBgNVBAgMDk1hZGh5YSBQcmFkZXNoMRAwDgYDVQQHDAdHd2FsaW9y -MQ0wCwYDVQQKDARDTkNGMQ8wDQYDVQQLDAZUaGFub3MxDjAMBgNVBAMMBU5hbWFu 
-MSYwJAYJKoZIhvcNAQkBFhduYW1hbmxha2h3YW5pQGdtYWlsLmNvbTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBANrrZ3UPX0+Qg1mztix/p0JTZRmTo6wP -ij2MuKcMjIvdJcXM7JuKDdVPbm+WARoCjXZYWN0Za40n+n1E01paS586PCV5QeIq -aZNweNLbD2Hgr2OyWkWpl83EjXGgjEoJqRAqBsNquBl2krIiwd1992YeDsTwxdDk -RJmKvE4+n+OgCDF1oJhrBS0UoHyc7k/s/BpgbNQCOCVKNkVgz16Is6hI66ppf/36 -MBVuNuZMEVrK5agrYorNAhb4us3xxdKmaE+Ym7OKco6+FfpbKNPinV66FqnLruBP -yH9meyz7AAd9/YIfrhB4/85n+jgaGMSI0CSsapRT7qBnVjph9LXIg+UCAwEAAaNT -MFEwHQYDVR0OBBYEFBAFb0wjzh2IDD+t9W8kOMgr08nOMB8GA1UdIwQYMBaAFBAF -b0wjzh2IDD+t9W8kOMgr08nOMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL -BQADggEBALMHiVUxV5WZ5paGcJqAnSgyAicqNVRcQT/RQpDbKWsDK51Etq7165lc -/C64ZRxyPP10AQpULP3Lbx3XboO7b3r2fo/AWvTvBSbzOaxxqbE/+veLNx2/7LFu -PqStDlhdRAkXyLM90Z9IBx8eRtlDjnx9/VwXpcEWVL1GEhAs0XcagqIzgo41CuxA -ne82rI30i1FM3Q6LZw0cTaeNMXfw+1n+yilryHX6Z6YrlDn2vMEXTB2A6QuAMn5u -WKlSDgLM95xxv4R4YopYnPGKbL4E9DijNlRE3Tdd/QIG7DGocAHcCBoN4/HltNq4 -V75cFtW4eFoa+I8cd6ZnEuI0Ty5TsOI= ------END CERTIFICATE----- diff --git a/test/e2e/certs/myclient.key b/test/e2e/certs/myclient.key deleted file mode 100644 index 6a71bec83c..0000000000 --- a/test/e2e/certs/myclient.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA2utndQ9fT5CDWbO2LH+nQlNlGZOjrA+KPYy4pwyMi90lxczs -m4oN1U9ub5YBGgKNdlhY3RlrjSf6fUTTWlpLnzo8JXlB4ippk3B40tsPYeCvY7Ja -RamXzcSNcaCMSgmpECoGw2q4GXaSsiLB3X33Zh4OxPDF0OREmYq8Tj6f46AIMXWg -mGsFLRSgfJzuT+z8GmBs1AI4JUo2RWDPXoizqEjrqml//fowFW425kwRWsrlqCti -is0CFvi6zfHF0qZoT5ibs4pyjr4V+lso0+KdXroWqcuu4E/If2Z7LPsAB339gh+u -EHj/zmf6OBoYxIjQJKxqlFPuoGdWOmH0tciD5QIDAQABAoIBAEDfs6fn50mvvIzv -iZPEPG+WQQaETuNlM4Ur12iBeoDUByaHpLIvBgxpXoOowyjPCtbANY1HxCxvZXaL -18oVqgVEf6BnP5PjF6g+kz+A9Rz/NVpo9wFf0YGDtpquhgRGupf0rE5qqKW7EpvB -WwvlEVRsodnQs/5ENuP9TfIt8rnEHV8wwxcKBJIzF8F8icC0tBY0FeK7RZ9AcI9A -cewLApJ4QB/AY0kXMxsYDsEM1rZC6RA/3CjwQWhxrp+lIX2J9bS3zQqIIesIBSW/ -wVenTL0Vpz5Rsgl63LeYtTqvBsb61Z4nSsQCJ5GWYpk5QjF9XpipDDQyIArdhi8c -pllGhj0CgYEA9iT1PXVPvdFVt3VR4Y6RG5kZ4Azv1ahCi/cww4oxWfNkjH0hO3uC -tMDRvmXdzBa9Yu2uDewlZpu/860bdaMH/r/Fdk1tlpCQGYtrs0sen7Ue3rTFqf83 -u+PNuPtreosjqemyIucDZtaLEQbWy9BfrZyitFfkeytludxZqhSuvKMCgYEA469i -hvelN1phGOw9mfd2oAxTetphe55rCzvdfM12GFHLiwF1uwGH8WhQyK8ZWDg73bgQ -rq3cXmbXgobhFLBHThh4yhKDP4ZpkIggcNYRN3w0JbNyIUHISOT5HlCPdm6pikXc -CGw9CnO7Kd3WoD0MLHuto97yclmUwaZSkFWp/dcCgYEAgn/RrcXWgzLLGTsQMF/f -uxwOxeTV+xb8QSzLI8DOrXwHYRyuU9UF1UMtcJ6dWyjrO9n7n9IFekM1H8I1fsby -5DNUQ4aFhYAbhg+PCD3ZfJ8QQM7ixWBUcj8ywNI7h3rha6JFGZAVE57HmD1iACj3 -sRoMgUTgFBVMF6AlwbC5e7UCgYEAsC8yJdCSJ2AnjJqaHH9Sawy+uG0uS/NNT9cW -UBHJVY5N3BXYHUpVAKhBAtoD2bFCGhLpzdG5mc8o6tcmatTxiGwFZBCpQUnofC/q -MoZjsjTJQXc3VKbLriSI5T1fljyRsu7WEip3nZPqe74u67XxqqZVul217GiHZMYo -U4oGTecCgYB26v5vWVe7sSRK/nwVHMoQ2btqQ96GV3GYid48X3JN8ukXBgWzmdSl -ai8MilhjwoQ39EDBZ8FGWCh4Hcc69Bg78KrYrdzlLNe89N+nzFVD3raEwAKNdd+V -69GFJAGS41jQ27Gyqq7sNn2xtquO8OSVDQ2OhmcRs/rww2xdwxixGw== ------END RSA PRIVATE KEY----- diff --git a/test/e2e/certs/myserver.crt b/test/e2e/certs/myserver.crt deleted file mode 100644 index f93503b852..0000000000 --- a/test/e2e/certs/myserver.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEAzCCAuugAwIBAgIUayp5ZpT4Cc5P5lezIAY9ThGqU+QwDQYJKoZIhvcNAQEL -BQAwgZAxCzAJBgNVBAYTAklOMRcwFQYDVQQIDA5NYWRoeWEgUHJhZGVzaDEQMA4G -A1UEBwwHR3dhbGlvcjENMAsGA1UECgwEQ05DRjEPMA0GA1UECwwGVGhhbm9zMQ4w -DAYDVQQDDAVOYW1hbjEmMCQGCSqGSIb3DQEJARYXbmFtYW5sYWtod2FuaUBnbWFp -bC5jb20wHhcNMjEwNzIwMTk0NjQ0WhcNMzEwNzE4MTk0NjQ0WjCBkDELMAkGA1UE -BhMCSU4xFzAVBgNVBAgMDk1hZGh5YSBQcmFkZXNoMRAwDgYDVQQHDAdHd2FsaW9y 
-MQ0wCwYDVQQKDARDTkNGMQ8wDQYDVQQLDAZUaGFub3MxDjAMBgNVBAMMBU5hbWFu -MSYwJAYJKoZIhvcNAQkBFhduYW1hbmxha2h3YW5pQGdtYWlsLmNvbTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMKcx82TLwSjsUjzl23a4EkD7xLC1h3s -vjV4onjiNa9IwzmxRhLGFSuWxFM/euFe7DBhHp6Fw89NYzBPv/3pvZ8myZn3C7Nl -5Zrn1r3DHITiGG4cUXRruHA6krxEW9w5uOhGY2Uwugc1cYUGMc0HwSj5wRCpA1R3 -a989UIPWXwmqDYRJ7F35eMNw43Qq2XV/4lTT5k1isH77ZRSA7nVMPwxhk5C7Ov7x -42yoEUPlS57jfgYehIBQGx/3XONkzjx2EBzfrKzJM4R2ndFLmuY+3ZNGCUgyvCK/ -VbvE7ZR2PpoLxyTdeanWv0emVXWQQNuXqG1V3Ev0lyLgeW8Sl3iAnFsCAwEAAaNT -MFEwHQYDVR0OBBYEFBHcDA14a1mr1gnM7OnjwW6YU5muMB8GA1UdIwQYMBaAFBHc -DA14a1mr1gnM7OnjwW6YU5muMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEL -BQADggEBAFHNNQm1AeAQ02iayHLmMCH4nOZH7iJlnasfwrKtNUd8a0jqAILz8NCc -8+908UufoSc2Pnaw6P/Q+F01vDbdQLBGnem0P+/gYtfZmbhtsnvy4n6uOeSYW8vH -zYMwY7Pr8Ekvo+3699xN/+0F6XGtK/nB/qSugf1wg0d40M8L2i6iR96ufjYN5o2o -kQCGdniBVw22lx61tmrrY/rlO7f0CKSWYh8GvIcE/8bRt8hVo5A/Swb7wMrQffQC -Ua+7e6ibcoW5T9ZwqZ+8gRzc+pu9H6at0/AQ54JdK5pAZzxXCirsiUiVTeiajg9+ -i7hV1xAc+rI05bb54497RRChYVTNLjk= ------END CERTIFICATE----- diff --git a/test/e2e/certs/myserver.key b/test/e2e/certs/myserver.key deleted file mode 100644 index 2bd3a8cb5f..0000000000 --- a/test/e2e/certs/myserver.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAwpzHzZMvBKOxSPOXbdrgSQPvEsLWHey+NXiieOI1r0jDObFG -EsYVK5bEUz964V7sMGEenoXDz01jME+//em9nybJmfcLs2XlmufWvcMchOIYbhxR -dGu4cDqSvERb3Dm46EZjZTC6BzVxhQYxzQfBKPnBEKkDVHdr3z1Qg9ZfCaoNhEns -Xfl4w3DjdCrZdX/iVNPmTWKwfvtlFIDudUw/DGGTkLs6/vHjbKgRQ+VLnuN+Bh6E -gFAbH/dc42TOPHYQHN+srMkzhHad0Uua5j7dk0YJSDK8Ir9Vu8TtlHY+mgvHJN15 -qda/R6ZVdZBA25eobVXcS/SXIuB5bxKXeICcWwIDAQABAoIBAHPhBLusyw9ZJOQK -X32DGY+Abyddqj7xS6TtKtkN4Od5TP2aR94/4jUtNcNYBiQR8FMyURgVEC3sp4YJ -xok7V7RfVSV/S5FLuDbPmzcs6VtagS93Xy0SQyVYSy0dCxGLsUVlPCPxqqNGOEz8 -DkVGO8bZSBNSf/AYl7u6d6m4yKgodXQAKjxFeLYDwJKshq4nbG3QzW0DcwgDCBTZ -3PcPcBRJWblfSgA9cN3XubR3rHARsd515hCAhSRDWh119DXyQ1G5AcCo1xgLdnnb -ZlmVjzbEpMftr4Ixkla+5R3Y7np6KJ89Dq71Q1zlQ0y4LsJZnmoQUStDv/qa78iJ -OUjoBgkCgYEA9rqGvZ2jjtEpEYB6esO3yHLRdw2F6ssu2G2PrjzNxOwyuPCWXEMX -IylCF7xSYs5MboMQU4Tp49Rr5AUhiQv4L/U/rzzFZA6+TLghFlnKeAe6DOnMSJ8h -iNxPPmQZ5brmnfJ0AXzezUP6JLnkJWv600/o9ziTDQjl4+XHElawhF8CgYEAyezo -edtHyIF2DzIJaN1rbWtXBUiAxx91ZxUaKWQ92e02uDtpW0AIo6BBKxCMj0uowQLb -W/SbCOcNSkWofYJ6eBcDJr/hwFN92TvzAqFAT0pnlgZ9cSWMdvOQtqFBZvJ+YSPW -VkdhYRxhg5G8chEi32qly/jxnRxXUSmWc8QriYUCgYEA5bCCDXX6tNMNK4jy/OTF -bCtPy0hgmlNrGfrtHqTmXuQQ2FDJebzPhmE7cUNVYzxwtRT4lvgOkZXIly8bqCzn -cSYcknO9w0dia6Oi0d7neVSgJLNnBVh3bKTEdO3VmiOj2/jBfD0WYftdnOEdfqUG -jcA/vh8B7smQ0tevuufPTnsCgYAHB/LIW3RSbP+ZY9qkBiG5e6VaD748MMI7xqZU -jqMAtZvUKm1uufoTCH98amUakD3eCqVsvEWxt6nkziwhwqtVByga70+DDOUy7T12 -9/pvSF33AV7Y07/iXHTLhy3p0cPIyiCqfG3NndlfZXG0XxhhwyrmwVJYcCYyM0qD -2W7SqQKBgDgv7e5M4SJLUe54zO3S8aq940wVZl8PcA5G5WLbbEc05I6REYGLPlRC -tM704m6Fqkcj9Osdg+qSM+G9ViezDKvMahPJFe6wQ9Gyw166pK5dT5Y/v7r0vE4o -sxfosMl/oJKvwhE9pz5IOkxSS/sT6W0etMuaDOeSvk0pWGuk+No1 ------END RSA PRIVATE KEY----- diff --git a/test/e2e/certs/testca.crt b/test/e2e/certs/testca.crt new file mode 100644 index 0000000000..368cadb785 --- /dev/null +++ b/test/e2e/certs/testca.crt @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIDAzCCAeugAwIBAgIUG40CCHDhcQZzYtfnW/3+n2a1gyIwDQYJKoZIhvcNAQEL +BQAwEDEOMAwGA1UEAwwFbXktY2EwIBcNMjEwNzI1MjIwNDQ2WhgPMjI5NTA1MDky +MjA0NDZaMBAxDjAMBgNVBAMMBW15LWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAnkpmvnBW4UWw/W3m2Q9FJl1vVrZ21NneUapo9snyHH7rq9V/Kl+K +NZw4ZnxYUJI6QCJ0FLipj/RhtHnBEBu5SnyCi78aInGkkUECuvXLVFnG/9vPjpGV +fqQg3457VvsCexVIpEVHAVCI+pyQmVntIm9k5jzixzJvcnoXvlQLWO5w/b2KtA0B 
+wZGvaFoXl6KXr70T6Nhj6UOA1CZF2+cNB4EAIv7dV67y5+sBp7zu9pgF1ApjNV7e +QxEQUaT8LmKFsi7Xuj5ABlzb6Yiqzm2JZBO8V9wRiScchy8yRwS256eX1ASLRTI9 +l9Tvx9mYDyND27vTAlnJyPkeOeB12YgMgQIDAQABo1MwUTAdBgNVHQ4EFgQUHBqe +8LnYjSHpQJTj40rUbQQMtfcwHwYDVR0jBBgwFoAUHBqe8LnYjSHpQJTj40rUbQQM +tfcwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAH6OVef/bWgg3 +xJbdtQC5dI1OAHCL2GYZfHyquEXaCX5hACfBm5Nmqv/GYU5nrrS9+sdVOzkRL4pt +HuAwYHBdxvg1qvZTqBp65hTkni/zQC5K0rCgjDe452EuwK2pXhp8k4ChYG0BCGo0 +nXJU2yfpdu/WjHdrz/Nj+XoaPveb0IeLSdWJ8zUj9LZmpeetBkCppy68kKhCzgVZ +hbme+XKE6gnNxEYmQsQPc14NOVXGjxll9FMpkjMTLcXwjWkRmoqDDcfnmDcdg/eD +2WIgVZy7GlwtU1/dXs3AhBnwyUALgllHhXZd7DWSRNko/kg6L35Ncv0SyFiFRZY4 +06xGFXOrIg== +-----END CERTIFICATE----- diff --git a/test/e2e/certs/testca.key b/test/e2e/certs/testca.key new file mode 100644 index 0000000000..dec4156918 --- /dev/null +++ b/test/e2e/certs/testca.key @@ -0,0 +1,28 @@ +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCeSma+cFbhRbD9 +bebZD0UmXW9WtnbU2d5Rqmj2yfIcfuur1X8qX4o1nDhmfFhQkjpAInQUuKmP9GG0 +ecEQG7lKfIKLvxoicaSRQQK69ctUWcb/28+OkZV+pCDfjntW+wJ7FUikRUcBUIj6 +nJCZWe0ib2TmPOLHMm9yehe+VAtY7nD9vYq0DQHBka9oWheXopevvRPo2GPpQ4DU +JkXb5w0HgQAi/t1XrvLn6wGnvO72mAXUCmM1Xt5DERBRpPwuYoWyLte6PkAGXNvp +iKrObYlkE7xX3BGJJxyHLzJHBLbnp5fUBItFMj2X1O/H2ZgPI0Pbu9MCWcnI+R45 +4HXZiAyBAgMBAAECggEBAJp/B/VMdJ//g5Z4FF87tkSR4cAC680osQOIjlaCGRuT +Dbdz9GCJS76zsaDmMNTDRLOgbShkbKui3VJnm/Z6+Mko6co4Revke7mCeIW+riyw +S7ki9Fy03/Lxg1xwFxZ4fJ1h0rrzGzC/SFNDSSdETF3V1+eY+cdvBLP5Uv9jzIeI ++htIg9hAwiWhLtt9NyyO1zJ3Npfe6c+aPkJIWrf//TU9Bkv0Cje7+wZJDLeN4Trm +2kwmj774CYpKPsRKjD9RRRpmN9M9B7vomssoCdF0Qe/KD5Frq4rXJywY1/ZSU68B +HhbaiHckg/Zzm41OHxRhC6fyVXsdXqfPWrvw4DEaMaECgYEAzUOq+Q5kwWrY2tMT +6bl41l/sh7CczhMa5zTnLgREqxELueCtLRZUIf16HXQikQfnHHWOfDLL6IK6CO9e +KKw+S2yeywKBLAx9GnWgqUzdkqn39NGUmkp/+HgyYWj38VCZlYwAPwHAdBzU8puT +47Yo6y9c+fl+JT/v0RqqiyonN+UCgYEAxWprc569OIqDHrMlTZTiVYuYxRXlgDcK +tnefefDmoU2wAOX29ak7rzuaktPzJZ6NWhbFPN2US21y4y2zEMgNdjufxvJj7Dsv ++LL135P+7+pJPwGOQnJXcUVDq1a6lYsPp0Xov/+MaaprOtupqwVG5h7Y7EVh+p/T +C9eQ7K+sQG0CgYAs/3uYs7YUzLaTahiAcV9zJ34z1NzbNEb+jpNrfBYNpmP6sK1m +J/HzrZy6nj4Bq0jX7VF6Gqym4RmlYcwzgGb7o+k/ueHVZTt7tHOvCaG9SDOKYPOB +y3k4YCkzP46zQScaKKHZLiohijyInuCR19oqjKyGRFKKGfUZ0w22ClyknQKBgQCx +U6o5LPli9GhbBygWzAmSNdaAEe8YgFiLFmCVtCI1+gaCiA6RE/acrLln8iKSRB1M +Mu/wgEhYiHOORNp31HrjHSOkHM5Mv82TY40fy1xQPO6WPdt/LB4HjOP/3+OxuDvT +USSEchO6Rm4TbIS3LgJFaqgHcztTWITGOlffgt0C8QKBgESD9fgd7+qcqcEj6uk1 +uIKamTzacbbSmjcu3OU8S9hNItw+UDARcvan4wN0Y/LtbsaCaVU4DdRcxfo5ZgGi +JIK5ZiwaQ1gVO3+4CuNJ4Mq8z2AqmK9m5PBhunm1PxwQlh1BR8GeJzHo8RAMfqLC +pk6M/9D0m2Sqa4MAOzNAX1rA +-----END PRIVATE KEY----- diff --git a/test/e2e/certs/testca.srl b/test/e2e/certs/testca.srl new file mode 100644 index 0000000000..3b1528d849 --- /dev/null +++ b/test/e2e/certs/testca.srl @@ -0,0 +1 @@ +3A07EDF9FFE5E57BE7C1EA0926CD44D5FF3010E7 diff --git a/test/e2e/certs/testclient.key b/test/e2e/certs/testclient.key new file mode 100644 index 0000000000..f7b0db76b9 --- /dev/null +++ b/test/e2e/certs/testclient.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEAwssUdXudFe51Wd9Qs922VHjwtSX9D5CpeXsV8rExrMSSBYsV +Fm2aNONFULUdFKn1qrJGneQqqacJb6KbCodsY7JECu1VZgGfOISjko0ZK4TuJLD0 +LTrtfpCuEubkviHAL95WoPXHavN3PUVBOKW40L3Vb700ZA1rQgGOxiGA1yJMQEV2 +WW8GaAkBoTVvlvjIR08Ovu7IZCQvQT3eKMSPzLmDDXryByAh6hbIWjL1xkugNb0B +iiRDjSRVSyBCAUwesw7pYS5zJVotQ3vXe5YUhsTJCQM7tPLlIpeNI+7PsXpWRMKJ +PkHo5IqtN9qLK4ahO3PzfdhREoZafQ9wbbGP7wIDAQABAoIBAAkyOTcKrPHaHiQu +DMtRDkiSpRqIPpg7hiS8EN8ySlwTs6kbVdWHOx6AFEepJURSecyjOf4+RV1ZC3L/ 
+xD4NAqr6QYgaiEuWjAPJD0fd7XBGiE+nIv/3pjqxbeZqjCYFxeL8pi5XLCD6eYik ++l1IRZ9lNuxdjFmk4IlvNWRZNJHzcXb00fzkouf88hzDKryrY9c499AY71Hq1oV4 +1JaMvNQE5+mha42TB9DlMDrpo+4KmbSQEzmWR0vJaSBYZoJo2BTNt15ZqfqGZxcF +E2v6HbWwfrpJAKHVukoV9+O0Aww17fv59PAOiobx16HTDnnmvmfkd4rFRC2jBSrW +llCSqIECgYEA6Xwcgr3wlIkTp2ROCih56tKs9U449Cvs1YBttQw00KCs120MI7LJ +j1o5iSVHXwy6Li/Xkan4wMq8Q6wJde2lTmXuzqJNbBpPJ7f7nImApEI233GtUeDB +8jNfBbFgDxPJ/SUKV95Pkts4c702TKEjcIW1QorGQ9kfNW/pqqwVSqECgYEA1ZPQ +1ldDjMryh+9aSCeosI81dTHD8XB3hWonh6cgGSGlamHvnkHr4yha63b1SFAczUXK +UMsHhzjVNgwuOY79J4oKFMhkOilLbQTJAW+49dPYLd7YkWR2HCeg8zQDhTCOPih+ +sfPuJpXzMLxyPnYZB3PPnftxdL2wYvQDwNyA4I8CgYEAopo9nMu8pSUFm/jd5HMl +3OSVcUzlmx6QFGwZexGiIn0Tgf++iYn0lzIFib59gvNxIcNNxHLz+wf1rsc723YC +PqP8eMlNU/DKmWWnA/A6t6opXtljE/eL+inOjj9mH3nvnK1UE7eOAT9MhEYMb7d0 +3+MuRcYxp4SpGGT0/VhUh6ECgYEAmOJL7vLefu1T+Wyj4tzIy0JoUPEDw1PvqsgX +bep7dqH4KBPdFBs6QQxFMSKhaFc4ltRrg7QiOnC7ZwoHGNq7as9Mch4UaPHgdTSk +smuIKiiwKf3IZyNTzMmtVAqarn/YgrQ4JShpBKP4Rp3O5dliB0ymhP9V8uxKVDs6 +aJcvhg8CgYBPXYYud2RU105diuyO+R2kB1VpoT5k+qGUtLWSq8Z8M/Om2tJSkTCj +g7uBaGHCIOCum4sDKKtcaCV6ReNRuxENOP/0zOvz4D61opgannwriQiLbnxtD6Tm +Jr6zhxOEEbh4uktqafBvm2XMGxj75mvI9UUQPZbxg+QKd+H4gttPsQ== +-----END RSA PRIVATE KEY----- diff --git a/test/e2e/certs/testserver.key b/test/e2e/certs/testserver.key new file mode 100644 index 0000000000..341c21eac2 --- /dev/null +++ b/test/e2e/certs/testserver.key @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAzUlmGN13bhFji84de+73UPDyB4NRUrdBn4WDkZDcRIagiAbO +PIDqYvj4gAQEpxQfl/srs2taoPCmM2kc5OtwCmoRO8uKMcdNnIAiLByDcpWQusOC +PQiw0178y1g/XZ+GU3UkVUGXD8M2SI5bbEU/F7HPp/MC5ILLktnCnGUQKqB3S9tA +vPbZ01KN/xqHHwo2JzxkWpFu+W90ix1VStJoCjZVYwlUt+VH9i7AHX/buDIBIeyD +DCI3PLFP+mPfvpvKPzgwSP5VHRK3l0+AqFGEo5dF+nB4w/HexMzVz3A5c+HLChDH +1qORmtIadzHqyq0CqNLd1Fp0Im7fF1K4pt3SzwIDAQABAoIBAFma1iO4V28FzAu0 +Fg6Iig8cfKhm21JdhxlYL8uUDXEjymJVhiPGqYg14FIQbu92yyuNiM+x5kclg4Yh +NWl+ktMY5RjSiSmdMvVCwKe8cmvLePbsa6a0amp4paMki9QT05AOK6PDE2rf7IvT +B3jE6Psws9E9M2AG71DUGCwnuNzBkYFzbgFBhOcOeXE4olkBLzImrjdwZhu7bx8h +MGzkLTOom9pqnCJ1lPatX+jaKETTP/7OMwXBnNxD0byLxRLWS5fhdbHtNCUjRIGV +t5x8Fp7dEfMKr7FzMKeXxIAp9RBvHYNgSYuwabrTL4m/yI6fK9d2PfDRavELISSH +0hgmyEECgYEA9NEg09jB/bJR3H7FS9DHQliaAjzM05jyO5nb2Hb6KebZA4t4dB1o +5kiDsK+l7Fo7DE9Ev96D88Z+KjsT8kVkmnQhESJ63PUtIh/OFwaaQ0wYUzDA4rY+ +JWvtuGJI4m0V0ay26E1p7x05Yc6rP4h6Pj4fAfXBi4SqhKeY0rt0k98CgYEA1qoB +3M26/Reu6U+hgBefQJjVPz/OHXFoPOkpuPuuV4T72YUs+1WCGKac/KbWyvP5mRNR +/r6JEP1O7PhyubTASL32HvWEiukGcspD9PfvUiPKVDUfuwfAfA5ki9eE+RxF7xGW +06JD6eBDDn2KUUeteWGcBgvixvsthbMj/nDIHxECgYAaWcAczUHkGqNxBjwaMvif +jYXWI9EwHVBFVgQxwmyewfgtx39JkDjQQJHcHKvRSsz8QDP4Ku0s+fpudTlqZEm1 +zjYY8AslaUZYW/AEznMgNzqPCbpZLKB3yqwlon6R0If+r4ZobpyuXgjlHCeWiQwP +UwRnY5pXeAoaiSeL1x1vJQKBgQDPObsmjqwuW0iNDkRXxXYFBAUIU+4D2lDITEXs +v3nkHwqJR9jOoBNA/Mab1+uObMvNDCuhggoLjp08gfMuduEQ1+DariTPfHivI7dK +qnYrk0Vk2QvSn38fwz0pClrhqHuSeCZkE0cAZXAWcvat1EYQ4TufUqYPdLbTVMYw +gGuQIQKBgAOshacIU0wDRz+FFN6rGKiroiNPPu3iXLlapdBhyh8xjzF0X6hdwfrY +EfXd4JuLfxeJCfP0hAx32+pv6OPRJXC10MIn/wJOZb18dh57vSOdvhybj6j0nYU0 +noS2EZrfPuoBxxRjJws1WIrixhM6wWvrUafs8uIltsn0/u0c2QmY +-----END RSA PRIVATE KEY----- diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 475efbdc2b..890442c22b 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -146,7 +146,7 @@ type QuerierBuilder struct { metadataAddresses []string targetAddresses []string exemplarAddresses []string - mutualTLSConfig []string + mutualTLSConfig []string endpointConfig []store.Config diff --git a/test/e2e/query_test.go 
b/test/e2e/query_test.go index 7bb4f09aac..fb8c9f289d 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -151,23 +151,9 @@ func TestQuery(t *testing.T) { fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) testutil.Ok(t, err) - queryFileSDDir := filepath.Join(s.SharedDir(), "data", "querier", "1") - container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") - testutil.Ok(t, cpyDir("./certs", queryFileSDDir)) - - args := e2e.BuildArgs(map[string]string{ - "--grpc-client-tls-cert": filepath.Join(container, "myclient.crt"), - "--grpc-client-tls-key": filepath.Join(container, "myclient.key"), - "--grpc-server-tls-cert": filepath.Join(container, "myserver.crt"), - "--grpc-server-tls-key": filepath.Join(container, "myserver.key"), - }) - - args = append(args, "--grpc-client-tls-secure") - args = append(args, "--grpc-client-tls-skip-verify") // As the certs are self-signed. - // Querier. Both fileSD and directly by flags. q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}). - WithFileSDStoreAddresses(fileSDPath).WithMutualTLS(args).Build() + WithFileSDStoreAddresses(fileSDPath).Build() testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(q)) @@ -257,14 +243,31 @@ func TestQueryWithEndpointConfig(t *testing.T) { fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) testutil.Ok(t, err) + queryFileSDDir := filepath.Join(s.SharedDir(), "data", "querier", "1") + container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") + testutil.Ok(t, cpyDir("./certs", queryFileSDDir)) + + args := e2e.BuildArgs(map[string]string{ + "--grpc-server-tls-cert": filepath.Join(container, "e2e_test_query_config_server.crt"), + "--grpc-server-tls-key": filepath.Join(container, "testserver.key"), + "--grpc-server-tls-client-ca": filepath.Join(container, "testca.crt"), + }) + + args = append(args, "--grpc-client-tls-skip-verify") // As the certs are self-signed. 
+ endpointConfig := []store.Config{ { - Name: "one", - Endpoints: []string{sidecar1.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}, + Name: "one", + TLSConfig: store.TLSConfiguration{ + CertFile: filepath.Join(container, "e2e_test_query_config_client.crt"), + KeyFile: filepath.Join(container, "testclient.key"), + CaCertFile: filepath.Join(container, "testca.crt"), + }, + Endpoints: []string{sidecar1.InternalEndpoint("grpc")}, }, { Name: "two", - Endpoints: []string{sidecar2.InternalEndpoint("grpc")}, + Endpoints: []string{sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}, EndpointsSD: []file.SDConfig{ { Files: []string{fileSDPath}, @@ -274,7 +277,7 @@ func TestQueryWithEndpointConfig(t *testing.T) { }, } - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", nil).WithEndpointConfig(endpointConfig).Build() + q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", nil).WithEndpointConfig(endpointConfig).WithMutualTLS(args).Build() testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(q)) From fd8e660b7c14adb5fe49ff3864c36c53f001f8d0 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 28 Jul 2021 19:25:43 +0530 Subject: [PATCH 14/29] separate fileSDCache Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index bcc3229340..49f7af6120 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -430,6 +430,8 @@ func runQuery( return errors.Wrap(err, "building gRPC client") } + // Separate DNS provider for each endpoint config. + fileSDCache := cache.New() dnsStoreProvider := dns.NewProvider( logger, extprom.WrapRegistererWith( From e48140ad134a42f1a7764bab7ac37d00ecb4b3bb Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Fri, 30 Jul 2021 01:38:52 +0530 Subject: [PATCH 15/29] configuring TLS in sidecar testing Signed-off-by: Namanl2001 --- test/e2e/e2ethanos/services.go | 28 ++++++++++++++------------- test/e2e/exemplars_api_test.go | 2 ++ test/e2e/metadata_api_test.go | 2 ++ test/e2e/query_test.go | 35 +++++++++++++++++----------------- test/e2e/rules_api_test.go | 2 ++ test/e2e/targets_api_test.go | 2 ++ 6 files changed, 40 insertions(+), 31 deletions(-) diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 890442c22b..c29fda82ea 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -111,20 +111,24 @@ func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promIm return nil, nil, err } + args := e2e.BuildArgs(map[string]string{ + "--debug.name": fmt.Sprintf("sidecar-%v", name), + "--grpc-address": ":9091", + "--grpc-grace-period": "0s", + "--http-address": ":8080", + "--prometheus.url": "http://" + prom.NetworkEndpointFor(netName, 9090), + "--tsdb.path": dataDir, + "--log.level": infoLogLevel, + }) + + args = append(args, tlsConfig...) 
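The endpointConfig literal in the test above reaches the querier through the --endpoint.config flag after being serialized to YAML (the e2e QuerierBuilder does this with the endpointCfgBytes it builds). A minimal sketch of that round trip, assuming the usual github.com/thanos-io/thanos module path; names, paths, and addresses below are placeholders:

package main

import (
	"fmt"

	"gopkg.in/yaml.v2"

	"github.com/thanos-io/thanos/pkg/store"
)

func main() {
	// Build the per-config slice in Go, then serialize it for --endpoint.config.
	cfg := []store.Config{
		{
			Name: "withTLS",
			TLSConfig: store.TLSConfiguration{
				CertFile:   "/shared/certs/client.crt",
				KeyFile:    "/shared/certs/client.key",
				CaCertFile: "/shared/certs/ca.crt",
				ServerName: "sidecar-alone",
			},
			Endpoints: []string{"sidecar-1:9091"},
		},
		{
			Name:      "withoutTLS",
			Endpoints: []string{"receiver-1:9091"},
		},
	}

	out, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Println("--endpoint.config=" + string(out))
}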
+ sidecar := NewService( e, fmt.Sprintf("sidecar-%s", name), - sidecarImage, - e2e.NewCommand("sidecar", e2e.BuildArgs(map[string]string{ - "--debug.name": fmt.Sprintf("sidecar-%v", name), - "--grpc-address": ":9091", - "--grpc-grace-period": "0s", - "--http-address": ":8080", - "--prometheus.url": "http://" + prom.InternalEndpoint("http"), - "--tsdb.path": dataDir, - "--log.level": infoLogLevel, - })...), - e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), + DefaultImage(), + e2e.NewCommand("sidecar", args...), + e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), 8080, 9091, ) @@ -343,8 +347,6 @@ func (q *QuerierBuilder) Build() (*Service, error) { args = append(args, "--endpoint.config="+string(endpointCfgBytes)) } - args = append(args, q.mutualTLSConfig...) - querier := NewService( fmt.Sprintf("querier-%v", q.name), DefaultImage(), diff --git a/test/e2e/exemplars_api_test.go b/test/e2e/exemplars_api_test.go index 13aaffca85..6f3e83dd2e 100644 --- a/test/e2e/exemplars_api_test.go +++ b/test/e2e/exemplars_api_test.go @@ -44,6 +44,7 @@ func TestExemplarsAPI_Fanout(t *testing.T) { "prom1", defaultPromConfig("ha", 0, "", "", "localhost:9090", qUnitiated.InternalEndpoint("http")), e2ethanos.DefaultPrometheusImage(), + nil, e2ethanos.FeatureExemplarStorage, ) testutil.Ok(t, err) @@ -52,6 +53,7 @@ func TestExemplarsAPI_Fanout(t *testing.T) { "prom2", defaultPromConfig("ha", 1, "", "", "localhost:9090", qUnitiated.InternalEndpoint("http")), e2ethanos.DefaultPrometheusImage(), + nil, e2ethanos.FeatureExemplarStorage, ) testutil.Ok(t, err) diff --git a/test/e2e/metadata_api_test.go b/test/e2e/metadata_api_test.go index 096560e64e..f7c44c0772 100644 --- a/test/e2e/metadata_api_test.go +++ b/test/e2e/metadata_api_test.go @@ -32,6 +32,7 @@ func TestMetadataAPI_Fanout(t *testing.T) { "prom1", defaultPromConfig("ha", 0, "", "", "localhost:9090", "sidecar-prom1:8080"), e2ethanos.DefaultPrometheusImage(), + nil, ) testutil.Ok(t, err) @@ -40,6 +41,7 @@ func TestMetadataAPI_Fanout(t *testing.T) { "prom2", defaultPromConfig("ha", 1, "", "", "localhost:9090", "sidecar-prom2:8080"), e2ethanos.DefaultPrometheusImage(), + nil, ) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index fb8c9f289d..4237aa1cb2 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -230,19 +230,6 @@ func TestQueryWithEndpointConfig(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, s.StartAndWaitReady(receiver)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) - testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage()) - testutil.Ok(t, err) - prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) - testutil.Ok(t, err) - prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) - 
testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) - - fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) - testutil.Ok(t, err) - queryFileSDDir := filepath.Join(s.SharedDir(), "data", "querier", "1") container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") testutil.Ok(t, cpyDir("./certs", queryFileSDDir)) @@ -253,21 +240,33 @@ func TestQueryWithEndpointConfig(t *testing.T) { "--grpc-server-tls-client-ca": filepath.Join(container, "testca.crt"), }) - args = append(args, "--grpc-client-tls-skip-verify") // As the certs are self-signed. + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), args) + testutil.Ok(t, err) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage(), args) + testutil.Ok(t, err) + prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) + testutil.Ok(t, err) + prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) + testutil.Ok(t, err) + testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) + + fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.GRPCNetworkEndpoint(), sidecar4.GRPCNetworkEndpoint()}) + testutil.Ok(t, err) endpointConfig := []store.Config{ { - Name: "one", + Name: "withTLS", TLSConfig: store.TLSConfiguration{ CertFile: filepath.Join(container, "e2e_test_query_config_client.crt"), KeyFile: filepath.Join(container, "testclient.key"), CaCertFile: filepath.Join(container, "testca.crt"), + ServerName: "e2e_test_query_config-sidecar-alone", }, - Endpoints: []string{sidecar1.InternalEndpoint("grpc")}, + Endpoints: []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc")}, }, { - Name: "two", - Endpoints: []string{sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}, + Name: "withoutTLS", + Endpoints: []string{receiver.InternalEndpoint("grpc")}, EndpointsSD: []file.SDConfig{ { Files: []string{fileSDPath}, diff --git a/test/e2e/rules_api_test.go b/test/e2e/rules_api_test.go index 0d94317c8b..dcb21a6d81 100644 --- a/test/e2e/rules_api_test.go +++ b/test/e2e/rules_api_test.go @@ -49,6 +49,7 @@ func TestRulesAPI_Fanout(t *testing.T) { "prom1", defaultPromConfig("ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, promRulesSubDir, "*.yaml")), e2ethanos.DefaultPrometheusImage(), + nil, ) testutil.Ok(t, err) prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar( @@ -56,6 +57,7 @@ func TestRulesAPI_Fanout(t *testing.T) { "prom2", defaultPromConfig("ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, promRulesSubDir, "*.yaml")), e2ethanos.DefaultPrometheusImage(), + nil, ) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) diff 
--git a/test/e2e/targets_api_test.go b/test/e2e/targets_api_test.go index a3b2d4a615..0920b71757 100644 --- a/test/e2e/targets_api_test.go +++ b/test/e2e/targets_api_test.go @@ -39,6 +39,7 @@ func TestTargetsAPI_Fanout(t *testing.T) { "prom1", defaultPromConfig("ha", 0, "", "", "localhost:9090", "localhost:80"), e2ethanos.DefaultPrometheusImage(), + nil, ) testutil.Ok(t, err) prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar( @@ -46,6 +47,7 @@ func TestTargetsAPI_Fanout(t *testing.T) { "prom2", defaultPromConfig("ha", 1, "", "", "localhost:9090", "localhost:80"), e2ethanos.DefaultPrometheusImage(), + nil, ) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) From 83185e49e3a65788e68509cfebb84ed389cddb57 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Sat, 31 Jul 2021 01:46:52 +0530 Subject: [PATCH 16/29] failing fast for --secure Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 22 +++++++++++----------- pkg/extgrpc/client.go | 3 +-- test/e2e/e2ethanos/services.go | 6 ------ test/e2e/query_test.go | 8 ++++---- 4 files changed, 16 insertions(+), 23 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 49f7af6120..b691b6368c 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -222,8 +222,8 @@ func registerQuery(app *extkingpin.App) { return err } - if (len(*fileSDFiles) != 0 || len(*stores) != 0) && len(endpointConfigYAML) != 0 { - return errors.Errorf("--sore/--store.sd-files and --endpoint.config parameters cannot be defined at the same time") + if (len(*fileSDFiles) != 0 || len(*stores) != 0 || *secure) && len(endpointConfigYAML) != 0 { + return errors.Errorf("--sore/--store.sd-files/--grpc-client-tls-secure and --endpoint.config parameters cannot be defined at the same time") } var fileSDConfig *file.SDConfig @@ -375,15 +375,6 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. - var TLSConfig store.TLSConfiguration - if secure && len(endpointConfigYAML) == 0 { - TLSConfig.CertFile = cert - TLSConfig.KeyFile = key - TLSConfig.CaCertFile = caCert - TLSConfig.ServerName = serverName - } - var endpointConfig []store.Config var err error if len(endpointConfigYAML) > 0 { @@ -392,6 +383,14 @@ func runQuery( return errors.Wrap(err, "loading endpoint config") } } else { + // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. 
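+ // The zero value of TLSConfiguration doubles as the "no TLS" marker: further down,
+ // `secure` is re-derived per config from whether config.TLSConfig is non-zero, so
+ // these fields are only populated when --grpc-client-tls-secure was passed.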
+ var TLSConfig store.TLSConfiguration + if secure { + TLSConfig.CertFile = cert + TLSConfig.KeyFile = key + TLSConfig.CaCertFile = caCert + TLSConfig.ServerName = serverName + } endpointConfig, err = store.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) if err != nil { return errors.Wrap(err, "initializing endpoint config from individual flags") @@ -425,6 +424,7 @@ func runQuery( var storeSets []*query.EndpointSet fileSDCache := cache.New() for _, config := range endpointConfig { + secure = !(config.TLSConfig == store.TLSConfiguration{}) dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, config.Name, secure, skipVerify, config.TLSConfig) if err != nil { return errors.Wrap(err, "building gRPC client") diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index eea2cb875a..2efecd127e 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -54,8 +54,7 @@ func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer ope reg.MustRegister(grpcMets) } - // Insecure if secure is false and no TLS config is supplied. - if !secure && (tlsConfig == store.TLSConfiguration{}) { + if !secure { return append(dialOpts, grpc.WithInsecure()), nil } diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index c29fda82ea..e4a0cbb172 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -150,7 +150,6 @@ type QuerierBuilder struct { metadataAddresses []string targetAddresses []string exemplarAddresses []string - mutualTLSConfig []string endpointConfig []store.Config @@ -262,11 +261,6 @@ func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []store.Config) *Quer return q } -func (q *QuerierBuilder) WithMutualTLS(mutualTLSConfig []string) *QuerierBuilder { - q.mutualTLSConfig = mutualTLSConfig - return q -} - func (q *QuerierBuilder) Build() (*Service, error) { const replicaLabel = "replica" diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 4237aa1cb2..3266b7ca2d 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -234,15 +234,15 @@ func TestQueryWithEndpointConfig(t *testing.T) { container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") testutil.Ok(t, cpyDir("./certs", queryFileSDDir)) - args := e2e.BuildArgs(map[string]string{ + tlsConfig := e2e.BuildArgs(map[string]string{ "--grpc-server-tls-cert": filepath.Join(container, "e2e_test_query_config_server.crt"), "--grpc-server-tls-key": filepath.Join(container, "testserver.key"), "--grpc-server-tls-client-ca": filepath.Join(container, "testca.crt"), }) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), args) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage(), args) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), 
e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) @@ -276,7 +276,7 @@ func TestQueryWithEndpointConfig(t *testing.T) { }, } - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", nil).WithEndpointConfig(endpointConfig).WithMutualTLS(args).Build() + q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", nil).WithEndpointConfig(endpointConfig).Build() testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(q)) From 18c830ef061e1a2f05dc921e1793aba6b660f797 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 11 Aug 2021 04:16:37 +0530 Subject: [PATCH 17/29] allow --store with --endpoint-config with noTLS Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 6 +++--- pkg/store/config.go | 34 ++++++++++++++++++++++------------ test/e2e/e2ethanos/services.go | 2 +- 3 files changed, 26 insertions(+), 16 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index b691b6368c..2c163a462c 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -222,8 +222,8 @@ func registerQuery(app *extkingpin.App) { return err } - if (len(*fileSDFiles) != 0 || len(*stores) != 0 || *secure) && len(endpointConfigYAML) != 0 { - return errors.Errorf("--sore/--store.sd-files/--grpc-client-tls-secure and --endpoint.config parameters cannot be defined at the same time") + if *secure && len(endpointConfigYAML) != 0 { + return errors.Errorf("deprecated flags --grpc-client-tls* and new --endpoint.config flag cannot be specified at the same time; use either of those") } var fileSDConfig *file.SDConfig @@ -378,7 +378,7 @@ func runQuery( var endpointConfig []store.Config var err error if len(endpointConfigYAML) > 0 { - endpointConfig, err = store.LoadConfig(endpointConfigYAML) + endpointConfig, err = store.LoadConfig(endpointConfigYAML, storeAddrs, fileSDConfig) if err != nil { return errors.Wrap(err, "loading endpoint config") } diff --git a/pkg/store/config.go b/pkg/store/config.go index 0557b570d4..e003814c47 100644 --- a/pkg/store/config.go +++ b/pkg/store/config.go @@ -42,30 +42,30 @@ const ( func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { var endpointConfig []Config - // Adding --endpoint, --endpoint.sd-files info to []endpointConfig, if provided. + // Adding --endpoint, --endpoint.sd-files to []endpointConfig, if provided. if len(endpointAddrs) > 0 || fileSDConfig != nil { - cfg1 := Config{} - cfg1.TLSConfig = TLSConfig - cfg1.Endpoints = endpointAddrs + cfg := Config{} + cfg.TLSConfig = TLSConfig + cfg.Endpoints = endpointAddrs if fileSDConfig != nil { - cfg1.EndpointsSD = []file.SDConfig{*fileSDConfig} + cfg.EndpointsSD = []file.SDConfig{*fileSDConfig} } - endpointConfig = append(endpointConfig, cfg1) + endpointConfig = append(endpointConfig, cfg) } // Adding --endpoint-strict endpoints if provided. 
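Before the strict-mode branch just below, a minimal caller-side sketch (addresses invented, no TLS flags set) of what NewConfig as a whole returns may help:

	cfgs, err := store.NewConfig(
		[]string{"sidecar-a:10901", "sidecar-b:10901"}, // --endpoint / --store
		[]string{"ruler-a:10901"},                      // --endpoint-strict / --store-strict
		nil,                      // no --endpoint.sd-files / --store.sd-files
		store.TLSConfiguration{}, // no --grpc-client-tls-* flags
	)
	// Expected: err is nil and cfgs has two entries sharing the (empty) TLS settings: one
	// carrying both sidecar addresses, and one for the ruler with Mode == store.StrictEndpointMode.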
if len(strictEndpointAddrs) > 0 { - cfg2 := Config{} - cfg2.TLSConfig = TLSConfig - cfg2.Endpoints = strictEndpointAddrs - cfg2.Mode = StrictEndpointMode - endpointConfig = append(endpointConfig, cfg2) + cfg := Config{} + cfg.TLSConfig = TLSConfig + cfg.Endpoints = strictEndpointAddrs + cfg.Mode = StrictEndpointMode + endpointConfig = append(endpointConfig, cfg) } return endpointConfig, nil } // LoadConfig returns list of per-endpoint TLS config. -func LoadConfig(confYAML []byte) ([]Config, error) { +func LoadConfig(confYAML []byte, endpointAddrs []string, fileSDConfig *file.SDConfig) ([]Config, error) { var endpointConfig []Config if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { @@ -86,6 +86,16 @@ func LoadConfig(confYAML []byte) ([]Config, error) { } } + // Adding --endpoint, --endpoint.sd-files with NO-TLS to []endpointConfig, if provided. + if len(endpointAddrs) > 0 || fileSDConfig != nil { + cfg := Config{} + cfg.Endpoints = endpointAddrs + if fileSDConfig != nil { + cfg.EndpointsSD = []file.SDConfig{*fileSDConfig} + } + endpointConfig = append(endpointConfig, cfg) + } + // Checking if some endpoints are inputted more than once. allEndpoints := make(map[string]struct{}) for _, config := range endpointConfig { diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index e4a0cbb172..0306621e10 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -121,7 +121,7 @@ func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promIm "--log.level": infoLogLevel, }) - args = append(args, tlsConfig...) + args = append(args, extraArgs...) sidecar := NewService( e, From 1a0f2edefe5d433b42b03b2249398232dca4f1fe Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 11 Aug 2021 16:06:02 +0530 Subject: [PATCH 18/29] generating temp certs while testing from create.sh Signed-off-by: Namanl2001 --- test/e2e/certs/create.sh | 8 +- .../certs/e2e_test_query_config_client.crt | 23 ------ .../certs/e2e_test_query_config_client.csr | 16 ---- .../certs/e2e_test_query_config_server.crt | 23 ------ .../certs/e2e_test_query_config_server.csr | 16 ---- test/e2e/certs/testca.crt | 19 ----- test/e2e/certs/testca.key | 28 ------- test/e2e/certs/testca.srl | 1 - test/e2e/certs/testclient.key | 27 ------- test/e2e/certs/testserver.key | 27 ------- test/e2e/query_test.go | 76 +++---------------- 11 files changed, 14 insertions(+), 250 deletions(-) delete mode 100644 test/e2e/certs/e2e_test_query_config_client.crt delete mode 100644 test/e2e/certs/e2e_test_query_config_client.csr delete mode 100644 test/e2e/certs/e2e_test_query_config_server.crt delete mode 100644 test/e2e/certs/e2e_test_query_config_server.csr delete mode 100644 test/e2e/certs/testca.crt delete mode 100644 test/e2e/certs/testca.key delete mode 100644 test/e2e/certs/testca.srl delete mode 100644 test/e2e/certs/testclient.key delete mode 100644 test/e2e/certs/testserver.key diff --git a/test/e2e/certs/create.sh b/test/e2e/certs/create.sh index 6c2ee6b5c8..ff231df9f8 100644 --- a/test/e2e/certs/create.sh +++ b/test/e2e/certs/create.sh @@ -15,13 +15,13 @@ openssl genrsa -out testclient.key 2048 openssl req \ -new \ -key testserver.key \ - -subj "/CN=e2e_test_query_config-sidecar-alone" \ + -subj "/CN=e2e_test_query_config-sidecar" \ -out e2e_test_query_config_server.csr openssl req \ -new \ -key testclient.key \ - -subj "/CN=e2e_test_query_config-querier-1" \ + -subj "/CN=e2e_test_query_config-querier" \ -out e2e_test_query_config_client.csr openssl x509 \ @@ -42,7 
+42,7 @@ keyUsage = critical, digitalSignature, keyEncipherment extendedKeyUsage = serverAuth subjectAltName = @alt_names [alt_names] -DNS.1 = e2e_test_query_config-sidecar-alone +DNS.1 = e2e_test_query_config-sidecar EOF ) \ -out e2e_test_query_config_server.crt @@ -65,7 +65,7 @@ keyUsage = critical, digitalSignature, keyEncipherment extendedKeyUsage = clientAuth subjectAltName = @alt_names [alt_names] -DNS.1 = e2e_test_query_config-querier-1 +DNS.1 = e2e_test_query_config-querier EOF ) \ -out e2e_test_query_config_client.crt diff --git a/test/e2e/certs/e2e_test_query_config_client.crt b/test/e2e/certs/e2e_test_query_config_client.crt deleted file mode 100644 index 3137facee9..0000000000 --- a/test/e2e/certs/e2e_test_query_config_client.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID4DCCAsigAwIBAgIUOgft+f/l5XvnweoJJs1E1f8wEOcwDQYJKoZIhvcNAQEL -BQAwEDEOMAwGA1UEAwwFbXktY2EwIBcNMjEwNzI1MjIxOTIwWhgPMjI5NTA1MDky -MjE5MjBaMCoxKDAmBgNVBAMMH2UyZV90ZXN0X3F1ZXJ5X2NvbmZpZy1xdWVyaWVy -LTEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDCyxR1e50V7nVZ31Cz -3bZUePC1Jf0PkKl5exXysTGsxJIFixUWbZo040VQtR0UqfWqskad5CqppwlvopsK -h2xjskQK7VVmAZ84hKOSjRkrhO4ksPQtOu1+kK4S5uS+IcAv3lag9cdq83c9RUE4 -pbjQvdVvvTRkDWtCAY7GIYDXIkxARXZZbwZoCQGhNW+W+MhHTw6+7shkJC9BPd4o -xI/MuYMNevIHICHqFshaMvXGS6A1vQGKJEONJFVLIEIBTB6zDulhLnMlWi1De9d7 -lhSGxMkJAzu08uUil40j7s+xelZEwok+Qejkiq032osrhqE7c/N92FEShlp9D3Bt -sY/vAgMBAAGjggEUMIIBEDAJBgNVHRMEAjAAMBEGCWCGSAGG+EIBAQQEAwIHgDAz -BglghkgBhvhCAQ0EJhYkT3BlblNTTCBHZW5lcmF0ZWQgQ2xpZW50IENlcnRpZmlj -YXRlMB0GA1UdDgQWBBSEaIDn5IKg9m4D8OIsWL+STQ42OTBLBgNVHSMERDBCgBQc -Gp7wudiNIelAlOPjStRtBAy196EUpBIwEDEOMAwGA1UEAwwFbXktY2GCFBuNAghw -4XEGc2LX51v9/p9mtYMiMA4GA1UdDwEB/wQEAwIFoDATBgNVHSUEDDAKBggrBgEF -BQcDAjAqBgNVHREEIzAhgh9lMmVfdGVzdF9xdWVyeV9jb25maWctcXVlcmllci0x -MA0GCSqGSIb3DQEBCwUAA4IBAQCFIZPOKWAGL3HXVEyucqw1VNy2uNEiJqm2Pp8W -sbX3HQk+abQB8UhT25cWVYaZB84czUBIzjBcNziZqhUfls4nx6N+5ksdsRa/jtPD -wbXmdNQdU05Vp8K8O22nikrLgMPveqOAZuuxC6DjMg5sqofMPf7cQlMA6ec3a+7x -2jshwyViX1wr08MzhqKwvAWeYsDRRcH9q80PAqOc461GL63j0zPfTEJdeglOAaHQ -n0ocsQ5FXIJVlntwTASuhdtevokwYYW/C5i/B2RBgSz9XN2QnJbLNADyqRLaN8+5 -YiN8Ua2IANfIcFS2GuESUJxdGl3e58gPj0URnddQQXrjCawG ------END CERTIFICATE----- diff --git a/test/e2e/certs/e2e_test_query_config_client.csr b/test/e2e/certs/e2e_test_query_config_client.csr deleted file mode 100644 index 0f8bddb756..0000000000 --- a/test/e2e/certs/e2e_test_query_config_client.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICbzCCAVcCAQAwKjEoMCYGA1UEAwwfZTJlX3Rlc3RfcXVlcnlfY29uZmlnLXF1 -ZXJpZXItMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMLLFHV7nRXu -dVnfULPdtlR48LUl/Q+QqXl7FfKxMazEkgWLFRZtmjTjRVC1HRSp9aqyRp3kKqmn -CW+imwqHbGOyRArtVWYBnziEo5KNGSuE7iSw9C067X6QrhLm5L4hwC/eVqD1x2rz -dz1FQTiluNC91W+9NGQNa0IBjsYhgNciTEBFdllvBmgJAaE1b5b4yEdPDr7uyGQk -L0E93ijEj8y5gw168gcgIeoWyFoy9cZLoDW9AYokQ40kVUsgQgFMHrMO6WEucyVa -LUN713uWFIbEyQkDO7Ty5SKXjSPuz7F6VkTCiT5B6OSKrTfaiyuGoTtz833YURKG -Wn0PcG2xj+8CAwEAAaAAMA0GCSqGSIb3DQEBCwUAA4IBAQAToQ8VV1Ur5WEkFBPq -Os8ThCh428M399foOR0f7YrAxKSaNIxH0gZjNO9204zEVHl+dwdtnjI/8BNXl61L -Gb8zX0NS+SLLlCgZG0Z5jzKew9el4AoCCDYi7DjVOvGdM1KEF38gwIiUoIAaB6dT -ts/MCfx6msPLIx4FgyxYL8yx7nwlJ0AJ9hXrhlHUFrtVpOfAkf9i38Jt7YtytHTr -2MZzUlDo4zge/zphtDCPOOpLlBUl27bYo4VDt8xsBgXcOiNpJvKOVrSj3y90Qrnh -tIBauFCxKPZRptcbgVxn4XFP9IjivTODa5HOBPuxI3Mb9bxLisoISzm7ffe1nVeD -6dlT ------END CERTIFICATE REQUEST----- diff --git a/test/e2e/certs/e2e_test_query_config_server.crt b/test/e2e/certs/e2e_test_query_config_server.crt deleted file mode 100644 index 
f9764eb484..0000000000 --- a/test/e2e/certs/e2e_test_query_config_server.crt +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID6DCCAtCgAwIBAgIUOgft+f/l5XvnweoJJs1E1f8wEOYwDQYJKoZIhvcNAQEL -BQAwEDEOMAwGA1UEAwwFbXktY2EwIBcNMjEwNzI1MjIxNjU5WhgPMjI5NTA1MDky -MjE2NTlaMC4xLDAqBgNVBAMMI2UyZV90ZXN0X3F1ZXJ5X2NvbmZpZy1zaWRlY2Fy -LWFsb25lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAzUlmGN13bhFj -i84de+73UPDyB4NRUrdBn4WDkZDcRIagiAbOPIDqYvj4gAQEpxQfl/srs2taoPCm -M2kc5OtwCmoRO8uKMcdNnIAiLByDcpWQusOCPQiw0178y1g/XZ+GU3UkVUGXD8M2 -SI5bbEU/F7HPp/MC5ILLktnCnGUQKqB3S9tAvPbZ01KN/xqHHwo2JzxkWpFu+W90 -ix1VStJoCjZVYwlUt+VH9i7AHX/buDIBIeyDDCI3PLFP+mPfvpvKPzgwSP5VHRK3 -l0+AqFGEo5dF+nB4w/HexMzVz3A5c+HLChDH1qORmtIadzHqyq0CqNLd1Fp0Im7f -F1K4pt3SzwIDAQABo4IBGDCCARQwCQYDVR0TBAIwADARBglghkgBhvhCAQEEBAMC -BkAwMwYJYIZIAYb4QgENBCYWJE9wZW5TU0wgR2VuZXJhdGVkIFNlcnZlciBDZXJ0 -aWZpY2F0ZTAdBgNVHQ4EFgQU88Aco+YLRzKRPKb9eQCwEITbJAQwSwYDVR0jBEQw -QoAUHBqe8LnYjSHpQJTj40rUbQQMtfehFKQSMBAxDjAMBgNVBAMMBW15LWNhghQb -jQIIcOFxBnNi1+db/f6fZrWDIjAOBgNVHQ8BAf8EBAMCBaAwEwYDVR0lBAwwCgYI -KwYBBQUHAwEwLgYDVR0RBCcwJYIjZTJlX3Rlc3RfcXVlcnlfY29uZmlnLXNpZGVj -YXItYWxvbmUwDQYJKoZIhvcNAQELBQADggEBAAj6NcE+UfxuI+XJhbYAZ/ovMv5c -qatqmmq4MF+vrMVLVScwTS3qNjoN5npz1e+01XBpL7pAJQAKbsgxN+2QotYWxzzv -f9XcWa378+u04FGBXKA46iAyOmfVPcUuNT9aIfEIZc6UP39HXlz1qXLTN8LVtWDv -59yCGOe87x97/R9i2p/qLlXqbW8hdDU5STNBN15oc2yRSQ0b29laGbjZTTXD/74V -kqJS6EC+MKNup5XQEHk4Ngy1x6zTk6jqNY4aj12kzxRpZVFkUDD3JMnN7vVHOu4/ -HZ8bcm6Hm4SUeaXHK75a+GUb2jehEKnt2MYIJC2tLmj7h3G4A54xnh1JFjo= ------END CERTIFICATE----- diff --git a/test/e2e/certs/e2e_test_query_config_server.csr b/test/e2e/certs/e2e_test_query_config_server.csr deleted file mode 100644 index 5c879ab075..0000000000 --- a/test/e2e/certs/e2e_test_query_config_server.csr +++ /dev/null @@ -1,16 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIICczCCAVsCAQAwLjEsMCoGA1UEAwwjZTJlX3Rlc3RfcXVlcnlfY29uZmlnLXNp -ZGVjYXItYWxvbmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNSWYY -3XduEWOLzh177vdQ8PIHg1FSt0GfhYORkNxEhqCIBs48gOpi+PiABASnFB+X+yuz -a1qg8KYzaRzk63AKahE7y4oxx02cgCIsHINylZC6w4I9CLDTXvzLWD9dn4ZTdSRV -QZcPwzZIjltsRT8Xsc+n8wLkgsuS2cKcZRAqoHdL20C89tnTUo3/GocfCjYnPGRa -kW75b3SLHVVK0mgKNlVjCVS35Uf2LsAdf9u4MgEh7IMMIjc8sU/6Y9++m8o/ODBI -/lUdEreXT4CoUYSjl0X6cHjD8d7EzNXPcDlz4csKEMfWo5Ga0hp3MerKrQKo0t3U -WnQibt8XUrim3dLPAgMBAAGgADANBgkqhkiG9w0BAQsFAAOCAQEAvYhHqWREJE40 -+0FFB52LWoig0HASv1ytZCzbaZWSFcV30h2iSSU+gGTk3wdf+eM3xWysIbqPqhOm -xyAfkoGRsFbJO0sZXvB6/GGicUDRQ74pIcP/1LHLcGe+wFUYojHIyczMxX3REvJH -tqhN2xSz0veo7+jZmF64SxTdwClY5/khAPdYdde3vu3KIW4nHDCXF4sFxOB+BFLg -YxBFUQAKAFgRU89tXs2/O3VVYP2PUe6cRv3suL/C7UUW3rxUSfxCiYumG8Wm3xdp -gtqQGRhTxHw6QjZQNywnOY79QQTR/bypplcLayKqEq8bO6KsYXB/jbYi6TsaTBMl -Y1RHz5UQWw== ------END CERTIFICATE REQUEST----- diff --git a/test/e2e/certs/testca.crt b/test/e2e/certs/testca.crt deleted file mode 100644 index 368cadb785..0000000000 --- a/test/e2e/certs/testca.crt +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDAzCCAeugAwIBAgIUG40CCHDhcQZzYtfnW/3+n2a1gyIwDQYJKoZIhvcNAQEL -BQAwEDEOMAwGA1UEAwwFbXktY2EwIBcNMjEwNzI1MjIwNDQ2WhgPMjI5NTA1MDky -MjA0NDZaMBAxDjAMBgNVBAMMBW15LWNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAnkpmvnBW4UWw/W3m2Q9FJl1vVrZ21NneUapo9snyHH7rq9V/Kl+K -NZw4ZnxYUJI6QCJ0FLipj/RhtHnBEBu5SnyCi78aInGkkUECuvXLVFnG/9vPjpGV -fqQg3457VvsCexVIpEVHAVCI+pyQmVntIm9k5jzixzJvcnoXvlQLWO5w/b2KtA0B -wZGvaFoXl6KXr70T6Nhj6UOA1CZF2+cNB4EAIv7dV67y5+sBp7zu9pgF1ApjNV7e -QxEQUaT8LmKFsi7Xuj5ABlzb6Yiqzm2JZBO8V9wRiScchy8yRwS256eX1ASLRTI9 -l9Tvx9mYDyND27vTAlnJyPkeOeB12YgMgQIDAQABo1MwUTAdBgNVHQ4EFgQUHBqe 
-8LnYjSHpQJTj40rUbQQMtfcwHwYDVR0jBBgwFoAUHBqe8LnYjSHpQJTj40rUbQQM -tfcwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAH6OVef/bWgg3 -xJbdtQC5dI1OAHCL2GYZfHyquEXaCX5hACfBm5Nmqv/GYU5nrrS9+sdVOzkRL4pt -HuAwYHBdxvg1qvZTqBp65hTkni/zQC5K0rCgjDe452EuwK2pXhp8k4ChYG0BCGo0 -nXJU2yfpdu/WjHdrz/Nj+XoaPveb0IeLSdWJ8zUj9LZmpeetBkCppy68kKhCzgVZ -hbme+XKE6gnNxEYmQsQPc14NOVXGjxll9FMpkjMTLcXwjWkRmoqDDcfnmDcdg/eD -2WIgVZy7GlwtU1/dXs3AhBnwyUALgllHhXZd7DWSRNko/kg6L35Ncv0SyFiFRZY4 -06xGFXOrIg== ------END CERTIFICATE----- diff --git a/test/e2e/certs/testca.key b/test/e2e/certs/testca.key deleted file mode 100644 index dec4156918..0000000000 --- a/test/e2e/certs/testca.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCeSma+cFbhRbD9 -bebZD0UmXW9WtnbU2d5Rqmj2yfIcfuur1X8qX4o1nDhmfFhQkjpAInQUuKmP9GG0 -ecEQG7lKfIKLvxoicaSRQQK69ctUWcb/28+OkZV+pCDfjntW+wJ7FUikRUcBUIj6 -nJCZWe0ib2TmPOLHMm9yehe+VAtY7nD9vYq0DQHBka9oWheXopevvRPo2GPpQ4DU -JkXb5w0HgQAi/t1XrvLn6wGnvO72mAXUCmM1Xt5DERBRpPwuYoWyLte6PkAGXNvp -iKrObYlkE7xX3BGJJxyHLzJHBLbnp5fUBItFMj2X1O/H2ZgPI0Pbu9MCWcnI+R45 -4HXZiAyBAgMBAAECggEBAJp/B/VMdJ//g5Z4FF87tkSR4cAC680osQOIjlaCGRuT -Dbdz9GCJS76zsaDmMNTDRLOgbShkbKui3VJnm/Z6+Mko6co4Revke7mCeIW+riyw -S7ki9Fy03/Lxg1xwFxZ4fJ1h0rrzGzC/SFNDSSdETF3V1+eY+cdvBLP5Uv9jzIeI -+htIg9hAwiWhLtt9NyyO1zJ3Npfe6c+aPkJIWrf//TU9Bkv0Cje7+wZJDLeN4Trm -2kwmj774CYpKPsRKjD9RRRpmN9M9B7vomssoCdF0Qe/KD5Frq4rXJywY1/ZSU68B -HhbaiHckg/Zzm41OHxRhC6fyVXsdXqfPWrvw4DEaMaECgYEAzUOq+Q5kwWrY2tMT -6bl41l/sh7CczhMa5zTnLgREqxELueCtLRZUIf16HXQikQfnHHWOfDLL6IK6CO9e -KKw+S2yeywKBLAx9GnWgqUzdkqn39NGUmkp/+HgyYWj38VCZlYwAPwHAdBzU8puT -47Yo6y9c+fl+JT/v0RqqiyonN+UCgYEAxWprc569OIqDHrMlTZTiVYuYxRXlgDcK -tnefefDmoU2wAOX29ak7rzuaktPzJZ6NWhbFPN2US21y4y2zEMgNdjufxvJj7Dsv -+LL135P+7+pJPwGOQnJXcUVDq1a6lYsPp0Xov/+MaaprOtupqwVG5h7Y7EVh+p/T -C9eQ7K+sQG0CgYAs/3uYs7YUzLaTahiAcV9zJ34z1NzbNEb+jpNrfBYNpmP6sK1m -J/HzrZy6nj4Bq0jX7VF6Gqym4RmlYcwzgGb7o+k/ueHVZTt7tHOvCaG9SDOKYPOB -y3k4YCkzP46zQScaKKHZLiohijyInuCR19oqjKyGRFKKGfUZ0w22ClyknQKBgQCx -U6o5LPli9GhbBygWzAmSNdaAEe8YgFiLFmCVtCI1+gaCiA6RE/acrLln8iKSRB1M -Mu/wgEhYiHOORNp31HrjHSOkHM5Mv82TY40fy1xQPO6WPdt/LB4HjOP/3+OxuDvT -USSEchO6Rm4TbIS3LgJFaqgHcztTWITGOlffgt0C8QKBgESD9fgd7+qcqcEj6uk1 -uIKamTzacbbSmjcu3OU8S9hNItw+UDARcvan4wN0Y/LtbsaCaVU4DdRcxfo5ZgGi -JIK5ZiwaQ1gVO3+4CuNJ4Mq8z2AqmK9m5PBhunm1PxwQlh1BR8GeJzHo8RAMfqLC -pk6M/9D0m2Sqa4MAOzNAX1rA ------END PRIVATE KEY----- diff --git a/test/e2e/certs/testca.srl b/test/e2e/certs/testca.srl deleted file mode 100644 index 3b1528d849..0000000000 --- a/test/e2e/certs/testca.srl +++ /dev/null @@ -1 +0,0 @@ -3A07EDF9FFE5E57BE7C1EA0926CD44D5FF3010E7 diff --git a/test/e2e/certs/testclient.key b/test/e2e/certs/testclient.key deleted file mode 100644 index f7b0db76b9..0000000000 --- a/test/e2e/certs/testclient.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAwssUdXudFe51Wd9Qs922VHjwtSX9D5CpeXsV8rExrMSSBYsV -Fm2aNONFULUdFKn1qrJGneQqqacJb6KbCodsY7JECu1VZgGfOISjko0ZK4TuJLD0 -LTrtfpCuEubkviHAL95WoPXHavN3PUVBOKW40L3Vb700ZA1rQgGOxiGA1yJMQEV2 -WW8GaAkBoTVvlvjIR08Ovu7IZCQvQT3eKMSPzLmDDXryByAh6hbIWjL1xkugNb0B -iiRDjSRVSyBCAUwesw7pYS5zJVotQ3vXe5YUhsTJCQM7tPLlIpeNI+7PsXpWRMKJ -PkHo5IqtN9qLK4ahO3PzfdhREoZafQ9wbbGP7wIDAQABAoIBAAkyOTcKrPHaHiQu -DMtRDkiSpRqIPpg7hiS8EN8ySlwTs6kbVdWHOx6AFEepJURSecyjOf4+RV1ZC3L/ -xD4NAqr6QYgaiEuWjAPJD0fd7XBGiE+nIv/3pjqxbeZqjCYFxeL8pi5XLCD6eYik -+l1IRZ9lNuxdjFmk4IlvNWRZNJHzcXb00fzkouf88hzDKryrY9c499AY71Hq1oV4 -1JaMvNQE5+mha42TB9DlMDrpo+4KmbSQEzmWR0vJaSBYZoJo2BTNt15ZqfqGZxcF 
-E2v6HbWwfrpJAKHVukoV9+O0Aww17fv59PAOiobx16HTDnnmvmfkd4rFRC2jBSrW -llCSqIECgYEA6Xwcgr3wlIkTp2ROCih56tKs9U449Cvs1YBttQw00KCs120MI7LJ -j1o5iSVHXwy6Li/Xkan4wMq8Q6wJde2lTmXuzqJNbBpPJ7f7nImApEI233GtUeDB -8jNfBbFgDxPJ/SUKV95Pkts4c702TKEjcIW1QorGQ9kfNW/pqqwVSqECgYEA1ZPQ -1ldDjMryh+9aSCeosI81dTHD8XB3hWonh6cgGSGlamHvnkHr4yha63b1SFAczUXK -UMsHhzjVNgwuOY79J4oKFMhkOilLbQTJAW+49dPYLd7YkWR2HCeg8zQDhTCOPih+ -sfPuJpXzMLxyPnYZB3PPnftxdL2wYvQDwNyA4I8CgYEAopo9nMu8pSUFm/jd5HMl -3OSVcUzlmx6QFGwZexGiIn0Tgf++iYn0lzIFib59gvNxIcNNxHLz+wf1rsc723YC -PqP8eMlNU/DKmWWnA/A6t6opXtljE/eL+inOjj9mH3nvnK1UE7eOAT9MhEYMb7d0 -3+MuRcYxp4SpGGT0/VhUh6ECgYEAmOJL7vLefu1T+Wyj4tzIy0JoUPEDw1PvqsgX -bep7dqH4KBPdFBs6QQxFMSKhaFc4ltRrg7QiOnC7ZwoHGNq7as9Mch4UaPHgdTSk -smuIKiiwKf3IZyNTzMmtVAqarn/YgrQ4JShpBKP4Rp3O5dliB0ymhP9V8uxKVDs6 -aJcvhg8CgYBPXYYud2RU105diuyO+R2kB1VpoT5k+qGUtLWSq8Z8M/Om2tJSkTCj -g7uBaGHCIOCum4sDKKtcaCV6ReNRuxENOP/0zOvz4D61opgannwriQiLbnxtD6Tm -Jr6zhxOEEbh4uktqafBvm2XMGxj75mvI9UUQPZbxg+QKd+H4gttPsQ== ------END RSA PRIVATE KEY----- diff --git a/test/e2e/certs/testserver.key b/test/e2e/certs/testserver.key deleted file mode 100644 index 341c21eac2..0000000000 --- a/test/e2e/certs/testserver.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAzUlmGN13bhFji84de+73UPDyB4NRUrdBn4WDkZDcRIagiAbO -PIDqYvj4gAQEpxQfl/srs2taoPCmM2kc5OtwCmoRO8uKMcdNnIAiLByDcpWQusOC -PQiw0178y1g/XZ+GU3UkVUGXD8M2SI5bbEU/F7HPp/MC5ILLktnCnGUQKqB3S9tA -vPbZ01KN/xqHHwo2JzxkWpFu+W90ix1VStJoCjZVYwlUt+VH9i7AHX/buDIBIeyD -DCI3PLFP+mPfvpvKPzgwSP5VHRK3l0+AqFGEo5dF+nB4w/HexMzVz3A5c+HLChDH -1qORmtIadzHqyq0CqNLd1Fp0Im7fF1K4pt3SzwIDAQABAoIBAFma1iO4V28FzAu0 -Fg6Iig8cfKhm21JdhxlYL8uUDXEjymJVhiPGqYg14FIQbu92yyuNiM+x5kclg4Yh -NWl+ktMY5RjSiSmdMvVCwKe8cmvLePbsa6a0amp4paMki9QT05AOK6PDE2rf7IvT -B3jE6Psws9E9M2AG71DUGCwnuNzBkYFzbgFBhOcOeXE4olkBLzImrjdwZhu7bx8h -MGzkLTOom9pqnCJ1lPatX+jaKETTP/7OMwXBnNxD0byLxRLWS5fhdbHtNCUjRIGV -t5x8Fp7dEfMKr7FzMKeXxIAp9RBvHYNgSYuwabrTL4m/yI6fK9d2PfDRavELISSH -0hgmyEECgYEA9NEg09jB/bJR3H7FS9DHQliaAjzM05jyO5nb2Hb6KebZA4t4dB1o -5kiDsK+l7Fo7DE9Ev96D88Z+KjsT8kVkmnQhESJ63PUtIh/OFwaaQ0wYUzDA4rY+ -JWvtuGJI4m0V0ay26E1p7x05Yc6rP4h6Pj4fAfXBi4SqhKeY0rt0k98CgYEA1qoB -3M26/Reu6U+hgBefQJjVPz/OHXFoPOkpuPuuV4T72YUs+1WCGKac/KbWyvP5mRNR -/r6JEP1O7PhyubTASL32HvWEiukGcspD9PfvUiPKVDUfuwfAfA5ki9eE+RxF7xGW -06JD6eBDDn2KUUeteWGcBgvixvsthbMj/nDIHxECgYAaWcAczUHkGqNxBjwaMvif -jYXWI9EwHVBFVgQxwmyewfgtx39JkDjQQJHcHKvRSsz8QDP4Ku0s+fpudTlqZEm1 -zjYY8AslaUZYW/AEznMgNzqPCbpZLKB3yqwlon6R0If+r4ZobpyuXgjlHCeWiQwP -UwRnY5pXeAoaiSeL1x1vJQKBgQDPObsmjqwuW0iNDkRXxXYFBAUIU+4D2lDITEXs -v3nkHwqJR9jOoBNA/Mab1+uObMvNDCuhggoLjp08gfMuduEQ1+DariTPfHivI7dK -qnYrk0Vk2QvSn38fwz0pClrhqHuSeCZkE0cAZXAWcvat1EYQ4TufUqYPdLbTVMYw -gGuQIQKBgAOshacIU0wDRz+FFN6rGKiroiNPPu3iXLlapdBhyh8xjzF0X6hdwfrY -EfXd4JuLfxeJCfP0hAx32+pv6OPRJXC10MIn/wJOZb18dh57vSOdvhybj6j0nYU0 -noS2EZrfPuoBxxRjJws1WIrixhM6wWvrUafs8uIltsn0/u0c2QmY ------END RSA PRIVATE KEY----- diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 3266b7ca2d..00087d803f 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -6,15 +6,14 @@ package e2e_test import ( "context" "fmt" - "io" "io/ioutil" "net/http/httptest" "net/url" "os" + "os/exec" "path/filepath" "sort" "strings" - "syscall" "testing" "time" @@ -232,7 +231,14 @@ func TestQueryWithEndpointConfig(t *testing.T) { queryFileSDDir := filepath.Join(s.SharedDir(), "data", "querier", "1") container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") - testutil.Ok(t, cpyDir("./certs", queryFileSDDir)) + err = 
os.MkdirAll(queryFileSDDir, 0750) + testutil.Ok(t, err) + + // Generate certificates from ./test/e2e/certs/create.sh + cmd := exec.Command("/bin/bash", "../../../../certs/create.sh") + cmd.Dir = queryFileSDDir + _, err = cmd.Output() + testutil.Ok(t, err) tlsConfig := e2e.BuildArgs(map[string]string{ "--grpc-server-tls-cert": filepath.Join(container, "e2e_test_query_config_server.crt"), @@ -260,7 +266,7 @@ func TestQueryWithEndpointConfig(t *testing.T) { CertFile: filepath.Join(container, "e2e_test_query_config_client.crt"), KeyFile: filepath.Join(container, "testclient.key"), CaCertFile: filepath.Join(container, "testca.crt"), - ServerName: "e2e_test_query_config-sidecar-alone", + ServerName: "e2e_test_query_config-sidecar", }, Endpoints: []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc")}, }, @@ -875,65 +881,3 @@ func queryExemplars(t *testing.T, ctx context.Context, addr, q string, start, en return nil })) } - -func cpyDir(scrDir, dest string) error { - entries, err := ioutil.ReadDir(scrDir) - if err != nil { - return err - } - for _, entry := range entries { - sourcePath := filepath.Join(scrDir, entry.Name()) - destPath := filepath.Join(dest, entry.Name()) - - fileInfo, err := os.Stat(sourcePath) - if err != nil { - return err - } - - stat, ok := fileInfo.Sys().(*syscall.Stat_t) - if !ok { - return fmt.Errorf("failed to get raw syscall.Stat_t data for '%s'", sourcePath) - } - - switch fileInfo.Mode() & os.ModeType { - case os.ModeDir: - if err := os.MkdirAll(destPath, 0755); err != nil { - return err - } - if err := cpyDir(sourcePath, destPath); err != nil { - return err - } - case os.ModeSymlink: - return errors.New("symlink copy is not supported") - default: - if err := os.MkdirAll(filepath.Dir(destPath), 0755); err != nil { - return err - } - if err := cpyFile(sourcePath, destPath); err != nil { - return err - } - } - if err := os.Lchown(destPath, int(stat.Uid), int(stat.Gid)); err != nil { - return err - } - } - return nil -} - -func cpyFile(srcFile, dstFile string) (err error) { - out, err := os.Create(dstFile) - if err != nil { - return err - } - - defer runutil.CloseWithErrCapture(&err, out, "close dst") - - in, err := os.Open(srcFile) - defer runutil.CloseWithErrCapture(&err, in, "close src") - if err != nil { - return err - } - - _, err = io.Copy(out, in) - return err -} From e6ad5e931d134f9182fafd941f93b4ef48cdbf7b Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Thu, 12 Aug 2021 17:56:36 +0530 Subject: [PATCH 19/29] added --store-strict (noTLS) and some comments to code Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 5 +++-- cmd/thanos/receive.go | 11 ++++++----- pkg/query/storeset.go | 2 +- pkg/store/config.go | 16 +++++++++++++--- 4 files changed, 23 insertions(+), 11 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 2c163a462c..47d857521f 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -378,7 +378,7 @@ func runQuery( var endpointConfig []store.Config var err error if len(endpointConfigYAML) > 0 { - endpointConfig, err = store.LoadConfig(endpointConfigYAML, storeAddrs, fileSDConfig) + endpointConfig, err = store.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig) if err != nil { return errors.Wrap(err, "loading endpoint config") } @@ -422,7 +422,6 @@ func runQuery( ) var storeSets []*query.EndpointSet - fileSDCache := cache.New() for _, config := range endpointConfig { secure = !(config.TLSConfig == store.TLSConfiguration{}) dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, 
tracer, config.Name, secure, skipVerify, config.TLSConfig) @@ -578,6 +577,8 @@ func runQuery( } var ( + // Adding separate for loop for each client func() below because storeSets is being populated in a go-routine and this code executes before it. + // Implemented as a part of https://github.com/thanos-io/thanos/blob/main/docs/proposals-accepted/202106-automated-per-endpoint-mTLS.md allClients = func() []store.Client { var get []store.Client for _, ss := range storeSets { diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index c5b6f2023e..1de31917f3 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -120,11 +120,12 @@ func runReceive( return err } - var TLSConfig store.TLSConfiguration - TLSConfig.CertFile = conf.rwClientCert - TLSConfig.KeyFile = conf.rwClientKey - TLSConfig.CaCertFile = conf.rwClientServerCA - TLSConfig.ServerName = conf.rwClientServerName + TLSConfig := store.TLSConfiguration{ + CertFile: conf.rwClientCert, + KeyFile: conf.rwClientKey, + CaCertFile: conf.rwClientServerCA, + ServerName: conf.rwClientServerName, + } dialOpts, err := extgrpc.StoreClientGRPCOpts( logger, diff --git a/pkg/query/storeset.go b/pkg/query/storeset.go index 8d5bdc82be..d54d54571a 100644 --- a/pkg/query/storeset.go +++ b/pkg/query/storeset.go @@ -149,7 +149,7 @@ func newStoreSetNodeCollector(configInstance string) *storeSetNodeCollector { connectionsDesc: prometheus.NewDesc( "thanos_store_nodes_grpc_connections", "Number of gRPC connection to Store APIs. Opened connection means healthy store APIs available for Querier.", - []string{"external_labels", "store_type"}, map[string]string{"config_instance": configInstance}, + []string{"external_labels", "store_type"}, map[string]string{"config_provider_name": configInstance}, ), } } diff --git a/pkg/store/config.go b/pkg/store/config.go index e003814c47..113f28a7b7 100644 --- a/pkg/store/config.go +++ b/pkg/store/config.go @@ -11,6 +11,8 @@ import ( ) // Config represents the configuration of a set of Store API endpoints. +// If `tls_config` is omitted then TLS will not be used. +// Configs must have a name and they must be unique. type Config struct { Name string `yaml:"name"` TLSConfig TLSConfiguration `yaml:"tls_config"` @@ -39,7 +41,7 @@ const ( ) // NewConfig returns list of per-endpoint TLS config from individual flags. -func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { +func NewConfig(endpointAddrs, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { var endpointConfig []Config // Adding --endpoint, --endpoint.sd-files to []endpointConfig, if provided. @@ -65,7 +67,7 @@ func NewConfig(endpointAddrs []string, strictEndpointAddrs []string, fileSDConfi } // LoadConfig returns list of per-endpoint TLS config. -func LoadConfig(confYAML []byte, endpointAddrs []string, fileSDConfig *file.SDConfig) ([]Config, error) { +func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, fileSDConfig *file.SDConfig) ([]Config, error) { var endpointConfig []Config if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { @@ -86,7 +88,7 @@ func LoadConfig(confYAML []byte, endpointAddrs []string, fileSDConfig *file.SDCo } } - // Adding --endpoint, --endpoint.sd-files with NO-TLS to []endpointConfig, if provided. + // Adding --endpoint, --endpoint.sd-files with NO-TLS, if provided. 
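A rough caller-side sketch of the merge implemented below (endpoint names invented; yaml is the gopkg.in/yaml.v2 package this file already uses for UnmarshalStrict):

	// Round-tripping through yaml.Marshal avoids hard-coding the YAML field names here.
	yamlCfg, _ := yaml.Marshal([]store.Config{{
		Name: "secured",
		TLSConfig: store.TLSConfiguration{
			CertFile:   "client.crt",
			KeyFile:    "client.key",
			CaCertFile: "ca.crt",
		},
		Endpoints: []string{"sidecar-tls:10901"},
	}})
	cfgs, err := store.LoadConfig(yamlCfg, []string{"sidecar-plain:10901"}, nil, nil)
	// Expected: err is nil and cfgs holds the "secured" entry from the YAML plus one appended
	// TLS-less entry for sidecar-plain:10901. Repeating sidecar-tls:10901 in --store/--endpoint
	// would instead be rejected by the duplicate-endpoint check further down in this function.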
if len(endpointAddrs) > 0 || fileSDConfig != nil { cfg := Config{} cfg.Endpoints = endpointAddrs @@ -96,6 +98,14 @@ func LoadConfig(confYAML []byte, endpointAddrs []string, fileSDConfig *file.SDCo endpointConfig = append(endpointConfig, cfg) } + // Adding --endpoint-strict endpoints with NO-TLS, if provided. + if len(strictEndpointAddrs) > 0 { + cfg := Config{} + cfg.Endpoints = strictEndpointAddrs + cfg.Mode = StrictEndpointMode + endpointConfig = append(endpointConfig, cfg) + } + // Checking if some endpoints are inputted more than once. allEndpoints := make(map[string]struct{}) for _, config := range endpointConfig { From 81e1f19009e55cbc8e620ca960b3f7aa1dee3d77 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 25 Aug 2021 17:05:37 +0530 Subject: [PATCH 20/29] amending flag description Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 4 ++-- docs/components/query.md | 16 ++++++++-------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 47d857521f..be89cd38ce 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -132,7 +132,7 @@ func registerQuery(app *extkingpin.App) { fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback."). Default("5m")) - endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains store API servers configuration. Either use this option or separate endpoint options (endpoint, endpoint.sd-files, endpoint.srict).", extflag.WithEnvSubstitution()) + endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains set of endpoints (e.g Store API) with optional TLS options. To enable TLS either use this option or deprecated ones --grpc-client-tls* .", extflag.WithEnvSubstitution()) // TODO(bwplotka): Grab this from TTL at some point. dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions."). @@ -423,7 +423,7 @@ func runQuery( var storeSets []*query.EndpointSet for _, config := range endpointConfig { - secure = !(config.TLSConfig == store.TLSConfiguration{}) + secure = (config.TLSConfig != store.TLSConfiguration{}) dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, config.Name, secure, skipVerify, config.TLSConfig) if err != nil { return errors.Wrap(err, "building gRPC client") diff --git a/docs/components/query.md b/docs/components/query.md index 754198cd46..68e15e0da9 100644 --- a/docs/components/query.md +++ b/docs/components/query.md @@ -258,15 +258,15 @@ Flags: --endpoint.config= Alternative to 'endpoint.config-file' flag (mutually exclusive). Content of YAML file that - contains store API servers configuration. - Either use this option or separate endpoint - options (endpoint, endpoint.sd-files, - endpoint.srict). + contains set of endpoints (e.g Store API) with + optional TLS options. To enable TLS either use + this option or deprecated ones + --grpc-client-tls* . --endpoint.config-file= - Path to YAML file that contains store API - servers configuration. Either use this option - or separate endpoint options (endpoint, - endpoint.sd-files, endpoint.srict). + Path to YAML file that contains set of + endpoints (e.g Store API) with optional TLS + options. To enable TLS either use this option + or deprecated ones --grpc-client-tls* . --grpc-address="0.0.0.0:10901" Listen ip:port address for gRPC endpoints (StoreAPI). 
Make sure this address is routable From 1a81f80f39b5e7c22eef062c8aeba7c68d5a90e1 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Fri, 1 Oct 2021 17:28:49 +0530 Subject: [PATCH 21/29] removing conflicts-1 Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 4 +- pkg/api/query/v1.go | 12 + pkg/query/endpointset.go | 10 +- pkg/query/storeset.go | 746 ------------------------ pkg/query/storeset_test.go | 1095 ------------------------------------ pkg/ui/query.go | 24 +- 6 files changed, 24 insertions(+), 1867 deletions(-) delete mode 100644 pkg/query/storeset.go delete mode 100644 pkg/query/storeset_test.go diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index be89cd38ce..4ef009a01a 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -582,7 +582,7 @@ func runQuery( allClients = func() []store.Client { var get []store.Client for _, ss := range storeSets { - get = append(get, ss.Get()...) + get = append(get, ss.GetStoreClients()...) } return get } @@ -603,7 +603,7 @@ func runQuery( metadataClients = func() []metadatapb.MetadataClient { var getMetadataClient []metadatapb.MetadataClient for _, ss := range storeSets { - getMetadataClient = append(getMetadataClient, ss.GetMetadataClients()...) + getMetadataClient = append(getMetadataClient, ss.GetMetricMetadataClients()...) } return getMetadataClient } diff --git a/pkg/api/query/v1.go b/pkg/api/query/v1.go index 34ace22a84..83276a67ba 100644 --- a/pkg/api/query/v1.go +++ b/pkg/api/query/v1.go @@ -98,12 +98,16 @@ type QueryAPI struct { endpointStatus func() []query.EndpointStatus ======= replicaLabels []string +<<<<<<< HEAD <<<<<<< HEAD endpointSet *query.EndpointSet ======= storeSets []*query.StoreSet >>>>>>> addressed comments for querier >>>>>>> addressed comments for querier +======= + endpointSet []*query.EndpointSet +>>>>>>> removing conflicts-1 defaultRangeQueryStep time.Duration defaultInstantQueryMaxSourceResolution time.Duration @@ -115,6 +119,7 @@ type QueryAPI struct { // NewQueryAPI returns an initialized QueryAPI type. 
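// (As of this commit the API is wired for one *query.EndpointSet per endpoint Config entry,
// hence the []*query.EndpointSet type of the endpointSet field above and of the parameter below.)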
func NewQueryAPI( logger log.Logger, +<<<<<<< HEAD <<<<<<< HEAD endpointStatus func() []query.EndpointStatus, ======= @@ -124,6 +129,9 @@ func NewQueryAPI( storeSets []*query.StoreSet, >>>>>>> addressed comments for querier >>>>>>> addressed comments for querier +======= + endpointSet []*query.EndpointSet, +>>>>>>> removing conflicts-1 qe func(int64) *promql.Engine, c query.QueryableCreator, ruleGroups rules.UnaryClient, @@ -163,6 +171,7 @@ func NewQueryAPI( enableMetricMetadataPartialResponse: enableMetricMetadataPartialResponse, enableExemplarPartialResponse: enableExemplarPartialResponse, replicaLabels: replicaLabels, +<<<<<<< HEAD <<<<<<< HEAD endpointStatus: endpointStatus, ======= @@ -172,6 +181,9 @@ func NewQueryAPI( storeSets: storeSets, >>>>>>> addressed comments for querier >>>>>>> addressed comments for querier +======= + endpointSet: endpointSet, +>>>>>>> removing conflicts-1 defaultRangeQueryStep: defaultRangeQueryStep, defaultInstantQueryMaxSourceResolution: defaultInstantQueryMaxSourceResolution, defaultMetadataTimeRange: defaultMetadataTimeRange, diff --git a/pkg/query/endpointset.go b/pkg/query/endpointset.go index 727299db1f..37220096b3 100644 --- a/pkg/query/endpointset.go +++ b/pkg/query/endpointset.go @@ -208,13 +208,13 @@ type endpointSetNodeCollector struct { connectionsDesc *prometheus.Desc } -func newEndpointSetNodeCollector() *endpointSetNodeCollector { +func newEndpointSetNodeCollector(configInstance string) *endpointSetNodeCollector { return &endpointSetNodeCollector{ storeNodes: map[component.Component]map[string]int{}, connectionsDesc: prometheus.NewDesc( "thanos_store_nodes_grpc_connections", "Number of gRPC connection to Store APIs. Opened connection means healthy store APIs available for Querier.", - []string{"external_labels", "store_type"}, nil, + []string{"external_labels", "store_type"}, map[string]string{"config_provider_name": configInstance}, ), } } @@ -284,11 +284,15 @@ type EndpointSet struct { func NewEndpointSet( logger log.Logger, reg *prometheus.Registry, + configInstance string, endpointSpecs func() []EndpointSpec, dialOpts []grpc.DialOption, unhealthyEndpointTimeout time.Duration, ) *EndpointSet { - endpointsMetric := newEndpointSetNodeCollector() + if configInstance == "" { + configInstance = "default" + } + endpointsMetric := newEndpointSetNodeCollector(configInstance) if reg != nil { reg.MustRegister(endpointsMetric) } diff --git a/pkg/query/storeset.go b/pkg/query/storeset.go deleted file mode 100644 index d54d54571a..0000000000 --- a/pkg/query/storeset.go +++ /dev/null @@ -1,746 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
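A sketch (variable and helper names invented, error handling as in runQuery) of how the NewEndpointSet signature above, with its new configInstance argument, is meant to be driven from cmd/thanos/query.go, one EndpointSet and one labelled collector per Config entry:

	var endpointSets []*query.EndpointSet
	for _, cfg := range endpointConfig {
		secure := cfg.TLSConfig != (store.TLSConfiguration{})
		dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, cfg.Name, secure, skipVerify, cfg.TLSConfig)
		if err != nil {
			return errors.Wrap(err, "building gRPC client")
		}
		// endpointSpecsFor stands in for the func() []query.EndpointSpec closure built from
		// cfg.Endpoints, cfg.EndpointsSD and cfg.Mode; it is not part of the patch.
		endpointSets = append(endpointSets,
			query.NewEndpointSet(logger, reg, cfg.Name, endpointSpecsFor(cfg), dialOpts, unhealthyEndpointTimeout))
	}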
- -package query - -import ( - "context" - "encoding/json" - "fmt" - "math" - "sort" - "sync" - "time" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/pkg/labels" - "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" - "google.golang.org/grpc" - - "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/metadata/metadatapb" - "github.com/thanos-io/thanos/pkg/rules/rulespb" - "github.com/thanos-io/thanos/pkg/runutil" - "github.com/thanos-io/thanos/pkg/store" - "github.com/thanos-io/thanos/pkg/store/labelpb" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/targets/targetspb" -) - -const ( - unhealthyStoreMessage = "removing store because it's unhealthy or does not exist" -) - -type StoreSpec interface { - // Addr returns StoreAPI Address for the store spec. It is used as ID for store. - Addr() string - // Metadata returns current labels, store type and min, max ranges for store. - // It can change for every call for this method. - // If metadata call fails we assume that store is no longer accessible and we should not use it. - // NOTE: It is implementation responsibility to retry until context timeout, but a caller responsibility to manage - // given store connection. - Metadata(ctx context.Context, client storepb.StoreClient) (labelSets []labels.Labels, mint int64, maxt int64, storeType component.StoreAPI, err error) - - // StrictStatic returns true if the StoreAPI has been statically defined and it is under a strict mode. - StrictStatic() bool -} - -type RuleSpec interface { - // Addr returns RulesAPI Address for the rules spec. It is used as its ID. - Addr() string -} - -type TargetSpec interface { - // Addr returns TargetsAPI Address for the targets spec. It is used as its ID. - Addr() string -} - -type MetadataSpec interface { - // Addr returns MetadataAPI Address for the metadata spec. It is used as its ID. - Addr() string -} - -type ExemplarSpec interface { - // Addr returns ExemplarsAPI Address for the exemplars spec. It is used as its ID. - Addr() string -} - -// stringError forces the error to be a string -// when marshaled into a JSON. -type stringError struct { - originalErr error -} - -// MarshalJSON marshals the error into a string form. -func (e *stringError) MarshalJSON() ([]byte, error) { - return json.Marshal(e.originalErr.Error()) -} - -// Error returns the original underlying error. -func (e *stringError) Error() string { - return e.originalErr.Error() -} - -type StoreStatus struct { - Name string `json:"name"` - LastCheck time.Time `json:"lastCheck"` - LastError *stringError `json:"lastError"` - LabelSets []labels.Labels `json:"labelSets"` - StoreType component.StoreAPI `json:"-"` - MinTime int64 `json:"minTime"` - MaxTime int64 `json:"maxTime"` -} - -type grpcStoreSpec struct { - addr string - strictstatic bool -} - -// NewGRPCStoreSpec creates store pure gRPC spec. -// It uses Info gRPC call to get Metadata. -func NewGRPCStoreSpec(addr string, strictstatic bool) StoreSpec { - return &grpcStoreSpec{addr: addr, strictstatic: strictstatic} -} - -// StrictStatic returns true if the StoreAPI has been statically defined and it is under a strict mode. -func (s *grpcStoreSpec) StrictStatic() bool { - return s.strictstatic -} - -func (s *grpcStoreSpec) Addr() string { - // API addr should not change between state changes. 
- return s.addr -} - -// Metadata method for gRPC store API tries to reach host Info method until context timeout. If we are unable to get metadata after -// that time, we assume that the host is unhealthy and return error. -func (s *grpcStoreSpec) Metadata(ctx context.Context, client storepb.StoreClient) (labelSets []labels.Labels, mint, maxt int64, Type component.StoreAPI, err error) { - resp, err := client.Info(ctx, &storepb.InfoRequest{}, grpc.WaitForReady(true)) - if err != nil { - return nil, 0, 0, nil, errors.Wrapf(err, "fetching store info from %s", s.addr) - } - if len(resp.LabelSets) == 0 && len(resp.Labels) > 0 { - resp.LabelSets = []labelpb.ZLabelSet{{Labels: resp.Labels}} - } - - labelSets = make([]labels.Labels, 0, len(resp.LabelSets)) - for _, ls := range resp.LabelSets { - labelSets = append(labelSets, ls.PromLabels()) - } - return labelSets, resp.MinTime, resp.MaxTime, component.FromProto(resp.StoreType), nil -} - -// storeSetNodeCollector is a metric collector reporting the number of available storeAPIs for Querier. -// A Collector is required as we want atomic updates for all 'thanos_store_nodes_grpc_connections' series. -type storeSetNodeCollector struct { - mtx sync.Mutex - storeNodes map[component.StoreAPI]map[string]int - storePerExtLset map[string]int - - connectionsDesc *prometheus.Desc -} - -func newStoreSetNodeCollector(configInstance string) *storeSetNodeCollector { - return &storeSetNodeCollector{ - storeNodes: map[component.StoreAPI]map[string]int{}, - connectionsDesc: prometheus.NewDesc( - "thanos_store_nodes_grpc_connections", - "Number of gRPC connection to Store APIs. Opened connection means healthy store APIs available for Querier.", - []string{"external_labels", "store_type"}, map[string]string{"config_provider_name": configInstance}, - ), - } -} - -func (c *storeSetNodeCollector) Update(nodes map[component.StoreAPI]map[string]int) { - storeNodes := make(map[component.StoreAPI]map[string]int, len(nodes)) - storePerExtLset := map[string]int{} - - for k, v := range nodes { - storeNodes[k] = make(map[string]int, len(v)) - for kk, vv := range v { - storePerExtLset[kk] += vv - storeNodes[k][kk] = vv - } - } - - c.mtx.Lock() - defer c.mtx.Unlock() - c.storeNodes = storeNodes - c.storePerExtLset = storePerExtLset -} - -func (c *storeSetNodeCollector) Describe(ch chan<- *prometheus.Desc) { - ch <- c.connectionsDesc -} - -func (c *storeSetNodeCollector) Collect(ch chan<- prometheus.Metric) { - c.mtx.Lock() - defer c.mtx.Unlock() - - for storeType, occurrencesPerExtLset := range c.storeNodes { - for externalLabels, occurrences := range occurrencesPerExtLset { - var storeTypeStr string - if storeType != nil { - storeTypeStr = storeType.String() - } - ch <- prometheus.MustNewConstMetric(c.connectionsDesc, prometheus.GaugeValue, float64(occurrences), externalLabels, storeTypeStr) - } - } -} - -// StoreSet maintains a set of active stores. It is backed up by Store Specifications that are dynamically fetched on -// every Update() call. -type StoreSet struct { - logger log.Logger - - // Store specifications can change dynamically. If some store is missing from the list, we assuming it is no longer - // accessible and we close gRPC client for it. 
- storeSpecs func() []StoreSpec - ruleSpecs func() []RuleSpec - targetSpecs func() []TargetSpec - metadataSpecs func() []MetadataSpec - exemplarSpecs func() []ExemplarSpec - dialOpts []grpc.DialOption - gRPCInfoCallTimeout time.Duration - - updateMtx sync.Mutex - storesMtx sync.RWMutex - storesStatusesMtx sync.RWMutex - - // Main map of stores currently used for fanout. - stores map[string]*storeRef - storesMetric *storeSetNodeCollector - - // Map of statuses used only by UI. - storeStatuses map[string]*StoreStatus - unhealthyStoreTimeout time.Duration -} - -// NewStoreSet returns a new set of store APIs and potentially Rules APIs from given specs. -func NewStoreSet( - logger log.Logger, - reg *prometheus.Registry, - configInstance string, - storeSpecs func() []StoreSpec, - ruleSpecs func() []RuleSpec, - targetSpecs func() []TargetSpec, - metadataSpecs func() []MetadataSpec, - exemplarSpecs func() []ExemplarSpec, - dialOpts []grpc.DialOption, - unhealthyStoreTimeout time.Duration, -) *StoreSet { - if configInstance == "" { - configInstance = "default" - } - storesMetric := newStoreSetNodeCollector(configInstance) - if reg != nil { - reg.MustRegister(storesMetric) - } - - if logger == nil { - logger = log.NewNopLogger() - } - if storeSpecs == nil { - storeSpecs = func() []StoreSpec { return nil } - } - if ruleSpecs == nil { - ruleSpecs = func() []RuleSpec { return nil } - } - if targetSpecs == nil { - targetSpecs = func() []TargetSpec { return nil } - } - if metadataSpecs == nil { - metadataSpecs = func() []MetadataSpec { return nil } - } - if exemplarSpecs == nil { - exemplarSpecs = func() []ExemplarSpec { return nil } - } - - ss := &StoreSet{ - logger: log.With(logger, "component", "storeset"), - storeSpecs: storeSpecs, - ruleSpecs: ruleSpecs, - targetSpecs: targetSpecs, - metadataSpecs: metadataSpecs, - exemplarSpecs: exemplarSpecs, - dialOpts: dialOpts, - storesMetric: storesMetric, - gRPCInfoCallTimeout: 5 * time.Second, - stores: make(map[string]*storeRef), - storeStatuses: make(map[string]*StoreStatus), - unhealthyStoreTimeout: unhealthyStoreTimeout, - } - return ss -} - -// TODO(bwplotka): Consider moving storeRef out of this package and renaming it, as it also supports rules API. -type storeRef struct { - storepb.StoreClient - - mtx sync.RWMutex - cc *grpc.ClientConn - addr string - // If rule is not nil, then this store also supports rules API. - rule rulespb.RulesClient - metadata metadatapb.MetadataClient - - // If exemplar is not nil, then this store also support exemplars API. - exemplar exemplarspb.ExemplarsClient - - // If target is not nil, then this store also supports targets API. - target targetspb.TargetsClient - - // Meta (can change during runtime). 
- labelSets []labels.Labels - storeType component.StoreAPI - minTime int64 - maxTime int64 - - logger log.Logger -} - -func (s *storeRef) Update(labelSets []labels.Labels, minTime, maxTime int64, storeType component.StoreAPI, rule rulespb.RulesClient, target targetspb.TargetsClient, metadata metadatapb.MetadataClient, exemplar exemplarspb.ExemplarsClient) { - s.mtx.Lock() - defer s.mtx.Unlock() - - s.storeType = storeType - s.labelSets = labelSets - s.minTime = minTime - s.maxTime = maxTime - s.rule = rule - s.target = target - s.metadata = metadata - s.exemplar = exemplar -} - -func (s *storeRef) StoreType() component.StoreAPI { - s.mtx.RLock() - defer s.mtx.RUnlock() - - return s.storeType -} - -func (s *storeRef) HasRulesAPI() bool { - s.mtx.RLock() - defer s.mtx.RUnlock() - - return s.rule != nil -} - -func (s *storeRef) HasTargetsAPI() bool { - s.mtx.RLock() - defer s.mtx.RUnlock() - - return s.target != nil -} - -func (s *storeRef) HasMetadataAPI() bool { - s.mtx.RLock() - defer s.mtx.RUnlock() - - return s.metadata != nil -} - -func (s *storeRef) HasExemplarsAPI() bool { - s.mtx.RLock() - defer s.mtx.RUnlock() - - return s.exemplar != nil -} - -func (s *storeRef) LabelSets() []labels.Labels { - s.mtx.RLock() - defer s.mtx.RUnlock() - - labelSet := make([]labels.Labels, 0, len(s.labelSets)) - for _, ls := range s.labelSets { - if len(ls) == 0 { - continue - } - // Compatibility label for Queriers pre 0.8.1. Filter it out now. - if ls[0].Name == store.CompatibilityTypeLabelName { - continue - } - labelSet = append(labelSet, ls.Copy()) - } - return labelSet -} - -func (s *storeRef) TimeRange() (mint, maxt int64) { - s.mtx.RLock() - defer s.mtx.RUnlock() - - return s.minTime, s.maxTime -} - -func (s *storeRef) String() string { - mint, maxt := s.TimeRange() - return fmt.Sprintf("Addr: %s LabelSets: %v Mint: %d Maxt: %d", s.addr, labelpb.PromLabelSetsToString(s.LabelSets()), mint, maxt) -} - -func (s *storeRef) Addr() string { - return s.addr -} - -func (s *storeRef) Close() { - runutil.CloseWithLogOnErr(s.logger, s.cc, fmt.Sprintf("store %v connection close", s.addr)) -} - -func newStoreAPIStats() map[component.StoreAPI]map[string]int { - nodes := make(map[component.StoreAPI]map[string]int, len(storepb.StoreType_name)) - for i := range storepb.StoreType_name { - nodes[component.FromProto(storepb.StoreType(i))] = map[string]int{} - } - return nodes -} - -// Update updates the store set. It fetches current list of store specs from function and updates the fresh metadata -// from all stores. Keeps around statically defined nodes that were defined with the strict mode. -func (s *StoreSet) Update(ctx context.Context) { - s.updateMtx.Lock() - defer s.updateMtx.Unlock() - - s.storesMtx.RLock() - stores := make(map[string]*storeRef, len(s.stores)) - for addr, st := range s.stores { - stores[addr] = st - } - s.storesMtx.RUnlock() - - level.Debug(s.logger).Log("msg", "starting updating storeAPIs", "cachedStores", len(stores)) - - activeStores := s.getActiveStores(ctx, stores) - level.Debug(s.logger).Log("msg", "checked requested storeAPIs", "activeStores", len(activeStores), "cachedStores", len(stores)) - - stats := newStoreAPIStats() - - // Close stores that where not active this time (are not in active stores map). 
- for addr, st := range stores { - if _, ok := activeStores[addr]; ok { - stats[st.StoreType()][labelpb.PromLabelSetsToString(st.LabelSets())]++ - continue - } - - st.Close() - delete(stores, addr) - s.updateStoreStatus(st, errors.New(unhealthyStoreMessage)) - level.Info(s.logger).Log("msg", unhealthyStoreMessage, "address", addr, "extLset", labelpb.PromLabelSetsToString(st.LabelSets())) - } - - // Add stores that are not yet in stores. - for addr, st := range activeStores { - if _, ok := stores[addr]; ok { - continue - } - - extLset := labelpb.PromLabelSetsToString(st.LabelSets()) - - // All producers should have unique external labels. While this does not check only StoreAPIs connected to - // this querier this allows to notify early user about misconfiguration. Warn only. This is also detectable from metric. - if st.StoreType() != nil && - (st.StoreType() == component.Sidecar || st.StoreType() == component.Rule) && - stats[component.Sidecar][extLset]+stats[component.Rule][extLset] > 0 { - - level.Warn(s.logger).Log("msg", "found duplicate storeAPI producer (sidecar or ruler). This is not advices as it will malform data in in the same bucket", - "address", addr, "extLset", extLset, "duplicates", fmt.Sprintf("%v", stats[component.Sidecar][extLset]+stats[component.Rule][extLset]+1)) - } - stats[st.StoreType()][extLset]++ - - stores[addr] = st - s.updateStoreStatus(st, nil) - - if st.HasRulesAPI() { - level.Info(s.logger).Log("msg", "adding new rulesAPI to query storeset", "address", addr) - } - - if st.HasExemplarsAPI() { - level.Info(s.logger).Log("msg", "adding new exemplarsAPI to query storeset", "address", addr) - } - - if st.HasTargetsAPI() { - level.Info(s.logger).Log("msg", "adding new targetsAPI to query storeset", "address", addr) - } - - level.Info(s.logger).Log("msg", "adding new storeAPI to query storeset", "address", addr, "extLset", extLset) - } - - s.storesMetric.Update(stats) - s.storesMtx.Lock() - s.stores = stores - s.storesMtx.Unlock() - - s.cleanUpStoreStatuses(stores) -} - -func (s *StoreSet) getActiveStores(ctx context.Context, stores map[string]*storeRef) map[string]*storeRef { - var ( - // UNIQUE? - activeStores = make(map[string]*storeRef, len(stores)) - mtx sync.Mutex - wg sync.WaitGroup - - storeAddrSet = make(map[string]struct{}) - ruleAddrSet = make(map[string]struct{}) - targetAddrSet = make(map[string]struct{}) - metadataAddrSet = make(map[string]struct{}) - exemplarAddrSet = make(map[string]struct{}) - ) - - // Gather active stores map concurrently. Build new store if does not exist already. - for _, ruleSpec := range s.ruleSpecs() { - ruleAddrSet[ruleSpec.Addr()] = struct{}{} - } - - // Gather active targets map concurrently. Add a new target if it does not exist already. - for _, targetSpec := range s.targetSpecs() { - targetAddrSet[targetSpec.Addr()] = struct{}{} - } - - // Gather active stores map concurrently. Build new store if does not exist already. - for _, metadataSpec := range s.metadataSpecs() { - metadataAddrSet[metadataSpec.Addr()] = struct{}{} - } - - // Gather active stores map concurrently. Build new store if does not exist already. - for _, exemplarSpec := range s.exemplarSpecs() { - exemplarAddrSet[exemplarSpec.Addr()] = struct{}{} - } - - // Gather healthy stores map concurrently. Build new store if does not exist already. 
- for _, storeSpec := range s.storeSpecs() { - if _, ok := storeAddrSet[storeSpec.Addr()]; ok { - level.Warn(s.logger).Log("msg", "duplicated address in store nodes", "address", storeSpec.Addr()) - continue - } - storeAddrSet[storeSpec.Addr()] = struct{}{} - - wg.Add(1) - go func(spec StoreSpec) { - defer wg.Done() - - addr := spec.Addr() - - ctx, cancel := context.WithTimeout(ctx, s.gRPCInfoCallTimeout) - defer cancel() - - st, seenAlready := stores[addr] - if !seenAlready { - // New store or was unactive and was removed in the past - create new one. - conn, err := grpc.DialContext(ctx, addr, s.dialOpts...) - if err != nil { - s.updateStoreStatus(&storeRef{addr: addr}, err) - level.Warn(s.logger).Log("msg", "update of store node failed", "err", errors.Wrap(err, "dialing connection"), "address", addr) - return - } - - st = &storeRef{StoreClient: storepb.NewStoreClient(conn), storeType: component.UnknownStoreAPI, cc: conn, addr: addr, logger: s.logger} - if spec.StrictStatic() { - st.maxTime = math.MaxInt64 - } - } - - var rule rulespb.RulesClient - if _, ok := ruleAddrSet[addr]; ok { - rule = rulespb.NewRulesClient(st.cc) - } - - var target targetspb.TargetsClient - if _, ok := targetAddrSet[addr]; ok { - target = targetspb.NewTargetsClient(st.cc) - } - - var metadata metadatapb.MetadataClient - if _, ok := metadataAddrSet[addr]; ok { - metadata = metadatapb.NewMetadataClient(st.cc) - } - - var exemplar exemplarspb.ExemplarsClient - if _, ok := exemplarAddrSet[addr]; ok { - exemplar = exemplarspb.NewExemplarsClient(st.cc) - } - - // Check existing or new store. Is it healthy? What are current metadata? - labelSets, minTime, maxTime, storeType, err := spec.Metadata(ctx, st.StoreClient) - if err != nil { - if !seenAlready && !spec.StrictStatic() { - // Close only if new and not a strict static node. - // Unactive `s.stores` will be closed later on. - st.Close() - } - s.updateStoreStatus(st, err) - level.Warn(s.logger).Log("msg", "update of store node failed", "err", errors.Wrap(err, "getting metadata"), "address", addr) - - if !spec.StrictStatic() { - return - } - - // Still keep it around if static & strict mode enabled. 
- mtx.Lock() - defer mtx.Unlock() - - activeStores[addr] = st - return - } - - s.updateStoreStatus(st, nil) - st.Update(labelSets, minTime, maxTime, storeType, rule, target, metadata, exemplar) - - mtx.Lock() - defer mtx.Unlock() - - activeStores[addr] = st - }(storeSpec) - } - wg.Wait() - - for ruleAddr := range ruleAddrSet { - if _, ok := storeAddrSet[ruleAddr]; !ok { - level.Warn(s.logger).Log("msg", "ignored rule store", "address", ruleAddr) - } - } - return activeStores -} - -func (s *StoreSet) updateStoreStatus(store *storeRef, err error) { - s.storesStatusesMtx.Lock() - defer s.storesStatusesMtx.Unlock() - - status := StoreStatus{Name: store.addr} - prev, ok := s.storeStatuses[store.addr] - if ok { - status = *prev - } else { - mint, maxt := store.TimeRange() - status.MinTime = mint - status.MaxTime = maxt - } - - if err == nil { - status.LastCheck = time.Now() - mint, maxt := store.TimeRange() - status.LabelSets = store.LabelSets() - status.StoreType = store.StoreType() - status.MinTime = mint - status.MaxTime = maxt - status.LastError = nil - } else { - status.LastError = &stringError{originalErr: err} - } - - s.storeStatuses[store.addr] = &status -} - -func (s *StoreSet) GetStoreStatus() []StoreStatus { - s.storesStatusesMtx.RLock() - defer s.storesStatusesMtx.RUnlock() - - statuses := make([]StoreStatus, 0, len(s.storeStatuses)) - for _, v := range s.storeStatuses { - statuses = append(statuses, *v) - } - - sort.Slice(statuses, func(i, j int) bool { - return statuses[i].Name < statuses[j].Name - }) - return statuses -} - -// Get returns a list of all active stores. -func (s *StoreSet) Get() []store.Client { - s.storesMtx.RLock() - defer s.storesMtx.RUnlock() - - stores := make([]store.Client, 0, len(s.stores)) - for _, st := range s.stores { - stores = append(stores, st) - } - return stores -} - -// GetRulesClients returns a list of all active rules clients. -func (s *StoreSet) GetRulesClients() []rulespb.RulesClient { - s.storesMtx.RLock() - defer s.storesMtx.RUnlock() - - rules := make([]rulespb.RulesClient, 0, len(s.stores)) - for _, st := range s.stores { - if st.HasRulesAPI() { - rules = append(rules, st.rule) - } - } - return rules -} - -// GetTargetsClients returns a list of all active targets clients. -func (s *StoreSet) GetTargetsClients() []targetspb.TargetsClient { - s.storesMtx.RLock() - defer s.storesMtx.RUnlock() - - targets := make([]targetspb.TargetsClient, 0, len(s.stores)) - for _, st := range s.stores { - if st.HasTargetsAPI() { - targets = append(targets, st.target) - } - } - return targets -} - -// GetMetadataClients returns a list of all active metadata clients. -func (s *StoreSet) GetMetadataClients() []metadatapb.MetadataClient { - s.storesMtx.RLock() - defer s.storesMtx.RUnlock() - - metadataClients := make([]metadatapb.MetadataClient, 0, len(s.stores)) - for _, st := range s.stores { - if st.HasMetadataAPI() { - metadataClients = append(metadataClients, st.metadata) - } - } - return metadataClients -} - -// GetExemplarsStores returns a list of all active exemplars stores. 
-func (s *StoreSet) GetExemplarsStores() []*exemplarspb.ExemplarStore { - s.storesMtx.RLock() - defer s.storesMtx.RUnlock() - - exemplarStores := make([]*exemplarspb.ExemplarStore, 0, len(s.stores)) - for _, st := range s.stores { - if st.HasExemplarsAPI() { - exemplarStores = append(exemplarStores, &exemplarspb.ExemplarStore{ - ExemplarsClient: st.exemplar, - LabelSets: st.labelSets, - }) - } - } - return exemplarStores -} - -func (s *StoreSet) Close() { - s.storesMtx.Lock() - defer s.storesMtx.Unlock() - - for _, st := range s.stores { - st.Close() - } - s.stores = map[string]*storeRef{} -} - -func (s *StoreSet) cleanUpStoreStatuses(stores map[string]*storeRef) { - s.storesStatusesMtx.Lock() - defer s.storesStatusesMtx.Unlock() - - now := time.Now() - for addr, status := range s.storeStatuses { - if _, ok := stores[addr]; ok { - continue - } - - if now.Sub(status.LastCheck) >= s.unhealthyStoreTimeout { - delete(s.storeStatuses, addr) - } - } -} diff --git a/pkg/query/storeset_test.go b/pkg/query/storeset_test.go deleted file mode 100644 index a67f23fc26..0000000000 --- a/pkg/query/storeset_test.go +++ /dev/null @@ -1,1095 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. - -package query - -import ( - "context" - "encoding/json" - "fmt" - "math" - "net" - "testing" - "time" - - "github.com/pkg/errors" - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/store" - "github.com/thanos-io/thanos/pkg/store/labelpb" - "github.com/thanos-io/thanos/pkg/store/storepb" - "github.com/thanos-io/thanos/pkg/testutil" -) - -var testGRPCOpts = []grpc.DialOption{ - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), - grpc.WithInsecure(), -} - -type mockedStore struct { - infoDelay time.Duration - info storepb.InfoResponse -} - -func (s *mockedStore) Info(ctx context.Context, r *storepb.InfoRequest) (*storepb.InfoResponse, error) { - if s.infoDelay > 0 { - time.Sleep(s.infoDelay) - } - return &s.info, nil -} - -func (s *mockedStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - return status.Error(codes.Unimplemented, "not implemented") -} - -func (s *mockedStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) ( - *storepb.LabelNamesResponse, error, -) { - return nil, status.Error(codes.Unimplemented, "not implemented") -} - -func (s *mockedStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) ( - *storepb.LabelValuesResponse, error, -) { - return nil, status.Error(codes.Unimplemented, "not implemented") -} - -type testStoreMeta struct { - extlsetFn func(addr string) []labelpb.ZLabelSet - storeType component.StoreAPI - minTime, maxTime int64 - infoDelay time.Duration -} - -type testStores struct { - srvs map[string]*grpc.Server - orderAddrs []string -} - -func startTestStores(storeMetas []testStoreMeta) (*testStores, error) { - st := &testStores{ - srvs: map[string]*grpc.Server{}, - } - - for _, meta := range storeMetas { - listener, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - // Close so far started servers. 
- st.Close() - return nil, err - } - - srv := grpc.NewServer() - - storeSrv := &mockedStore{ - info: storepb.InfoResponse{ - LabelSets: meta.extlsetFn(listener.Addr().String()), - MaxTime: meta.maxTime, - MinTime: meta.minTime, - }, - infoDelay: meta.infoDelay, - } - if meta.storeType != nil { - storeSrv.info.StoreType = meta.storeType.ToProto() - } - storepb.RegisterStoreServer(srv, storeSrv) - go func() { - _ = srv.Serve(listener) - }() - - st.srvs[listener.Addr().String()] = srv - st.orderAddrs = append(st.orderAddrs, listener.Addr().String()) - } - - return st, nil -} - -func (s *testStores) StoreAddresses() []string { - var stores []string - stores = append(stores, s.orderAddrs...) - return stores -} - -func (s *testStores) Close() { - for _, srv := range s.srvs { - srv.Stop() - } - s.srvs = nil -} - -func (s *testStores) CloseOne(addr string) { - srv, ok := s.srvs[addr] - if !ok { - return - } - - srv.Stop() - delete(s.srvs, addr) -} - -func TestStoreSet_Update(t *testing.T) { - stores, err := startTestStores([]testStoreMeta{ - { - storeType: component.Sidecar, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "addr", Value: addr}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "a", Value: "b"}, - }, - }, - } - }, - }, - { - storeType: component.Sidecar, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "addr", Value: addr}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "a", Value: "b"}, - }, - }, - } - }, - }, - { - storeType: component.Query, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "a", Value: "broken"}, - }, - }, - } - }, - }, - }) - testutil.Ok(t, err) - defer stores.Close() - - discoveredStoreAddr := stores.StoreAddresses() - - // Testing if duplicates can cause weird results. - discoveredStoreAddr = append(discoveredStoreAddr, discoveredStoreAddr[0]) - storeSet := NewStoreSet(nil, nil, "", - func() (specs []StoreSpec) { - for _, addr := range discoveredStoreAddr { - specs = append(specs, NewGRPCStoreSpec(addr, false)) - } - return specs - }, - func() (specs []RuleSpec) { - return nil - }, - func() (specs []TargetSpec) { - return nil - }, - func() (specs []MetadataSpec) { - return nil - }, - func() (specs []ExemplarSpec) { - return nil - }, - testGRPCOpts, time.Minute) - storeSet.gRPCInfoCallTimeout = 2 * time.Second - defer storeSet.Close() - - // Initial update. - storeSet.Update(context.Background()) - - // Start with one not available. - stores.CloseOne(discoveredStoreAddr[2]) - - // Should not matter how many of these we run. - storeSet.Update(context.Background()) - storeSet.Update(context.Background()) - testutil.Equals(t, 2, len(storeSet.stores)) - testutil.Equals(t, 3, len(storeSet.storeStatuses)) - - for addr, st := range storeSet.stores { - testutil.Equals(t, addr, st.addr) - - lset := st.LabelSets() - testutil.Equals(t, 2, len(lset)) - testutil.Equals(t, "addr", lset[0][0].Name) - testutil.Equals(t, addr, lset[0][0].Value) - testutil.Equals(t, "a", lset[1][0].Name) - testutil.Equals(t, "b", lset[1][0].Value) - } - - // Check stats. 
- expected := newStoreAPIStats() - expected[component.Sidecar] = map[string]int{ - fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[0]): 1, - fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[1]): 1, - } - testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) - - // Remove address from discovered and reset last check, which should ensure cleanup of status on next update. - storeSet.storeStatuses[discoveredStoreAddr[2]].LastCheck = time.Now().Add(-4 * time.Minute) - discoveredStoreAddr = discoveredStoreAddr[:len(discoveredStoreAddr)-2] - storeSet.Update(context.Background()) - testutil.Equals(t, 2, len(storeSet.storeStatuses)) - - stores.CloseOne(discoveredStoreAddr[0]) - delete(expected[component.Sidecar], fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[0])) - - // We expect Update to tear down store client for closed store server. - storeSet.Update(context.Background()) - testutil.Equals(t, 1, len(storeSet.stores), "only one service should respond just fine, so we expect one client to be ready.") - testutil.Equals(t, 2, len(storeSet.storeStatuses)) - - addr := discoveredStoreAddr[1] - st, ok := storeSet.stores[addr] - testutil.Assert(t, ok, "addr exist") - testutil.Equals(t, addr, st.addr) - - lset := st.LabelSets() - testutil.Equals(t, 2, len(lset)) - testutil.Equals(t, "addr", lset[0][0].Name) - testutil.Equals(t, addr, lset[0][0].Value) - testutil.Equals(t, "a", lset[1][0].Name) - testutil.Equals(t, "b", lset[1][0].Value) - testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) - - // New big batch of storeAPIs. - stores2, err := startTestStores([]testStoreMeta{ - { - storeType: component.Query, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "l3", Value: "v4"}, - }, - }, - } - }, - }, - { - // Duplicated Querier, in previous versions it would be deduplicated. Now it should be not. - storeType: component.Query, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "l3", Value: "v4"}, - }, - }, - } - }, - }, - { - storeType: component.Sidecar, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - } - }, - }, - { - // Duplicated Sidecar, in previous versions it would be deduplicated. Now it should be not. - storeType: component.Sidecar, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - } - }, - }, - { - // Querier that duplicates with sidecar, in previous versions it would be deduplicated. Now it should be not. - storeType: component.Query, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - } - }, - }, - { - // Ruler that duplicates with sidecar, in previous versions it would be deduplicated. Now it should be not. - // Warning should be produced. 
- storeType: component.Rule, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - } - }, - }, - { - // Duplicated Rule, in previous versions it would be deduplicated. Now it should be not. Warning should be produced. - storeType: component.Rule, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - } - }, - }, - { - // No storeType. - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "no-store-type"}, - {Name: "l2", Value: "v3"}, - }, - }, - } - }, - }, - // Two pre v0.8.0 store gateway nodes, they don't have ext labels set. - { - storeType: component.Store, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{} - }, - }, - { - storeType: component.Store, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{} - }, - }, - // Regression tests against https://github.com/thanos-io/thanos/issues/1632: From v0.8.0 stores advertise labels. - // If the object storage handled by store gateway has only one sidecar we used to hitting issue. - { - storeType: component.Store, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "l3", Value: "v4"}, - }, - }, - } - }, - }, - // Stores v0.8.1 has compatibility labels. Check if they are correctly removed. - { - storeType: component.Store, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "l3", Value: "v4"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: store.CompatibilityTypeLabelName, Value: "store"}, - }, - }, - } - }, - }, - // Duplicated store, in previous versions it would be deduplicated. Now it should be not. - { - storeType: component.Store, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - {Name: "l1", Value: "v2"}, - {Name: "l2", Value: "v3"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: "l3", Value: "v4"}, - }, - }, - { - Labels: []labelpb.ZLabel{ - {Name: store.CompatibilityTypeLabelName, Value: "store"}, - }, - }, - } - }, - }, - }) - testutil.Ok(t, err) - defer stores2.Close() - - discoveredStoreAddr = append(discoveredStoreAddr, stores2.StoreAddresses()...) - - // New stores should be loaded. - storeSet.Update(context.Background()) - testutil.Equals(t, 1+len(stores2.srvs), len(storeSet.stores)) - - // Check stats. 
- expected = newStoreAPIStats() - expected[component.UnknownStoreAPI] = map[string]int{ - "{l1=\"no-store-type\", l2=\"v3\"}": 1, - } - expected[component.Query] = map[string]int{ - "{l1=\"v2\", l2=\"v3\"}": 1, - "{l1=\"v2\", l2=\"v3\"},{l3=\"v4\"}": 2, - } - expected[component.Rule] = map[string]int{ - "{l1=\"v2\", l2=\"v3\"}": 2, - } - expected[component.Sidecar] = map[string]int{ - fmt.Sprintf("{a=\"b\"},{addr=\"%s\"}", discoveredStoreAddr[1]): 1, - "{l1=\"v2\", l2=\"v3\"}": 2, - } - expected[component.Store] = map[string]int{ - "": 2, - "{l1=\"v2\", l2=\"v3\"},{l3=\"v4\"}": 3, - } - testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) - - // Check statuses. - testutil.Equals(t, 2+len(stores2.srvs), len(storeSet.storeStatuses)) -} - -func TestStoreSet_Update_NoneAvailable(t *testing.T) { - st, err := startTestStores([]testStoreMeta{ - { - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - { - Name: "addr", - Value: addr, - }, - }, - }, - } - }, - storeType: component.Sidecar, - }, - { - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - { - Name: "addr", - Value: addr, - }, - }, - }, - } - }, - storeType: component.Sidecar, - }, - }) - testutil.Ok(t, err) - defer st.Close() - - initialStoreAddr := st.StoreAddresses() - st.CloseOne(initialStoreAddr[0]) - st.CloseOne(initialStoreAddr[1]) - - storeSet := NewStoreSet(nil, nil, "", - func() (specs []StoreSpec) { - for _, addr := range initialStoreAddr { - specs = append(specs, NewGRPCStoreSpec(addr, false)) - } - return specs - }, - func() (specs []RuleSpec) { return nil }, - func() (specs []TargetSpec) { return nil }, - func() (specs []MetadataSpec) { return nil }, - func() (specs []ExemplarSpec) { return nil }, - testGRPCOpts, time.Minute) - storeSet.gRPCInfoCallTimeout = 2 * time.Second - - // Should not matter how many of these we run. - storeSet.Update(context.Background()) - storeSet.Update(context.Background()) - testutil.Equals(t, 0, len(storeSet.stores), "none of services should respond just fine, so we expect no client to be ready.") - - // Leak test will ensure that we don't keep client connection around. - - expected := newStoreAPIStats() - testutil.Equals(t, expected, storeSet.storesMetric.storeNodes) -} - -// TestQuerierStrict tests what happens when the strict mode is enabled/disabled. -func TestQuerierStrict(t *testing.T) { - st, err := startTestStores([]testStoreMeta{ - { - minTime: 12345, - maxTime: 54321, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - { - Name: "addr", - Value: addr, - }, - }, - }, - } - }, - storeType: component.Sidecar, - }, - { - minTime: 66666, - maxTime: 77777, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - { - Name: "addr", - Value: addr, - }, - }, - }, - } - }, - storeType: component.Sidecar, - }, - // Slow store. 
- { - minTime: 65644, - maxTime: 77777, - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{ - { - Labels: []labelpb.ZLabel{ - { - Name: "addr", - Value: addr, - }, - }, - }, - } - }, - storeType: component.Sidecar, - infoDelay: 2 * time.Second, - }, - }) - - testutil.Ok(t, err) - defer st.Close() - - staticStoreAddr := st.StoreAddresses()[0] - slowStaticStoreAddr := st.StoreAddresses()[2] - storeSet := NewStoreSet(nil, nil, "", func() (specs []StoreSpec) { - return []StoreSpec{ - NewGRPCStoreSpec(st.StoreAddresses()[0], true), - NewGRPCStoreSpec(st.StoreAddresses()[1], false), - NewGRPCStoreSpec(st.StoreAddresses()[2], true), - } - }, func() []RuleSpec { - return nil - }, func() []TargetSpec { - return nil - }, func() (specs []MetadataSpec) { - return nil - }, func() []ExemplarSpec { - return nil - }, testGRPCOpts, time.Minute) - defer storeSet.Close() - storeSet.gRPCInfoCallTimeout = 1 * time.Second - - // Initial update. - storeSet.Update(context.Background()) - testutil.Equals(t, 3, len(storeSet.stores), "three clients must be available for running store nodes") - - // The store has not responded to the info call and is assumed to cover everything. - curMin, curMax := storeSet.stores[slowStaticStoreAddr].minTime, storeSet.stores[slowStaticStoreAddr].maxTime - testutil.Assert(t, storeSet.stores[slowStaticStoreAddr].cc.GetState().String() != "SHUTDOWN", "slow store's connection should not be closed") - testutil.Equals(t, int64(0), curMin) - testutil.Equals(t, int64(math.MaxInt64), curMax) - - // The store is statically defined + strict mode is enabled - // so its client + information must be retained. - curMin, curMax = storeSet.stores[staticStoreAddr].minTime, storeSet.stores[staticStoreAddr].maxTime - testutil.Equals(t, int64(12345), curMin, "got incorrect minimum time") - testutil.Equals(t, int64(54321), curMax, "got incorrect minimum time") - - // Successfully retrieve the information and observe minTime/maxTime updating. - storeSet.gRPCInfoCallTimeout = 3 * time.Second - storeSet.Update(context.Background()) - updatedCurMin, updatedCurMax := storeSet.stores[slowStaticStoreAddr].minTime, storeSet.stores[slowStaticStoreAddr].maxTime - testutil.Equals(t, int64(65644), updatedCurMin) - testutil.Equals(t, int64(77777), updatedCurMax) - storeSet.gRPCInfoCallTimeout = 1 * time.Second - - // Turn off the stores. - st.Close() - - // Update again many times. Should not matter WRT the static one. - storeSet.Update(context.Background()) - storeSet.Update(context.Background()) - storeSet.Update(context.Background()) - - // Check that the information is the same. 
- testutil.Equals(t, 2, len(storeSet.stores), "two static clients must remain available") - testutil.Equals(t, curMin, storeSet.stores[staticStoreAddr].minTime, "minimum time reported by the store node is different") - testutil.Equals(t, curMax, storeSet.stores[staticStoreAddr].maxTime, "minimum time reported by the store node is different") - testutil.NotOk(t, storeSet.storeStatuses[staticStoreAddr].LastError.originalErr) - - testutil.Equals(t, updatedCurMin, storeSet.stores[slowStaticStoreAddr].minTime, "minimum time reported by the store node is different") - testutil.Equals(t, updatedCurMax, storeSet.stores[slowStaticStoreAddr].maxTime, "minimum time reported by the store node is different") -} - -func TestStoreSet_Update_Rules(t *testing.T) { - stores, err := startTestStores([]testStoreMeta{ - { - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{} - }, - storeType: component.Sidecar, - }, - { - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{} - }, - storeType: component.Rule, - }, - }) - testutil.Ok(t, err) - defer stores.Close() - - for _, tc := range []struct { - name string - storeSpecs func() []StoreSpec - ruleSpecs func() []RuleSpec - exemplarSpecs func() []ExemplarSpec - expectedStores int - expectedRules int - }{ - { - name: "stores, no rules", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - NewGRPCStoreSpec(stores.orderAddrs[1], false), - } - }, - expectedStores: 2, - expectedRules: 0, - }, - { - name: "rules, no stores", - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedStores: 0, - expectedRules: 0, - }, - { - name: "one store, different rule", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[1], false), - } - }, - expectedStores: 1, - expectedRules: 0, - }, - { - name: "two stores, one rule", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - NewGRPCStoreSpec(stores.orderAddrs[1], false), - } - }, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedStores: 2, - expectedRules: 1, - }, - { - name: "two stores, two rules", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - NewGRPCStoreSpec(stores.orderAddrs[1], false), - } - }, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - NewGRPCStoreSpec(stores.orderAddrs[1], false), - } - }, - exemplarSpecs: func() []ExemplarSpec { - return []ExemplarSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - NewGRPCStoreSpec(stores.orderAddrs[1], false), - } - }, - expectedStores: 2, - expectedRules: 2, - }, - } { - storeSet := NewStoreSet(nil, nil, "", - tc.storeSpecs, - tc.ruleSpecs, - func() []TargetSpec { return nil }, - func() []MetadataSpec { return nil }, - tc.exemplarSpecs, - testGRPCOpts, time.Minute) - - t.Run(tc.name, func(t *testing.T) { - defer storeSet.Close() - storeSet.Update(context.Background()) - testutil.Equals(t, tc.expectedStores, len(storeSet.stores)) - - gotRules := 0 - for _, ref := range storeSet.stores { - if ref.HasRulesAPI() { - gotRules += 1 - } - } - - testutil.Equals(t, tc.expectedRules, gotRules) - }) - } -} - -func 
TestStoreSet_Rules_Discovery(t *testing.T) { - stores, err := startTestStores([]testStoreMeta{ - { - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{} - }, - storeType: component.Sidecar, - }, - { - extlsetFn: func(addr string) []labelpb.ZLabelSet { - return []labelpb.ZLabelSet{} - }, - storeType: component.Rule, - }, - }) - testutil.Ok(t, err) - defer stores.Close() - - type discoveryState struct { - name string - storeSpecs func() []StoreSpec - ruleSpecs func() []RuleSpec - expectedStores int - expectedRules int - } - - for _, tc := range []struct { - states []discoveryState - name string - }{ - { - name: "StoreAPI and RulesAPI concurrent discovery", - states: []discoveryState{ - { - name: "no stores", - storeSpecs: nil, - ruleSpecs: nil, - expectedRules: 0, - expectedStores: 0, - }, - { - name: "RulesAPI discovered", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedRules: 1, - expectedStores: 1, - }, - }, - }, - - { - name: "StoreAPI discovery first, eventually discovered RulesAPI", - states: []discoveryState{ - { - name: "no stores", - storeSpecs: nil, - ruleSpecs: nil, - expectedRules: 0, - expectedStores: 0, - }, - { - name: "StoreAPI discovered, no RulesAPI discovered", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedStores: 1, - expectedRules: 0, - }, - { - name: "RulesAPI discovered", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedStores: 1, - expectedRules: 1, - }, - }, - }, - - { - name: "RulesAPI discovery first, eventually discovered StoreAPI", - states: []discoveryState{ - { - name: "no stores", - storeSpecs: nil, - ruleSpecs: nil, - expectedRules: 0, - expectedStores: 0, - }, - { - name: "RulesAPI discovered, no StoreAPI discovered", - storeSpecs: nil, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedStores: 0, - expectedRules: 0, - }, - { - name: "StoreAPI discovered", - storeSpecs: func() []StoreSpec { - return []StoreSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - ruleSpecs: func() []RuleSpec { - return []RuleSpec{ - NewGRPCStoreSpec(stores.orderAddrs[0], false), - } - }, - expectedStores: 1, - expectedRules: 1, - }, - }, - }, - } { - t.Run(tc.name, func(t *testing.T) { - currentState := 0 - - storeSet := NewStoreSet(nil, nil, "", - func() []StoreSpec { - if tc.states[currentState].storeSpecs == nil { - return nil - } - - return tc.states[currentState].storeSpecs() - }, - func() []RuleSpec { - if tc.states[currentState].ruleSpecs == nil { - return nil - } - - return tc.states[currentState].ruleSpecs() - }, - func() []TargetSpec { return nil }, - func() []MetadataSpec { - return nil - }, - func() []ExemplarSpec { return nil }, - testGRPCOpts, time.Minute) - - defer storeSet.Close() - - for { - storeSet.Update(context.Background()) - testutil.Equals( - t, - tc.states[currentState].expectedStores, - len(storeSet.stores), - "unexepected discovered stores in state %q", - tc.states[currentState].name, - ) - - gotRules := 0 - for _, ref := range storeSet.stores { - if ref.HasRulesAPI() { - gotRules += 1 - } - } 
- testutil.Equals( - t, - tc.states[currentState].expectedRules, - gotRules, - "unexpected discovered rules in state %q", - tc.states[currentState].name, - ) - - currentState = currentState + 1 - if len(tc.states) == currentState { - break - } - } - }) - } -} - -type errThatMarshalsToEmptyDict struct { - msg string -} - -// MarshalJSON marshals the error and returns and empty dict, not the error string. -func (e *errThatMarshalsToEmptyDict) MarshalJSON() ([]byte, error) { - return json.Marshal(map[string]string{}) -} - -// Error returns the original, underlying string. -func (e *errThatMarshalsToEmptyDict) Error() string { - return e.msg -} - -// Test highlights that without wrapping the error, it is marshaled to empty dict {}, not its message. -func TestStringError(t *testing.T) { - dictErr := &errThatMarshalsToEmptyDict{msg: "Error message"} - stringErr := &stringError{originalErr: dictErr} - - storestatusMock := map[string]error{} - storestatusMock["dictErr"] = dictErr - storestatusMock["stringErr"] = stringErr - - b, err := json.Marshal(storestatusMock) - - testutil.Ok(t, err) - testutil.Equals(t, []byte(`{"dictErr":{},"stringErr":"Error message"}`), b, "expected to get proper results") -} - -// Errors that usually marshal to empty dict should return the original error string. -func TestUpdateStoreStateLastError(t *testing.T) { - tcs := []struct { - InputError error - ExpectedLastErr string - }{ - {errors.New("normal_err"), `"normal_err"`}, - {nil, `null`}, - {&errThatMarshalsToEmptyDict{"the error message"}, `"the error message"`}, - } - - for _, tc := range tcs { - mockStoreSet := &StoreSet{ - storeStatuses: map[string]*StoreStatus{}, - } - mockStoreRef := &storeRef{ - addr: "mockedStore", - } - - mockStoreSet.updateStoreStatus(mockStoreRef, tc.InputError) - - b, err := json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) - testutil.Ok(t, err) - testutil.Equals(t, tc.ExpectedLastErr, string(b)) - } -} - -func TestUpdateStoreStateForgetsPreviousErrors(t *testing.T) { - mockStoreSet := &StoreSet{ - storeStatuses: map[string]*StoreStatus{}, - } - mockStoreRef := &storeRef{ - addr: "mockedStore", - } - - mockStoreSet.updateStoreStatus(mockStoreRef, errors.New("test err")) - - b, err := json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) - testutil.Ok(t, err) - testutil.Equals(t, `"test err"`, string(b)) - - // updating status without and error should clear the previous one. 
- mockStoreSet.updateStoreStatus(mockStoreRef, nil) - - b, err = json.Marshal(mockStoreSet.storeStatuses["mockedStore"].LastError) - testutil.Ok(t, err) - testutil.Equals(t, `null`, string(b)) -} diff --git a/pkg/ui/query.go b/pkg/ui/query.go index 577ff34cb2..ce335e735f 100644 --- a/pkg/ui/query.go +++ b/pkg/ui/query.go @@ -22,11 +22,7 @@ import ( type Query struct { *BaseUI -<<<<<<< HEAD endpointSet []*query.EndpointSet -======= - storeSets []*query.StoreSet ->>>>>>> addressed comments for querier externalPrefix, prefixHeader string @@ -36,11 +32,7 @@ type Query struct { now func() model.Time } -<<<<<<< HEAD func NewQueryUI(logger log.Logger, endpointSet []*query.EndpointSet, externalPrefix, prefixHeader string) *Query { -======= -func NewQueryUI(logger log.Logger, storeSets []*query.StoreSet, externalPrefix, prefixHeader string) *Query { ->>>>>>> addressed comments for querier tmplVariables := map[string]string{ "Component": component.Query.String(), } @@ -51,11 +43,7 @@ func NewQueryUI(logger log.Logger, storeSets []*query.StoreSet, externalPrefix, return &Query{ BaseUI: NewBaseUI(logger, "query_menu.html", tmplFuncs, tmplVariables, externalPrefix, prefixHeader, component.Query), -<<<<<<< HEAD endpointSet: endpointSet, -======= - storeSets: storeSets, ->>>>>>> addressed comments for querier externalPrefix: externalPrefix, prefixHeader: prefixHeader, cwd: runtimeInfo().CWD, @@ -123,17 +111,11 @@ func (q *Query) status(w http.ResponseWriter, r *http.Request) { func (q *Query) stores(w http.ResponseWriter, r *http.Request) { prefix := GetWebPrefix(q.logger, q.externalPrefix, q.prefixHeader, r) -<<<<<<< HEAD statuses := make(map[component.Component][]query.EndpointStatus) - for _, status := range q.endpointSet.GetEndpointStatus() { - statuses[status.ComponentType] = append(statuses[status.ComponentType], status) -======= - statuses := make(map[component.StoreAPI][]query.StoreStatus) - for _, storesSet := range q.storeSets { - for _, status := range storesSet.GetStoreStatus() { - statuses[status.StoreType] = append(statuses[status.StoreType], status) + for _, endpointSet := range q.endpointSet { + for _, status := range endpointSet.GetEndpointStatus() { + statuses[status.ComponentType] = append(statuses[status.ComponentType], status) } ->>>>>>> addressed comments for querier } sources := make([]component.Component, 0, len(statuses)) From 2bd35ca6c066513e245959874960de4ff3718f71 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Fri, 1 Oct 2021 19:02:37 +0530 Subject: [PATCH 22/29] removing errors-2 Signed-off-by: Namanl2001 --- pkg/query/endpointset_test.go | 8 ++--- test/e2e/e2ethanos/services.go | 48 ++++--------------------- test/e2e/query_frontend_test.go | 4 +-- test/e2e/query_test.go | 63 ++++++++++++++++++--------------- 4 files changed, 47 insertions(+), 76 deletions(-) diff --git a/pkg/query/endpointset_test.go b/pkg/query/endpointset_test.go index 5dc7eefa45..d941e59d69 100644 --- a/pkg/query/endpointset_test.go +++ b/pkg/query/endpointset_test.go @@ -300,7 +300,7 @@ func TestEndpointSet_Update(t *testing.T) { // Testing if duplicates can cause weird results. 
discoveredEndpointAddr = append(discoveredEndpointAddr, discoveredEndpointAddr[0]) - endpointSet := NewEndpointSet(nil, nil, + endpointSet := NewEndpointSet(nil, nil, "", func() (specs []EndpointSpec) { for _, addr := range discoveredEndpointAddr { specs = append(specs, NewGRPCEndpointSpec(addr, false)) @@ -683,7 +683,7 @@ func TestEndpointSet_Update_NoneAvailable(t *testing.T) { endpoints.CloseOne(initialEndpointAddr[0]) endpoints.CloseOne(initialEndpointAddr[1]) - endpointSet := NewEndpointSet(nil, nil, + endpointSet := NewEndpointSet(nil, nil, "", func() (specs []EndpointSpec) { for _, addr := range initialEndpointAddr { specs = append(specs, NewGRPCEndpointSpec(addr, false)) @@ -801,7 +801,7 @@ func TestEndpoint_Update_QuerierStrict(t *testing.T) { staticEndpointAddr := discoveredEndpointAddr[0] slowStaticEndpointAddr := discoveredEndpointAddr[2] - endpointSet := NewEndpointSet(nil, nil, func() (specs []EndpointSpec) { + endpointSet := NewEndpointSet(nil, nil, "", func() (specs []EndpointSpec) { return []EndpointSpec{ NewGRPCEndpointSpec(discoveredEndpointAddr[0], true), NewGRPCEndpointSpec(discoveredEndpointAddr[1], false), @@ -977,7 +977,7 @@ func TestEndpointSet_APIs_Discovery(t *testing.T) { t.Run(tc.name, func(t *testing.T) { currentState := 0 - endpointSet := NewEndpointSet(nil, nil, + endpointSet := NewEndpointSet(nil, nil, "", func() []EndpointSpec { if tc.states[currentState].endpointSpec == nil { return nil diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 0306621e10..899f9b84e1 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -101,11 +101,11 @@ func NewPrometheus(e e2e.Environment, name, config, promImage string, enableFeat return prom, container, nil } -func NewPrometheusWithSidecar(e e2e.Environment, name, config, promImage string, enableFeatures ...string) (*e2e.InstrumentedRunnable, *e2e.InstrumentedRunnable, error) { - return NewPrometheusWithSidecarCustomImage(e, name, config, promImage, DefaultImage(), enableFeatures...) +func NewPrometheusWithSidecar(e e2e.Environment, name, config, promImage string, extraArgs []string, enableFeatures ...string) (*e2e.InstrumentedRunnable, *e2e.InstrumentedRunnable, error) { + return NewPrometheusWithSidecarCustomImage(e, name, config, promImage, extraArgs, DefaultImage(), enableFeatures...) } -func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promImage string, sidecarImage string, enableFeatures ...string) (*e2e.InstrumentedRunnable, *e2e.InstrumentedRunnable, error) { +func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promImage string, extraArgs []string, sidecarImage string, enableFeatures ...string) (*e2e.InstrumentedRunnable, *e2e.InstrumentedRunnable, error) { prom, dataDir, err := NewPrometheus(e, name, config, promImage, enableFeatures...) 
if err != nil { return nil, nil, err @@ -116,7 +116,7 @@ func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promIm "--grpc-address": ":9091", "--grpc-grace-period": "0s", "--http-address": ":8080", - "--prometheus.url": "http://" + prom.NetworkEndpointFor(netName, 9090), + "--prometheus.url": "http://" + prom.InternalEndpoint("http"), "--tsdb.path": dataDir, "--log.level": infoLogLevel, }) @@ -128,7 +128,7 @@ func NewPrometheusWithSidecarCustomImage(e e2e.Environment, name, config, promIm fmt.Sprintf("sidecar-%s", name), DefaultImage(), e2e.NewCommand("sidecar", args...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), + e2e.NewHTTPReadinessProbe("http", "/-/ready", 200, 200), 8080, 9091, ) @@ -255,13 +255,12 @@ func (q *QuerierBuilder) Build() (*e2e.InstrumentedRunnable, error) { return querier, nil } -func (q *QuerierBuilder) collectArgs() ([]string, error) { func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []store.Config) *QuerierBuilder { q.endpointConfig = endpointConfig return q } -func (q *QuerierBuilder) Build() (*Service, error) { +func (q *QuerierBuilder) collectArgs() ([]string, error) { const replicaLabel = "replica" args := e2e.BuildArgs(map[string]string{ @@ -295,28 +294,6 @@ func (q *QuerierBuilder) Build() (*Service, error) { args = append(args, "--exemplar="+addr) } - if len(q.fileSDStoreAddresses) > 0 { - queryFileSDDir := filepath.Join(q.sharedDir, "data", "querier", q.name) - container := filepath.Join(ContainerSharedDir, "data", "querier", q.name) - if err := os.MkdirAll(queryFileSDDir, 0750); err != nil { - return nil, errors.Wrap(err, "create query dir failed") - } - - fileSD := []*targetgroup.Group{{}} - for _, a := range q.fileSDStoreAddresses { - fileSD[0].Targets = append(fileSD[0].Targets, model.LabelSet{model.AddressLabel: model.LabelValue(a)}) - } - - b, err := yaml.Marshal(fileSD) - if err != nil { - return nil, err - } - - if err := ioutil.WriteFile(queryFileSDDir+"/filesd.yaml", b, 0600); err != nil { - return nil, errors.Wrap(err, "creating query SD config failed") - } - - args = append(args, "--store.sd-files="+filepath.Join(container, "filesd.yaml")) if q.fileSDPath != "" { args = append(args, "--store.sd-files="+q.fileSDPath) } @@ -341,18 +318,7 @@ func (q *QuerierBuilder) Build() (*Service, error) { args = append(args, "--endpoint.config="+string(endpointCfgBytes)) } - querier := NewService( - fmt.Sprintf("querier-%v", q.name), - DefaultImage(), - e2e.NewCommand("query", args...), - e2e.NewHTTPReadinessProbe(8080, "/-/ready", 200, 200), - 8080, - 9091, - ) - querier.SetUser(strconv.Itoa(os.Getuid())) - querier.SetBackoff(defaultBackoffConfig) - - return querier, nil + return args, nil } func RemoteWriteEndpoint(addr string) string { return fmt.Sprintf("http://%s/api/v1/receive", addr) } diff --git a/test/e2e/query_frontend_test.go b/test/e2e/query_frontend_test.go index 6635555ed0..d95495d387 100644 --- a/test/e2e/query_frontend_test.go +++ b/test/e2e/query_frontend_test.go @@ -32,7 +32,7 @@ func TestQueryFrontend(t *testing.T) { now := time.Now() - prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar)) @@ -396,7 +396,7 @@ func TestQueryFrontendMemcachedCache(t *testing.T) { now := time.Now() - prom, sidecar, err 
:= e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom, sidecar, err := e2ethanos.NewPrometheusWithSidecar(e, "1", defaultPromConfig("test", 0, "", ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom, sidecar)) diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 00087d803f..b8512bd13c 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -42,7 +42,10 @@ import ( ) // NOTE: by using aggregation all results are now unsorted. -const queryUpWithoutInstance = "sum(up) without (instance)" +const ( + queryUpWithoutInstance = "sum(up) without (instance)" + ContainerSharedDir = "/shared" +) // defaultPromConfig returns Prometheus config that sets Prometheus to: // * expose 2 external labels, source and replica. @@ -103,7 +106,7 @@ func sortResults(res model.Vector) { func createSDFile(sharedDir string, name string, fileSDStoreAddresses []string) (string, error) { if len(fileSDStoreAddresses) > 0 { queryFileSDDir := filepath.Join(sharedDir, "data", "querier", name) - container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", name) + container := filepath.Join(ContainerSharedDir, "data", "querier", name) if err := os.MkdirAll(queryFileSDDir, 0750); err != nil { return "", errors.Wrap(err, "create query dir failed") } @@ -137,31 +140,31 @@ func TestQuery(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) - prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(e, "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) + prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(e, "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) - prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(e, "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage()) + prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(e, "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) - fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) + fileSDPath, err := 
createSDFile(e.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) testutil.Ok(t, err) // Querier. Both fileSD and directly by flags. - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", []string{sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")}). + q, err := e2ethanos.NewQuerierBuilder(e, "1", sidecar1.InternalEndpoint("grpc"), sidecar2.InternalEndpoint("grpc"), receiver.InternalEndpoint("grpc")). WithFileSDStoreAddresses(fileSDPath).Build() testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(q)) + testutil.Ok(t, e2e.StartAndWaitReady(q)) ctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute) t.Cleanup(cancel) - testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(5), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics)) + testutil.Ok(t, q.WaitSumMetricsWithOptions(e2e.Equals(5), []string{"thanos_store_nodes_grpc_connections"}, e2e.WaitMissingMetrics())) - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: false, }, []model.Metric{ { @@ -194,7 +197,7 @@ func TestQuery(t *testing.T) { }) // With deduplication. - queryAndAssertSeries(t, ctx, q.HTTPEndpoint(), queryUpWithoutInstance, promclient.QueryOptions{ + queryAndAssertSeries(t, ctx, q.Endpoint("http"), queryUpWithoutInstance, promclient.QueryOptions{ Deduplicate: true, }, []model.Metric{ { @@ -221,16 +224,17 @@ func TestQuery(t *testing.T) { func TestQueryWithEndpointConfig(t *testing.T) { t.Parallel() - s, err := e2e.NewScenario("e2e_test_query_config") + e, err := e2e.NewDockerEnvironment("e2e_test_query") testutil.Ok(t, err) - t.Cleanup(e2ethanos.CleanScenario(t, s)) + t.Cleanup(e2ethanos.CleanScenario(t, e)) - receiver, err := e2ethanos.NewRoutingAndIngestingReceiver(s.SharedDir(), s.NetworkName(), "1", 1) + receiver := e2ethanos.NewUninitiatedReceiver(e, "1") + receiverRunnable, err := e2ethanos.NewRoutingAndIngestingReceiverFromService(receiver, e.SharedDir(), 1) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(receiver)) + testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - queryFileSDDir := filepath.Join(s.SharedDir(), "data", "querier", "1") - container := filepath.Join(e2e.ContainerSharedDir, "data", "querier", "1") + queryFileSDDir := filepath.Join(e.SharedDir(), "data", "querier", "1") + container := filepath.Join(ContainerSharedDir, "data", "querier", "1") err = os.MkdirAll(queryFileSDDir, 0750) testutil.Ok(t, err) @@ -246,17 +250,17 @@ func TestQueryWithEndpointConfig(t *testing.T) { "--grpc-server-tls-client-ca": filepath.Join(container, "testca.crt"), }) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), tlsConfig) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.NetworkEndpoint(8081)), ""), e2ethanos.DefaultPrometheusImage(), tlsConfig) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, 
"remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) - prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) + prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(e, "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) - prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(s.SharedDir(), "e2e_test_query_config", "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2e.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) + prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(e, "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) - testutil.Ok(t, s.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) + testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) - fileSDPath, err := createSDFile(s.SharedDir(), "1", []string{sidecar3.GRPCNetworkEndpoint(), sidecar4.GRPCNetworkEndpoint()}) + fileSDPath, err := createSDFile(e.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) testutil.Ok(t, err) endpointConfig := []store.Config{ @@ -282,7 +286,7 @@ func TestQueryWithEndpointConfig(t *testing.T) { }, } - q, err := e2ethanos.NewQuerierBuilder(s.SharedDir(), "1", nil).WithEndpointConfig(endpointConfig).Build() + q, err := e2ethanos.NewQuerierBuilder(e, "1").WithEndpointConfig(endpointConfig).Build() testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(q)) @@ -424,9 +428,9 @@ func TestQueryLabelNames(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) @@ -476,9 +480,9 @@ func TestQueryLabelValues(t *testing.T) { testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(receiverRunnable)) - prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), e2ethanos.DefaultPrometheusImage()) + prom1, sidecar1, err := e2ethanos.NewPrometheusWithSidecar(e, "alone", defaultPromConfig("prom-alone", 0, "", ""), 
e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) - prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage()) + prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2)) @@ -548,6 +552,7 @@ func TestQueryCompatibilityWithPreInfoAPI(t *testing.T) { "p1", defaultPromConfig("p1", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, promRulesSubDir, "*.yaml"), "localhost:9090", qUninit.InternalEndpoint("http")), e2ethanos.DefaultPrometheusImage(), + nil, tcase.sidecarImage, e2ethanos.FeatureExemplarStorage, ) From 94a2f17d2f8d95edebcaf9a9ed493cd373ca4c6d Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Fri, 1 Oct 2021 19:18:38 +0530 Subject: [PATCH 23/29] removing errors-3 Signed-off-by: Namanl2001 --- test/e2e/query_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index b8512bd13c..dfdaae4d42 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -224,7 +224,7 @@ func TestQuery(t *testing.T) { func TestQueryWithEndpointConfig(t *testing.T) { t.Parallel() - e, err := e2e.NewDockerEnvironment("e2e_test_query") + e, err := e2e.NewDockerEnvironment("e2e_test_query_config") testutil.Ok(t, err) t.Cleanup(e2ethanos.CleanScenario(t, e)) From 61d2dc5512aa4c099afa78f6a0bc184330167d5a Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Fri, 1 Oct 2021 20:10:25 +0530 Subject: [PATCH 24/29] removing errors-4 Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 25 ++++++++++++++----------- test/e2e/query_test.go | 4 ++-- 2 files changed, 16 insertions(+), 13 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 4ef009a01a..79e246e008 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -422,6 +422,17 @@ func runQuery( ) var storeSets []*query.EndpointSet + var engineOpts = promql.EngineOpts{ + Logger: logger, + Reg: reg, + // TODO(bwplotka): Expose this as a flag: https://github.com/thanos-io/thanos/issues/703. + MaxSamples: math.MaxInt32, + Timeout: queryTimeout, + LookbackDelta: lookbackDelta, + NoStepSubqueryIntervalFn: func(int64) int64 { + return defaultEvaluationInterval.Milliseconds() + }, + } for _, config := range endpointConfig { secure = (config.TLSConfig != store.TLSConfiguration{}) dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, config.Name, secure, skipVerify, config.TLSConfig) @@ -504,6 +515,9 @@ func runQuery( }) } + engineOpts.EnableAtModifier = enableAtModifier + engineOpts.EnableNegativeOffset = enableNegativeOffset + ctxUpdate, cancelUpdate := context.WithCancel(context.Background()) staticAddresses := config.Endpoints g.Add(func() error { @@ -626,17 +640,6 @@ func runQuery( maxConcurrentSelects, queryTimeout, ) - engineOpts = promql.EngineOpts{ - Logger: logger, - Reg: reg, - // TODO(bwplotka): Expose this as a flag: https://github.com/thanos-io/thanos/issues/703. 
- MaxSamples: math.MaxInt32, - Timeout: queryTimeout, - LookbackDelta: lookbackDelta, - NoStepSubqueryIntervalFn: func(int64) int64 { - return defaultEvaluationInterval.Milliseconds() - }, - } grpcProbe = prober.NewGRPC() httpProbe = prober.NewHTTP() statusProber = prober.Combine( diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index dfdaae4d42..2a702a9ae8 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -175,7 +175,7 @@ func TestQuery(t *testing.T) { { "job": "myself", "prometheus": "prom-both-remote-write-and-sidecar", - "receive": "1", + "receive": "receive-1", "replica": "1234", "tenant_id": "default-tenant", }, @@ -207,7 +207,7 @@ func TestQuery(t *testing.T) { { "job": "myself", "prometheus": "prom-both-remote-write-and-sidecar", - "receive": "1", + "receive": "receive-1", "tenant_id": "default-tenant", }, { From 81a5b8d281662035530ad430eba2ae69d365ab4e Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Fri, 1 Oct 2021 23:17:35 +0530 Subject: [PATCH 25/29] removing errors-5 Signed-off-by: Namanl2001 --- test/e2e/query_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 2a702a9ae8..04f8b6a3c1 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -254,9 +254,9 @@ func TestQueryWithEndpointConfig(t *testing.T) { testutil.Ok(t, err) prom2, sidecar2, err := e2ethanos.NewPrometheusWithSidecar(e, "remote-and-sidecar", defaultPromConfig("prom-both-remote-write-and-sidecar", 1234, e2ethanos.RemoteWriteEndpoint(receiver.InternalEndpoint("remote-write")), ""), e2ethanos.DefaultPrometheusImage(), tlsConfig) testutil.Ok(t, err) - prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(e, "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), tlsConfig) + prom3, sidecar3, err := e2ethanos.NewPrometheusWithSidecar(e, "ha1", defaultPromConfig("prom-ha", 0, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) - prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(e, "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), tlsConfig) + prom4, sidecar4, err := e2ethanos.NewPrometheusWithSidecar(e, "ha2", defaultPromConfig("prom-ha", 1, "", filepath.Join(e2ethanos.ContainerSharedDir, "", "*.yaml")), e2ethanos.DefaultPrometheusImage(), nil) testutil.Ok(t, err) testutil.Ok(t, e2e.StartAndWaitReady(prom1, sidecar1, prom2, sidecar2, prom3, sidecar3, prom4, sidecar4)) From 88fef2a4dd445aa0693421457c2f3d380e1dfcfd Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 6 Oct 2021 00:34:34 +0530 Subject: [PATCH 26/29] =?UTF-8?q?=C2=83pkg/store/config.go=20->=20pkg/quer?= =?UTF-8?q?y/config.go?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Namanl2001 --- pkg/{store => query}/config.go | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename pkg/{store => query}/config.go (100%) diff --git a/pkg/store/config.go b/pkg/query/config.go similarity index 100% rename from pkg/store/config.go rename to pkg/query/config.go From 4d9cc57abede670c96941dd712971b5e48214356 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 6 Oct 2021 00:52:25 +0530 Subject: [PATCH 27/29] store -> query Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 12 ++++++------ cmd/thanos/receive.go | 3 ++- 
pkg/extgrpc/client.go | 4 ++-- pkg/query/config.go | 2 +- test/e2e/e2ethanos/services.go | 5 +++-- test/e2e/query_test.go | 6 +++--- 6 files changed, 17 insertions(+), 15 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 79e246e008..a2c4eda0fd 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -375,23 +375,23 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - var endpointConfig []store.Config + var endpointConfig []query.Config var err error if len(endpointConfigYAML) > 0 { - endpointConfig, err = store.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig) + endpointConfig, err = query.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig) if err != nil { return errors.Wrap(err, "loading endpoint config") } } else { // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. - var TLSConfig store.TLSConfiguration + var TLSConfig query.TLSConfiguration if secure { TLSConfig.CertFile = cert TLSConfig.KeyFile = key TLSConfig.CaCertFile = caCert TLSConfig.ServerName = serverName } - endpointConfig, err = store.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) + endpointConfig, err = query.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) if err != nil { return errors.Wrap(err, "initializing endpoint config from individual flags") } @@ -434,7 +434,7 @@ func runQuery( }, } for _, config := range endpointConfig { - secure = (config.TLSConfig != store.TLSConfiguration{}) + secure = (config.TLSConfig != query.TLSConfiguration{}) dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, config.Name, secure, skipVerify, config.TLSConfig) if err != nil { return errors.Wrap(err, "building gRPC client") @@ -453,7 +453,7 @@ func runQuery( var spec []query.EndpointSpec // Add strict & static nodes. - if config.Mode == store.StrictEndpointMode { + if config.Mode == query.StrictEndpointMode { for _, addr := range config.Endpoints { if dns.IsDynamicNode(addr) { return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. Use --store for this", addr) diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 1de31917f3..0ce01dcc59 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -35,6 +35,7 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" + "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/receive" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" @@ -120,7 +121,7 @@ func runReceive( return err } - TLSConfig := store.TLSConfiguration{ + TLSConfig := query.TLSConfiguration{ CertFile: conf.rwClientCert, KeyFile: conf.rwClientKey, CaCertFile: conf.rwClientServerCA, diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go index 2efecd127e..9272d4a348 100644 --- a/pkg/extgrpc/client.go +++ b/pkg/extgrpc/client.go @@ -15,13 +15,13 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/credentials" - "github.com/thanos-io/thanos/pkg/store" + "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/tls" "github.com/thanos-io/thanos/pkg/tracing" ) // StoreClientGRPCOpts creates gRPC dial options for connecting to a store client. 
-func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, clientInstance string, secure, skipVerify bool, tlsConfig store.TLSConfiguration) ([]grpc.DialOption, error) { +func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, clientInstance string, secure, skipVerify bool, tlsConfig query.TLSConfiguration) ([]grpc.DialOption, error) { if clientInstance == "" { clientInstance = "default" } diff --git a/pkg/query/config.go b/pkg/query/config.go index 113f28a7b7..4eaf226b1e 100644 --- a/pkg/query/config.go +++ b/pkg/query/config.go @@ -1,7 +1,7 @@ // Copyright (c) The Thanos Authors. // Licensed under the Apache License 2.0. -package store +package query import ( "gopkg.in/yaml.v2" diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go index 899f9b84e1..95c509495c 100644 --- a/test/e2e/e2ethanos/services.go +++ b/test/e2e/e2ethanos/services.go @@ -23,6 +23,7 @@ import ( "github.com/thanos-io/thanos/pkg/alert" "github.com/thanos-io/thanos/pkg/objstore/client" + "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/queryfrontend" "github.com/thanos-io/thanos/pkg/receive" ) @@ -151,7 +152,7 @@ type QuerierBuilder struct { targetAddresses []string exemplarAddresses []string - endpointConfig []store.Config + endpointConfig []query.Config tracingConfig string } @@ -255,7 +256,7 @@ func (q *QuerierBuilder) Build() (*e2e.InstrumentedRunnable, error) { return querier, nil } -func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []store.Config) *QuerierBuilder { +func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []query.Config) *QuerierBuilder { q.endpointConfig = endpointConfig return q } diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go index 04f8b6a3c1..0d15f83f40 100644 --- a/test/e2e/query_test.go +++ b/test/e2e/query_test.go @@ -35,8 +35,8 @@ import ( "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" "github.com/thanos-io/thanos/pkg/promclient" + "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/runutil" - "github.com/thanos-io/thanos/pkg/store" "github.com/thanos-io/thanos/pkg/testutil" "github.com/thanos-io/thanos/test/e2e/e2ethanos" ) @@ -263,10 +263,10 @@ func TestQueryWithEndpointConfig(t *testing.T) { fileSDPath, err := createSDFile(e.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")}) testutil.Ok(t, err) - endpointConfig := []store.Config{ + endpointConfig := []query.Config{ { Name: "withTLS", - TLSConfig: store.TLSConfiguration{ + TLSConfig: query.TLSConfiguration{ CertFile: filepath.Join(container, "e2e_test_query_config_client.crt"), KeyFile: filepath.Join(container, "testclient.key"), CaCertFile: filepath.Join(container, "testca.crt"), From f19a67b371d40821c91d2902898abcd6f45293d9 Mon Sep 17 00:00:00 2001 From: Namanl2001 Date: Wed, 6 Oct 2021 02:09:30 +0530 Subject: [PATCH 28/29] single func for loading endpoint config Signed-off-by: Namanl2001 --- cmd/thanos/query.go | 31 +++++++++-------------- pkg/query/config.go | 60 ++++++++++++++------------------------------- 2 files changed, 31 insertions(+), 60 deletions(-) diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index a2c4eda0fd..7f8482d5f4 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -375,27 +375,20 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - var endpointConfig 
[]query.Config - var err error - if len(endpointConfigYAML) > 0 { - endpointConfig, err = query.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig) - if err != nil { - return errors.Wrap(err, "loading endpoint config") - } - } else { - // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. - var TLSConfig query.TLSConfiguration - if secure { - TLSConfig.CertFile = cert - TLSConfig.KeyFile = key - TLSConfig.CaCertFile = caCert - TLSConfig.ServerName = serverName - } - endpointConfig, err = query.NewConfig(storeAddrs, strictStores, fileSDConfig, TLSConfig) - if err != nil { - return errors.Wrap(err, "initializing endpoint config from individual flags") + // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. + var TLSConfig query.TLSConfiguration + if secure { + TLSConfig = query.TLSConfiguration{ + CertFile: cert, + KeyFile: key, + CaCertFile: caCert, + ServerName: serverName, } } + endpointConfig, err := query.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig, TLSConfig) + if err != nil { + return errors.Wrap(err, "loading endpoint config") + } dnsRuleProvider := dns.NewProvider( logger, diff --git a/pkg/query/config.go b/pkg/query/config.go index 4eaf226b1e..d9340e62b3 100644 --- a/pkg/query/config.go +++ b/pkg/query/config.go @@ -40,57 +40,34 @@ const ( StrictEndpointMode EndpointMode = "strict" ) -// NewConfig returns list of per-endpoint TLS config from individual flags. -func NewConfig(endpointAddrs, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { - var endpointConfig []Config - - // Adding --endpoint, --endpoint.sd-files to []endpointConfig, if provided. - if len(endpointAddrs) > 0 || fileSDConfig != nil { - cfg := Config{} - cfg.TLSConfig = TLSConfig - cfg.Endpoints = endpointAddrs - if fileSDConfig != nil { - cfg.EndpointsSD = []file.SDConfig{*fileSDConfig} - } - endpointConfig = append(endpointConfig, cfg) - } - - // Adding --endpoint-strict endpoints if provided. - if len(strictEndpointAddrs) > 0 { - cfg := Config{} - cfg.TLSConfig = TLSConfig - cfg.Endpoints = strictEndpointAddrs - cfg.Mode = StrictEndpointMode - endpointConfig = append(endpointConfig, cfg) - } - return endpointConfig, nil -} - // LoadConfig returns list of per-endpoint TLS config. -func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, fileSDConfig *file.SDConfig) ([]Config, error) { +func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) { var endpointConfig []Config - if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { - return nil, err - } + if len(confYAML) > 0 { + if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil { + return nil, err + } - // Checking if wrong mode is provided. - for _, config := range endpointConfig { - if config.Mode != StrictEndpointMode && config.Mode != DefaultEndpointMode { - return nil, errors.Errorf("%s is wrong mode", config.Mode) + // Checking if wrong mode is provided. + for _, config := range endpointConfig { + if config.Mode != StrictEndpointMode && config.Mode != DefaultEndpointMode { + return nil, errors.Errorf("%s is wrong mode", config.Mode) + } } - } - // No dynamic endpoints in strict mode. 
- for _, config := range endpointConfig { - if config.Mode == StrictEndpointMode && len(config.EndpointsSD) != 0 { - return nil, errors.Errorf("no sd-files allowed in strict mode") + // No dynamic endpoints in strict mode. + for _, config := range endpointConfig { + if config.Mode == StrictEndpointMode && len(config.EndpointsSD) != 0 { + return nil, errors.Errorf("no sd-files allowed in strict mode") + } } } - // Adding --endpoint, --endpoint.sd-files with NO-TLS, if provided. + // Adding --endpoint, --endpoint.sd-files, if provided. if len(endpointAddrs) > 0 || fileSDConfig != nil { cfg := Config{} + cfg.TLSConfig = TLSConfig cfg.Endpoints = endpointAddrs if fileSDConfig != nil { cfg.EndpointsSD = []file.SDConfig{*fileSDConfig} @@ -98,9 +75,10 @@ func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, fi endpointConfig = append(endpointConfig, cfg) } - // Adding --endpoint-strict endpoints with NO-TLS, if provided. + // Adding --endpoint-strict endpoints, if provided. if len(strictEndpointAddrs) > 0 { cfg := Config{} + cfg.TLSConfig = TLSConfig cfg.Endpoints = strictEndpointAddrs cfg.Mode = StrictEndpointMode endpointConfig = append(endpointConfig, cfg) From d6a7988ab8a46c3bb36fe374a701478e989967b2 Mon Sep 17 00:00:00 2001 From: Bartlomiej Plotka Date: Thu, 14 Oct 2021 23:47:30 +0200 Subject: [PATCH 29/29] Proposed changes to endpoint.config. Signed-off-by: Bartlomiej Plotka --- CHANGELOG.md | 2 + cmd/thanos/query.go | 320 +++++---------------- cmd/thanos/receive.go | 6 +- cmd/thanos/rule.go | 40 ++- cmd/thanos/sidecar.go | 3 +- pkg/alert/config.go | 30 +- pkg/api/query/v1.go | 37 --- pkg/extgrpc/client.go | 68 ----- pkg/extgrpc/config.go | 53 ++++ pkg/extgrpc/grpc.go | 98 +++++++ pkg/{httpconfig => exthttp}/config.go | 14 +- pkg/{httpconfig => exthttp}/config_test.go | 24 +- pkg/{httpconfig => exthttp}/http.go | 95 +++--- pkg/query/config.go | 67 +++-- pkg/query/endpointset.go | 96 +++++-- pkg/ui/query.go | 12 +- test/e2e/e2ethanos/services.go | 4 +- test/e2e/query_test.go | 2 +- 18 files changed, 445 insertions(+), 526 deletions(-) delete mode 100644 pkg/extgrpc/client.go create mode 100644 pkg/extgrpc/config.go create mode 100644 pkg/extgrpc/grpc.go rename pkg/{httpconfig => exthttp}/config.go (88%) rename pkg/{httpconfig => exthttp}/config_test.go (78%) rename pkg/{httpconfig => exthttp}/http.go (83%) diff --git a/CHANGELOG.md b/CHANGELOG.md index a843f4839b..5a3b11d5d6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,8 @@ We use *breaking :warning:* to mark changes that are not backward compatible (re - [#4679](https://github.com/thanos-io/thanos/pull/4679) Added `enable-feature` flag to enable negative offsets and @ modifier, similar to Prometheus. - [#4696](https://github.com/thanos-io/thanos/pull/4696) Query: add cache name to tracing spans. - [#4764](https://github.com/thanos-io/thanos/pull/4764) Compactor: add `block-viewer.global.sync-block-timeout` flag to set the timeout of synchronization block metas. +- [#4389](https://github.com/thanos-io/thanos/pull/4389) Querier: add `endpoint.configuration` and `endpoint.configuration-file` for granular endpoint configuration YAML content or file. + - *:warning:* This also deprecates the following flags `store.sd-interval`, `store.sd-dns-interval`, `store.sd-dns-resolver`, `store.sd-files` and all `grpc-client-.*`. They will be removed in v0.27.0. 
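Note (illustrative only): a rough sketch of what content passed via --endpoint.config could look like under this proposal. The key names used below (name, mode, grpc_config, tls_config, addresses, file_sd_configs, files, refresh_interval) are inferred from the struct tags introduced in this series plus common Prometheus conventions; they are assumptions, not the final schema, and all hosts and paths are placeholders.

  - name: secure-stores
    mode: strict
    grpc_config:
      tls_config:
        cert_file: /certs/client.crt
        key_file: /certs/client.key
        ca_file: /certs/ca.crt
        server_name: store-1.example.com
    addresses:
      - store-1.example.com:10901
      - store-2.example.com:10901
  - name: discovered-stores
    addresses:
      - dns+stores.example.com:10901
    file_sd_configs:
      - files:
          - /etc/thanos/sd/*.yaml
        refresh_interval: 5m

The strict entry deliberately lists only static addresses, matching the checks in this series that reject SD files and dynamically resolved (dns+) addresses for strict endpoints.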
### Fixed diff --git a/cmd/thanos/query.go b/cmd/thanos/query.go index 7f8482d5f4..e15ceb7359 100644 --- a/cmd/thanos/query.go +++ b/cmd/thanos/query.go @@ -25,13 +25,13 @@ import ( "github.com/prometheus/prometheus/discovery/targetgroup" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" + "github.com/thanos-io/thanos/pkg/exthttp" extflag "github.com/efficientgo/tools/extkingpin" grpc_logging "github.com/grpc-ecosystem/go-grpc-middleware/v2/interceptors/logging" v1 "github.com/thanos-io/thanos/pkg/api/query" "github.com/thanos-io/thanos/pkg/compact/downsample" "github.com/thanos-io/thanos/pkg/component" - "github.com/thanos-io/thanos/pkg/discovery/cache" "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/thanos-io/thanos/pkg/exemplars" "github.com/thanos-io/thanos/pkg/exemplars/exemplarspb" @@ -70,12 +70,13 @@ func registerQuery(app *extkingpin.App) { httpBindAddr, httpGracePeriod, httpTLSConfig := extkingpin.RegisterHTTPFlags(cmd) grpcBindAddr, grpcGracePeriod, grpcCert, grpcKey, grpcClientCA, grpcMaxConnAge := extkingpin.RegisterGRPCFlags(cmd) - secure := cmd.Flag("grpc-client-tls-secure", "Use TLS when talking to the gRPC server").Default("false").Bool() - skipVerify := cmd.Flag("grpc-client-tls-skip-verify", "Disable TLS certificate verification i.e self signed, signed by fake CA").Default("false").Bool() - cert := cmd.Flag("grpc-client-tls-cert", "TLS Certificates to use to identify this client to the server").Default("").String() - key := cmd.Flag("grpc-client-tls-key", "TLS Key for the client's certificate").Default("").String() - caCert := cmd.Flag("grpc-client-tls-ca", "TLS CA Certificates to use to verify gRPC servers").Default("").String() - serverName := cmd.Flag("grpc-client-server-name", "Server name to verify the hostname on the returned gRPC certificates. See https://tools.ietf.org/html/rfc4366#section-3.1").Default("").String() + // TODO(bwplotka): Remove in 0.27.0. + secure := cmd.Flag("grpc-client-tls-secure", "Deprecated: Use endpoint.config instead. Use TLS when talking to the gRPC server").Default("false").Bool() + skipVerify := cmd.Flag("grpc-client-tls-skip-verify", "Deprecated: Use endpoint.config instead. Disable TLS certificate verification i.e self signed, signed by fake CA").Default("false").Bool() + cert := cmd.Flag("grpc-client-tls-cert", "Deprecated: Use endpoint.config instead. TLS Certificates to use to identify this client to the server").Default("").String() + key := cmd.Flag("grpc-client-tls-key", "Deprecated: Use endpoint.config instead. TLS Key for the client's certificate").Default("").String() + caCert := cmd.Flag("grpc-client-tls-ca", "Deprecated: Use endpoint.config instead. TLS CA Certificates to use to verify gRPC servers").Default("").String() + serverName := cmd.Flag("grpc-client-server-name", "Deprecated: Use endpoint.config instead. Server name to verify the hostname on the returned gRPC certificates. See https://tools.ietf.org/html/rfc4366#section-3.1").Default("").String() webRoutePrefix := cmd.Flag("web.route-prefix", "Prefix for API and UI endpoints. This allows thanos UI to be served on a sub-path. Defaults to the value of --web.external-prefix. This option is analogous to --web.route-prefix of Prometheus.").Default("").String() webExternalPrefix := cmd.Flag("web.external-prefix", "Static prefix for all HTML links and redirect URLs in the UI query web interface. Actual endpoints are still served on / or the web.route-prefix. 
This allows thanos UI to be served behind a reverse proxy that strips a URL sub-path.").Default("").String() @@ -106,39 +107,33 @@ func registerQuery(app *extkingpin.App) { selectorLabels := cmd.Flag("selector-label", "Query selector labels that will be exposed in info endpoint (repeated)."). PlaceHolder("=\"\"").Strings() - stores := cmd.Flag("store", "Addresses of statically configured store API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect store API servers through respective DNS lookups."). - PlaceHolder("").Strings() - // TODO(bwplotka): Hidden because we plan to extract discovery to separate API: https://github.com/thanos-io/thanos/issues/2600. ruleEndpoints := cmd.Flag("rule", "Experimental: Addresses of statically configured rules API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect rule API servers through respective DNS lookups."). Hidden().PlaceHolder("").Strings() - metadataEndpoints := cmd.Flag("metadata", "Experimental: Addresses of statically configured metadata API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect metadata API servers through respective DNS lookups."). Hidden().PlaceHolder("").Strings() - - exemplarEndpoints := cmd.Flag("exemplar", "Experimental: Addresses of statically configured exemplars API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect exemplars API servers through respective DNS lookups."). + exemplarEndpoints := cmd.Flag("exemplar", "Experimental: Use endpoint or endpoint.config. Addresses of statically configured exemplars API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect exemplars API servers through respective DNS lookups."). Hidden().PlaceHolder("").Strings() - - // TODO(atunik): Hidden because we plan to extract discovery to separate API: https://github.com/thanos-io/thanos/issues/2600. - targetEndpoints := cmd.Flag("target", "Experimental: Addresses of statically configured target API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect target API servers through respective DNS lookups."). + targetEndpoints := cmd.Flag("target", "Experimental: Use endpoint or endpoint.config. Addresses of statically configured target API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect target API servers through respective DNS lookups."). Hidden().PlaceHolder("").Strings() + endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains set of endpoints (e.g Store API) with optional TLS options. To enable TLS either use this option or deprecated ones --grpc-client-tls* .", extflag.WithEnvSubstitution()) + + stores := cmd.Flag("store", "Addresses of statically configured store API servers (repeatable). The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect store API servers through respective DNS lookups."). + PlaceHolder("").Strings() + strictStores := cmd.Flag("store-strict", "Addresses of only statically configured store API servers that are always used, even if the health check fails. Useful if you have a caching layer on top."). PlaceHolder("").Strings() - fileSDFiles := cmd.Flag("store.sd-files", "Path to files that contain addresses of store API servers. The path can be a glob pattern (repeatable)."). + // TODO(bwplotka): Remove in 0.27.0. + fileSDFiles := cmd.Flag("store.sd-files", "Deprecated: Use endpoint.config instead. 
Path to files that contain addresses of store API servers. The path can be a glob pattern (repeatable)."). PlaceHolder("").Strings() - - fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Refresh interval to re-read file SD files. It is used as a resync fallback."). + fileSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-interval", "Deprecated: Use endpoint.config instead. Refresh interval to re-read file SD files. It is used as a resync fallback."). Default("5m")) - - endpointConfig := extflag.RegisterPathOrContent(cmd, "endpoint.config", "YAML file that contains set of endpoints (e.g Store API) with optional TLS options. To enable TLS either use this option or deprecated ones --grpc-client-tls* .", extflag.WithEnvSubstitution()) - // TODO(bwplotka): Grab this from TTL at some point. - dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Interval between DNS resolutions."). + dnsSDInterval := extkingpin.ModelDuration(cmd.Flag("store.sd-dns-interval", "Deprecated: Use endpoint.config instead. Interval between DNS resolutions."). Default("30s")) - - dnsSDResolver := cmd.Flag("store.sd-dns-resolver", fmt.Sprintf("Resolver to use. Possible options: [%s, %s]", dns.GolangResolverType, dns.MiekgdnsResolverType)). + dnsSDResolver := cmd.Flag("store.sd-dns-resolver", fmt.Sprintf("Deprecated: Use endpoint.config instead. Resolver to use. Possible options: [%s, %s]", dns.GolangResolverType, dns.MiekgdnsResolverType)). Default(string(dns.MiekgdnsResolverType)).Hidden().String() unhealthyStoreTimeout := extkingpin.ModelDuration(cmd.Flag("store.unhealthy-timeout", "Timeout before an unhealthy store is cleaned from the store UI page.").Default("5m")) @@ -375,47 +370,37 @@ func runQuery( Help: "The number of times a duplicated store addresses is detected from the different configs in query", }) - // TLSConfig for endpoints provided in --endpoint, --endpoint.sd-files and --endpoint-strict. - var TLSConfig query.TLSConfiguration + // TLSConfig for endpoints provided in --store, --store.sd-files and --store-strict. + var TLSConfig exthttp.TLSConfig if secure { - TLSConfig = query.TLSConfiguration{ + TLSConfig = exthttp.TLSConfig{ CertFile: cert, KeyFile: key, - CaCertFile: caCert, + CAFile: caCert, ServerName: serverName, } } - endpointConfig, err := query.LoadConfig(endpointConfigYAML, storeAddrs, strictStores, fileSDConfig, TLSConfig) + + // TODO(bwplotka): Allow filtering by API through config. + combinedAddresses := storeAddrs + combinedAddresses = append(combinedAddresses, ruleAddrs...) + combinedAddresses = append(combinedAddresses, metadataAddrs...) + combinedAddresses = append(combinedAddresses, exemplarAddrs...) + combinedAddresses = append(combinedAddresses, targetAddrs...) + + // Create endpoint config combining flag-based options with --endpoint.config. 
+ endpointConfig, err := query.LoadConfig(endpointConfigYAML, combinedAddresses, strictStores, fileSDConfig, TLSConfig) if err != nil { return errors.Wrap(err, "loading endpoint config") } - dnsRuleProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_rule_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - dnsTargetProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_target_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - dnsMetadataProvider := dns.NewProvider( + dnsProvider := dns.NewProvider( logger, - extprom.WrapRegistererWithPrefix("thanos_query_metadata_apis_", reg), + extprom.WrapRegistererWithPrefix("thanos_query_endpoints_", reg), dns.ResolverType(dnsSDResolver), ) - dnsExemplarProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWithPrefix("thanos_query_exemplar_apis_", reg), - dns.ResolverType(dnsSDResolver), - ) - - var storeSets []*query.EndpointSet - var engineOpts = promql.EngineOpts{ + engineOpts := promql.EngineOpts{ Logger: logger, Reg: reg, // TODO(bwplotka): Expose this as a flag: https://github.com/thanos-io/thanos/issues/703. @@ -425,207 +410,57 @@ func runQuery( NoStepSubqueryIntervalFn: func(int64) int64 { return defaultEvaluationInterval.Milliseconds() }, + EnableAtModifier: enableAtModifier, + EnableNegativeOffset: enableNegativeOffset, } + + var groups []*query.EndpointGroup + endpointSetGRPCMetrics := extgrpc.ClientGRPCMetrics(reg, "endpointset") for _, config := range endpointConfig { - secure = (config.TLSConfig != query.TLSConfiguration{}) - dialOpts, err := extgrpc.StoreClientGRPCOpts(logger, reg, tracer, config.Name, secure, skipVerify, config.TLSConfig) + dialOpts, err := extgrpc.ClientGRPCOpts(logger, tracer, endpointSetGRPCMetrics, config.GRPCClientConfig) if err != nil { - return errors.Wrap(err, "building gRPC client") + return errors.Wrap(err, "building gRPC options") } - // Separate DNS provider for each endpoint config. - fileSDCache := cache.New() - dnsStoreProvider := dns.NewProvider( - logger, - extprom.WrapRegistererWith( - map[string]string{"config_name": config.Name}, - extprom.WrapRegistererWithPrefix("thanos_querier_store_apis_", reg), - ), - dns.ResolverType(dnsSDResolver), - ) + var g *query.EndpointGroup var spec []query.EndpointSpec - // Add strict & static nodes. if config.Mode == query.StrictEndpointMode { - for _, addr := range config.Endpoints { + // Add strict & static nodes. + for _, addr := range config.EndpointsConfig.Addresses { if dns.IsDynamicNode(addr) { return errors.Errorf("%s is a dynamically specified store i.e. it uses SD and that is not permitted under strict mode. Use --store for this", addr) } spec = append(spec, query.NewGRPCEndpointSpec(addr, true)) } - } - - endpoints := query.NewEndpointSet( - logger, - reg, - config.Name, - func() (specs []query.EndpointSpec) { - specs = spec - - for _, dnsProvider := range []*dns.Provider{dnsStoreProvider, dnsRuleProvider, dnsExemplarProvider, dnsMetadataProvider, dnsTargetProvider} { - var tmpSpecs []query.EndpointSpec - - for _, addr := range dnsProvider.Addresses() { - tmpSpecs = append(tmpSpecs, query.NewGRPCEndpointSpec(addr, false)) - } - tmpSpecs = removeDuplicateEndpointSpecs(logger, duplicatedStores, tmpSpecs) - specs = append(specs, tmpSpecs...) - } - - return specs - }, - dialOpts, - unhealthyStoreTimeout, - ) - storeSets = append(storeSets, endpoints) - - // Periodically update the store set with the addresses we see in our cluster. 
- { - ctx, cancel := context.WithCancel(context.Background()) - g.Add(func() error { - return runutil.Repeat(5*time.Second, ctx.Done(), func() error { - endpoints.Update(ctx) - return nil - }) - }, func(error) { - cancel() - endpoints.Close() - }) - } - // Run File Service Discovery and update the store set when the files are modified. - if len(config.EndpointsSD) > 0 { - fileSDUpdates := make(chan []*targetgroup.Group) - - for _, fSDConfig := range config.EndpointsSD { - ctxRun, cancelRun := context.WithCancel(context.Background()) - fileSD := file.NewDiscovery(&fSDConfig, logger) - g.Add(func() error { - fileSD.Run(ctxRun, fileSDUpdates) - return nil - }, func(error) { - cancelRun() - }) + // No dynamic resources when endpoint is strict. + g = query.NewEndpointGroup(nil, dialOpts) + } else { + // TODO(bwplotka): Consider adding provider per config name, for instrumentation purposes, but only if strongly requested. + d, err := extgrpc.NewDiscoverer(logger, config.EndpointsConfig, dnsProvider.Clone()) + if err != nil { + return errors.Wrap(err, "building discoverer") } - engineOpts.EnableAtModifier = enableAtModifier - engineOpts.EnableNegativeOffset = enableNegativeOffset - - ctxUpdate, cancelUpdate := context.WithCancel(context.Background()) - staticAddresses := config.Endpoints - g.Add(func() error { - for { - select { - case update := <-fileSDUpdates: - // Discoverers sometimes send nil updates so need to check for it to avoid panics. - if update == nil { - continue - } - fileSDCache.Update(update) - endpoints.Update(ctxUpdate) - - if err := dnsStoreProvider.Resolve(ctxUpdate, append(fileSDCache.Addresses(), staticAddresses...)); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) - } - - // Rules apis do not support file service discovery as of now. - case <-ctxUpdate.Done(): - return nil - } - } - }, func(error) { - cancelUpdate() - close(fileSDUpdates) - }) - } - // Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary. - { - ctx, cancel := context.WithCancel(context.Background()) - staticAddresses := config.Endpoints - g.Add(func() error { - return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error { - resolveCtx, resolveCancel := context.WithTimeout(ctx, dnsSDInterval) - defer resolveCancel() - if err := dnsStoreProvider.Resolve(resolveCtx, append(fileSDCache.Addresses(), staticAddresses...)); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for storeAPIs", "err", err) - } - return nil - }) - }, func(error) { - cancel() - }) + addDiscoveryGroups(g, d, ??) + g = query.NewEndpointGroup(d, dialOpts) } } - // Periodically update the addresses from static flags and file SD by resolving them using DNS SD if necessary. 
- { - ctx, cancel := context.WithCancel(context.Background()) - g.Add(func() error { - return runutil.Repeat(dnsSDInterval, ctx.Done(), func() error { - resolveCtx, resolveCancel := context.WithTimeout(ctx, dnsSDInterval) - defer resolveCancel() - if err := dnsRuleProvider.Resolve(resolveCtx, ruleAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for rulesAPIs", "err", err) - } - if err := dnsTargetProvider.Resolve(ctx, targetAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for targetsAPIs", "err", err) - } - if err := dnsMetadataProvider.Resolve(resolveCtx, metadataAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for metadataAPIs", "err", err) - } - if err := dnsExemplarProvider.Resolve(resolveCtx, exemplarAddrs); err != nil { - level.Error(logger).Log("msg", "failed to resolve addresses for exemplarsAPI", "err", err) - } - return nil - }) - }, func(error) { - cancel() - }) - } + endpointSet := query.NewEndpointSet( + logger, + reg, + groups, + unhealthyStoreTimeout, + ) var ( - // Adding separate for loop for each client func() below because storeSets is being populated in a go-routine and this code executes before it. - // Implemented as a part of https://github.com/thanos-io/thanos/blob/main/docs/proposals-accepted/202106-automated-per-endpoint-mTLS.md - allClients = func() []store.Client { - var get []store.Client - for _, ss := range storeSets { - get = append(get, ss.GetStoreClients()...) - } - return get - } - ruleClients = func() []rulespb.RulesClient { - var getRuleClient []rulespb.RulesClient - for _, ss := range storeSets { - getRuleClient = append(getRuleClient, ss.GetRulesClients()...) - } - return getRuleClient - } - targetClients = func() []targetspb.TargetsClient { - var getTargetClient []targetspb.TargetsClient - for _, ss := range storeSets { - getTargetClient = append(getTargetClient, ss.GetTargetsClients()...) - } - return getTargetClient - } - metadataClients = func() []metadatapb.MetadataClient { - var getMetadataClient []metadatapb.MetadataClient - for _, ss := range storeSets { - getMetadataClient = append(getMetadataClient, ss.GetMetricMetadataClients()...) - } - return getMetadataClient - } - exemplarStore = func() []*exemplarspb.ExemplarStore { - var getExemplarsStore []*exemplarspb.ExemplarStore - for _, ss := range storeSets { - getExemplarsStore = append(getExemplarsStore, ss.GetExemplarsStores()...) 
- } - return getExemplarsStore - } - proxy = store.NewProxyStore(logger, reg, allClients, component.Query, selectorLset, storeResponseTimeout) - rulesProxy = rules.NewProxy(logger, ruleClients) - targetsProxy = targets.NewProxy(logger, targetClients) - metadataProxy = metadata.NewProxy(logger, metadataClients) - exemplarsProxy = exemplars.NewProxy(logger, exemplarStore, selectorLset) + proxy = store.NewProxyStore(logger, reg, endpointSet.GetStoreClients, component.Query, selectorLset, storeResponseTimeout) + rulesProxy = rules.NewProxy(logger, endpointSet.GetRulesClients) + targetsProxy = targets.NewProxy(logger, endpointSet.GetTargetsClients) + metadataProxy = metadata.NewProxy(logger, endpointSet.GetMetricMetadataClients) + exemplarsProxy = exemplars.NewProxy(logger, endpointSet.GetExemplarsStores, selectorLset) queryableCreator = query.NewQueryableCreator( logger, extprom.WrapRegistererWithPrefix("thanos_query_", reg), @@ -665,11 +500,11 @@ func runQuery( ins := extpromhttp.NewInstrumentationMiddleware(reg, nil) // TODO(bplotka in PR #513 review): pass all flags, not only the flags needed by prefix rewriting. - ui.NewQueryUI(logger, storeSets, webExternalPrefix, webPrefixHeaderName).Register(router, ins) + ui.NewQueryUI(logger, endpointSet.GetEndpointStatus, webExternalPrefix, webPrefixHeaderName).Register(router, ins) api := v1.NewQueryAPI( logger, - endpoints.GetEndpointStatus, + endpointSet.GetEndpointStatus, engineFactory(promql.NewEngine, engineOpts, dynamicLookbackDelta), queryableCreator, // NOTE: Will share the same replica label as the query for now. @@ -748,23 +583,6 @@ func runQuery( return nil } -func removeDuplicateEndpointSpecs(logger log.Logger, duplicatedStores prometheus.Counter, specs []query.EndpointSpec) []query.EndpointSpec { - set := make(map[string]query.EndpointSpec) - for _, spec := range specs { - addr := spec.Addr() - if _, ok := set[addr]; ok { - level.Warn(logger).Log("msg", "Duplicate store address is provided", "addr", addr) - duplicatedStores.Inc() - } - set[addr] = spec - } - deduplicated := make([]query.EndpointSpec, 0, len(set)) - for _, value := range set { - deduplicated = append(deduplicated, value) - } - return deduplicated -} - // firstDuplicate returns the first duplicate string in the given string slice // or empty string if none was found. 
func firstDuplicate(ss []string) string { diff --git a/cmd/thanos/receive.go b/cmd/thanos/receive.go index 0ce01dcc59..cbeef9af55 100644 --- a/cmd/thanos/receive.go +++ b/cmd/thanos/receive.go @@ -22,6 +22,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/tsdb" + "github.com/thanos-io/thanos/pkg/exthttp" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/exemplars" @@ -35,7 +36,6 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/client" "github.com/thanos-io/thanos/pkg/prober" - "github.com/thanos-io/thanos/pkg/query" "github.com/thanos-io/thanos/pkg/receive" "github.com/thanos-io/thanos/pkg/runutil" grpcserver "github.com/thanos-io/thanos/pkg/server/grpc" @@ -121,10 +121,10 @@ func runReceive( return err } - TLSConfig := query.TLSConfiguration{ + TLSConfig := exthttp.TLSConfig{ CertFile: conf.rwClientCert, KeyFile: conf.rwClientKey, - CaCertFile: conf.rwClientServerCA, + CAFile: conf.rwClientServerCA, ServerName: conf.rwClientServerName, } diff --git a/cmd/thanos/rule.go b/cmd/thanos/rule.go index d5893edd2a..8ee9783196 100644 --- a/cmd/thanos/rule.go +++ b/cmd/thanos/rule.go @@ -33,8 +33,8 @@ import ( "github.com/prometheus/prometheus/tsdb" "github.com/prometheus/prometheus/util/strutil" "github.com/thanos-io/thanos/pkg/errutil" + "github.com/thanos-io/thanos/pkg/exthttp" "github.com/thanos-io/thanos/pkg/extkingpin" - "github.com/thanos-io/thanos/pkg/httpconfig" extflag "github.com/efficientgo/tools/extkingpin" "github.com/thanos-io/thanos/pkg/alert" @@ -265,29 +265,29 @@ func runRule( ) error { metrics := newRuleMetrics(reg) - var queryCfg []httpconfig.Config + var queryCfg []exthttp.Config var err error if len(conf.queryConfigYAML) > 0 { - queryCfg, err = httpconfig.LoadConfigs(conf.queryConfigYAML) + queryCfg, err = exthttp.LoadConfigs(conf.queryConfigYAML) if err != nil { return err } } else { - queryCfg, err = httpconfig.BuildConfig(conf.query.addrs) + queryCfg, err = exthttp.BuildConfig(conf.query.addrs) if err != nil { return errors.Wrap(err, "query configuration") } // Build the query configuration from the legacy query flags. 
- var fileSDConfigs []httpconfig.FileSDConfig + var fileSDConfigs []exthttp.FileSDConfig if len(conf.query.sdFiles) > 0 { - fileSDConfigs = append(fileSDConfigs, httpconfig.FileSDConfig{ + fileSDConfigs = append(fileSDConfigs, exthttp.FileSDConfig{ Files: conf.query.sdFiles, RefreshInterval: model.Duration(conf.query.sdInterval), }) queryCfg = append(queryCfg, - httpconfig.Config{ - EndpointsConfig: httpconfig.EndpointsConfig{ + exthttp.Config{ + EndpointsConfig: exthttp.EndpointsConfig{ Scheme: "http", FileSDConfigs: fileSDConfigs, }, @@ -301,22 +301,21 @@ func runRule( extprom.WrapRegistererWithPrefix("thanos_rule_query_apis_", reg), dns.ResolverType(conf.query.dnsSDResolver), ) - var queryClients []*httpconfig.Client + var queryClients []*exthttp.Client queryClientMetrics := extpromhttp.NewClientMetrics(extprom.WrapRegistererWith(prometheus.Labels{"client": "query"}, reg)) for _, cfg := range queryCfg { - cfg.HTTPClientConfig.ClientMetrics = queryClientMetrics - c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "query") + c, err := exthttp.NewHTTPClient(cfg.HTTPClientConfig, "query", queryClientMetrics) if err != nil { return err } c.Transport = tracing.HTTPTripperware(logger, c.Transport) - queryClient, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, queryProvider.Clone()) + queryClient, err := exthttp.NewClient(logger, cfg.EndpointsConfig, c, queryProvider.Clone()) if err != nil { return err } queryClients = append(queryClients, queryClient) // Discover and resolve query addresses. - addDiscoveryGroups(g, queryClient, conf.query.dnsSDInterval) + addDiscoveryGroups(g, queryClient.Discoverer, conf.query.dnsSDInterval) } db, err := tsdb.Open(conf.dataDir, log.With(logger, "component", "tsdb"), reg, tsdbOpts, nil) @@ -379,19 +378,18 @@ func runRule( extprom.WrapRegistererWith(prometheus.Labels{"client": "alertmanager"}, reg), ) for _, cfg := range alertingCfg.Alertmanagers { - cfg.HTTPClientConfig.ClientMetrics = amClientMetrics - c, err := httpconfig.NewHTTPClient(cfg.HTTPClientConfig, "alertmanager") + c, err := exthttp.NewHTTPClient(cfg.HTTPClientConfig, "alertmanager", amClientMetrics) if err != nil { return err } c.Transport = tracing.HTTPTripperware(logger, c.Transport) // Each Alertmanager client has a different list of targets thus each needs its own DNS provider. - amClient, err := httpconfig.NewClient(logger, cfg.EndpointsConfig, c, amProvider.Clone()) + amClient, err := exthttp.NewClient(logger, cfg.EndpointsConfig, c, amProvider.Clone()) if err != nil { return err } // Discover and resolve Alertmanager addresses. 
- addDiscoveryGroups(g, amClient, conf.alertmgr.alertmgrsDNSSDInterval) + addDiscoveryGroups(g, amClient.Discoverer, conf.alertmgr.alertmgrsDNSSDInterval) alertmgrs = append(alertmgrs, alert.NewAlertmanager(logger, amClient, time.Duration(cfg.Timeout), cfg.APIVersion)) } @@ -705,7 +703,7 @@ func removeDuplicateQueryEndpoints(logger log.Logger, duplicatedQueriers prometh func queryFuncCreator( logger log.Logger, - queriers []*httpconfig.Client, + queriers []*exthttp.Client, duplicatedQuery prometheus.Counter, ruleEvalWarnings *prometheus.CounterVec, httpMethod string, @@ -761,10 +759,10 @@ func queryFuncCreator( } } -func addDiscoveryGroups(g *run.Group, c *httpconfig.Client, interval time.Duration) { +func addDiscoveryGroups(g *run.Group, d *exthttp.Discoverer, interval time.Duration) { ctx, cancel := context.WithCancel(context.Background()) g.Add(func() error { - c.Discover(ctx) + d.Discover(ctx) return nil }, func(error) { cancel() @@ -772,7 +770,7 @@ func addDiscoveryGroups(g *run.Group, c *httpconfig.Client, interval time.Durati g.Add(func() error { return runutil.Repeat(interval, ctx.Done(), func() error { - return c.Resolve(ctx) + return d.Resolve(ctx) }) }, func(error) { cancel() diff --git a/cmd/thanos/sidecar.go b/cmd/thanos/sidecar.go index 8584492b4f..4187997c57 100644 --- a/cmd/thanos/sidecar.go +++ b/cmd/thanos/sidecar.go @@ -29,7 +29,6 @@ import ( "github.com/thanos-io/thanos/pkg/exthttp" "github.com/thanos-io/thanos/pkg/extkingpin" "github.com/thanos-io/thanos/pkg/extprom" - "github.com/thanos-io/thanos/pkg/httpconfig" "github.com/thanos-io/thanos/pkg/logging" meta "github.com/thanos-io/thanos/pkg/metadata" thanosmodel "github.com/thanos-io/thanos/pkg/model" @@ -228,7 +227,7 @@ func runSidecar( t := exthttp.NewTransport() t.MaxIdleConnsPerHost = conf.connection.maxIdleConnsPerHost t.MaxIdleConns = conf.connection.maxIdleConns - c := promclient.NewClient(&http.Client{Transport: tracing.HTTPTripperware(logger, t)}, logger, httpconfig.ThanosUserAgent) + c := promclient.NewClient(&http.Client{Transport: tracing.HTTPTripperware(logger, t)}, logger, exthttp.ThanosUserAgent) promStore, err := store.NewPrometheusStore(logger, reg, c, conf.prometheus.url, component.Sidecar, m.Labels, m.Timestamps, m.Version) if err != nil { diff --git a/pkg/alert/config.go b/pkg/alert/config.go index 1572821cf2..00d1466853 100644 --- a/pkg/alert/config.go +++ b/pkg/alert/config.go @@ -13,7 +13,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/relabel" - "github.com/thanos-io/thanos/pkg/httpconfig" + "github.com/thanos-io/thanos/pkg/exthttp" "gopkg.in/yaml.v2" "github.com/thanos-io/thanos/pkg/discovery/dns" @@ -25,10 +25,10 @@ type AlertingConfig struct { // AlertmanagerConfig represents a client to a cluster of Alertmanager endpoints. type AlertmanagerConfig struct { - HTTPClientConfig httpconfig.ClientConfig `yaml:"http_config"` - EndpointsConfig httpconfig.EndpointsConfig `yaml:",inline"` - Timeout model.Duration `yaml:"timeout"` - APIVersion APIVersion `yaml:"api_version"` + HTTPClientConfig exthttp.ClientConfig `yaml:"http_config"` + EndpointsConfig exthttp.EndpointsConfig `yaml:",inline"` + Timeout model.Duration `yaml:"timeout"` + APIVersion APIVersion `yaml:"api_version"` } // APIVersion represents the API version of the Alertmanager endpoint. 
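Note (illustrative only): the httpconfig -> exthttp move renames Go identifiers (for example StaticAddresses becomes Addresses) while keeping the YAML struct tags, so an existing alerting configuration along the lines of the hedged sketch below should still parse. The timeout, api_version, http_config and static_configs keys are taken from the struct tags in this patch; the alertmanagers wrapper key and all values are placeholders/assumptions.

  alertmanagers:
    - timeout: 10s
      api_version: v1
      static_configs:
        - alertmanager.example.com:9093
      http_config:
        tls_config:
          ca_file: /etc/thanos/alertmanager-ca.crt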
@@ -61,10 +61,10 @@ func (v *APIVersion) UnmarshalYAML(unmarshal func(interface{}) error) error { func DefaultAlertmanagerConfig() AlertmanagerConfig { return AlertmanagerConfig{ - EndpointsConfig: httpconfig.EndpointsConfig{ - Scheme: "http", - StaticAddresses: []string{}, - FileSDConfigs: []httpconfig.FileSDConfig{}, + EndpointsConfig: exthttp.EndpointsConfig{ + Scheme: "http", + Addresses: []string{}, + FileSDConfigs: []exthttp.FileSDConfig{}, }, Timeout: model.Duration(time.Second * 10), APIVersion: APIv1, @@ -111,7 +111,7 @@ func BuildAlertmanagerConfig(address string, timeout time.Duration) (Alertmanage break } } - var basicAuth httpconfig.BasicAuth + var basicAuth exthttp.BasicAuth if parsed.User != nil && parsed.User.String() != "" { basicAuth.Username = parsed.User.Username() pw, _ := parsed.User.Password() @@ -119,13 +119,13 @@ func BuildAlertmanagerConfig(address string, timeout time.Duration) (Alertmanage } return AlertmanagerConfig{ - HTTPClientConfig: httpconfig.ClientConfig{ + HTTPClientConfig: exthttp.ClientConfig{ BasicAuth: basicAuth, }, - EndpointsConfig: httpconfig.EndpointsConfig{ - PathPrefix: parsed.Path, - Scheme: scheme, - StaticAddresses: []string{host}, + EndpointsConfig: exthttp.EndpointsConfig{ + PathPrefix: parsed.Path, + Scheme: scheme, + Addresses: []string{host}, }, Timeout: model.Duration(timeout), APIVersion: APIv1, diff --git a/pkg/api/query/v1.go b/pkg/api/query/v1.go index 83276a67ba..fd9ee99b68 100644 --- a/pkg/api/query/v1.go +++ b/pkg/api/query/v1.go @@ -93,21 +93,8 @@ type QueryAPI struct { enableExemplarPartialResponse bool disableCORS bool -<<<<<<< HEAD replicaLabels []string endpointStatus func() []query.EndpointStatus -======= - replicaLabels []string -<<<<<<< HEAD -<<<<<<< HEAD - endpointSet *query.EndpointSet -======= - storeSets []*query.StoreSet ->>>>>>> addressed comments for querier ->>>>>>> addressed comments for querier -======= - endpointSet []*query.EndpointSet ->>>>>>> removing conflicts-1 defaultRangeQueryStep time.Duration defaultInstantQueryMaxSourceResolution time.Duration @@ -119,19 +106,7 @@ type QueryAPI struct { // NewQueryAPI returns an initialized QueryAPI type. func NewQueryAPI( logger log.Logger, -<<<<<<< HEAD -<<<<<<< HEAD endpointStatus func() []query.EndpointStatus, -======= -<<<<<<< HEAD - endpointSet *query.EndpointSet, -======= - storeSets []*query.StoreSet, ->>>>>>> addressed comments for querier ->>>>>>> addressed comments for querier -======= - endpointSet []*query.EndpointSet, ->>>>>>> removing conflicts-1 qe func(int64) *promql.Engine, c query.QueryableCreator, ruleGroups rules.UnaryClient, @@ -171,19 +146,7 @@ func NewQueryAPI( enableMetricMetadataPartialResponse: enableMetricMetadataPartialResponse, enableExemplarPartialResponse: enableExemplarPartialResponse, replicaLabels: replicaLabels, -<<<<<<< HEAD -<<<<<<< HEAD endpointStatus: endpointStatus, -======= -<<<<<<< HEAD - endpointSet: endpointSet, -======= - storeSets: storeSets, ->>>>>>> addressed comments for querier ->>>>>>> addressed comments for querier -======= - endpointSet: endpointSet, ->>>>>>> removing conflicts-1 defaultRangeQueryStep: defaultRangeQueryStep, defaultInstantQueryMaxSourceResolution: defaultInstantQueryMaxSourceResolution, defaultMetadataTimeRange: defaultMetadataTimeRange, diff --git a/pkg/extgrpc/client.go b/pkg/extgrpc/client.go deleted file mode 100644 index 9272d4a348..0000000000 --- a/pkg/extgrpc/client.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright (c) The Thanos Authors. -// Licensed under the Apache License 2.0. 
- -package extgrpc - -import ( - "math" - - "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" - grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" - "github.com/opentracing/opentracing-go" - "github.com/prometheus/client_golang/prometheus" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - - "github.com/thanos-io/thanos/pkg/query" - "github.com/thanos-io/thanos/pkg/tls" - "github.com/thanos-io/thanos/pkg/tracing" -) - -// StoreClientGRPCOpts creates gRPC dial options for connecting to a store client. -func StoreClientGRPCOpts(logger log.Logger, reg *prometheus.Registry, tracer opentracing.Tracer, clientInstance string, secure, skipVerify bool, tlsConfig query.TLSConfiguration) ([]grpc.DialOption, error) { - if clientInstance == "" { - clientInstance = "default" - } - constLabels := map[string]string{"config_name": clientInstance} - grpcMets := grpc_prometheus.NewClientMetrics(grpc_prometheus.WithConstLabels(constLabels)) - grpcMets.EnableClientHandlingTimeHistogram( - grpc_prometheus.WithHistogramConstLabels(constLabels), - grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120, 240, 360, 720}), - ) - dialOpts := []grpc.DialOption{ - // We want to make sure that we can receive huge gRPC messages from storeAPI. - // On TCP level we can be fine, but the gRPC overhead for huge messages could be significant. - // Current limit is ~2GB. - // TODO(bplotka): Split sent chunks on store node per max 4MB chunks if needed. - grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)), - grpc.WithUnaryInterceptor( - grpc_middleware.ChainUnaryClient( - grpcMets.UnaryClientInterceptor(), - tracing.UnaryClientInterceptor(tracer), - ), - ), - grpc.WithStreamInterceptor( - grpc_middleware.ChainStreamClient( - grpcMets.StreamClientInterceptor(), - tracing.StreamClientInterceptor(tracer), - ), - ), - } - if reg != nil { - reg.MustRegister(grpcMets) - } - - if !secure { - return append(dialOpts, grpc.WithInsecure()), nil - } - - level.Info(logger).Log("msg", "enabling client to server TLS") - - tlsCfg, err := tls.NewClientConfig(logger, tlsConfig.CertFile, tlsConfig.KeyFile, tlsConfig.CaCertFile, tlsConfig.ServerName, skipVerify) - if err != nil { - return nil, err - } - return append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg))), nil -} diff --git a/pkg/extgrpc/config.go b/pkg/extgrpc/config.go new file mode 100644 index 0000000000..cb870c6cee --- /dev/null +++ b/pkg/extgrpc/config.go @@ -0,0 +1,53 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package extgrpc + +import ( + "strings" + + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/exthttp" +) + +// Config is a structure that allows pointing to various gRPC endpoint, e.g Querier connecting to StoreAPI. +type Config struct { + GRPCClientConfig exthttp.ClientConfig `yaml:"grpc_config"` + EndpointsConfig EndpointsConfig `yaml:",inline"` +} + +func DefaultConfig() Config { + return Config{ + EndpointsConfig: EndpointsConfig{ + Addresses: []string{}, + FileSDConfigs: []exthttp.FileSDConfig{}, + }, + } +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error { + *c = DefaultConfig() + type plain Config + return unmarshal((*plain)(c)) +} + +// BuildConfig returns a configuration from a static addresses. 
+func BuildConfig(addrs []string) ([]Config, error) { + configs := make([]Config, 0, len(addrs)) + for i, addr := range addrs { + if addr == "" { + return nil, errors.Errorf("static address cannot be empty, but was at index %d", i) + } + if strings.Contains(addr, "/") { + return nil, errors.Errorf("gRPC address either has HTTP scheme or path. We expect only host+port with optional dns+ dnssrv+ prefix in it. Got %v", addr) + } + + configs = append(configs, Config{ + EndpointsConfig: EndpointsConfig{ + Addresses: []string{addr}, + }, + }) + } + return configs, nil +} diff --git a/pkg/extgrpc/grpc.go b/pkg/extgrpc/grpc.go new file mode 100644 index 0000000000..d62fda9763 --- /dev/null +++ b/pkg/extgrpc/grpc.go @@ -0,0 +1,98 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package extgrpc + +import ( + "math" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware/v2" + grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus" + "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/exthttp" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/thanos-io/thanos/pkg/tls" + "github.com/thanos-io/thanos/pkg/tracing" +) + +// EndpointsConfig configures a cluster of gRPC endpoints from static addresses and +// file service discovery. Similar to exthttp.EndpointConfig but for gRPC. +type EndpointsConfig struct { + // List of addresses with DNS prefixes. + Addresses []string `yaml:"addresses"` + // List of file configurations (our FileSD supports different DNS lookups). + FileSDConfigs []exthttp.FileSDConfig `yaml:"file_sd_configs"` +} + +// NewDiscoverer returns a new exthttp.Discoverer. +func NewDiscoverer(logger log.Logger, cfg EndpointsConfig, provider exthttp.AddressProvider) (*exthttp.Discoverer, error) { + return exthttp.NewDiscoverer(logger, exthttp.EndpointsConfig{ + Addresses: cfg.Addresses, + FileSDConfigs: cfg.FileSDConfigs, + }, provider) +} + +// TODO: Description +func ClientGRPCMetrics(reg *prometheus.Registry, clientName string) *grpc_prometheus.ClientMetrics { + if clientName == "" { + clientName = "default" + } + + grpcMets := grpc_prometheus.NewClientMetrics(grpc_prometheus.WithConstLabels(map[string]string{"client": clientName})) + grpcMets.EnableClientHandlingTimeHistogram( + grpc_prometheus.WithHistogramConstLabels(map[string]string{"client": clientName}), + grpc_prometheus.WithHistogramBuckets([]float64{0.001, 0.01, 0.1, 0.3, 0.6, 1, 3, 6, 9, 20, 30, 60, 90, 120, 240, 360, 720}), + ) + if reg != nil { + reg.MustRegister(grpcMets) + } + + return grpcMets +} + +// ClientGRPCOpts creates gRPC dial options from config.. +func ClientGRPCOpts(logger log.Logger, tracer opentracing.Tracer, metrics *grpc_prometheus.ClientMetrics, config exthttp.ClientConfig) ([]grpc.DialOption, error) { + dialOpts := []grpc.DialOption{ + // We want to make sure that we can receive huge gRPC messages from storeAPI. + // On TCP level we can be fine, but the gRPC overhead for huge messages could be significant. + // Current limit is ~2GB. + // TODO(bplotka): Split sent chunks on store node per max 4MB chunks if needed. 
+		grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(math.MaxInt32)),
+		grpc.WithUnaryInterceptor(
+			grpc_middleware.ChainUnaryClient(
+				metrics.UnaryClientInterceptor(),
+				tracing.UnaryClientInterceptor(tracer),
+			),
+		),
+		grpc.WithStreamInterceptor(
+			grpc_middleware.ChainStreamClient(
+				metrics.StreamClientInterceptor(),
+				tracing.StreamClientInterceptor(tracer),
+			),
+		),
+	}
+
+	// TODO(bwplotka): Add support here for the non-TLS exthttp.ClientConfig options, so we do not have to block them.
+	if (config.BasicAuth != exthttp.BasicAuth{} || config.BearerToken != "" || config.BearerTokenFile != "" || config.ProxyURL != "") {
+		return nil, errors.New("basic auth, bearer token and proxy URL options are currently not implemented")
+
+	}
+
+	if (config.TLSConfig == exthttp.TLSConfig{}) {
+		return append(dialOpts, grpc.WithInsecure()), nil
+	}
+
+	level.Info(logger).Log("msg", "enabling client to server TLS")
+
+	tlsCfg, err := tls.NewClientConfig(logger, config.TLSConfig.CertFile, config.TLSConfig.KeyFile, config.TLSConfig.CAFile, config.TLSConfig.ServerName, config.TLSConfig.InsecureSkipVerify)
+	if err != nil {
+		return nil, err
+	}
+	return append(dialOpts, grpc.WithTransportCredentials(credentials.NewTLS(tlsCfg))), nil
+}
diff --git a/pkg/httpconfig/config.go b/pkg/exthttp/config.go
similarity index 88%
rename from pkg/httpconfig/config.go
rename to pkg/exthttp/config.go
index 3280e33378..eedb3b8566 100644
--- a/pkg/httpconfig/config.go
+++ b/pkg/exthttp/config.go
@@ -1,7 +1,7 @@
 // Copyright (c) The Thanos Authors.
 // Licensed under the Apache License 2.0.
 
-package httpconfig
+package exthttp
 
 import (
 	"fmt"
@@ -22,9 +22,9 @@ type Config struct {
 func DefaultConfig() Config {
 	return Config{
 		EndpointsConfig: EndpointsConfig{
-			Scheme:          "http",
-			StaticAddresses: []string{},
-			FileSDConfigs:   []FileSDConfig{},
+			Scheme:        "http",
+			Addresses:     []string{},
+			FileSDConfigs: []FileSDConfig{},
 		},
 	}
 }
@@ -65,9 +65,9 @@ func BuildConfig(addrs []string) ([]Config, error) {
 		}
 		configs = append(configs, Config{
 			EndpointsConfig: EndpointsConfig{
-				Scheme:          u.Scheme,
-				StaticAddresses: []string{u.Host},
-				PathPrefix:      u.Path,
+				Scheme:     u.Scheme,
+				Addresses:  []string{u.Host},
+				PathPrefix: u.Path,
 			},
 		})
 	}
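To make the flow of the new extgrpc helpers concrete, here is a hedged sketch of how a caller might chain ClientGRPCMetrics, ClientGRPCOpts and grpc.Dial. The registry, logger, tracer and address are placeholders, imports are elided, and this is not wiring from the patch itself:

func dialStore() (*grpc.ClientConn, error) {
	// One metrics instance per client name; the "client" const label separates groups.
	mets := extgrpc.ClientGRPCMetrics(prometheus.NewRegistry(), "store")

	// An empty ClientConfig carries no auth and no TLS, so ClientGRPCOpts falls
	// back to grpc.WithInsecure().
	opts, err := extgrpc.ClientGRPCOpts(log.NewNopLogger(), opentracing.NoopTracer{}, mets, exthttp.ClientConfig{})
	if err != nil {
		return nil, err
	}
	return grpc.Dial("store.example.svc:10901", opts...)
}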
diff --git a/pkg/httpconfig/config_test.go b/pkg/exthttp/config_test.go
similarity index 78%
rename from pkg/httpconfig/config_test.go
rename to pkg/exthttp/config_test.go
index fe876e859b..60727995a6 100644
--- a/pkg/httpconfig/config_test.go
+++ b/pkg/exthttp/config_test.go
@@ -1,7 +1,7 @@
 // Copyright (c) The Thanos Authors.
 // Licensed under the Apache License 2.0.
 
-package httpconfig
+package exthttp
 
 import (
 	"testing"
@@ -21,8 +21,8 @@ func TestBuildConfig(t *testing.T) {
 			addresses: []string{"localhost:9093"},
 			expected: []Config{{
 				EndpointsConfig: EndpointsConfig{
-					StaticAddresses: []string{"localhost:9093"},
-					Scheme:          "http",
+					Addresses: []string{"localhost:9093"},
+					Scheme:    "http",
 				},
 			}},
 		},
@@ -32,15 +32,15 @@ func TestBuildConfig(t *testing.T) {
 			expected: []Config{
 				{
 					EndpointsConfig: EndpointsConfig{
-						StaticAddresses: []string{"localhost:9093"},
-						Scheme:          "http",
+						Addresses: []string{"localhost:9093"},
+						Scheme:    "http",
 					},
 				},
 				{
 					EndpointsConfig: EndpointsConfig{
-						StaticAddresses: []string{"localhost:9094"},
-						Scheme:          "http",
-						PathPrefix:      "/prefix",
+						Addresses:  []string{"localhost:9094"},
+						Scheme:     "http",
+						PathPrefix: "/prefix",
 					},
 				},
 			},
@@ -50,8 +50,8 @@ func TestBuildConfig(t *testing.T) {
 			addresses: []string{"http://localhost:9093"},
 			expected: []Config{{
 				EndpointsConfig: EndpointsConfig{
-					StaticAddresses: []string{"localhost:9093"},
-					Scheme:          "http",
+					Addresses: []string{"localhost:9093"},
+					Scheme:    "http",
 				},
 			}},
 		},
@@ -60,8 +60,8 @@ func TestBuildConfig(t *testing.T) {
 			addresses: []string{"https://localhost:9093"},
 			expected: []Config{{
 				EndpointsConfig: EndpointsConfig{
-					StaticAddresses: []string{"localhost:9093"},
-					Scheme:          "https",
+					Addresses: []string{"localhost:9093"},
+					Scheme:    "https",
 				},
 			}},
 		},
diff --git a/pkg/httpconfig/http.go b/pkg/exthttp/http.go
similarity index 83%
rename from pkg/httpconfig/http.go
rename to pkg/exthttp/http.go
index b00204e425..b2b7ca6fc3 100644
--- a/pkg/httpconfig/http.go
+++ b/pkg/exthttp/http.go
@@ -1,8 +1,7 @@
 // Copyright (c) The Thanos Authors.
 // Licensed under the Apache License 2.0.
 
-// Package httpconfig is a wrapper around github.com/prometheus/common/config.
-package httpconfig
+package exthttp
 
 import (
 	"context"
@@ -37,9 +36,6 @@ type ClientConfig struct {
 	ProxyURL string `yaml:"proxy_url"`
 	// TLSConfig to use to connect to the targets.
 	TLSConfig TLSConfig `yaml:"tls_config"`
-	// ClientMetrics contains metrics that will be used to instrument
-	// the client that will be created with this config.
-	ClientMetrics *extpromhttp.ClientMetrics `yaml:"-"`
 }
 
 // TLSConfig configures TLS connections.
@@ -69,7 +65,7 @@ func (b BasicAuth) IsZero() bool {
 }
 
 // NewHTTPClient returns a new HTTP client.
-func NewHTTPClient(cfg ClientConfig, name string) (*http.Client, error) {
+func NewHTTPClient(cfg ClientConfig, name string, metrics *extpromhttp.ClientMetrics) (*http.Client, error) {
 	httpClientConfig := config_util.HTTPClientConfig{
 		BearerToken:     config_util.Secret(cfg.BearerToken),
 		BearerTokenFile: cfg.BearerTokenFile,
@@ -107,8 +103,8 @@ func NewHTTPClient(cfg ClientConfig, name string) (*http.Client, error) {
 	tripper := client.Transport
 
-	if cfg.ClientMetrics != nil {
-		tripper = extpromhttp.InstrumentedRoundTripper(tripper, cfg.ClientMetrics)
+	if metrics != nil {
+		tripper = extpromhttp.InstrumentedRoundTripper(tripper, metrics)
 	}
 
 	client.Transport = &userAgentRoundTripper{name: ThanosUserAgent, rt: tripper}
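Because ClientMetrics is no longer part of ClientConfig, callers hand metrics to NewHTTPClient directly. A small sketch under that assumption (the client name is arbitrary, imports are elided, and passing nil simply skips instrumentation):

func newRuleClient() (*http.Client, error) {
	// nil metrics: the transport is only wrapped with the user-agent round tripper,
	// not with extpromhttp instrumentation.
	return exthttp.NewHTTPClient(exthttp.ClientConfig{}, "rule-api", nil)
}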
@@ -145,7 +141,7 @@ func (u userAgentRoundTripper) RoundTrip(r *http.Request) (*http.Response, error
 // file service discovery.
 type EndpointsConfig struct {
 	// List of addresses with DNS prefixes.
-	StaticAddresses []string `yaml:"static_configs"`
+	Addresses []string `yaml:"static_configs"`
 	// List of file configurations (our FileSD supports different DNS lookups).
 	FileSDConfigs []FileSDConfig `yaml:"file_sd_configs"`
@@ -162,7 +158,7 @@ type FileSDConfig struct {
 	RefreshInterval model.Duration `yaml:"refresh_interval"`
 }
 
-func (c FileSDConfig) convert() (file.SDConfig, error) {
+func (c FileSDConfig) Convert() (file.SDConfig, error) {
 	var fileSDConfig file.SDConfig
 	b, err := yaml.Marshal(c)
 	if err != nil {
@@ -177,69 +173,62 @@ type AddressProvider interface {
 	Addresses() []string
 }
 
-// Client represents a client that can send requests to a cluster of HTTP-based endpoints.
-type Client struct {
-	logger log.Logger
-
-	httpClient *http.Client
-	scheme     string
-	prefix     string
-
+// Discoverer allows managing and discovering a group of targets composed from static and dynamic (file SD) HTTP addresses. It also works for
+// gRPC addresses (which use HTTP/2 as the underlying protocol).
+type Discoverer struct {
 	staticAddresses []string
 	fileSDCache     *cache.Cache
 	fileDiscoverers []*file.Discovery
 
+	scheme string
+	prefix string
+
 	provider AddressProvider
 }
 
-// NewClient returns a new Client.
-func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, provider AddressProvider) (*Client, error) {
+// NewDiscoverer returns a new Discoverer.
+func NewDiscoverer(logger log.Logger, cfg EndpointsConfig, provider AddressProvider) (*Discoverer, error) {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
 
 	var discoverers []*file.Discovery
 	for _, sdCfg := range cfg.FileSDConfigs {
-		fileSDCfg, err := sdCfg.convert()
+		fileSDCfg, err := sdCfg.Convert()
 		if err != nil {
 			return nil, err
 		}
 		discoverers = append(discoverers, file.NewDiscovery(&fileSDCfg, logger))
 	}
-	return &Client{
-		logger:          logger,
-		httpClient:      client,
+	return &Discoverer{
 		scheme:          cfg.Scheme,
 		prefix:          cfg.PathPrefix,
-		staticAddresses: cfg.StaticAddresses,
+		staticAddresses: cfg.Addresses,
 		fileSDCache:     cache.New(),
 		fileDiscoverers: discoverers,
 		provider:        provider,
 	}, nil
 }
 
-// Do executes an HTTP request with the underlying HTTP client.
-func (c *Client) Do(req *http.Request) (*http.Response, error) {
-	return c.httpClient.Do(req)
-}
-
 // Endpoints returns the list of known endpoints.
-func (c *Client) Endpoints() []*url.URL {
+func (c *Discoverer) Endpoints() []*url.URL {
 	var urls []*url.URL
 	for _, addr := range c.provider.Addresses() {
-		urls = append(urls,
-			&url.URL{
-				Scheme: c.scheme,
-				Host:   addr,
-				Path:   path.Join("/", c.prefix),
-			},
-		)
+		u := &url.URL{
+			Scheme: c.scheme,
+			Host:   addr,
+		}
+
+		if c.prefix != "" {
+			u.Path = path.Join("/", c.prefix)
+		}
+		urls = append(urls, u)
 	}
 	return urls
 }
 
-// Discover runs the service to discover endpoints until the given context is done.
-func (c *Client) Discover(ctx context.Context) {
+// Discover runs the service to discover endpoints from file SD until the given context is done.
+func (c *Discoverer) Discover(ctx context.Context) {
 	var wg sync.WaitGroup
 	ch := make(chan []*targetgroup.Group)
 
@@ -269,6 +258,30 @@ func (c *Client) Discover(ctx context.Context) {
 }
 
 // Resolve refreshes and resolves the list of targets.
-func (c *Client) Resolve(ctx context.Context) error {
+func (c *Discoverer) Resolve(ctx context.Context) error {
 	return c.provider.Resolve(ctx, append(c.fileSDCache.Addresses(), c.staticAddresses...))
 }
+
+// Client represents a client that can send requests to a cluster of HTTP-based endpoints.
+type Client struct {
+	*Discoverer
+
+	httpClient *http.Client
+}
+
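As a rough usage sketch of the Discoverer/Client split above: the provider below is a throwaway stand-in for the DNS-based AddressProvider Thanos normally supplies, the address is invented, and imports are elided.

// staticProvider is an illustrative AddressProvider that just echoes back
// whatever it was last asked to resolve.
type staticProvider struct{ addrs []string }

func (p *staticProvider) Resolve(_ context.Context, addrs []string) error { p.addrs = addrs; return nil }
func (p *staticProvider) Addresses() []string                             { return p.addrs }

func listEndpoints(ctx context.Context) ([]*url.URL, error) {
	d, err := exthttp.NewDiscoverer(log.NewNopLogger(), exthttp.EndpointsConfig{
		Scheme:    "http",
		Addresses: []string{"alertmanager.example.svc:9093"},
	}, &staticProvider{})
	if err != nil {
		return nil, err
	}
	// Resolve pushes the static plus file-SD addresses into the provider; Endpoints
	// then renders them as URLs with the configured scheme and path prefix.
	if err := d.Resolve(ctx); err != nil {
		return nil, err
	}
	return d.Endpoints(), nil
}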
+// NewClient returns a new Client.
+func NewClient(logger log.Logger, cfg EndpointsConfig, client *http.Client, provider AddressProvider) (*Client, error) {
+	d, err := NewDiscoverer(logger, cfg, provider)
+	if err != nil {
+		return nil, err
+	}
+	return &Client{
+		httpClient: client,
+		Discoverer: d,
+	}, nil
+}
+
+// Do executes an HTTP request with the underlying HTTP client.
+func (c *Client) Do(req *http.Request) (*http.Response, error) {
+	return c.httpClient.Do(req)
+}
diff --git a/pkg/query/config.go b/pkg/query/config.go
index d9340e62b3..3ef301047c 100644
--- a/pkg/query/config.go
+++ b/pkg/query/config.go
@@ -4,33 +4,23 @@
 package query
 
 import (
+	"github.com/thanos-io/thanos/pkg/extgrpc"
+	"github.com/thanos-io/thanos/pkg/exthttp"
 	"gopkg.in/yaml.v2"
 
 	"github.com/pkg/errors"
 	"github.com/prometheus/prometheus/discovery/file"
 )
 
-// Config represents the configuration of a set of Store API endpoints.
+// EndpointConfig represents the configuration of a set of gRPC Store API endpoints.
 // If `tls_config` is omitted then TLS will not be used.
 // Configs must have a name and they must be unique.
-type Config struct {
-	Name        string           `yaml:"name"`
-	TLSConfig   TLSConfiguration `yaml:"tls_config"`
-	Endpoints   []string         `yaml:"endpoints"`
-	EndpointsSD []file.SDConfig  `yaml:"endpoints_sd_files"`
-	Mode        EndpointMode     `yaml:"mode"`
-}
+type EndpointConfig struct {
+	extgrpc.Config `yaml:",inline"`
+
+	Mode EndpointMode `yaml:"mode"`
 
-// TlsConfiguration represents the TLS configuration for a set of Store API endpoints.
-type TLSConfiguration struct {
-	// TLS Certificates file to use to identify this client to the server.
-	CertFile string `yaml:"cert_file"`
-	// TLS Key file for the client's certificate.
-	KeyFile string `yaml:"key_file"`
-	// TLS CA Certificates file to use to verify gRPC servers.
-	CaCertFile string `yaml:"ca_file"`
-	// Server name to verify the hostname on the returned gRPC certificates. See https://tools.ietf.org/html/rfc4366#section-3.1
-	ServerName string `yaml:"server_name"`
+	// TODO(bwplotka): Allow filtering by API (e.g. someone wants an endpoint that serves the Store and Exemplar APIs but only wants to connect to the Store API).
 }
 
 type EndpointMode string
@@ -41,8 +31,8 @@ const (
 )
 
 // LoadConfig returns list of per-endpoint TLS config.
-func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, fileSDConfig *file.SDConfig, TLSConfig TLSConfiguration) ([]Config, error) {
-	var endpointConfig []Config
+func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, globalFileSDConfig *file.SDConfig, globalTLSConfig exthttp.TLSConfig) ([]EndpointConfig, error) {
+	var endpointConfig []EndpointConfig
 
 	if len(confYAML) > 0 {
 		if err := yaml.UnmarshalStrict(confYAML, &endpointConfig); err != nil {
@@ -58,42 +48,49 @@ func LoadConfig(confYAML []byte, endpointAddrs, strictEndpointAddrs []string, fi
 
 		// No dynamic endpoints in strict mode.
 		for _, config := range endpointConfig {
-			if config.Mode == StrictEndpointMode && len(config.EndpointsSD) != 0 {
+			if config.Mode == StrictEndpointMode && len(config.EndpointsConfig.FileSDConfigs) != 0 {
 				return nil, errors.Errorf("no sd-files allowed in strict mode")
 			}
 		}
 	}
 
-	// Adding --endpoint, --endpoint.sd-files, if provided.
-	if len(endpointAddrs) > 0 || fileSDConfig != nil {
-		cfg := Config{}
-		cfg.TLSConfig = TLSConfig
-		cfg.Endpoints = endpointAddrs
-		if fileSDConfig != nil {
-			cfg.EndpointsSD = []file.SDConfig{*fileSDConfig}
+	// Adding --store, --rule, --metadata, --target, --exemplar and --store.sd-files, if provided.
+	// Global TLS config applies until deprecated.
+	if len(endpointAddrs) > 0 || globalFileSDConfig != nil {
+		cfg := EndpointConfig{}
+		cfg.GRPCClientConfig.TLSConfig = globalTLSConfig
+		cfg.EndpointsConfig.Addresses = endpointAddrs
+		if globalFileSDConfig != nil {
+			cfg.EndpointsConfig.FileSDConfigs = []exthttp.FileSDConfig{
+				{
+					Files:           globalFileSDConfig.Files,
+					RefreshInterval: globalFileSDConfig.RefreshInterval,
+				},
+			}
 		}
 		endpointConfig = append(endpointConfig, cfg)
 	}
 
-	// Adding --endpoint-strict endpoints, if provided.
+	// Adding --store-strict endpoints, if provided.
+	// Global TLS config applies until deprecated.
 	if len(strictEndpointAddrs) > 0 {
-		cfg := Config{}
-		cfg.TLSConfig = TLSConfig
-		cfg.Endpoints = strictEndpointAddrs
+		cfg := EndpointConfig{}
+		cfg.GRPCClientConfig.TLSConfig = globalTLSConfig
+		cfg.EndpointsConfig.Addresses = strictEndpointAddrs
 		cfg.Mode = StrictEndpointMode
 		endpointConfig = append(endpointConfig, cfg)
 	}
 
-	// Checking if some endpoints are inputted more than once.
+	// Checking for duplicates.
+	// NOTE: This does not check dynamically discovered endpoints, of course.
 	allEndpoints := make(map[string]struct{})
 	for _, config := range endpointConfig {
-		for _, addr := range config.Endpoints {
+		for _, addr := range config.EndpointsConfig.Addresses {
 			if _, exists := allEndpoints[addr]; exists {
 				return nil, errors.Errorf("%s endpoint provided more than once", addr)
 			}
 			allEndpoints[addr] = struct{}{}
 		}
 	}
-
 	return endpointConfig, nil
 }
diff --git a/pkg/query/endpointset.go b/pkg/query/endpointset.go
index 37220096b3..6c6c96ca8b 100644
--- a/pkg/query/endpointset.go
+++ b/pkg/query/endpointset.go
@@ -17,6 +17,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/thanos-io/thanos/pkg/exemplars/exemplarspb"
+	"github.com/thanos-io/thanos/pkg/exthttp"
 	"github.com/thanos-io/thanos/pkg/info/infopb"
 
 	"google.golang.org/grpc"
@@ -34,14 +35,16 @@ const (
 	unhealthyEndpointMessage  = "removing endpoint because it's unhealthy or does not exist"
 	noMetadataEndpointMessage = "cannot obtain metadata: neither info nor store client found"
 
-	// Default minimum and maximum time values used by Prometheus when they are not passed as query parameter.
+	// MinTime is a default minimum time used by Prometheus when it is not passed as a query parameter.
 	MinTime = -9223309901257974
+	// MaxTime is a default maximum time used by Prometheus when it is not passed as a query parameter.
 	MaxTime = 9223309901257974
 )
 
 type EndpointSpec interface {
-	// Addr returns Thanos API Address for the endpoint spec. It is used as ID for endpoint.
+	// Addr returns the host:port address for the endpoint. It is used as the ID of the endpoint.
 	Addr() string
+
 	// Metadata returns current labels, component type and min, max ranges for store.
 	// It can change for every call for this method.
 	// If metadata call fails we assume that store is no longer accessible and we should not use it.
@@ -167,7 +170,13 @@ func (es *grpcEndpointSpec) fillExpectedAPIs(componentType component.Component,
 			Rules: &infopb.RulesInfo{},
 		}
 	default:
-		return infopb.InfoResponse{}
+		// Returning nothing here might break non-native StoreAPI implementations, so assume the Store API too.
+		return infopb.InfoResponse{
+			Store: &infopb.StoreInfo{
+				MinTime: mintime,
+				MaxTime: maxTime,
+			},
+		}
 	}
 }
 
@@ -208,13 +217,13 @@ type endpointSetNodeCollector struct {
 	connectionsDesc *prometheus.Desc
 }
 
-func newEndpointSetNodeCollector(configInstance string) *endpointSetNodeCollector {
+func newEndpointSetNodeCollector() *endpointSetNodeCollector {
 	return &endpointSetNodeCollector{
 		storeNodes: map[component.Component]map[string]int{},
 		connectionsDesc: prometheus.NewDesc(
 			"thanos_store_nodes_grpc_connections",
 			"Number of gRPC connection to Store APIs. Opened connection means healthy store APIs available for Querier.",
-			[]string{"external_labels", "store_type"}, map[string]string{"config_provider_name": configInstance},
+			[]string{"external_labels", "store_type"}, nil,
 		),
 	}
 }
@@ -256,15 +265,63 @@ func (c *endpointSetNodeCollector) Collect(ch chan<- prometheus.Metric) {
 	}
 }
 
-// EndpointSet maintains a set of active Thanos endpoints. It is backed up by Endpoint Specifications that are dynamically fetched on
-// every Update() call.
+// EndpointGroup groups common endpoints (having the same gRPC dial options and coming from the same DNS discovery) together.
+// It is backed by a set of *exthttp.Discoverer structs that can give us addresses.
+type EndpointGroup struct {
+	d *exthttp.Discoverer
+
+	dialOpts []grpc.DialOption
+}
+
+func NewEndpointGroup(d *exthttp.Discoverer, dialOpts []grpc.DialOption) *EndpointGroup {
+	return &EndpointGroup{
+		d:        d,
+		dialOpts: dialOpts,
+	}
+}
+
+func removeDuplicateEndpointSpecs(logger log.Logger, duplicatedStores prometheus.Counter, specs []query.EndpointSpec) []query.EndpointSpec {
+	set := make(map[string]query.EndpointSpec)
+	for _, spec := range specs {
+		addr := spec.Addr()
+		if _, ok := set[addr]; ok {
+			level.Warn(logger).Log("msg", "Duplicate store address is provided", "addr", addr)
+			duplicatedStores.Inc()
+		}
+		set[addr] = spec
+	}
+	deduplicated := make([]query.EndpointSpec, 0, len(set))
+	for _, value := range set {
+		deduplicated = append(deduplicated, value)
+	}
+	return deduplicated
+}
+
+// Spec returns the current set of endpoint specs.
+// Note that the returned endpoint specifications can change dynamically. If some component is missing from the list, we assume it is no longer
+// accessible and we close the gRPC client for it, unless it is strict.
+func (g *EndpointGroup) Spec() []EndpointSpec {
+	g.d.Endpoints() // TODO: ...
+
+	/*
+		Something like..
+		for _, dnsProvider := range []*dns.Provider{dnsStoreProvider, dnsRuleProvider, dnsExemplarProvider, dnsMetadataProvider, dnsTargetProvider} {
+			var tmpSpecs []query.EndpointSpec
+
+			for _, addr := range dnsProvider.Addresses() {
+				tmpSpecs = append(tmpSpecs, query.NewGRPCEndpointSpec(addr, false))
+			}
+			tmpSpecs = removeDuplicateEndpointSpecs(logger, duplicatedStores, tmpSpecs)
+			specs = append(specs, tmpSpecs...)
+		}
+	*/
+}
+
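For orientation only, a speculative sketch of how one EndpointGroup per query.EndpointConfig might be assembled from the pieces introduced earlier in this series; logger, tracer, reg, dnsProvider, cfg and groups are placeholders, and the real wiring belongs in cmd/thanos/query.go rather than in this hunk:

// One discoverer plus one set of dial options per endpoint group.
d, err := extgrpc.NewDiscoverer(logger, cfg.EndpointsConfig, dnsProvider)
if err != nil {
	return nil, err
}
mets := extgrpc.ClientGRPCMetrics(reg, "store")
opts, err := extgrpc.ClientGRPCOpts(logger, tracer, mets, cfg.GRPCClientConfig)
if err != nil {
	return nil, err
}
groups = append(groups, query.NewEndpointGroup(d, opts))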
+// EndpointSet maintains a set of active Thanos endpoint groups.
 type EndpointSet struct {
 	logger log.Logger
 
-	// Endpoint specifications can change dynamically. If some component is missing from the list, we assume it is no longer
-	// accessible and we close gRPC client for it, unless it is strict.
-	endpointSpec func() []EndpointSpec
-	dialOpts     []grpc.DialOption
+	groups []*EndpointGroup
 
 	gRPCInfoCallTimeout time.Duration
 	updateMtx           sync.Mutex
@@ -284,15 +341,11 @@ type EndpointSet struct {
 func NewEndpointSet(
 	logger log.Logger,
 	reg *prometheus.Registry,
-	configInstance string,
-	endpointSpecs func() []EndpointSpec,
-	dialOpts []grpc.DialOption,
+	groups []*EndpointGroup,
 	unhealthyEndpointTimeout time.Duration,
 ) *EndpointSet {
-	if configInstance == "" {
-		configInstance = "default"
-	}
-	endpointsMetric := newEndpointSetNodeCollector(configInstance)
+	// TODO(bwplotka): Consider adding provider per config name, for instrumentation purposes, but only if strongly requested.
+	endpointsMetric := newEndpointSetNodeCollector()
 	if reg != nil {
 		reg.MustRegister(endpointsMetric)
 	}
@@ -301,19 +354,14 @@ func NewEndpointSet(
 		logger = log.NewNopLogger()
 	}
 
-	if endpointSpecs == nil {
-		endpointSpecs = func() []EndpointSpec { return nil }
-	}
-
 	es := &EndpointSet{
 		logger:                   log.With(logger, "component", "endpointset"),
-		dialOpts:                 dialOpts,
 		endpointsMetric:          endpointsMetric,
 		gRPCInfoCallTimeout:      5 * time.Second,
 		endpoints:                make(map[string]*endpointRef),
 		endpointStatuses:         make(map[string]*EndpointStatus),
 		unhealthyEndpointTimeout: unhealthyEndpointTimeout,
-		endpointSpec:             endpointSpecs,
+		groups:                   groups,
 	}
 	return es
 }
diff --git a/pkg/ui/query.go b/pkg/ui/query.go
index ce335e735f..73f385bc81 100644
--- a/pkg/ui/query.go
+++ b/pkg/ui/query.go
@@ -22,7 +22,7 @@ import (
 type Query struct {
 	*BaseUI
 
-	endpointSet []*query.EndpointSet
+	endpointStatus func() []query.EndpointStatus
 
 	externalPrefix, prefixHeader string
 
@@ -32,7 +32,7 @@ type Query struct {
 	now func() model.Time
 }
 
-func NewQueryUI(logger log.Logger, endpointSet []*query.EndpointSet, externalPrefix, prefixHeader string) *Query {
+func NewQueryUI(logger log.Logger, endpointStatus func() []query.EndpointStatus, externalPrefix, prefixHeader string) *Query {
 	tmplVariables := map[string]string{
 		"Component": component.Query.String(),
 	}
@@ -43,7 +43,7 @@ func NewQueryUI(logger log.Logger, endpointSet []*query.EndpointSet, externalPre
 
 	return &Query{
 		BaseUI:         NewBaseUI(logger, "query_menu.html", tmplFuncs, tmplVariables, externalPrefix, prefixHeader, component.Query),
-		endpointSet:    endpointSet,
+		endpointStatus: endpointStatus,
 		externalPrefix: externalPrefix,
 		prefixHeader:   prefixHeader,
 		cwd:            runtimeInfo().CWD,
@@ -112,10 +112,8 @@ func (q *Query) status(w http.ResponseWriter, r *http.Request) {
 func (q *Query) stores(w http.ResponseWriter, r *http.Request) {
 	prefix := GetWebPrefix(q.logger, q.externalPrefix, q.prefixHeader, r)
 	statuses := make(map[component.Component][]query.EndpointStatus)
-	for _, endpointSet := range q.endpointSet {
-		for _, status := range endpointSet.GetEndpointStatus() {
-			statuses[status.ComponentType] = append(statuses[status.ComponentType], status)
-		}
+	for _, status := range q.endpointStatus() {
+		statuses[status.ComponentType] = append(statuses[status.ComponentType], status)
 	}
 
 	sources := make([]component.Component, 0, len(statuses))
diff --git a/test/e2e/e2ethanos/services.go b/test/e2e/e2ethanos/services.go
index 95c509495c..65a1e9866c 100644
--- a/test/e2e/e2ethanos/services.go
+++ b/test/e2e/e2ethanos/services.go
@@ -152,7 +152,7 @@ type QuerierBuilder struct {
 	targetAddresses   []string
 	exemplarAddresses []string
 
-	endpointConfig []query.Config
+	endpointConfig []query.EndpointConfig
 
 	tracingConfig string
 }
@@ -256,7 +256,7 @@ func (q *QuerierBuilder) Build() (*e2e.InstrumentedRunnable, error) {
 	return querier, nil
 }
 
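For context, a sketch of what a []query.EndpointConfig literal looks like with the inlined extgrpc.Config from this series; the addresses and server name are placeholders, and note that the e2e test below still references the older Name/TLSConfig fields at this point in the series:

var endpointConfig = []query.EndpointConfig{
	{
		Config: extgrpc.Config{
			GRPCClientConfig: exthttp.ClientConfig{
				TLSConfig: exthttp.TLSConfig{ServerName: "sidecar.example.svc"},
			},
			EndpointsConfig: extgrpc.EndpointsConfig{
				Addresses: []string{"sidecar.example.svc:10901"},
			},
		},
	},
	{
		Config: extgrpc.Config{
			EndpointsConfig: extgrpc.EndpointsConfig{
				Addresses: []string{"ruler.example.svc:10901"},
			},
		},
		// Strict endpoints are statically addressed and are kept in the set even when unhealthy.
		Mode: query.StrictEndpointMode,
	},
}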
-func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []query.Config) *QuerierBuilder {
+func (q *QuerierBuilder) WithEndpointConfig(endpointConfig []query.EndpointConfig) *QuerierBuilder {
 	q.endpointConfig = endpointConfig
 	return q
 }
diff --git a/test/e2e/query_test.go b/test/e2e/query_test.go
index 0d15f83f40..e19bb14a25 100644
--- a/test/e2e/query_test.go
+++ b/test/e2e/query_test.go
@@ -263,7 +263,7 @@ func TestQueryWithEndpointConfig(t *testing.T) {
 	fileSDPath, err := createSDFile(e.SharedDir(), "1", []string{sidecar3.InternalEndpoint("grpc"), sidecar4.InternalEndpoint("grpc")})
 	testutil.Ok(t, err)
 
-	endpointConfig := []query.Config{
+	endpointConfig := []query.EndpointConfig{
 		{
 			Name: "withTLS",
 			TLSConfig: query.TLSConfiguration{