From 7185f0316aa5da832b4e6319177b70dab39282d1 Mon Sep 17 00:00:00 2001
From: Aleksandr Razumov
Date: Tue, 7 Jan 2025 01:34:30 +0300
Subject: [PATCH] refactor: Telemetry instead of Metrics

---
 app/app.go                       | 11 +++++++++--
 app/profiler.go                  |  2 +-
 app/{metrics.go => telemetry.go} | 29 ++++++++++++++++-------------
 3 files changed, 26 insertions(+), 16 deletions(-)
 rename app/{metrics.go => telemetry.go} (90%)

diff --git a/app/app.go b/app/app.go
index a8865ef..3850f85 100644
--- a/app/app.go
+++ b/app/app.go
@@ -31,12 +31,19 @@ const (
 	watchdogTimeout = shutdownTimeout + time.Second*5
 )
 
+// Go runs f until interrupt.
+func Go(f func(ctx context.Context, t *Telemetry) error, op ...Option) {
+	Run(func(ctx context.Context, _ *zap.Logger, t *Telemetry) error {
+		return f(ctx, t)
+	}, op...)
+}
+
 // Run f until interrupt.
 //
 // If errors.Is(err, ctx.Err()) is valid for returned error, shutdown is considered graceful.
 // Context is cancelled on SIGINT. After watchdogTimeout application is forcefully terminated
 // with exitCodeWatchdog.
-func Run(f func(ctx context.Context, lg *zap.Logger, m *Metrics) error, op ...Option) {
+func Run(f func(ctx context.Context, lg *zap.Logger, m *Telemetry) error, op ...Option) {
 	// Apply options.
 	opts := options{
 		zapConfig: zap.NewProductionConfig(),
@@ -77,7 +84,7 @@
 		panic(fmt.Sprintf("failed to get resource: %v", err))
 	}
 
-	m, err := newMetrics(ctx, lg.Named("metrics"), res, opts.meterOptions, opts.tracerOptions, opts.loggerOptions)
+	m, err := newTelemetry(ctx, lg.Named("metrics"), res, opts.meterOptions, opts.tracerOptions, opts.loggerOptions)
 	if err != nil {
 		panic(err)
 	}
diff --git a/app/profiler.go b/app/profiler.go
index e192fc8..25c4dde 100644
--- a/app/profiler.go
+++ b/app/profiler.go
@@ -10,7 +10,7 @@ import (
 	"github.com/go-faster/sdk/profiler"
 )
 
-func (m *Metrics) registerProfiler(mux *http.ServeMux) {
+func (m *Telemetry) registerProfiler(mux *http.ServeMux) {
 	var routes []string
 	if v := os.Getenv("PPROF_ROUTES"); v != "" {
 		routes = strings.Split(v, ",")
diff --git a/app/metrics.go b/app/telemetry.go
similarity index 90%
rename from app/metrics.go
rename to app/telemetry.go
index 25ab93b..cb35910 100644
--- a/app/metrics.go
+++ b/app/telemetry.go
@@ -37,8 +37,11 @@ type httpEndpoint struct {
 	addr string
 }
 
-// Metrics implement common basic metrics and infrastructure to it.
-type Metrics struct {
+// Deprecated: use Telemetry.
+type Metrics = Telemetry
+
+// Telemetry implements common basic metrics and the infrastructure around them.
+type Telemetry struct {
 	lg *zap.Logger
 
 	prom *promClient.Registry
@@ -54,7 +57,7 @@ type Metrics struct {
 	shutdowns []shutdown
 }
 
-func (m *Metrics) registerShutdown(name string, fn func(ctx context.Context) error) {
+func (m *Telemetry) registerShutdown(name string, fn func(ctx context.Context) error) {
 	m.shutdowns = append(m.shutdowns, shutdown{name: name, fn: fn})
 }
 
@@ -63,11 +66,11 @@ type shutdown struct {
 	fn func(ctx context.Context) error
 }
 
-func (m *Metrics) String() string {
+func (m *Telemetry) String() string {
 	return "metrics"
 }
 
-func (m *Metrics) run(ctx context.Context) error {
+func (m *Telemetry) run(ctx context.Context) error {
 	defer m.lg.Debug("Stopped metrics")
 	wg, ctx := errgroup.WithContext(ctx)
 
@@ -101,7 +104,7 @@ func (m *Metrics) run(ctx context.Context) error {
 	return wg.Wait()
 }
 
-func (m *Metrics) shutdown(ctx context.Context) {
+func (m *Telemetry) shutdown(ctx context.Context) {
 	var wg sync.WaitGroup
 
 	// Launch shutdowns in parallel.
@@ -127,28 +130,28 @@ func (m *Metrics) shutdown(ctx context.Context) {
 	wg.Wait()
 }
 
-func (m *Metrics) MeterProvider() metric.MeterProvider {
+func (m *Telemetry) MeterProvider() metric.MeterProvider {
 	if m.meterProvider == nil {
 		return otel.GetMeterProvider()
 	}
 	return m.meterProvider
 }
 
-func (m *Metrics) TracerProvider() trace.TracerProvider {
+func (m *Telemetry) TracerProvider() trace.TracerProvider {
 	if m.tracerProvider == nil {
 		return otel.GetTracerProvider()
 	}
 	return m.tracerProvider
 }
 
-func (m *Metrics) LoggerProvider() log.LoggerProvider {
+func (m *Telemetry) LoggerProvider() log.LoggerProvider {
 	if m.loggerProvider == nil {
 		return noop.NewLoggerProvider()
 	}
 	return m.loggerProvider
 }
 
-func (m *Metrics) TextMapPropagator() propagation.TextMapPropagator {
+func (m *Telemetry) TextMapPropagator() propagation.TextMapPropagator {
 	return m.propagator
 }
 
@@ -172,21 +175,21 @@ func (z zapErrorHandler) Handle(err error) {
 	z.lg.Error("Error", zap.Error(err))
 }
 
-func newMetrics(
+func newTelemetry(
 	ctx context.Context,
 	lg *zap.Logger,
 	res *resource.Resource,
 	meterOptions []autometer.Option,
 	tracerOptions []autotracer.Option,
 	logsOptions []autologs.Option,
-) (*Metrics, error) {
+) (*Telemetry, error) {
 	{
 		// Setup global OTEL logger and error handler.
 		logger := lg.Named("otel")
 		otel.SetLogger(zapr.NewLogger(logger))
 		otel.SetErrorHandler(zapErrorHandler{lg: logger})
 	}
-	m := &Metrics{
+	m := &Telemetry{
 		lg:       lg,
 		resource: res,
 	}
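
Usage sketch (not part of the patch): the new Go helper plus the
Metrics = Telemetry alias mean existing Run callers keep compiling, while new
code can skip the explicit *zap.Logger parameter. The caller below is a
minimal, hypothetical example; it assumes the import path
github.com/go-faster/sdk/app (matching the go-faster/sdk module referenced in
profiler.go) and an example tracer name, neither of which is confirmed by the
diff.

package main

import (
	"context"

	"github.com/go-faster/sdk/app"
)

func main() {
	// app.Go wraps app.Run and drops the *zap.Logger argument;
	// the callback receives the Telemetry handle directly.
	app.Go(func(ctx context.Context, t *app.Telemetry) error {
		// TracerProvider falls back to the OTEL global provider
		// when no tracer was configured (see telemetry.go above).
		tracer := t.TracerProvider().Tracer("example")
		_, span := tracer.Start(ctx, "startup")
		span.End()

		<-ctx.Done()     // run until SIGINT cancels the context
		return ctx.Err() // returning ctx.Err() marks the shutdown as graceful
	})
}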