From eede8da5989fa4a4e2408ca2070bb12529954db0 Mon Sep 17 00:00:00 2001 From: frrist Date: Thu, 19 Sep 2019 16:16:44 +1000 Subject: [PATCH] remove deprecated code and update README --- README.md | 47 ++- context.go | 38 --- context_test.go | 43 --- entry.go | 7 - example_test.go | 16 -- log.go | 375 ------------------------ log_test.go | 449 ----------------------------- loggable.go | 42 --- metadata.go | 77 ----- metadata_test.go | 50 ---- oldlog.go => setup.go | 20 -- tracer/LICENSE | 21 -- tracer/context.go | 42 --- tracer/debug.go | 78 ----- tracer/event.go | 62 ---- tracer/propagation.go | 61 ---- tracer/propagation_ot.go | 180 ------------ tracer/raw.go | 34 --- tracer/recorder.go | 103 ------- tracer/recorder_test.go | 54 ---- tracer/span.go | 274 ------------------ tracer/tracer.go | 280 ------------------ tracer/util.go | 25 -- tracer/wire/Makefile | 6 - tracer/wire/carrier.go | 40 --- tracer/wire/carrier_test.go | 38 --- tracer/wire/gen.go | 6 - tracer/wire/wire.pb.go | 554 ------------------------------------ tracer/wire/wire.proto | 10 - writer/option.go | 4 - writer/writer.go | 258 ----------------- writer/writer_test.go | 166 ----------- 32 files changed, 17 insertions(+), 3443 deletions(-) delete mode 100644 context.go delete mode 100644 context_test.go delete mode 100644 entry.go delete mode 100644 example_test.go delete mode 100644 log_test.go delete mode 100644 loggable.go delete mode 100644 metadata.go delete mode 100644 metadata_test.go rename oldlog.go => setup.go (87%) delete mode 100644 tracer/LICENSE delete mode 100644 tracer/context.go delete mode 100644 tracer/debug.go delete mode 100644 tracer/event.go delete mode 100644 tracer/propagation.go delete mode 100644 tracer/propagation_ot.go delete mode 100644 tracer/raw.go delete mode 100644 tracer/recorder.go delete mode 100644 tracer/recorder_test.go delete mode 100644 tracer/span.go delete mode 100644 tracer/tracer.go delete mode 100644 tracer/util.go delete mode 100644 tracer/wire/Makefile delete mode 100644 tracer/wire/carrier.go delete mode 100644 tracer/wire/carrier_test.go delete mode 100644 tracer/wire/gen.go delete mode 100644 tracer/wire/wire.pb.go delete mode 100644 tracer/wire/wire.proto delete mode 100644 writer/option.go delete mode 100644 writer/writer.go delete mode 100644 writer/writer_test.go diff --git a/README.md b/README.md index b0b5b9f..e734008 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,8 @@ > The logging library used by go-ipfs -It currently uses a modified version of [go-logging](https://github.com/whyrusleeping/go-logging) to implement the standard printf-style log output. +go-log wraps [zap](https://github.com/uber-go/zap) to provide a logging facade. go-log manages logging +instances and allows for their levels to be controlled individually. ## Install @@ -28,41 +29,27 @@ Once the package is imported under the name `logging`, an instance of `EventLogg var log = logging.Logger("subsystem name") ``` -It can then be used to emit log messages, either plain printf-style messages at six standard levels or structured messages using `Start`, `StartFromParentState`, `Finish` and `FinishWithErr` methods. 
+It can then be used to emit log messages in plain printf-style messages at seven standard levels: -## Example +Levels may be set for all loggers: ```go -func (s *Session) GetBlock(ctx context.Context, c *cid.Cid) (blk blocks.Block, err error) { - - // Starts Span called "Session.GetBlock", associates with `ctx` - ctx = log.Start(ctx, "Session.GetBlock") - - // defer so `blk` and `err` can be evaluated after call - defer func() { - // tag span associated with `ctx` - log.SetTags(ctx, map[string]interface{}{ - "cid": c, - "block", blk, - }) - // if err is non-nil tag the span with an error - log.FinishWithErr(ctx, err) - }() - - if shouldStartSomething() { - // log message on span associated with `ctx` - log.LogKV(ctx, "startSomething", true) - } - ... -} +lvl, err := logging.LevelFromString("error") + if err != nil { + panic(err) + } +logging.SetAllLoggers(lvl) ``` -## Tracing -`go-log` wraps the [opentracing-go](https://github.com/opentracing/opentracing-go) methods - `StartSpan`, `Finish`, `LogKV`, and `SetTag`. +or individually: -`go-log` implements its own tracer - `loggabletracer` - based on the [basictracer-go](https://github.com/opentracing/basictracer-go) implementation. If there is an active [`WriterGroup`](https://github.com/ipfs/go-log/blob/master/writer/option.go) the `loggabletracer` will [record](https://github.com/ipfs/go-log/blob/master/tracer/recorder.go) span data to the `WriterGroup`. An example of this can be seen in the [`log tail`](https://github.com/ipfs/go-ipfs/blob/master/core/commands/log.go) command of `go-ipfs`. - -Third party tracers may be used by calling `opentracing.SetGlobalTracer()` with your desired tracing implementation. An example of this can be seen using the [`go-jaeger-plugin`](https://github.com/ipfs/go-jaeger-plugin) and the `go-ipfs` [tracer plugin](https://github.com/ipfs/go-ipfs/blob/master/plugin/tracer.go) +```go +lvl, err := logging.LevelFromString("error") + if err != nil { + panic(err) + } +logging.SetLogLevel("foo", "info") +``` ## Contribute diff --git a/context.go b/context.go deleted file mode 100644 index b8ef5bc..0000000 --- a/context.go +++ /dev/null @@ -1,38 +0,0 @@ -package log - -import ( - "context" - "errors" -) - -type key int - -const metadataKey key = 0 - -// ContextWithLoggable returns a derived context which contains the provided -// Loggable. Any Events logged with the derived context will include the -// provided Loggable. -func ContextWithLoggable(ctx context.Context, l Loggable) context.Context { - existing, err := MetadataFromContext(ctx) - if err != nil { - // context does not contain meta. just set the new metadata - child := context.WithValue(ctx, metadataKey, Metadata(l.Loggable())) - return child - } - - merged := DeepMerge(existing, l.Loggable()) - child := context.WithValue(ctx, metadataKey, merged) - return child -} - -// MetadataFromContext extracts Matadata from a given context's value. 
-func MetadataFromContext(ctx context.Context) (Metadata, error) { - value := ctx.Value(metadataKey) - if value != nil { - metadata, ok := value.(Metadata) - if ok { - return metadata, nil - } - } - return nil, errors.New("context contains no metadata") -} diff --git a/context_test.go b/context_test.go deleted file mode 100644 index e47982a..0000000 --- a/context_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package log - -import ( - "context" - "testing" -) - -func TestContextContainsMetadata(t *testing.T) { - t.Parallel() - - m := Metadata{"foo": "bar"} - ctx := ContextWithLoggable(context.Background(), m) - got, err := MetadataFromContext(ctx) - if err != nil { - t.Fatal(err) - } - - _, exists := got["foo"] - if !exists { - t.Fail() - } -} - -func TestContextWithPreexistingMetadata(t *testing.T) { - t.Parallel() - - ctx := ContextWithLoggable(context.Background(), Metadata{"hello": "world"}) - ctx = ContextWithLoggable(ctx, Metadata{"goodbye": "earth"}) - - got, err := MetadataFromContext(ctx) - if err != nil { - t.Fatal(err) - } - - _, exists := got["hello"] - if !exists { - t.Fatal("original key not present") - } - _, exists = got["goodbye"] - if !exists { - t.Fatal("new key not present") - } -} diff --git a/entry.go b/entry.go deleted file mode 100644 index 63c0213..0000000 --- a/entry.go +++ /dev/null @@ -1,7 +0,0 @@ -package log - -type entry struct { - loggables []Loggable - system string - event string -} diff --git a/example_test.go b/example_test.go deleted file mode 100644 index 74d7bae..0000000 --- a/example_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package log - -import "context" - -func ExampleEventLogger() { - { - log := EventLogger(nil) - e := log.EventBegin(context.Background(), "dial") - e.Done() - } - { - log := EventLogger(nil) - e := log.EventBegin(context.Background(), "dial") - _ = e.Close() // implements io.Closer for convenience - } -} diff --git a/log.go b/log.go index 45d4333..02eb196 100644 --- a/log.go +++ b/log.go @@ -4,17 +4,8 @@ package log import ( - "bytes" - "context" - "encoding/json" - "path" - "runtime" "time" - writer "github.com/ipfs/go-log/writer" - - opentrace "github.com/opentracing/opentracing-go" - otExt "github.com/opentracing/opentracing-go/ext" "go.uber.org/zap" ) @@ -33,9 +24,6 @@ type StandardLogger interface { Infof(format string, args ...interface{}) Panic(args ...interface{}) Panicf(format string, args ...interface{}) - // Deprecated use Warn - Warning(args ...interface{}) - // Deprecated use Warnf Warningf(format string, args ...interface{}) Warn(args ...interface{}) Warnf(format string, args ...interface{}) @@ -45,122 +33,8 @@ type StandardLogger interface { // containing structured metadata type EventLogger interface { StandardLogger - - // Event merges structured data from the provided inputs into a single - // machine-readable log event. - // - // If the context contains metadata, a copy of this is used as the base - // metadata accumulator. - // - // If one or more loggable objects are provided, these are deep-merged into base blob. - // - // Next, the event name is added to the blob under the key "event". If - // the key "event" already exists, it will be over-written. - // - // Finally the timestamp and package name are added to the accumulator and - // the metadata is logged. 
- // DEPRECATED - // Deprecated: Stop using go-log for event logging - Event(ctx context.Context, event string, m ...Loggable) - - // DEPRECATED - // Deprecated: Stop using go-log for event logging - EventBegin(ctx context.Context, event string, m ...Loggable) *EventInProgress - - // Start starts an opentracing span with `name`, using - // any Span found within `ctx` as a ChildOfRef. If no such parent could be - // found, Start creates a root (parentless) Span. - // - // The return value is a context.Context object built around the - // returned Span. - // - // Example usage: - // - // SomeFunction(ctx context.Context, ...) { - // ctx := log.Start(ctx, "SomeFunction") - // defer log.Finish(ctx) - // ... - // } - // Deprecated: Stop using go-log for event logging - Start(ctx context.Context, name string) context.Context - - // StartFromParentState starts an opentracing span with `name`, using - // any Span found within `ctx` as a ChildOfRef. If no such parent could be - // found, StartSpanFromParentState creates a root (parentless) Span. - // - // StartFromParentState will attempt to deserialize a SpanContext from `parent`, - // using any Span found within to continue the trace - // - // The return value is a context.Context object built around the - // returned Span. - // - // An error is returned when `parent` cannot be deserialized to a SpanContext - // - // Example usage: - // - // SomeFunction(ctx context.Context, bParent []byte) { - // ctx := log.StartFromParentState(ctx, "SomeFunction", bParent) - // defer log.Finish(ctx) - // ... - // } - // Deprecated: Stop using go-log for event logging - StartFromParentState(ctx context.Context, name string, parent []byte) (context.Context, error) - - // Finish completes the span associated with `ctx`. - // - // Finish() must be the last call made to any span instance, and to do - // otherwise leads to undefined behavior. - // Finish will do its best to notify (log) when used in correctly - // .e.g called twice, or called on a spanless `ctx` - // Deprecated: Stop using go-log for event logging - Finish(ctx context.Context) - - // FinishWithErr completes the span associated with `ctx` and also calls - // SetErr if `err` is non-nil - // - // FinishWithErr() must be the last call made to any span instance, and to do - // otherwise leads to undefined behavior. - // FinishWithErr will do its best to notify (log) when used in correctly - // .e.g called twice, or called on a spanless `ctx` - // Deprecated: Stop using go-log for event logging - FinishWithErr(ctx context.Context, err error) - - // SetErr tags the span associated with `ctx` to reflect an error occured, and - // logs the value `err` under key `error`. 
- // Deprecated: Stop using go-log for event logging - SetErr(ctx context.Context, err error) - - // LogKV records key:value logging data about an event stored in `ctx` - // Eexample: - // log.LogKV( - // "error", "resolve failure", - // "type", "cache timeout", - // "waited.millis", 1500) - // Deprecated: Stop using go-log for event logging - LogKV(ctx context.Context, alternatingKeyValues ...interface{}) - - // SetTag tags key `k` and value `v` on the span associated with `ctx` - // Deprecated: Stop using go-log for event logging - SetTag(ctx context.Context, key string, value interface{}) - - // SetTags tags keys from the `tags` maps on the span associated with `ctx` - // Example: - // log.SetTags(ctx, map[string]{ - // "type": bizStruct, - // "request": req, - // }) - // Deprecated: Stop using go-log for event logging - SetTags(ctx context.Context, tags map[string]interface{}) - - // SerializeContext takes the SpanContext instance stored in `ctx` and Seralizes - // it to bytes. An error is returned if the `ctx` cannot be serialized to - // a bytes array - // Deprecated: Stop using go-log for event logging - SerializeContext(ctx context.Context) ([]byte, error) } -var _ EventLogger = Logger("test-logger") - // Logger retrieves an event logger by name func Logger(system string) *ZapEventLogger { if len(system) == 0 { @@ -177,256 +51,7 @@ func Logger(system string) *ZapEventLogger { // ZapEventLogger implements the EventLogger and wraps a go-logging Logger type ZapEventLogger struct { zap.SugaredLogger - system string - // TODO add log-level -} - -// Deprecated: use Warn -func (el *ZapEventLogger) Warning(args ...interface{}) { - el.Warn(args...) -} - -// Deprecated: use Warnf -func (el *ZapEventLogger) Warningf(format string, args ...interface{}) { - el.Warnf(format, args...) -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) Start(ctx context.Context, operationName string) context.Context { - span, ctx := opentrace.StartSpanFromContext(ctx, operationName) - span.SetTag("system", el.system) - return ctx -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) StartFromParentState(ctx context.Context, operationName string, parent []byte) (context.Context, error) { - sc, err := deserializeContext(parent) - if err != nil { - return nil, err - } - - //TODO RPCServerOption is probably not the best tag, as this is likely from a peer - span, ctx := opentrace.StartSpanFromContext(ctx, operationName, otExt.RPCServerOption(sc)) - span.SetTag("system", el.system) - return ctx, nil -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) SerializeContext(ctx context.Context) ([]byte, error) { - gTracer := opentrace.GlobalTracer() - b := make([]byte, 0) - carrier := bytes.NewBuffer(b) - span := opentrace.SpanFromContext(ctx) - if err := gTracer.Inject(span.Context(), opentrace.Binary, carrier); err != nil { - return nil, err - } - return carrier.Bytes(), nil -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) LogKV(ctx context.Context, alternatingKeyValues ...interface{}) { - span := opentrace.SpanFromContext(ctx) - if span == nil { - _, file, line, _ := runtime.Caller(1) - log.Errorf("LogKV with no Span in context called on %s:%d", path.Base(file), line) - return - } - span.LogKV(alternatingKeyValues...) 
-} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) SetTag(ctx context.Context, k string, v interface{}) { - span := opentrace.SpanFromContext(ctx) - if span == nil { - _, file, line, _ := runtime.Caller(1) - log.Errorf("SetTag with no Span in context called on %s:%d", path.Base(file), line) - return - } - span.SetTag(k, v) -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) SetTags(ctx context.Context, tags map[string]interface{}) { - span := opentrace.SpanFromContext(ctx) - if span == nil { - _, file, line, _ := runtime.Caller(1) - log.Errorf("SetTags with no Span in context called on %s:%d", path.Base(file), line) - return - } - for k, v := range tags { - span.SetTag(k, v) - } -} - -func (el *ZapEventLogger) setErr(ctx context.Context, err error, skip int) { - span := opentrace.SpanFromContext(ctx) - if span == nil { - _, file, line, _ := runtime.Caller(skip) - log.Errorf("SetErr with no Span in context called on %s:%d", path.Base(file), line) - return - } - if err == nil { - return - } - - otExt.Error.Set(span, true) - span.LogKV("error", err.Error()) -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) SetErr(ctx context.Context, err error) { - el.setErr(ctx, err, 1) -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) Finish(ctx context.Context) { - span := opentrace.SpanFromContext(ctx) - if span == nil { - _, file, line, _ := runtime.Caller(1) - log.Errorf("Finish with no Span in context called on %s:%d", path.Base(file), line) - return - } - span.Finish() -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) FinishWithErr(ctx context.Context, err error) { - el.setErr(ctx, err, 2) - el.Finish(ctx) -} - -func deserializeContext(bCtx []byte) (opentrace.SpanContext, error) { - gTracer := opentrace.GlobalTracer() - carrier := bytes.NewReader(bCtx) - spanContext, err := gTracer.Extract(opentrace.Binary, carrier) - if err != nil { - log.Warning("Failed to deserialize context %s", err) - return nil, err - } - return spanContext, nil -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) EventBegin(ctx context.Context, event string, metadata ...Loggable) *EventInProgress { - ctx = el.Start(ctx, event) - - for _, m := range metadata { - for l, v := range m.Loggable() { - el.LogKV(ctx, l, v) - } - } - - eip := &EventInProgress{} - eip.doneFunc = func(additional []Loggable) { - // anything added during the operation - // e.g. deprecated methods event.Append(...) or event.SetError(...) 
- for _, m := range eip.loggables { - for l, v := range m.Loggable() { - el.LogKV(ctx, l, v) - } - } - el.Finish(ctx) - } - return eip -} - -// Deprecated: Stop using go-log for event logging -func (el *ZapEventLogger) Event(ctx context.Context, event string, metadata ...Loggable) { - - // short circuit if theres nothing to write to - if !writer.WriterGroup.Active() { - return - } - - // Collect loggables for later logging - var loggables []Loggable - - // get any existing metadata from the context - existing, err := MetadataFromContext(ctx) - if err != nil { - existing = Metadata{} - } - loggables = append(loggables, existing) - - for _, datum := range metadata { - loggables = append(loggables, datum) - } - - e := entry{ - loggables: loggables, - system: el.system, - event: event, - } - - accum := Metadata{} - for _, loggable := range e.loggables { - accum = DeepMerge(accum, loggable.Loggable()) - } - - // apply final attributes to reserved keys - // TODO accum["level"] = level - accum["event"] = e.event - accum["system"] = e.system - accum["time"] = FormatRFC3339(time.Now()) - - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetEscapeHTML(false) - err = encoder.Encode(accum) - if err != nil { - el.Errorf("ERROR FORMATTING EVENT ENTRY: %s", err) - return - } - - writer.WriterGroup.Write(buf.Bytes()) -} - -// DEPRECATED -// EventInProgress represent and event which is happening -// Deprecated: Stop using go-log for event logging -type EventInProgress struct { - loggables []Loggable - doneFunc func([]Loggable) -} - -// DEPRECATED use `LogKV` or `SetTag` -// Append adds loggables to be included in the call to Done -func (eip *EventInProgress) Append(l Loggable) { - eip.loggables = append(eip.loggables, l) -} - -// DEPRECATED use `SetError(ctx, error)` -// SetError includes the provided error -func (eip *EventInProgress) SetError(err error) { - eip.loggables = append(eip.loggables, LoggableMap{ - "error": err.Error(), - }) -} - -// Done creates a new Event entry that includes the duration and appended -// loggables. -// Deprecated: Stop using go-log for event logging -func (eip *EventInProgress) Done() { - eip.doneFunc(eip.loggables) // create final event with extra data -} - -// DEPRECATED use `FinishWithErr` -// DoneWithErr creates a new Event entry that includes the duration and appended -// loggables. DoneWithErr accepts an error, if err is non-nil, it is set on -// the EventInProgress. Otherwise the logic is the same as the `Done()` method -func (eip *EventInProgress) DoneWithErr(err error) { - if err != nil { - eip.SetError(err) - } - eip.doneFunc(eip.loggables) -} - -// Close is an alias for done -// Deprecated: Stop using go-log for event logging -func (eip *EventInProgress) Close() error { - eip.Done() - return nil } // FormatRFC3339 returns the given time in UTC with RFC3999Nano format. 
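The hunk above keeps `Logger(system string) *ZapEventLogger` and the printf-style `StandardLogger` surface, and the updated README pairs them with `LevelFromString`, `SetAllLoggers`, and `SetLogLevel`. Below is a minimal sketch of post-patch usage based on those retained pieces; the import path `github.com/ipfs/go-log`, the `logging` alias, the subsystem name, and the log messages are illustrative assumptions, and the `SetLogLevel` call mirrors the README in discarding any result it may return.

```go
package main

import (
	logging "github.com/ipfs/go-log"
)

// One named logger per subsystem, as the README shows.
var log = logging.Logger("subsystem")

func main() {
	// Set a floor for every registered logger, mirroring the README snippet.
	lvl, err := logging.LevelFromString("error")
	if err != nil {
		panic(err)
	}
	logging.SetAllLoggers(lvl)

	// Then loosen a single subsystem. The README snippet calls this without
	// inspecting any returned value, so the same is done here.
	logging.SetLogLevel("subsystem", "info")

	// Only the plain printf-style levels remain after this patch.
	log.Infof("listening on %s", ":8080")
	log.Warnf("event logging helpers were removed; use Infof/Warnf/Errorf")
}
```

Code that previously relied on `Event`, `EventBegin`, or the `Start`/`Finish` span helpers no longer compiles against this package once the hunk above lands.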
diff --git a/log_test.go b/log_test.go deleted file mode 100644 index 0ddfc8c..0000000 --- a/log_test.go +++ /dev/null @@ -1,449 +0,0 @@ -package log - -import ( - "context" - "encoding/json" - "errors" - "io" - "testing" - - tracer "github.com/ipfs/go-log/tracer" - writer "github.com/ipfs/go-log/writer" -) - -func assertEqual(t *testing.T, expected interface{}, actual interface{}) { - if expected != actual { - t.Fatalf("%s != %s", expected, actual) - } -} - -func assertNotZero(t *testing.T, a interface{}) { - if a == 0 { - t.Fatalf("%s = 0", a) - } -} - -// Test add remove writer -func TestChangeWriter(t *testing.T) { - - if writer.WriterGroup.Active() { - panic("here") - } - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event - ctx1 := lgr.Start(ctx, "event1") - - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - - ctx2 := lgr.Start(ctx1, "event2") - - lgr.Finish(ctx2) - lgr.Finish(ctx1) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder := json.NewDecoder(lgs) - evtDecoder.Decode(&ls) - - // event name and system should be - assertEqual(t, "event2", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.TraceID) - assertNotZero(t, ls.SpanID) -} - -func TestSingleEvent(t *testing.T) { - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event - ctx = lgr.Start(ctx, "event1") - - // finish the event - lgr.Finish(ctx) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder := json.NewDecoder(lgs) - evtDecoder.Decode(&ls) - - // event name and system should be - assertEqual(t, "event1", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.TraceID) - assertNotZero(t, ls.SpanID) -} - -func TestSingleEventWithErr(t *testing.T) { - - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event - ctx = lgr.Start(ctx, "event1") - - // finish the event - lgr.FinishWithErr(ctx, errors.New("rawer im an error")) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder := json.NewDecoder(lgs) - evtDecoder.Decode(&ls) - - // event name and system should be - assertEqual(t, "event1", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - assertEqual(t, true, ls.Tags["error"]) - assertEqual(t, ls.Logs[0].Field[0].Value, "rawer im an error") - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.TraceID) - assertNotZero(t, ls.SpanID) -} - -func TestEventWithTag(t *testing.T) { - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be 
read from lgs - writer.WriterGroup.AddWriter(lgb) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event - ctx = lgr.Start(ctx, "event1") - lgr.SetTag(ctx, "tk", "tv") - - // finish the event - lgr.Finish(ctx) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder := json.NewDecoder(lgs) - evtDecoder.Decode(&ls) - - // event name and system should be - assertEqual(t, "event1", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - assertEqual(t, "tv", ls.Tags["tk"]) - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.TraceID) - assertNotZero(t, ls.SpanID) -} - -func TestEventWithTags(t *testing.T) { - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event - ctx = lgr.Start(ctx, "event1") - lgr.SetTags(ctx, map[string]interface{}{ - "tk1": "tv1", - "tk2": "tv2", - }) - - // finish the event - lgr.Finish(ctx) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder := json.NewDecoder(lgs) - evtDecoder.Decode(&ls) - - // event name and system should be - assertEqual(t, "event1", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - assertEqual(t, "tv1", ls.Tags["tk1"]) - assertEqual(t, "tv2", ls.Tags["tk2"]) - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.TraceID) - assertNotZero(t, ls.SpanID) -} - -func TestEventWithLogs(t *testing.T) { - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event - ctx = lgr.Start(ctx, "event1") - lgr.LogKV(ctx, "log1", "logv1", "log2", "logv2") - lgr.LogKV(ctx, "treeLog", []string{"Pine", "Juniper", "Spruce", "Ginkgo"}) - - // finish the event - lgr.Finish(ctx) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder := json.NewDecoder(lgs) - evtDecoder.Decode(&ls) - - // event name and system should be - assertEqual(t, "event1", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - - assertEqual(t, "log1", ls.Logs[0].Field[0].Key) - assertEqual(t, "logv1", ls.Logs[0].Field[0].Value) - assertEqual(t, "log2", ls.Logs[0].Field[1].Key) - assertEqual(t, "logv2", ls.Logs[0].Field[1].Value) - - // Should be a differnt log (different timestamp) - assertEqual(t, "treeLog", ls.Logs[1].Field[0].Key) - assertEqual(t, "[Pine Juniper Spruce Ginkgo]", ls.Logs[1].Field[0].Value) - - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.TraceID) - assertNotZero(t, ls.SpanID) -} - -func TestMultiEvent(t *testing.T) { - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - evtDecoder := json.NewDecoder(lgs) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - ctx = lgr.Start(ctx, "root") - - doEvent(ctx, "e1", lgr) - doEvent(ctx, 
"e2", lgr) - - lgr.Finish(ctx) - - e1 := getEvent(evtDecoder) - assertEqual(t, "e1", e1.Operation) - assertEqual(t, "test", e1.Tags["system"]) - assertNotZero(t, e1.Duration) - assertNotZero(t, e1.Start) - - // I hope your clocks work... - e2 := getEvent(evtDecoder) - assertEqual(t, "e2", e2.Operation) - assertEqual(t, "test", e2.Tags["system"]) - assertNotZero(t, e2.Duration) - assertEqual(t, e1.TraceID, e2.TraceID) - - er := getEvent(evtDecoder) - assertEqual(t, "root", er.Operation) - assertEqual(t, "test", er.Tags["system"]) - assertNotZero(t, er.Start) - assertNotZero(t, er.TraceID) - assertNotZero(t, er.SpanID) - -} - -func TestEventSerialization(t *testing.T) { - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - evtDecoder := json.NewDecoder(lgs) - - // create a logger - lgr := Logger("test") - - // start an event - sndctx := lgr.Start(context.Background(), "send") - - // **imagine** that we are putting `bc` (byte context) into a protobuf message - // and send the message to another peer on the network - bc, err := lgr.SerializeContext(sndctx) - if err != nil { - t.Fatal(err) - } - - // now **imagine** some peer getting a protobuf message and extracting - // `bc` from the message to continue the operation - rcvctx, err := lgr.StartFromParentState(context.Background(), "recv", bc) - if err != nil { - t.Fatal(err) - } - - // at some point the sender completes their operation - lgr.Finish(sndctx) - e := getEvent(evtDecoder) - assertEqual(t, "send", e.Operation) - assertEqual(t, "test", e.Tags["system"]) - assertNotZero(t, e.Start) - assertNotZero(t, e.Start) - - // and then the receiver finishes theirs - lgr.Finish(rcvctx) - e = getEvent(evtDecoder) - assertEqual(t, "recv", e.Operation) - assertEqual(t, "test", e.Tags["system"]) - assertNotZero(t, e.Start) - assertNotZero(t, e.Duration) - assertNotZero(t, e.TraceID) - assertNotZero(t, e.SpanID) - -} - -func doEvent(ctx context.Context, name string, el EventLogger) context.Context { - ctx = el.Start(ctx, name) - defer func() { - el.Finish(ctx) - }() - return ctx -} - -func getEvent(ed *json.Decoder) tracer.LoggableSpan { - // decode the log event - var ls tracer.LoggableSpan - ed.Decode(&ls) - return ls -} - -// DEPRECATED methods tested below -func TestEventBegin(t *testing.T) { - - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - evtDecoder := json.NewDecoder(lgs) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event in progress with metadata - eip := lgr.EventBegin(ctx, "event", LoggableMap{"key": "val"}) - - // append more metadata - eip.Append(LoggableMap{"foo": "bar"}) - - // set an error - eip.SetError(errors.New("gerrr im an error")) - - // finish the event - eip.Done() - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder.Decode(&ls) - - assertEqual(t, "event", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - assertEqual(t, ls.Logs[0].Field[0].Value, "val") - assertEqual(t, ls.Logs[1].Field[0].Value, "bar") - assertEqual(t, ls.Logs[2].Field[0].Value, "gerrr im an error") - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.SpanID) - assertNotZero(t, ls.TraceID) -} - -func 
TestEventBeginWithErr(t *testing.T) { - - // Set up a pipe to use as backend and log stream - lgs, lgb := io.Pipe() - // event logs will be written to lgb - // event logs will be read from lgs - writer.WriterGroup.AddWriter(lgb) - evtDecoder := json.NewDecoder(lgs) - - // create a logger - lgr := Logger("test") - - // create a root context - ctx := context.Background() - - // start an event in progress with metadata - eip := lgr.EventBegin(ctx, "event", LoggableMap{"key": "val"}) - - // append more metadata - eip.Append(LoggableMap{"foo": "bar"}) - - // finish the event with an error - eip.DoneWithErr(errors.New("gerrr im an error")) - - // decode the log event - var ls tracer.LoggableSpan - evtDecoder.Decode(&ls) - - assertEqual(t, "event", ls.Operation) - assertEqual(t, "test", ls.Tags["system"]) - assertEqual(t, ls.Logs[0].Field[0].Value, "val") - assertEqual(t, ls.Logs[1].Field[0].Value, "bar") - assertEqual(t, ls.Logs[2].Field[0].Value, "gerrr im an error") - // greater than zero should work for now - assertNotZero(t, ls.Duration) - assertNotZero(t, ls.Start) - assertNotZero(t, ls.SpanID) - assertNotZero(t, ls.TraceID) -} diff --git a/loggable.go b/loggable.go deleted file mode 100644 index f4edb26..0000000 --- a/loggable.go +++ /dev/null @@ -1,42 +0,0 @@ -package log - -// Loggable describes objects that can be marshalled into Metadata for logging -type Loggable interface { - Loggable() map[string]interface{} -} - -// LoggableMap is just a generic map keyed by string. It -// implements the Loggable interface. -type LoggableMap map[string]interface{} - -// Loggable implements the Loggable interface for LoggableMap -func (l LoggableMap) Loggable() map[string]interface{} { - return l -} - -// LoggableF converts a func into a Loggable -type LoggableF func() map[string]interface{} - -// Loggable implements the Loggable interface by running -// the LoggableF function. -func (l LoggableF) Loggable() map[string]interface{} { - return l() -} - -// Deferred returns a LoggableF where the execution of the -// provided function is deferred. -func Deferred(key string, f func() string) Loggable { - function := func() map[string]interface{} { - return map[string]interface{}{ - key: f(), - } - } - return LoggableF(function) -} - -// Pair returns a Loggable where key is paired to Loggable. -func Pair(key string, l Loggable) Loggable { - return LoggableMap{ - key: l, - } -} diff --git a/metadata.go b/metadata.go deleted file mode 100644 index 07947b5..0000000 --- a/metadata.go +++ /dev/null @@ -1,77 +0,0 @@ -package log - -import ( - "encoding/json" - "errors" - "reflect" -) - -// Metadata is a convenience type for generic maps -type Metadata map[string]interface{} - -// DeepMerge merges the second Metadata parameter into the first. -// Nested Metadata are merged recursively. Primitives are over-written. -func DeepMerge(b, a Metadata) Metadata { - out := Metadata{} - for k, v := range b { - out[k] = v - } - for k, v := range a { - - maybe, err := Metadatify(v) - if err != nil { - // if the new value is not meta. just overwrite the dest vaue - if out[k] != nil { - log.Debugf("Overwriting key: %s, old: %s, new: %s", k, out[k], v) - } - out[k] = v - continue - } - - // it is meta. What about dest? - outv, exists := out[k] - if !exists { - // the new value is meta, but there's no dest value. just write it - out[k] = v - continue - } - - outMetadataValue, err := Metadatify(outv) - if err != nil { - // the new value is meta and there's a dest value, but the dest - // value isn't meta. 
just overwrite - out[k] = v - continue - } - - // both are meta. merge them. - out[k] = DeepMerge(outMetadataValue, maybe) - } - return out -} - -// Loggable implements the Loggable interface. -func (m Metadata) Loggable() map[string]interface{} { - // NB: method defined on value to avoid de-referencing nil Metadata - return m -} - -// JsonString returns the marshaled JSON string for the metadata. -func (m Metadata) JsonString() (string, error) { - // NB: method defined on value - b, err := json.Marshal(m) - return string(b), err -} - -// Metadatify converts maps into Metadata. -func Metadatify(i interface{}) (Metadata, error) { - value := reflect.ValueOf(i) - if value.Kind() == reflect.Map { - m := map[string]interface{}{} - for _, k := range value.MapKeys() { - m[k.String()] = value.MapIndex(k).Interface() - } - return Metadata(m), nil - } - return nil, errors.New("is not a map") -} diff --git a/metadata_test.go b/metadata_test.go deleted file mode 100644 index c181451..0000000 --- a/metadata_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package log - -import "testing" - -func TestOverwrite(t *testing.T) { - t.Parallel() - - under := Metadata{ - "a": Metadata{ - "b": Metadata{ - "c": Metadata{ - "d": "the original value", - "other": "SURVIVE", - }, - }, - }, - } - over := Metadata{ - "a": Metadata{ - "b": Metadata{ - "c": Metadata{ - "d": "a new value", - }, - }, - }, - } - - out := DeepMerge(under, over) - - dval := out["a"].(Metadata)["b"].(Metadata)["c"].(Metadata)["d"].(string) - if dval != "a new value" { - t.Fatal(dval) - } - surv := out["a"].(Metadata)["b"].(Metadata)["c"].(Metadata)["other"].(string) - if surv != "SURVIVE" { - t.Fatal(surv) - } -} - -func TestMarshalJSON(t *testing.T) { - t.Parallel() - bs, _ := Metadata{"a": "b"}.JsonString() - t.Log(bs) -} - -func TestMetadataIsLoggable(t *testing.T) { - t.Parallel() - func(l Loggable) { - }(Metadata{}) -} diff --git a/oldlog.go b/setup.go similarity index 87% rename from oldlog.go rename to setup.go index 42ff121..adce3a4 100644 --- a/oldlog.go +++ b/setup.go @@ -7,11 +7,6 @@ import ( "regexp" "sync" - tracer "github.com/ipfs/go-log/tracer" - lwriter "github.com/ipfs/go-log/writer" - - opentrace "github.com/opentracing/opentracing-go" - "go.uber.org/zap" "go.uber.org/zap/zapcore" ) @@ -94,22 +89,7 @@ func SetupLogging() { } zapCfg.Level.SetLevel(zapcore.Level(lvl)) - // TracerPlugins are instantiated after this, so use loggable tracer - // by default, if a TracerPlugin is added it will override this - lgblRecorder := tracer.NewLoggableRecorder() - lgblTracer := tracer.New(lgblRecorder) - opentrace.SetGlobalTracer(lgblTracer) - SetAllLoggers(lvl) - - if tracingfp := os.Getenv(envTracingFile); len(tracingfp) > 0 { - f, err := os.Create(tracingfp) - if err != nil { - log.Error("failed to create tracing file: %s", tracingfp) - } else { - lwriter.WriterGroup.AddWriter(f) - } - } } // SetDebugLogging calls SetAllLoggers with logging.DEBUG diff --git a/tracer/LICENSE b/tracer/LICENSE deleted file mode 100644 index 148509a..0000000 --- a/tracer/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2016 The OpenTracing Authors - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is 
-furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/tracer/context.go b/tracer/context.go deleted file mode 100644 index f1ebf61..0000000 --- a/tracer/context.go +++ /dev/null @@ -1,42 +0,0 @@ -package loggabletracer - -// SpanContext holds the basic Span metadata. -type SpanContext struct { - // A probabilistically unique identifier for a [multi-span] trace. - TraceID uint64 - - // A probabilistically unique identifier for a span. - SpanID uint64 - - // Whether the trace is sampled. - Sampled bool - - // The span's associated baggage. - Baggage map[string]string // initialized on first use -} - -// ForeachBaggageItem belongs to the opentracing.SpanContext interface -func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { - for k, v := range c.Baggage { - if !handler(k, v) { - break - } - } -} - -// WithBaggageItem returns an entirely new loggabletracer SpanContext with the -// given key:value baggage pair set. -func (c SpanContext) WithBaggageItem(key, val string) SpanContext { - var newBaggage map[string]string - if c.Baggage == nil { - newBaggage = map[string]string{key: val} - } else { - newBaggage = make(map[string]string, len(c.Baggage)+1) - for k, v := range c.Baggage { - newBaggage[k] = v - } - newBaggage[key] = val - } - // Use positional parameters so the compiler will help catch new fields. - return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage} -} diff --git a/tracer/debug.go b/tracer/debug.go deleted file mode 100644 index 8c302b3..0000000 --- a/tracer/debug.go +++ /dev/null @@ -1,78 +0,0 @@ -package loggabletracer - -import ( - "bytes" - "fmt" - "runtime" - "strconv" - "sync" -) - -const debugGoroutineIDTag = "_initial_goroutine" - -type errAssertionFailed struct { - span *spanImpl - msg string -} - -// Error implements the error interface. -func (err *errAssertionFailed) Error() string { - return fmt.Sprintf("%s:\n%+v", err.msg, err.span) -} - -func (s *spanImpl) Lock() { - s.Mutex.Lock() - s.maybeAssertSanityLocked() -} - -func (s *spanImpl) maybeAssertSanityLocked() { - if s.tracer == nil { - s.Mutex.Unlock() - panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"}) - } - if s.tracer.options.DebugAssertSingleGoroutine { - startID := curGoroutineID() - curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64) - if !ok { - // This is likely invoked in the context of the SetTag which sets - // debugGoroutineTag. 
- return - } - if startID != curID { - s.Mutex.Unlock() - panic(&errAssertionFailed{ - span: s, - msg: fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID), - }) - } - } -} - -var goroutineSpace = []byte("goroutine ") -var littleBuf = sync.Pool{ - New: func() interface{} { - buf := make([]byte, 64) - return &buf - }, -} - -// Credit to @bradfitz: -// https://github.com/golang/net/blob/master/http2/gotrack.go#L51 -func curGoroutineID() uint64 { - bp := littleBuf.Get().(*[]byte) - defer littleBuf.Put(bp) - b := *bp - b = b[:runtime.Stack(b, false)] - // Parse the 4707 out of "goroutine 4707 [" - b = bytes.TrimPrefix(b, goroutineSpace) - i := bytes.IndexByte(b, ' ') - if i < 0 { - panic(fmt.Sprintf("No space found in %q", b)) - } - b = b[:i] - n, err := strconv.ParseUint(string(b), 10, 64) - if err != nil { - panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) - } - return n -} diff --git a/tracer/event.go b/tracer/event.go deleted file mode 100644 index 9dbcb76..0000000 --- a/tracer/event.go +++ /dev/null @@ -1,62 +0,0 @@ -package loggabletracer - -import "github.com/opentracing/opentracing-go" - -// A SpanEvent is emitted when a mutating command is called on a Span. -type SpanEvent interface{} - -// EventCreate is emitted when a Span is created. -type EventCreate struct{ OperationName string } - -// EventTag is received when SetTag is called. -type EventTag struct { - Key string - Value interface{} -} - -// EventBaggage is received when SetBaggageItem is called. -type EventBaggage struct { - Key, Value string -} - -// EventLogFields is received when LogFields or LogKV is called. -type EventLogFields opentracing.LogRecord - -// EventLog is received when Log (or one of its derivatives) is called. -// -// DEPRECATED -type EventLog opentracing.LogData - -// EventFinish is received when Finish is called. -type EventFinish RawSpan - -func (s *spanImpl) onCreate(opName string) { - if s.event != nil { - s.event(EventCreate{OperationName: opName}) - } -} -func (s *spanImpl) onTag(key string, value interface{}) { - if s.event != nil { - s.event(EventTag{Key: key, Value: value}) - } -} -func (s *spanImpl) onLog(ld opentracing.LogData) { - if s.event != nil { - s.event(EventLog(ld)) - } -} -func (s *spanImpl) onLogFields(lr opentracing.LogRecord) { - if s.event != nil { - s.event(EventLogFields(lr)) - } -} -func (s *spanImpl) onBaggage(key, value string) { - if s.event != nil { - s.event(EventBaggage{Key: key, Value: value}) - } -} -func (s *spanImpl) onFinish(sp RawSpan) { - if s.event != nil { - s.event(EventFinish(sp)) - } -} diff --git a/tracer/propagation.go b/tracer/propagation.go deleted file mode 100644 index bb21065..0000000 --- a/tracer/propagation.go +++ /dev/null @@ -1,61 +0,0 @@ -package loggabletracer - -import opentracing "github.com/opentracing/opentracing-go" - -type accessorPropagator struct { - tracer *LoggableTracer -} - -// DelegatingCarrier is a flexible carrier interface which can be implemented -// by types which have a means of storing the trace metadata and already know -// how to serialize themselves (for example, protocol buffers). 
-type DelegatingCarrier interface { - SetState(traceID, spanID uint64, sampled bool) - State() (traceID, spanID uint64, sampled bool) - SetBaggageItem(key, value string) - GetBaggage(func(key, value string)) -} - -func (p *accessorPropagator) Inject( - spanContext opentracing.SpanContext, - carrier interface{}, -) error { - dc, ok := carrier.(DelegatingCarrier) - if !ok || dc == nil { - return opentracing.ErrInvalidCarrier - } - sc, ok := spanContext.(SpanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - dc.SetState(sc.TraceID, sc.SpanID, sc.Sampled) - for k, v := range sc.Baggage { - dc.SetBaggageItem(k, v) - } - return nil -} - -func (p *accessorPropagator) Extract( - carrier interface{}, -) (opentracing.SpanContext, error) { - dc, ok := carrier.(DelegatingCarrier) - if !ok || dc == nil { - return nil, opentracing.ErrInvalidCarrier - } - - traceID, spanID, sampled := dc.State() - sc := SpanContext{ - TraceID: traceID, - SpanID: spanID, - Sampled: sampled, - Baggage: nil, - } - dc.GetBaggage(func(k, v string) { - if sc.Baggage == nil { - sc.Baggage = map[string]string{} - } - sc.Baggage[k] = v - }) - - return sc, nil -} diff --git a/tracer/propagation_ot.go b/tracer/propagation_ot.go deleted file mode 100644 index 4a4782e..0000000 --- a/tracer/propagation_ot.go +++ /dev/null @@ -1,180 +0,0 @@ -package loggabletracer - -import ( - "encoding/binary" - "io" - "strconv" - "strings" - - "github.com/gogo/protobuf/proto" - "github.com/ipfs/go-log/tracer/wire" - opentracing "github.com/opentracing/opentracing-go" -) - -type textMapPropagator struct { - tracer *LoggableTracer -} -type binaryPropagator struct { - tracer *LoggableTracer -} - -const ( - prefixTracerState = "ot-tracer-" - prefixBaggage = "ot-baggage-" - - tracerStateFieldCount = 3 - fieldNameTraceID = prefixTracerState + "traceid" - fieldNameSpanID = prefixTracerState + "spanid" - fieldNameSampled = prefixTracerState + "sampled" -) - -func (p *textMapPropagator) Inject( - spanContext opentracing.SpanContext, - opaqueCarrier interface{}, -) error { - sc, ok := spanContext.(SpanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - carrier, ok := opaqueCarrier.(opentracing.TextMapWriter) - if !ok { - return opentracing.ErrInvalidCarrier - } - carrier.Set(fieldNameTraceID, strconv.FormatUint(sc.TraceID, 16)) - carrier.Set(fieldNameSpanID, strconv.FormatUint(sc.SpanID, 16)) - carrier.Set(fieldNameSampled, strconv.FormatBool(sc.Sampled)) - - for k, v := range sc.Baggage { - carrier.Set(prefixBaggage+k, v) - } - return nil -} - -func (p *textMapPropagator) Extract( - opaqueCarrier interface{}, -) (opentracing.SpanContext, error) { - carrier, ok := opaqueCarrier.(opentracing.TextMapReader) - if !ok { - return nil, opentracing.ErrInvalidCarrier - } - requiredFieldCount := 0 - var traceID, spanID uint64 - var sampled bool - var err error - decodedBaggage := make(map[string]string) - err = carrier.ForeachKey(func(k, v string) error { - switch strings.ToLower(k) { - case fieldNameTraceID: - traceID, err = strconv.ParseUint(v, 16, 64) - if err != nil { - return opentracing.ErrSpanContextCorrupted - } - case fieldNameSpanID: - spanID, err = strconv.ParseUint(v, 16, 64) - if err != nil { - return opentracing.ErrSpanContextCorrupted - } - case fieldNameSampled: - sampled, err = strconv.ParseBool(v) - if err != nil { - return opentracing.ErrSpanContextCorrupted - } - default: - lowercaseK := strings.ToLower(k) - if strings.HasPrefix(lowercaseK, prefixBaggage) { - decodedBaggage[strings.TrimPrefix(lowercaseK, 
prefixBaggage)] = v - } - // Balance off the requiredFieldCount++ just below... - requiredFieldCount-- - } - requiredFieldCount++ - return nil - }) - if err != nil { - return nil, err - } - if requiredFieldCount < tracerStateFieldCount { - if requiredFieldCount == 0 { - return nil, opentracing.ErrSpanContextNotFound - } - return nil, opentracing.ErrSpanContextCorrupted - } - - return SpanContext{ - TraceID: traceID, - SpanID: spanID, - Sampled: sampled, - Baggage: decodedBaggage, - }, nil -} - -func (p *binaryPropagator) Inject( - spanContext opentracing.SpanContext, - opaqueCarrier interface{}, -) error { - sc, ok := spanContext.(SpanContext) - if !ok { - return opentracing.ErrInvalidSpanContext - } - carrier, ok := opaqueCarrier.(io.Writer) - if !ok { - return opentracing.ErrInvalidCarrier - } - - state := wire.TracerState{} - state.TraceId = sc.TraceID - state.SpanId = sc.SpanID - state.Sampled = sc.Sampled - state.BaggageItems = sc.Baggage - - b, err := proto.Marshal(&state) - if err != nil { - return err - } - - // Write the length of the marshalled binary to the writer. - length := uint32(len(b)) - if err := binary.Write(carrier, binary.BigEndian, &length); err != nil { - return err - } - - _, err = carrier.Write(b) - return err -} - -func (p *binaryPropagator) Extract( - opaqueCarrier interface{}, -) (opentracing.SpanContext, error) { - carrier, ok := opaqueCarrier.(io.Reader) - if !ok { - return nil, opentracing.ErrInvalidCarrier - } - - // Read the length of marshalled binary. io.ReadAll isn't that performant - // since it keeps resizing the underlying buffer as it encounters more bytes - // to read. By reading the length, we can allocate a fixed sized buf and read - // the exact amount of bytes into it. - var length uint32 - if err := binary.Read(carrier, binary.BigEndian, &length); err != nil { - return nil, opentracing.ErrSpanContextCorrupted - } - buf := make([]byte, length) - if n, err := carrier.Read(buf); err != nil { - if n > 0 { - return nil, opentracing.ErrSpanContextCorrupted - } - return nil, opentracing.ErrSpanContextNotFound - } - - ctx := wire.TracerState{} - if err := proto.Unmarshal(buf, &ctx); err != nil { - return nil, opentracing.ErrSpanContextCorrupted - } - - return SpanContext{ - TraceID: ctx.TraceId, - SpanID: ctx.SpanId, - Sampled: ctx.Sampled, - Baggage: ctx.BaggageItems, - }, nil -} diff --git a/tracer/raw.go b/tracer/raw.go deleted file mode 100644 index 7594542..0000000 --- a/tracer/raw.go +++ /dev/null @@ -1,34 +0,0 @@ -package loggabletracer - -import ( - "time" - - opentracing "github.com/opentracing/opentracing-go" -) - -// RawSpan encapsulates all state associated with a (finished) Span. -type RawSpan struct { - // Those recording the RawSpan should also record the contents of its - // SpanContext. - Context SpanContext - - // The SpanID of this SpanContext's first intra-trace reference (i.e., - // "parent"), or 0 if there is no parent. - ParentSpanID uint64 - - // The name of the "operation" this span is an instance of. (Called a "span - // name" in some implementations) - Operation string - - // We store rather than so that only - // one of the timestamps has global clock uncertainty issues. - Start time.Time - Duration time.Duration - - // Essentially an extension mechanism. Can be used for many purposes, - // not to be enumerated here. - Tags opentracing.Tags - - // The span's "microlog". 
- Logs []opentracing.LogRecord -} diff --git a/tracer/recorder.go b/tracer/recorder.go deleted file mode 100644 index f298d63..0000000 --- a/tracer/recorder.go +++ /dev/null @@ -1,103 +0,0 @@ -package loggabletracer - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "os" - "time" - - writer "github.com/ipfs/go-log/writer" - opentrace "github.com/opentracing/opentracing-go" -) - -// A SpanRecorder handles all of the `RawSpan` data generated via an -// associated `Tracer` (see `NewStandardTracer`) instance. It also names -// the containing process and provides access to a straightforward tag map. -type SpanRecorder interface { - // Implementations must determine whether and where to store `span`. - RecordSpan(span RawSpan) -} - -type LoggableSpanRecorder struct{} - -// NewLoggableRecorder creates new LoggableSpanRecorder -func NewLoggableRecorder() *LoggableSpanRecorder { - return new(LoggableSpanRecorder) -} - -// Loggable Representation of a span, treated as an event log -type LoggableSpan struct { - TraceID uint64 `json:"TraceID"` - SpanID uint64 `json:"SpanID"` - ParentSpanID uint64 `json:"ParentSpanID"` - Operation string `json:"Operation"` - Start time.Time `json:"Start"` - Duration time.Duration `json:"Duration"` - Tags opentrace.Tags `json:"Tags"` - Logs []SpanLog `json:"Logs"` -} - -type SpanLog struct { - Timestamp time.Time `json:"Timestamp"` - Field []SpanField `json:"Fields"` -} - -type SpanField struct { - Key string `json:"Key"` - Value string `json:"Value"` -} - -// RecordSpan implements the respective method of SpanRecorder. -func (r *LoggableSpanRecorder) RecordSpan(span RawSpan) { - // short circuit if theres nothing to write to - if !writer.WriterGroup.Active() { - return - } - - sl := make([]SpanLog, len(span.Logs)) - for i := range span.Logs { - sl[i].Timestamp = span.Logs[i].Timestamp - sf := make([]SpanField, len(span.Logs[i].Fields)) - sl[i].Field = sf - for j := range span.Logs[i].Fields { - sf[j].Key = span.Logs[i].Fields[j].Key() - sf[j].Value = fmt.Sprint(span.Logs[i].Fields[j].Value()) - } - } - - tags := make(map[string]interface{}, len(span.Tags)) - for k, v := range span.Tags { - switch vt := v.(type) { - case bool, string, int, int8, int16, int32, int64, uint, uint8, uint16, uint64: - tags[k] = v - case []byte: - base64.StdEncoding.EncodeToString(vt) - default: - tags[k] = fmt.Sprint(v) - } - } - - spanlog := &LoggableSpan{ - TraceID: span.Context.TraceID, - SpanID: span.Context.SpanID, - ParentSpanID: span.ParentSpanID, - Operation: span.Operation, - Start: span.Start, - Duration: span.Duration, - Tags: tags, - Logs: sl, - } - - var buf bytes.Buffer - encoder := json.NewEncoder(&buf) - encoder.SetEscapeHTML(false) - err := encoder.Encode(spanlog) - if err != nil { - fmt.Fprintf(os.Stderr, "ERROR FORMATTING SPAN ENTRY: %s\n", err) - return - } - - writer.WriterGroup.Write(buf.Bytes()) -} diff --git a/tracer/recorder_test.go b/tracer/recorder_test.go deleted file mode 100644 index 94df8a3..0000000 --- a/tracer/recorder_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package loggabletracer - -import ( - "encoding/json" - "io" - "testing" - "time" - - writer "github.com/ipfs/go-log/writer" - opentrace "github.com/opentracing/opentracing-go" -) - -func assertEqual(t *testing.T, expected interface{}, actual interface{}) { - if expected != actual { - t.Fatalf("%s != %s", expected, actual) - } -} - -func TestSpanRecorder(t *testing.T) { - // Set up a writer to send spans to - pr, pw := io.Pipe() - writer.WriterGroup.AddWriter(pw) - - // 
create a span recorder - recorder := NewLoggableRecorder() - - // generate a span - var apiRecorder SpanRecorder = recorder - rt := opentrace.Tags{ - "key": "value", - } - rs := RawSpan{ - Context: SpanContext{}, - Operation: "test-span", - Start: time.Now(), - Duration: -1, - Tags: rt, - } - - // record the span - apiRecorder.RecordSpan(rs) - - // decode the LoggableSpan from - var ls LoggableSpan - evtDecoder := json.NewDecoder(pr) - evtDecoder.Decode(&ls) - - // validate - assertEqual(t, rs.Operation, ls.Operation) - assertEqual(t, rs.Duration, ls.Duration) - assertEqual(t, rs.Start.Nanosecond(), ls.Start.Nanosecond()) - assertEqual(t, rs.Tags["key"], ls.Tags["key"]) - -} diff --git a/tracer/span.go b/tracer/span.go deleted file mode 100644 index a23a57c..0000000 --- a/tracer/span.go +++ /dev/null @@ -1,274 +0,0 @@ -package loggabletracer - -import ( - "sync" - "time" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" -) - -// Span provides access to the essential details of the span, for use -// by loggabletracer consumers. These methods may only be called prior -// to (*opentracing.Span).Finish(). -type Span interface { - opentracing.Span - - // Operation names the work done by this span instance - Operation() string - - // Start indicates when the span began - Start() time.Time -} - -// Implements the `Span` interface. Created via LoggableTracer (see -// `loggabletracer.New()`). -type spanImpl struct { - tracer *LoggableTracer - event func(SpanEvent) - sync.Mutex // protects the fields below - raw RawSpan - // The number of logs dropped because of MaxLogsPerSpan. - numDroppedLogs int -} - -var spanPool = &sync.Pool{New: func() interface{} { - return &spanImpl{} -}} - -func (s *spanImpl) reset() { - s.tracer, s.event = nil, nil - // Note: Would like to do the following, but then the consumer of RawSpan - // (the recorder) needs to make sure that they're not holding on to the - // baggage or logs when they return (i.e. they need to copy if they care): - // - // logs, baggage := s.raw.Logs[:0], s.raw.Baggage - // for k := range baggage { - // delete(baggage, k) - // } - // s.raw.Logs, s.raw.Baggage = logs, baggage - // - // That's likely too much to ask for. But there is some magic we should - // be able to do with `runtime.SetFinalizer` to reclaim that memory into - // a buffer pool when GC considers them unreachable, which should ease - // some of the load. Hard to say how quickly that would be in practice - // though. - s.raw = RawSpan{ - Context: SpanContext{}, - } -} - -func (s *spanImpl) SetOperationName(operationName string) opentracing.Span { - s.Lock() - defer s.Unlock() - s.raw.Operation = operationName - return s -} - -func (s *spanImpl) trim() bool { - return !s.raw.Context.Sampled && s.tracer.options.TrimUnsampledSpans -} - -func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span { - defer s.onTag(key, value) - s.Lock() - defer s.Unlock() - if key == string(ext.SamplingPriority) { - if v, ok := value.(uint16); ok { - s.raw.Context.Sampled = v != 0 - return s - } - } - if s.trim() { - return s - } - - if s.raw.Tags == nil { - s.raw.Tags = opentracing.Tags{} - } - s.raw.Tags[key] = value - return s -} - -func (s *spanImpl) LogKV(keyValues ...interface{}) { - fields, err := log.InterleavedKVToFields(keyValues...) - if err != nil { - s.LogFields(log.Error(err), log.String("function", "LogKV")) - return - } - s.LogFields(fields...) 
-} - -func (s *spanImpl) appendLog(lr opentracing.LogRecord) { - maxLogs := s.tracer.options.MaxLogsPerSpan - if maxLogs == 0 || len(s.raw.Logs) < maxLogs { - s.raw.Logs = append(s.raw.Logs, lr) - return - } - - // We have too many logs. We don't touch the first numOld logs; we treat the - // rest as a circular buffer and overwrite the oldest log among those. - numOld := (maxLogs - 1) / 2 - numNew := maxLogs - numOld - s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr - s.numDroppedLogs++ -} - -func (s *spanImpl) LogFields(fields ...log.Field) { - lr := opentracing.LogRecord{ - Fields: fields, - } - defer s.onLogFields(lr) - s.Lock() - defer s.Unlock() - if s.trim() || s.tracer.options.DropAllLogs { - return - } - if lr.Timestamp.IsZero() { - lr.Timestamp = time.Now() - } - s.appendLog(lr) -} - -func (s *spanImpl) LogEvent(event string) { - s.Log(opentracing.LogData{ - Event: event, - }) -} - -func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) { - s.Log(opentracing.LogData{ - Event: event, - Payload: payload, - }) -} - -func (s *spanImpl) Log(ld opentracing.LogData) { - defer s.onLog(ld) - s.Lock() - defer s.Unlock() - if s.trim() || s.tracer.options.DropAllLogs { - return - } - - if ld.Timestamp.IsZero() { - ld.Timestamp = time.Now() - } - - s.appendLog(ld.ToLogRecord()) -} - -func (s *spanImpl) Finish() { - s.FinishWithOptions(opentracing.FinishOptions{}) -} - -// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at -// the end (i.e. pos circular left shifts). -func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { - // This algorithm is described in: - // http://www.cplusplus.com/reference/algorithm/rotate - for first, middle, next := 0, pos, pos; first != middle; { - buf[first], buf[next] = buf[next], buf[first] - first++ - next++ - if next == len(buf) { - next = middle - } else if first == middle { - middle = next - } - } -} - -func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) { - finishTime := opts.FinishTime - if finishTime.IsZero() { - finishTime = time.Now() - } - duration := finishTime.Sub(s.raw.Start) - - s.Lock() - defer s.Unlock() - - for _, lr := range opts.LogRecords { - s.appendLog(lr) - } - for _, ld := range opts.BulkLogData { - s.appendLog(ld.ToLogRecord()) - } - - if s.numDroppedLogs > 0 { - // We dropped some log events, which means that we used part of Logs as a - // circular buffer (see appendLog). De-circularize it. - numOld := (len(s.raw.Logs) - 1) / 2 - numNew := len(s.raw.Logs) - numOld - rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew) - - // Replace the log in the middle (the oldest "new" log) with information - // about the dropped logs. This means that we are effectively dropping one - // more "new" log. - numDropped := s.numDroppedLogs + 1 - s.raw.Logs[numOld] = opentracing.LogRecord{ - // Keep the timestamp of the last dropped event. - Timestamp: s.raw.Logs[numOld].Timestamp, - Fields: []log.Field{ - log.String("event", "dropped Span logs"), - log.Int("dropped_log_count", numDropped), - log.String("component", "loggabletracer"), - }, - } - } - - s.raw.Duration = duration - - s.onFinish(s.raw) - s.tracer.options.Recorder.RecordSpan(s.raw) - - // Last chance to get options before the span is possibly reset. - poolEnabled := s.tracer.options.EnableSpanPool - if s.tracer.options.DebugAssertUseAfterFinish { - // This makes it much more likely to catch a panic on any subsequent - // operation since s.tracer is accessed on every call to `Lock`. 
- // We don't call `reset()` here to preserve the logs in the Span - // which are printed when the assertion triggers. - s.tracer = nil - } - - if poolEnabled { - spanPool.Put(s) - } -} - -func (s *spanImpl) Tracer() opentracing.Tracer { - return s.tracer -} - -func (s *spanImpl) Context() opentracing.SpanContext { - return s.raw.Context -} - -func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span { - s.onBaggage(key, val) - if s.trim() { - return s - } - - s.Lock() - defer s.Unlock() - s.raw.Context = s.raw.Context.WithBaggageItem(key, val) - return s -} - -func (s *spanImpl) BaggageItem(key string) string { - s.Lock() - defer s.Unlock() - return s.raw.Context.Baggage[key] -} - -func (s *spanImpl) Operation() string { - return s.raw.Operation -} - -func (s *spanImpl) Start() time.Time { - return s.raw.Start -} diff --git a/tracer/tracer.go b/tracer/tracer.go deleted file mode 100644 index a6ea3a2..0000000 --- a/tracer/tracer.go +++ /dev/null @@ -1,280 +0,0 @@ -package loggabletracer - -import ( - "time" - - writer "github.com/ipfs/go-log/writer" - opentracing "github.com/opentracing/opentracing-go" -) - -// Tracer extends the opentracing.Tracer interface with methods to -// probe implementation state, for use by loggabletracer consumers. -type Tracer interface { - opentracing.Tracer - - // Options gets the Options used in New() or NewWithOptions(). - Options() Options -} - -// Options allows creating a customized Tracer via NewWithOptions. The object -// must not be updated when there is an active tracer using it. -type Options struct { - // ShouldSample is a function which is called when creating a new Span and - // determines whether that Span is sampled. The randomized TraceID is supplied - // to allow deterministic sampling decisions to be made across different nodes. - // For example, - // - // func(traceID uint64) { return traceID % 64 == 0 } - // - // samples every 64th trace on average. - ShouldSample func(traceID uint64) bool - // TrimUnsampledSpans turns potentially expensive operations on unsampled - // Spans into no-ops. More precisely, tags and log events are silently - // discarded. If NewSpanEventListener is set, the callbacks will still fire. - TrimUnsampledSpans bool - // Recorder receives Spans which have been finished. - Recorder SpanRecorder - // NewSpanEventListener can be used to enhance the tracer by effectively - // attaching external code to trace events. See NetTraceIntegrator for a - // practical example, and event.go for the list of possible events. - NewSpanEventListener func() func(SpanEvent) - // DropAllLogs turns log events on all Spans into no-ops. - // If NewSpanEventListener is set, the callbacks will still fire. - DropAllLogs bool - // MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero - // value). If a span has more logs than this value, logs are dropped as - // necessary (and replaced with a log describing how many were dropped). - // - // About half of the MaxLogPerSpan logs kept are the oldest logs, and about - // half are the newest logs. - // - // If NewSpanEventListener is set, the callbacks will still fire for all log - // events. This value is ignored if DropAllLogs is true. - MaxLogsPerSpan int - // DebugAssertSingleGoroutine internally records the ID of the goroutine - // creating each Span and verifies that no operation is carried out on - // it on a different goroutine. - // Provided strictly for development purposes. 
- // Passing Spans between goroutine without proper synchronization often - // results in use-after-Finish() errors. For a simple example, consider the - // following pseudocode: - // - // func (s *Server) Handle(req http.Request) error { - // sp := s.StartSpan("server") - // defer sp.Finish() - // wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req) - // select { - // case resp := <-wait: - // return resp.Error - // case <-time.After(10*time.Second): - // sp.LogEvent("timed out waiting for processing") - // return ErrTimedOut - // } - // } - // - // This looks reasonable at first, but a request which spends more than ten - // seconds in the queue is abandoned by the main goroutine and its trace - // finished, leading to use-after-finish when the request is finally - // processed. Note also that even joining on to a finished Span via - // StartSpanWithOptions constitutes an illegal operation. - // - // Code bases which do not require (or decide they do not want) Spans to - // be passed across goroutine boundaries can run with this flag enabled in - // tests to increase their chances of spotting wrong-doers. - DebugAssertSingleGoroutine bool - // DebugAssertUseAfterFinish is provided strictly for development purposes. - // When set, it attempts to exacerbate issues emanating from use of Spans - // after calling Finish by running additional assertions. - DebugAssertUseAfterFinish bool - // EnableSpanPool enables the use of a pool, so that the tracer reuses spans - // after Finish has been called on it. Adds a slight performance gain as it - // reduces allocations. However, if you have any use-after-finish race - // conditions the code may panic. - EnableSpanPool bool -} - -// DefaultOptions returns an Options object with a 1 in 64 sampling rate and -// all options disabled. A Recorder needs to be set manually before using the -// returned object with a Tracer. -func DefaultOptions() Options { - return Options{ - ShouldSample: func(traceID uint64) bool { return traceID%64 == 0 }, - MaxLogsPerSpan: 100, - } -} - -// NewWithOptions creates a customized Tracer. -func NewWithOptions(opts Options) opentracing.Tracer { - rval := &LoggableTracer{options: opts} - rval.accessorPropagator = &accessorPropagator{rval} - return rval -} - -// New creates and returns a standard Tracer which defers completed Spans to -// `recorder`. -// Spans created by this Tracer support the ext.SamplingPriority tag: Setting -// ext.SamplingPriority causes the Span to be Sampled from that point on. -func New(recorder SpanRecorder) opentracing.Tracer { - opts := DefaultOptions() - opts.Recorder = recorder - return NewWithOptions(opts) -} - -// Implements the `Tracer` interface. 
-type LoggableTracer struct { - options Options - textPropagator *textMapPropagator - binaryPropagator *binaryPropagator - accessorPropagator *accessorPropagator -} - -func (t *LoggableTracer) StartSpan( - operationName string, - opts ...opentracing.StartSpanOption, -) opentracing.Span { - - if !writer.WriterGroup.Active() { - return opentracing.NoopTracer.StartSpan(opentracing.NoopTracer{}, operationName) - } - - sso := opentracing.StartSpanOptions{} - for _, o := range opts { - o.Apply(&sso) - } - return t.StartSpanWithOptions(operationName, sso) -} - -func (t *LoggableTracer) getSpan() *spanImpl { - if t.options.EnableSpanPool { - sp := spanPool.Get().(*spanImpl) - sp.reset() - return sp - } - return &spanImpl{} -} - -func (t *LoggableTracer) StartSpanWithOptions( - operationName string, - opts opentracing.StartSpanOptions, -) opentracing.Span { - if !writer.WriterGroup.Active() { - return opentracing.NoopTracer.StartSpan(opentracing.NoopTracer{}, operationName) - } - // Start time. - startTime := opts.StartTime - if startTime.IsZero() { - startTime = time.Now() - } - - // Tags. - tags := opts.Tags - - // Build the new span. This is the only allocation: We'll return this as - // an opentracing.Span. - sp := t.getSpan() - // Look for a parent in the list of References. - // - // TODO: would be nice if loggabletracer did something with all - // References, not just the first one. -ReferencesLoop: - for _, ref := range opts.References { - switch ref.Type { - case opentracing.ChildOfRef, - opentracing.FollowsFromRef: - - refCtx, ok := ref.ReferencedContext.(SpanContext) - if !ok { - // Could be a noopSpanContext - // Ignore that parent. - continue - } - sp.raw.Context.TraceID = refCtx.TraceID - sp.raw.Context.SpanID = randomID() - sp.raw.Context.Sampled = refCtx.Sampled - sp.raw.ParentSpanID = refCtx.SpanID - - if l := len(refCtx.Baggage); l > 0 { - sp.raw.Context.Baggage = make(map[string]string, l) - for k, v := range refCtx.Baggage { - sp.raw.Context.Baggage[k] = v - } - } - break ReferencesLoop - } - } - if sp.raw.Context.TraceID == 0 { - // No parent Span found; allocate new trace and span ids and determine - // the Sampled status. - sp.raw.Context.TraceID, sp.raw.Context.SpanID = randomID2() - sp.raw.Context.Sampled = t.options.ShouldSample(sp.raw.Context.TraceID) - } - - return t.startSpanInternal( - sp, - operationName, - startTime, - tags, - ) -} - -func (t *LoggableTracer) startSpanInternal( - sp *spanImpl, - operationName string, - startTime time.Time, - tags opentracing.Tags, -) opentracing.Span { - sp.tracer = t - if t.options.NewSpanEventListener != nil { - sp.event = t.options.NewSpanEventListener() - } - sp.raw.Operation = operationName - sp.raw.Start = startTime - sp.raw.Duration = -1 - sp.raw.Tags = tags - if t.options.DebugAssertSingleGoroutine { - sp.SetTag(debugGoroutineIDTag, curGoroutineID()) - } - defer sp.onCreate(operationName) - return sp -} - -type delegatorType struct{} - -// Delegator is the format to use for DelegatingCarrier. 
-var Delegator delegatorType - -func (t *LoggableTracer) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error { - if !writer.WriterGroup.Active() { - return opentracing.NoopTracer.Inject(opentracing.NoopTracer{}, sc, format, carrier) - } - switch format { - case opentracing.TextMap, opentracing.HTTPHeaders: - return t.textPropagator.Inject(sc, carrier) - case opentracing.Binary: - return t.binaryPropagator.Inject(sc, carrier) - } - if _, ok := format.(delegatorType); ok { - return t.accessorPropagator.Inject(sc, carrier) - } - return opentracing.ErrUnsupportedFormat -} - -func (t *LoggableTracer) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { - if !writer.WriterGroup.Active() { - return opentracing.NoopTracer.Extract(opentracing.NoopTracer{}, format, carrier) - } - switch format { - case opentracing.TextMap, opentracing.HTTPHeaders: - return t.textPropagator.Extract(carrier) - case opentracing.Binary: - return t.binaryPropagator.Extract(carrier) - } - if _, ok := format.(delegatorType); ok { - return t.accessorPropagator.Extract(carrier) - } - return nil, opentracing.ErrUnsupportedFormat -} - -func (t *LoggableTracer) Options() Options { - return t.options -} diff --git a/tracer/util.go b/tracer/util.go deleted file mode 100644 index 279e2ac..0000000 --- a/tracer/util.go +++ /dev/null @@ -1,25 +0,0 @@ -package loggabletracer - -import ( - "math/rand" - "sync" - "time" -) - -var ( - seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) - // The golang rand generators are *not* intrinsically thread-safe. - seededIDLock sync.Mutex -) - -func randomID() uint64 { - seededIDLock.Lock() - defer seededIDLock.Unlock() - return uint64(seededIDGen.Int63()) -} - -func randomID2() (uint64, uint64) { - seededIDLock.Lock() - defer seededIDLock.Unlock() - return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) -} diff --git a/tracer/wire/Makefile b/tracer/wire/Makefile deleted file mode 100644 index 8677a37..0000000 --- a/tracer/wire/Makefile +++ /dev/null @@ -1,6 +0,0 @@ -pbgos := $(patsubst %.proto,%.pb.go,$(wildcard *.proto)) - -all: $(pbgos) - -%.pb.go: %.proto - protoc --gogofaster_out=. --proto_path=$(GOPATH)/src:. $< diff --git a/tracer/wire/carrier.go b/tracer/wire/carrier.go deleted file mode 100644 index 12ec98e..0000000 --- a/tracer/wire/carrier.go +++ /dev/null @@ -1,40 +0,0 @@ -package wire - -// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the -// the underlying datastructure. The reason for implementing DelagatingCarrier -// is to allow for end users to serialize the underlying protocol buffers using -// jsonpb or any other serialization forms they want. -type ProtobufCarrier TracerState - -// SetState set's the tracer state. -func (p *ProtobufCarrier) SetState(traceID, spanID uint64, sampled bool) { - p.TraceId = traceID - p.SpanId = spanID - p.Sampled = sampled -} - -// State returns the tracer state. -func (p *ProtobufCarrier) State() (traceID, spanID uint64, sampled bool) { - traceID = p.TraceId - spanID = p.SpanId - sampled = p.Sampled - return traceID, spanID, sampled -} - -// SetBaggageItem sets a baggage item. -func (p *ProtobufCarrier) SetBaggageItem(key, value string) { - if p.BaggageItems == nil { - p.BaggageItems = map[string]string{key: value} - return - } - - p.BaggageItems[key] = value -} - -// GetBaggage iterates over each baggage item and executes the callback with -// the key:value pair. 
-func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) { - for k, v := range p.BaggageItems { - f(k, v) - } -} diff --git a/tracer/wire/carrier_test.go b/tracer/wire/carrier_test.go deleted file mode 100644 index a29a821..0000000 --- a/tracer/wire/carrier_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package wire_test - -import ( - "testing" - - tracer "github.com/ipfs/go-log/tracer" - "github.com/ipfs/go-log/tracer/wire" -) - -func TestProtobufCarrier(t *testing.T) { - var carrier tracer.DelegatingCarrier = &wire.ProtobufCarrier{} - - var traceID, spanID uint64 = 1, 2 - sampled := true - baggageKey, expVal := "key1", "val1" - - carrier.SetState(traceID, spanID, sampled) - carrier.SetBaggageItem(baggageKey, expVal) - gotTraceID, gotSpanID, gotSampled := carrier.State() - if traceID != gotTraceID || spanID != gotSpanID || sampled != gotSampled { - t.Errorf("Wanted state %d %d %t, got %d %d %t", spanID, traceID, sampled, - gotTraceID, gotSpanID, gotSampled) - } - - gotBaggage := map[string]string{} - f := func(k, v string) { - gotBaggage[k] = v - } - - carrier.GetBaggage(f) - value, ok := gotBaggage[baggageKey] - if !ok { - t.Errorf("Expected baggage item %s to exist", baggageKey) - } - if value != expVal { - t.Errorf("Expected key %s to be %s, got %s", baggageKey, expVal, value) - } -} diff --git a/tracer/wire/gen.go b/tracer/wire/gen.go deleted file mode 100644 index 7d951fa..0000000 --- a/tracer/wire/gen.go +++ /dev/null @@ -1,6 +0,0 @@ -package wire - -//go:generate protoc --gogofaster_out=$GOPATH/src/github.com/ipfs/go-log/tracer/wire wire.proto - -// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the -// gogofaster generator binary. diff --git a/tracer/wire/wire.pb.go b/tracer/wire/wire.pb.go deleted file mode 100644 index 2399892..0000000 --- a/tracer/wire/wire.pb.go +++ /dev/null @@ -1,554 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: wire.proto - -package wire - -import ( - encoding_binary "encoding/binary" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - io "io" - math "math" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type TracerState struct { - TraceId uint64 `protobuf:"fixed64,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` - SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` - Sampled bool `protobuf:"varint,3,opt,name=sampled,proto3" json:"sampled,omitempty"` - BaggageItems map[string]string `protobuf:"bytes,4,rep,name=baggage_items,json=baggageItems,proto3" json:"baggage_items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` -} - -func (m *TracerState) Reset() { *m = TracerState{} } -func (m *TracerState) String() string { return proto.CompactTextString(m) } -func (*TracerState) ProtoMessage() {} -func (*TracerState) Descriptor() ([]byte, []int) { - return fileDescriptor_f2dcdddcdf68d8e0, []int{0} -} -func (m *TracerState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TracerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_TracerState.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *TracerState) XXX_Merge(src proto.Message) { - xxx_messageInfo_TracerState.Merge(m, src) -} -func (m *TracerState) XXX_Size() int { - return m.Size() -} -func (m *TracerState) XXX_DiscardUnknown() { - xxx_messageInfo_TracerState.DiscardUnknown(m) -} - -var xxx_messageInfo_TracerState proto.InternalMessageInfo - -func (m *TracerState) GetTraceId() uint64 { - if m != nil { - return m.TraceId - } - return 0 -} - -func (m *TracerState) GetSpanId() uint64 { - if m != nil { - return m.SpanId - } - return 0 -} - -func (m *TracerState) GetSampled() bool { - if m != nil { - return m.Sampled - } - return false -} - -func (m *TracerState) GetBaggageItems() map[string]string { - if m != nil { - return m.BaggageItems - } - return nil -} - -func init() { - proto.RegisterType((*TracerState)(nil), "loggabletracer.wire.TracerState") - proto.RegisterMapType((map[string]string)(nil), "loggabletracer.wire.TracerState.BaggageItemsEntry") -} - -func init() { proto.RegisterFile("wire.proto", fileDescriptor_f2dcdddcdf68d8e0) } - -var fileDescriptor_f2dcdddcdf68d8e0 = []byte{ - // 250 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, - 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xce, 0xc9, 0x4f, 0x4f, 0x4f, 0x4c, 0xca, 0x49, - 0x2d, 0x29, 0x4a, 0x4c, 0x4e, 0x2d, 0xd2, 0x03, 0x49, 0x29, 0x7d, 0x65, 0xe4, 0xe2, 0x0e, 0x01, - 0xf3, 0x83, 0x4b, 0x12, 0x4b, 0x52, 0x85, 0x24, 0xb9, 0x38, 0xc0, 0xd2, 0xf1, 0x99, 0x29, 0x12, - 0x8c, 0x0a, 0x8c, 0x1a, 0x6c, 0x41, 0xec, 0x60, 0xbe, 0x67, 0x8a, 0x90, 0x38, 0x17, 0x7b, 0x71, - 0x41, 0x62, 0x1e, 0x48, 0x86, 0x09, 0x2c, 0xc3, 0x06, 0xe2, 0x7a, 0xa6, 0x08, 0x49, 0x70, 0xb1, - 0x17, 0x27, 0xe6, 0x16, 0xe4, 0xa4, 0xa6, 0x48, 0x30, 0x2b, 0x30, 0x6a, 0x70, 0x04, 0xc1, 0xb8, - 0x42, 0xe1, 0x5c, 0xbc, 0x49, 0x89, 0xe9, 0xe9, 0x89, 0xe9, 0xa9, 0xf1, 0x99, 0x25, 0xa9, 0xb9, - 0xc5, 0x12, 0x2c, 0x0a, 0xcc, 0x1a, 0xdc, 0x46, 0x46, 0x7a, 0x58, 0x9c, 0xa2, 0x87, 0xe4, 0x0c, - 0x3d, 0x27, 0x88, 0x2e, 0x4f, 0x90, 0x26, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x9e, 0x24, 0x24, - 0x21, 0x29, 0x7b, 0x2e, 0x41, 0x0c, 0x25, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x60, 0x67, - 0x73, 0x06, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 
0xa9, 0x60, 0x07, 0x73, - 0x06, 0x41, 0x38, 0x56, 0x4c, 0x16, 0x8c, 0x4e, 0x72, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, - 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, - 0x2c, 0xc7, 0x10, 0xc5, 0x02, 0x72, 0x4c, 0x12, 0x1b, 0x38, 0xcc, 0x8c, 0x01, 0x01, 0x00, 0x00, - 0xff, 0xff, 0xe4, 0x48, 0xf4, 0xf8, 0x41, 0x01, 0x00, 0x00, -} - -func (m *TracerState) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TracerState) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TraceId != 0 { - dAtA[i] = 0x9 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.TraceId)) - i += 8 - } - if m.SpanId != 0 { - dAtA[i] = 0x11 - i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(m.SpanId)) - i += 8 - } - if m.Sampled { - dAtA[i] = 0x18 - i++ - if m.Sampled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.BaggageItems) > 0 { - for k, _ := range m.BaggageItems { - dAtA[i] = 0x22 - i++ - v := m.BaggageItems[k] - mapSize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) - i = encodeVarintWire(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintWire(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintWire(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func encodeVarintWire(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *TracerState) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.TraceId != 0 { - n += 9 - } - if m.SpanId != 0 { - n += 9 - } - if m.Sampled { - n += 2 - } - if len(m.BaggageItems) > 0 { - for k, v := range m.BaggageItems { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) - n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize)) - } - } - return n -} - -func sovWire(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozWire(x uint64) (n int) { - return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *TracerState) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TracerState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TracerState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) - } - m.TraceId = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.TraceId = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 2: - if wireType != 1 { - return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) - } - m.SpanId = 0 - if (iNdEx + 8) > l { - return io.ErrUnexpectedEOF - } - m.SpanId = 
uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) - iNdEx += 8 - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sampled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Sampled = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BaggageItems", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthWire - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthWire - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BaggageItems == nil { - m.BaggageItems = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthWire - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthWire - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowWire - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthWire - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthWire - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipWire(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.BaggageItems[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipWire(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthWire - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func 
skipWire(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthWire - } - iNdEx += length - if iNdEx < 0 { - return 0, ErrInvalidLengthWire - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowWire - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipWire(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - if iNdEx < 0 { - return 0, ErrInvalidLengthWire - } - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") -) diff --git a/tracer/wire/wire.proto b/tracer/wire/wire.proto deleted file mode 100644 index 496fa19..0000000 --- a/tracer/wire/wire.proto +++ /dev/null @@ -1,10 +0,0 @@ -syntax = "proto3"; -package loggabletracer.wire; -option go_package = "wire"; - -message TracerState { - fixed64 trace_id = 1; - fixed64 span_id = 2; - bool sampled = 3; - map baggage_items = 4; -} diff --git a/writer/option.go b/writer/option.go deleted file mode 100644 index b65d3a0..0000000 --- a/writer/option.go +++ /dev/null @@ -1,4 +0,0 @@ -package log - -// WriterGroup is the global writer group for logs to output to -var WriterGroup = NewMirrorWriter() diff --git a/writer/writer.go b/writer/writer.go deleted file mode 100644 index ec5d729..0000000 --- a/writer/writer.go +++ /dev/null @@ -1,258 +0,0 @@ -package log - -import ( - "fmt" - "io" - "sync" - "sync/atomic" -) - -// MaxWriterBuffer specifies how big the writer buffer can get before -// killing the writer. -var MaxWriterBuffer = 512 * 1024 - -// MirrorWriter implements a WriteCloser which syncs incoming bytes to multiple -// [buffered] WriteClosers. They can be added with AddWriter(). -type MirrorWriter struct { - active uint32 - - // channel for incoming writers - writerAdd chan *writerAdd - - // slices of writer/sync-channel pairs - writers []*bufWriter - - // synchronization channel for incoming writes - msgSync chan []byte -} - -type writerSync struct { - w io.WriteCloser - br chan []byte -} - -// NewMirrorWriter initializes and returns a MirrorWriter. 
-func NewMirrorWriter() *MirrorWriter { - mw := &MirrorWriter{ - msgSync: make(chan []byte, 64), // sufficiently large buffer to avoid callers waiting - writerAdd: make(chan *writerAdd), - } - - go mw.logRoutine() - - return mw -} - -// Write broadcasts the written bytes to all Writers. -func (mw *MirrorWriter) Write(b []byte) (int, error) { - mycopy := make([]byte, len(b)) - copy(mycopy, b) - mw.msgSync <- mycopy - return len(b), nil -} - -// Close closes the MirrorWriter -func (mw *MirrorWriter) Close() error { - // it is up to the caller to ensure that write is not called during or - // after close is called. - close(mw.msgSync) - return nil -} - -func (mw *MirrorWriter) doClose() { - for _, w := range mw.writers { - w.writer.Close() - } -} - -func (mw *MirrorWriter) logRoutine() { - // rebind to avoid races on nilling out struct fields - msgSync := mw.msgSync - writerAdd := mw.writerAdd - - defer mw.doClose() - - for { - select { - case b, ok := <-msgSync: - if !ok { - return - } - - // write to all writers - dropped := mw.broadcastMessage(b) - - // consolidate the slice - if dropped { - mw.clearDeadWriters() - } - case wa := <-writerAdd: - mw.writers = append(mw.writers, newBufWriter(wa.w)) - - atomic.StoreUint32(&mw.active, 1) - close(wa.done) - } - } -} - -// broadcastMessage sends the given message to every writer -// if any writer is killed during the send, 'true' is returned -func (mw *MirrorWriter) broadcastMessage(b []byte) bool { - var dropped bool - for i, w := range mw.writers { - _, err := w.Write(b) - if err != nil { - mw.writers[i] = nil - dropped = true - } - } - return dropped -} - -func (mw *MirrorWriter) clearDeadWriters() { - writers := mw.writers - mw.writers = nil - for _, w := range writers { - if w != nil { - mw.writers = append(mw.writers, w) - } - } - if len(mw.writers) == 0 { - atomic.StoreUint32(&mw.active, 0) - } -} - -type writerAdd struct { - w io.WriteCloser - done chan struct{} -} - -// AddWriter attaches a new WriteCloser to this MirrorWriter. -// The new writer will start getting any bytes written to the mirror. 
-func (mw *MirrorWriter) AddWriter(w io.WriteCloser) { - wa := &writerAdd{ - w: w, - done: make(chan struct{}), - } - mw.writerAdd <- wa - <-wa.done -} - -// Active returns if there is at least one Writer -// attached to this MirrorWriter -func (mw *MirrorWriter) Active() (active bool) { - return atomic.LoadUint32(&mw.active) == 1 -} - -func newBufWriter(w io.WriteCloser) *bufWriter { - bw := &bufWriter{ - writer: w, - incoming: make(chan []byte, 1), - } - - go bw.loop() - return bw -} - -// writes incoming messages to a buffer and when it fills -// up, writes them to the writer -type bufWriter struct { - writer io.WriteCloser - - incoming chan []byte - - deathLock sync.Mutex - dead bool -} - -var errDeadWriter = fmt.Errorf("writer is dead") - -func (bw *bufWriter) Write(b []byte) (int, error) { - bw.deathLock.Lock() - dead := bw.dead - bw.deathLock.Unlock() - if dead { - if bw.incoming != nil { - close(bw.incoming) - bw.incoming = nil - } - return 0, errDeadWriter - } - - bw.incoming <- b - return len(b), nil -} - -func (bw *bufWriter) die() { - bw.deathLock.Lock() - bw.dead = true - bw.writer.Close() - bw.deathLock.Unlock() -} - -func (bw *bufWriter) loop() { - bufsize := 0 - bufBase := make([][]byte, 0, 16) // some initial memory - buffered := bufBase - nextCh := make(chan []byte) - - var nextMsg []byte - - go func() { - for b := range nextCh { - _, err := bw.writer.Write(b) - if err != nil { - // TODO: need a way to notify there was an error here - // wouldn't want to log here as it could casue an infinite loop - bw.die() - return - } - } - }() - - // collect and buffer messages - incoming := bw.incoming - for { - if nextMsg == nil || nextCh == nil { - // nextCh == nil implies we are 'dead' and draining the incoming channel - // until the caller notices and closes it for us - select { - case b, ok := <-incoming: - if !ok { - return - } - nextMsg = b - } - } - - select { - case b, ok := <-incoming: - if !ok { - return - } - bufsize += len(b) - buffered = append(buffered, b) - if bufsize > MaxWriterBuffer { - // if we have too many messages buffered, kill the writer - bw.die() - if nextCh != nil { - close(nextCh) - } - nextCh = nil - // explicity keep going here to drain incoming - } - case nextCh <- nextMsg: - nextMsg = nil - if len(buffered) > 0 { - nextMsg = buffered[0] - buffered = buffered[1:] - bufsize -= len(nextMsg) - } - - if len(buffered) == 0 { - // reset slice position - buffered = bufBase[:0] - } - } - } -} diff --git a/writer/writer_test.go b/writer/writer_test.go deleted file mode 100644 index 4bd0b52..0000000 --- a/writer/writer_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package log - -import ( - "fmt" - "hash/fnv" - "io" - "math/rand" - "sync" - "testing" - "time" -) - -type hangwriter struct { - c chan struct{} - closer *sync.Once -} - -func newHangWriter() *hangwriter { - return &hangwriter{ - c: make(chan struct{}), - closer: new(sync.Once), - } -} - -func (hw *hangwriter) Write([]byte) (int, error) { - <-hw.c - return 0, fmt.Errorf("write on closed writer") -} - -func (hw *hangwriter) Close() error { - hw.closer.Do(func() { - close(hw.c) - }) - - return nil -} - -func TestMirrorWriterHang(t *testing.T) { - mw := NewMirrorWriter() - - hw := newHangWriter() - pr, pw := io.Pipe() - - mw.AddWriter(hw) - mw.AddWriter(pw) - - msg := "Hello!" 
- mw.Write([]byte(msg)) - - // make sure writes through can happen even with one writer hanging - done := make(chan struct{}) - go func() { - buf := make([]byte, 10) - n, err := pr.Read(buf) - if err != nil { - t.Fatal(err) - } - - if n != len(msg) { - t.Fatal("read wrong amount") - } - - if string(buf[:n]) != msg { - t.Fatal("didnt read right content") - } - - done <- struct{}{} - }() - - select { - case <-time.After(time.Second * 5): - t.Fatal("write to mirrorwriter hung") - case <-done: - } - - if !mw.Active() { - t.Fatal("writer should still be active") - } - - pw.Close() - - if !mw.Active() { - t.Fatal("writer should still be active") - } - - // now we just have the hangwriter - - // write a bunch to it - buf := make([]byte, 8192) - for i := 0; i < 128; i++ { - mw.Write(buf) - } - - // wait for goroutines to sync up - time.Sleep(time.Millisecond * 500) - - // the hangwriter should have been killed, causing the mirrorwriter to be inactive now - if mw.Active() { - t.Fatal("should be inactive now") - } -} - -func TestStress(t *testing.T) { - mw := NewMirrorWriter() - - nreaders := 20 - - var readers []io.Reader - for i := 0; i < nreaders; i++ { - pr, pw := io.Pipe() - mw.AddWriter(pw) - readers = append(readers, pr) - } - - hashout := make(chan []byte) - - numwriters := 20 - writesize := 1024 - writecount := 300 - - f := func(r io.Reader) { - h := fnv.New64a() - sum, err := io.Copy(h, r) - if err != nil { - t.Fatal(err) - } - - if sum != int64(numwriters*writesize*writecount) { - t.Fatal("read wrong number of bytes") - } - - hashout <- h.Sum(nil) - } - - for _, r := range readers { - go f(r) - } - - work := sync.WaitGroup{} - for i := 0; i < numwriters; i++ { - work.Add(1) - go func() { - defer work.Done() - r := rand.New(rand.NewSource(time.Now().UnixNano())) - buf := make([]byte, writesize) - for j := 0; j < writecount; j++ { - r.Read(buf) - mw.Write(buf) - time.Sleep(time.Millisecond * 5) - } - }() - } - - work.Wait() - mw.Close() - - check := make(map[string]bool) - for i := 0; i < nreaders; i++ { - h := <-hashout - check[string(h)] = true - } - - if len(check) > 1 { - t.Fatal("writers received different data!") - } -}