Skip to content
This repository has been archived by the owner on Aug 30, 2019. It is now read-only.

Commit

Permalink
Merge pull request #354 from DataDog/benjamin/nitpicks
Browse files Browse the repository at this point in the history
Reduce debug log verbosity and reduce max sender memory usage
  • Loading branch information
LotharSee authored Jan 31, 2018
2 parents 76f654b + df07249 commit 1071b69
Show file tree
Hide file tree
Showing 5 changed files with 13 additions and 14 deletions.
12 changes: 6 additions & 6 deletions agent/trace-agent.ini
Original file line number Diff line number Diff line change
Expand Up @@ -85,8 +85,8 @@ max_spans_per_payload=1000
flush_period_seconds=5
# Maximum amount of trace data we can have queued up in case we are unable to send it to DD servers.
# A value <= 0 disables this limit
# Default: 256MB
queue_max_bytes=268435456
# Default: 64MB
queue_max_bytes=67108864

###################################################
# Service writer - extracts service info from
Expand All @@ -101,8 +101,8 @@ flush_period_seconds=5
queue_max_payloads=-1
# Maximum amount of service data we can have queued up in case we are unable to send it to DD servers.
# A value <= 0 disables this limit
# Default: 256MB
queue_max_bytes=268435456
# Default: 64MB
queue_max_bytes=67108864
# Maximum amount of time for which we keep service data enqueued in case we are unable to send it to DD servers.
# A value <= 0 disables this limit
# Default: -1
Expand All @@ -115,5 +115,5 @@ queue_max_age_seconds=-1
[trace.writer.stats]
# Maximum amount of stats data we can have queued up in case we are unable to send it to DD servers.
# A value <= 0 disables this limit
# Default: 256MB
queue_max_bytes=268435456
# Default: 64MB
queue_max_bytes=67108864
2 changes: 1 addition & 1 deletion model/normalizer.go
Original file line number Diff line number Diff line change
Expand Up @@ -76,7 +76,7 @@ func (s *Span) Normalize() error {
// root span's ``trace id = span id`` has been removed
if s.ParentID == s.TraceID && s.ParentID == s.SpanID {
s.ParentID = 0
log.Debugf("span.normalize: `ParentID`, `TraceID` and `SpanID` are the same; `ParentID` set to 0: %s", s.TraceID)
log.Debugf("span.normalize: `ParentID`, `TraceID` and `SpanID` are the same; `ParentID` set to 0: %d", s.TraceID)
}

// Start & Duration as nanoseconds timestamps
Expand Down
4 changes: 2 additions & 2 deletions writer/config/payload.go
Original file line number Diff line number Diff line change
Expand Up @@ -18,8 +18,8 @@ type QueuablePayloadSenderConf struct {
func DefaultQueuablePayloadSenderConf() QueuablePayloadSenderConf {
return QueuablePayloadSenderConf{
MaxAge: 20 * time.Minute,
MaxQueuedBytes: 256 * 1024 * 1024, // 256 MB
MaxQueuedPayloads: -1, // Unlimited
MaxQueuedBytes: 64 * 1024 * 1024, // 64 MB
MaxQueuedPayloads: -1, // Unlimited
ExponentialBackoff: backoff.DefaultExponentialConfig(),
}
}
6 changes: 3 additions & 3 deletions writer/payload.go
Original file line number Diff line number Diff line change
Expand Up @@ -199,7 +199,7 @@ func (s *QueuablePayloadSender) Run() {
select {
case payload := <-s.in:
if stats, err := s.sendOrQueue(payload); err != nil {
log.Errorf("Error while sending or queueing payload. err=%v", err)
log.Debugf("Error while sending or queueing payload. err=%v", err)
s.notifyError(payload, err, stats)
}
case <-s.backoffTimer.ReceiveTick():
Expand Down Expand Up @@ -304,7 +304,7 @@ func (s *QueuablePayloadSender) flushQueue() error {
if _, ok := err.(*RetriableError); ok {
// If send failed due to a retriable error, retry flush later
retryNum, delay := s.backoffTimer.ScheduleRetry(err)
log.Errorf("Got retriable error. Retrying flush later: retry=%d, delay=%s, err=%v",
log.Debugf("Got retriable error. Retrying flush later: retry=%d, delay=%s, err=%v",
retryNum, delay, err)
s.discardOldPayloads()
s.notifyRetry(payload, err, delay, retryNum)
Expand All @@ -313,7 +313,7 @@ func (s *QueuablePayloadSender) flushQueue() error {
}

// If send failed due to non-retriable error, notify error and drop it
log.Errorf("Dropping payload due to non-retriable error: err=%v, payload=%v", err, payload)
log.Debugf("Dropping payload due to non-retriable error: err=%v, payload=%v", err, payload)
s.notifyError(payload, err, stats)
next = s.removeQueuedPayload(e)
// Try sending next ones
Expand Down
3 changes: 1 addition & 2 deletions writer/trace_writer.go
Original file line number Diff line number Diff line change
Expand Up @@ -122,7 +122,6 @@ func (w *TraceWriter) Run() {
log.Debug("Flushing current traces")
w.flush()
case <-updateInfoTicker.C:
log.Debug("Updating info")
go w.updateInfo()
case <-w.exit:
log.Info("exiting trace writer, flushing all remaining traces")
Expand Down Expand Up @@ -170,7 +169,7 @@ func (w *TraceWriter) handleTrace(trace *model.Trace) {

w.traces = append(w.traces, trace.APITrace())
w.spansInBuffer += len(*trace)
log.Debugf("Added new trace to buffer. spansInBuffer=%d, len(w.traces)=%d", w.spansInBuffer, len(w.traces))
log.Tracef("Added new trace to buffer. spansInBuffer=%d, len(w.traces)=%d", w.spansInBuffer, len(w.traces))

if w.spansInBuffer == w.conf.MaxSpansPerPayload {
log.Debugf("Flushing because we reached max per payload")
Expand Down

0 comments on commit 1071b69

Please sign in to comment.