From ff2fc4fd1889d155e205a03b700f26021519856c Mon Sep 17 00:00:00 2001
From: Jonathan Hall
Date: Tue, 20 Dec 2016 19:26:58 +0100
Subject: [PATCH 1/2] Switch from `select` to `reflect.Select`

This is in preparation for running in a loop to handle retries.
---
 sentry.go | 31 +++++++++++++++++++++++--------
 1 file changed, 23 insertions(+), 8 deletions(-)

diff --git a/sentry.go b/sentry.go
index b043466..8803712 100644
--- a/sentry.go
+++ b/sentry.go
@@ -3,6 +3,7 @@ package logrus_sentry
 import (
 	"encoding/json"
 	"fmt"
+	"reflect"
 	"runtime"
 	"sync"
 	"time"
@@ -35,6 +36,9 @@ type SentryHook struct {
 	// consider the message correctly sent
 	Timeout                 time.Duration
 	StacktraceConfiguration StackTraceConfiguration
+	// If Retries is non-zero, packets will be resent in case of a 429 error,
+	// up to this many times, or until the timeout is reached.
+	Retries uint8
 
 	client *raven.Client
 	levels []logrus.Level
@@ -244,15 +248,26 @@ func (hook *SentryHook) Flush() {
 }
 
 func (hook *SentryHook) sendPacket(packet *raven.Packet) error {
 	_, errCh := hook.client.Capture(packet, nil)
+	cases := []reflect.SelectCase{
+		reflect.SelectCase{
+			Dir:  reflect.SelectRecv,
+			Chan: reflect.ValueOf(errCh),
+		},
+	}
 	timeout := hook.Timeout
-	if timeout != 0 {
-		timeoutCh := time.After(timeout)
-		select {
-		case err := <-errCh:
-			return err
-		case <-timeoutCh:
-			return fmt.Errorf("no response from sentry server in %s", timeout)
-		}
+	if timeout > 0 {
+		cases = append(cases, reflect.SelectCase{
+			Dir:  reflect.SelectRecv,
+			Chan: reflect.ValueOf(time.After(timeout)),
+		})
+	}
+	chosen, recv, _ := reflect.Select(cases)
+	switch chosen {
+	case 0:
+		err, _ := recv.Interface().(error)
+		return err
+	case 1:
+		return fmt.Errorf("no response from sentry server in %s", timeout)
 	}
 	return nil
 }
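
Why `reflect.Select` rather than a plain `select`? A `select` statement has a fixed set of cases at compile time, whereas `reflect.Select` takes a slice of `reflect.SelectCase` values, so the timeout case can be included only when a timeout is configured, and the receive case can be rebuilt on every retry attempt. The standalone sketch below only illustrates that pattern; the channel, error value, and timeout are placeholders, not code from this package.

    package main

    import (
    	"errors"
    	"fmt"
    	"reflect"
    	"time"
    )

    func main() {
    	errCh := make(chan error, 1)
    	errCh <- errors.New("simulated delivery error") // placeholder result

    	// Case 0 always waits on errCh; a timeout case is appended only when a
    	// timeout is configured, which a static select cannot express.
    	cases := []reflect.SelectCase{
    		{Dir: reflect.SelectRecv, Chan: reflect.ValueOf(errCh)},
    	}
    	timeout := 100 * time.Millisecond
    	if timeout > 0 {
    		cases = append(cases, reflect.SelectCase{
    			Dir:  reflect.SelectRecv,
    			Chan: reflect.ValueOf(time.After(timeout)),
    		})
    	}

    	chosen, recv, _ := reflect.Select(cases)
    	switch chosen {
    	case 0:
    		err, _ := recv.Interface().(error)
    		fmt.Println("received from errCh:", err)
    	case 1:
    		fmt.Println("timed out after", timeout)
    	}
    }
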
From b37bcddfca32bb45e88c0e7af8c59ce8d04ced27 Mon Sep 17 00:00:00 2001
From: Jonathan Hall
Date: Tue, 20 Dec 2016 19:56:08 +0100
Subject: [PATCH 2/2] Support retries

---
 sentry.go      | 47 ++++++++++++++++++++++++++++++-----------------
 sentry_test.go | 45 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 75 insertions(+), 17 deletions(-)

diff --git a/sentry.go b/sentry.go
index 8803712..60e6c46 100644
--- a/sentry.go
+++ b/sentry.go
@@ -36,8 +36,9 @@ type SentryHook struct {
 	// consider the message correctly sent
 	Timeout                 time.Duration
 	StacktraceConfiguration StackTraceConfiguration
-	// If Retries is non-zero, packets will be resent in case of a 429 error,
-	// up to this many times, or until the timeout is reached.
+	// If Retries is non-zero, packets will be resent in case of a 429 error
+	// (too many requests) up to this many times, or until the timeout is
+	// reached.
 	Retries uint8
 
 	client *raven.Client
@@ -247,13 +248,7 @@ func (hook *SentryHook) Flush() {
 }
 
 func (hook *SentryHook) sendPacket(packet *raven.Packet) error {
-	_, errCh := hook.client.Capture(packet, nil)
-	cases := []reflect.SelectCase{
-		reflect.SelectCase{
-			Dir:  reflect.SelectRecv,
-			Chan: reflect.ValueOf(errCh),
-		},
-	}
+	cases := make([]reflect.SelectCase, 1, 2)
 	timeout := hook.Timeout
 	if timeout > 0 {
 		cases = append(cases, reflect.SelectCase{
@@ -261,15 +256,33 @@ func (hook *SentryHook) sendPacket(packet *raven.Packet) error {
 			Chan: reflect.ValueOf(time.After(timeout)),
 		})
 	}
-	chosen, recv, _ := reflect.Select(cases)
-	switch chosen {
-	case 0:
-		err, _ := recv.Interface().(error)
-		return err
-	case 1:
-		return fmt.Errorf("no response from sentry server in %s", timeout)
+	var err error
+	for i := 0; i < int(hook.Retries)+1; i++ {
+		_, errCh := hook.client.Capture(packet, nil)
+		cases[0] = reflect.SelectCase{
+			Dir:  reflect.SelectRecv,
+			Chan: reflect.ValueOf(errCh),
+		}
+
+		chosen, recv, _ := reflect.Select(cases)
+		switch chosen {
+		case 0:
+			var ok bool
+			err, ok = recv.Interface().(error)
+			if !ok {
+				// Success!
+				return nil
+			}
+			if err.Error() == "raven: got http status 429" { // Too many requests
+				continue
+			}
+			return err
+		case 1:
+			return fmt.Errorf("no response from sentry server in %s", timeout)
+		}
 	}
-	return nil
+	// Retry count exceeded; return the last error
+	return err
 }
 
 func (hook *SentryHook) findStacktraceAndCause(err error) (*raven.Stacktrace, error) {
diff --git a/sentry_test.go b/sentry_test.go
index 2ad9864..b910d92 100644
--- a/sentry_test.go
+++ b/sentry_test.go
@@ -450,3 +450,48 @@ func (myStacktracerError) GetStacktrace() *raven.Stacktrace {
 		},
 	}
 }
+
+func TestRetries(t *testing.T) {
+	failures := 8
+	s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+		// This handler responds with a 429 while the failure counter is
+		// positive, decrementing it each time, and with a 200 once it is zero.
+		defer req.Body.Close()
+		if failures > 0 {
+			rw.WriteHeader(429) // Too many requests
+			failures--
+			return
+		}
+		rw.WriteHeader(http.StatusOK)
+	}))
+	defer s.Close()
+	fragments := strings.SplitN(s.URL, "://", 2)
+	dsn := fmt.Sprintf(
+		"%s://public:secret@%s/sentry/project-id",
+		fragments[0],
+		fragments[1],
+	)
+	logger := getTestLogger()
+
+	hook, err := NewSentryHook(dsn, []logrus.Level{
+		logrus.ErrorLevel,
+	})
+
+	if err != nil {
+		t.Fatal(err.Error())
+	}
+	logger.Hooks.Add(hook)
+	hook.Retries = 5
+	if err := hook.Fire(&logrus.Entry{}); err == nil {
+		t.Errorf("Expected failure")
+	}
+	if failures != 2 { // 1 attempt + 5 retries should consume 6 failures
+		t.Errorf("Expected failure counter to be 2, got %d", failures)
+	}
+	if err := hook.Fire(&logrus.Entry{}); err != nil {
+		t.Errorf("Expected success, got: %s", err)
+	}
+	if failures != 0 { // the second Fire should retry through the remaining failures
+		t.Errorf("Expected failure counter to be 0, got %d", failures)
+	}
+}
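
With both patches applied, enabling retries from calling code is just a matter of setting the new Retries field alongside the existing Timeout. The sketch below is illustrative only and not part of the patches: the helper name is hypothetical, the values are examples, and the logrus import path is assumed to match the one the package already uses.

    package logrus_sentry

    import (
    	"time"

    	"github.com/Sirupsen/logrus" // assumed: same logrus import path the package already uses
    )

    // newRetryingLogger is a hypothetical helper showing how a caller would
    // enable the new retry behavior on the hook.
    func newRetryingLogger(dsn string) (*logrus.Logger, error) {
    	hook, err := NewSentryHook(dsn, []logrus.Level{
    		logrus.ErrorLevel,
    	})
    	if err != nil {
    		return nil, err
    	}
    	hook.Timeout = 2 * time.Second // one overall deadline, shared by all attempts
    	hook.Retries = 3               // resend up to 3 more times on HTTP 429 before giving up

    	logger := logrus.New()
    	logger.Hooks.Add(hook)
    	return logger, nil
    }

A caller would then log through the returned logger as usual (e.g. logger.Error(...)); any packet rejected with HTTP 429 is resent up to Retries times, all within the single Timeout window, since the timeout channel is created once before the retry loop.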