From e38241f267c6f28c15c5632d94be4e2ee08265a5 Mon Sep 17 00:00:00 2001
From: "peter.csala"
Date: Wed, 20 Sep 2023 13:42:21 +0200
Subject: [PATCH 1/4] Add 2 anti-patterns to retry

---
 docs/strategies/retry.md | 75 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 75 insertions(+)

diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md
index 65f29ffddff..489b3efe2ee 100644
--- a/docs/strategies/retry.md
+++ b/docs/strategies/retry.md
@@ -105,3 +105,78 @@ new ResiliencePipelineBuilder().AddRetry(new RetryStrategyOptions
 | `UseJitter` | False | Allows adding jitter to retry delays. |
 | `DelayGenerator` | `null` | Used for generating custom delays for retries. |
 | `OnRetry` | `null` | Action executed when retry occurs. |
+
+## Patterns and Anti-patterns
+Throughout the years many people have used Polly in so many different ways. Some recurring patterns are suboptimal. This section shows the don'ts and the dos.
+
+### 1 - Overusing builder methods
+
+❌ DON'T
+Use more than one `Handle/HandleResult`
+```cs
+var retryPolicy = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        ShouldHandle = new PredicateBuilder()
+            .Handle<SocketException>()
+            .Handle<HttpRequestException>()
+            .Handle<TimeoutRejectedException>()
+            .Handle<BrokenCircuitException>()
+            .Handle<RateLimitRejectedException>(),
+        MaxRetryAttempts = ...,
+    })
+    .Build();
+```
+**Reasoning**:
+- Even though this builder method signature is quite concise, you repeat the same thing over and over (_please trigger a retry if the to-be-decorated code throws an XYZ exception_).
+- A better approach is to say _please trigger a retry if the to-be-decorated code throws one of the retriable exceptions_.
+
+✅ DO
+Use collections and predicates
+```cs
+ImmutableArray<Type> networkExceptions = new[] {
+    typeof(SocketException),
+    typeof(HttpRequestException),
+}.ToImmutableArray();
+
+ImmutableArray<Type> policyExceptions = new[] {
+    typeof(TimeoutRejectedException),
+    typeof(BrokenCircuitException),
+    typeof(RateLimitRejectedException),
+}.ToImmutableArray();
+
+var retryPolicy = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        ShouldHandle = args => new ValueTask<bool>(args.Outcome.Exception is not null &&
+            networkExceptions.Union(policyExceptions).Contains(args.Outcome.Exception.GetType())),
+        MaxRetryAttempts = ...,
+    })
+    .Build();
+```
+**Reasoning**:
+- This approach embraces re-usability.
+  - For instance, the `networkExceptions` collection can be reused across multiple strategies (retry, circuit breaker, etc.).
+
+### 2 - Using retry as a periodic executor
+
+The retry strategy can be defined in a way that it runs forever at a given frequency.
+
+❌ DON'T
+```cs
+var retryForeverDaily = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
+        ShouldHandle = _ => ValueTask.FromResult(true),
+        Delay = TimeSpan.FromHours(24),
+    })
+    .Build();
+```
+**Reasoning**:
+- The sleep period can be blocking or non-blocking, depending on how you define your strategy/pipeline.
+- Even when used in a non-blocking manner, it unnecessarily consumes memory that can't be garbage collected.
+
+✅ DO
+Use an appropriate tool to schedule recurring jobs, such as *Quartz.Net*, *Hangfire*, or similar.
+
+**Reasoning**:
+- Polly was never designed to support this use case; its main aim is to help you overcome **short** transient failures.
+- Dedicated job scheduler tools are more efficient (in terms of memory) and can be configured to withstand machine failures by utilizing persistent storage.
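For illustration, the scheduler-based alternative might look like the following minimal Quartz.NET sketch. This is only a sketch under assumptions: it assumes the `Quartz` NuGet package, and `DailyJob` is a hypothetical job class standing in for the work that the retry-forever pipeline used to wrap.

```cs
using Quartz;
using Quartz.Impl;

// Build and start a scheduler, then register a job that fires every 24 hours.
var scheduler = await new StdSchedulerFactory().GetScheduler();
await scheduler.Start();

var job = JobBuilder.Create<DailyJob>().WithIdentity("daily-job").Build();
var trigger = TriggerBuilder.Create()
    .WithIdentity("daily-trigger")
    .StartNow()
    .WithSimpleSchedule(s => s.WithIntervalInHours(24).RepeatForever())
    .Build();

await scheduler.ScheduleJob(job, trigger);

// Hypothetical job: its body is what the retry-forever pipeline used to execute.
public sealed class DailyJob : IJob
{
    public Task Execute(IJobExecutionContext context) => Task.CompletedTask;
}
```

Unlike the retry-based loop, the scheduler keeps no resilience pipeline alive in memory between runs, and it can be backed by persistent storage to survive process or machine restarts.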
From 24c4e699dcee1a52a546fdf69abd14cc97c944a1 Mon Sep 17 00:00:00 2001
From: "peter.csala"
Date: Wed, 20 Sep 2023 14:27:29 +0200
Subject: [PATCH 2/4] Add 2 more anti-patterns to retry

---
 docs/strategies/retry.md | 104 +++++++++++++++++++++++++++++++++++++--
 1 file changed, 99 insertions(+), 5 deletions(-)

diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md
index 489b3efe2ee..db453215f4f 100644
--- a/docs/strategies/retry.md
+++ b/docs/strategies/retry.md
@@ -112,9 +112,10 @@ Throughout the years many people have used Polly in so many different ways. Some
 ### 1 - Overusing builder methods
 
 ❌ DON'T
+
 Use more than one `Handle/HandleResult`
 ```cs
-var retryPolicy = new ResiliencePipelineBuilder()
+var retry = new ResiliencePipelineBuilder()
     .AddRetry(new() {
         ShouldHandle = new PredicateBuilder()
             .Handle<SocketException>()
             .Handle<HttpRequestException>()
             .Handle<TimeoutRejectedException>()
             .Handle<BrokenCircuitException>()
             .Handle<RateLimitRejectedException>(),
         MaxRetryAttempts = ...,
@@ -131,7 +132,8 @@ var retryPolicy = new ResiliencePipelineBuilder()
 - A better approach is to say _please trigger a retry if the to-be-decorated code throws one of the retriable exceptions_.
 
 ✅ DO
-Use collections and predicates
+
+Use collections and simple predicate functions
 ```cs
 ImmutableArray<Type> networkExceptions = new[] {
     typeof(SocketException),
@@ -144,7 +146,7 @@ ImmutableArray<Type> policyExceptions = new[] {
     typeof(RateLimitRejectedException),
 }.ToImmutableArray();
 
-var retryPolicy = new ResiliencePipelineBuilder()
+var retry = new ResiliencePipelineBuilder()
     .AddRetry(new() {
         ShouldHandle = args => new ValueTask<bool>(args.Outcome.Exception is not null &&
             networkExceptions.Union(policyExceptions).Contains(args.Outcome.Exception.GetType())),
@@ -158,11 +160,13 @@ var retryPolicy = new ResiliencePipelineBuilder()
 
 ### 2 - Using retry as a periodic executor
 
-The retry strategy can be defined in a way that it runs forever at a given frequency.
+
 ❌ DON'T
+
+Define a retry strategy that runs forever at a given frequency
 ```cs
-var retryForeverDaily = new ResiliencePipelineBuilder()
+var retry = new ResiliencePipelineBuilder()
     .AddRetry(new()
     {
         ShouldHandle = _ => ValueTask.FromResult(true),
@@ -175,8 +179,98 @@ var retryForeverDaily = new ResiliencePipelineBuilder()
 - Even when used in a non-blocking manner, it unnecessarily consumes memory that can't be garbage collected.
 
 ✅ DO
+
 Use an appropriate tool to schedule recurring jobs, such as *Quartz.Net*, *Hangfire*, or similar.
 
 **Reasoning**:
 - Polly was never designed to support this use case; its main aim is to help you overcome **short** transient failures.
 - Dedicated job scheduler tools are more efficient (in terms of memory) and can be configured to withstand machine failures by utilizing persistent storage.
+
+### 3 - Combining multiple sleep duration strategies
+
+❌ DON'T
+
+Mix ever-increasing delays with constant ones
+```cs
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        DelayGenerator = args =>
+        {
+            var delay = args.AttemptNumber switch
+            {
+                <= 5 => TimeSpan.FromSeconds(Math.Pow(2, args.AttemptNumber)),
+                _ => TimeSpan.FromMinutes(3)
+            };
+            return new ValueTask<TimeSpan?>(delay);
+        }
+    })
+    .Build();
+```
+Reasoning:
+- By changing the behaviour based on state, we have essentially created a state machine
+- Even though this is a compact/concise way to express sleep durations, it has three main drawbacks
+  - This approach does not embrace re-usability (you can't re-use only the quick retries)
+  - The sleep duration logic is tightly coupled to the `AttemptNumber`
+  - It is harder to unit test
+
+✅ DO
+
+Define two separate retry strategy options and chain them
+```cs
+var slowRetries = new RetryStrategyOptions {
+    MaxRetryAttempts = 5,
+    Delay = TimeSpan.FromMinutes(3),
+    BackoffType = DelayBackoffType.Constant
+};
+
+var quickRetries = new RetryStrategyOptions {
+    MaxRetryAttempts = 5,
+    Delay = TimeSpan.FromSeconds(1),
+    UseJitter = true,
+    BackoffType = DelayBackoffType.Exponential
+};
+
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(slowRetries)
+    .AddRetry(quickRetries)
+    .Build();
+}
+```
+Reasoning:
+- Even though this approach is a bit more verbose (compared to the previous one), it is more flexible
+- You can compose the retry strategies in any order (slower as the outer and quicker as the inner, or vice versa)
+- You can define different triggers for the retry policies
+  - This allows you to switch back and forth between the policies based on the thrown exception or on the result
+  - There is no strict order, so quick and slow retries can interleave
+
+### 4 - Branching retry logic based on request URL
+
+Let's suppose you have an `HttpClient` and you want to decorate it with a retry only if a request targets a certain endpoint
+
+❌ DON'T
+
+Use `ResiliencePipeline.Empty` and the `?:` operator
+```cs
+var retry =
+    shouldRetry(req.RequestUri)
+    ?
+    new ResiliencePipelineBuilder().AddRetry(...).Build()
+    : ResiliencePipeline.Empty;
+```
+**Reasoning**:
+- In this case, the triggering conditions/logic are scattered across multiple places
+- From an extensibility perspective it is also undesirable, since the code can easily become less and less legible as you add more conditions
+
+✅ DO
+
+Use the `ShouldHandle` clause to define the triggering logic
+```cs
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        ShouldHandle = _ => ValueTask.FromResult(shouldRetry(req.RequestUri))
+    })
+    .Build();
+```
+**Reasoning**:
+- The triggering conditions are located in a single, well-known place
+- There is no need to cover _"what to do when the policy shouldn't trigger"_

From 90bd10b02a76204975927af5bb7ad2918592f95f Mon Sep 17 00:00:00 2001
From: "peter.csala"
Date: Wed, 20 Sep 2023 15:23:30 +0200
Subject: [PATCH 3/4] Add 3 more anti-patterns to retry

---
 docs/strategies/retry.md | 144 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 142 insertions(+), 2 deletions(-)

diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md
index db453215f4f..cbe32a99a48 100644
--- a/docs/strategies/retry.md
+++ b/docs/strategies/retry.md
@@ -252,7 +252,7 @@ Let's suppose you have an `HttpClient` and you want to decorate it with a retry o
 Use `ResiliencePipeline.Empty` and the `?:` operator
 ```cs
 var retry =
-    shouldRetry(req.RequestUri)
+    IsRetryable(req.RequestUri)
     ?
     new ResiliencePipelineBuilder().AddRetry(...).Build()
     : ResiliencePipeline.Empty;
 ```
@@ -266,7 +266,7 @@ Use the `ShouldHandle` clause to define the triggering logic
 ```cs
 var retry = new ResiliencePipelineBuilder()
     .AddRetry(new() {
-        ShouldHandle = _ => ValueTask.FromResult(shouldRetry(req.RequestUri))
+        ShouldHandle = _ => ValueTask.FromResult(IsRetryable(req.RequestUri))
     })
     .Build();
 ```
@@ -274,3 +274,143 @@ var retry = new ResiliencePipelineBuilder()
 - The triggering conditions are located in a single, well-known place
 - There is no need to cover _"what to do when the policy shouldn't trigger"_
 
+### 5 - Calling a given method before/after each retry attempt
+
+❌ DON'T
+
+Explicitly call a given method before `Execute`/`ExecuteAsync`
+```cs
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        OnRetry = args => { BeforeEachAttempt(); return ValueTask.CompletedTask; },
+    })
+    .Build();
+...
+BeforeEachAttempt();
+await retry.ExecuteAsync(DoSomething);
+```
+**Reasoning**:
+- `OnRetry` is called before each **retry** attempt.
+  - So, it won't be called before the very first attempt (because that is not a retry)
+- If this strategy is used in multiple places, it is quite likely that you will forget to call `BeforeEachAttempt` before some of the `Execute` calls
+- Here the naming is very explicit, but in a real-world scenario your method might not be prefixed with `Before`
+  - So, someone might call it after the `Execute` call, which is not the intended usage
+
+✅ DO
+
+Decorate the two method calls together
+```cs
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new() {...})
+    .Build();
+...
+await retry.ExecuteAsync(ct => {
+    BeforeEachAttempt();
+    return DoSomething(ct);
+});
+```
+**Reasoning**:
+- If `DoSomething` and `BeforeEachAttempt` are coupled together, then decorate them together
+  - Or create a simple wrapper and call them in the desired order
+
+### 6 - Having a single policy to cover multiple failures
+
+Let's suppose we have an `HttpClient` that issues a request and then we try to parse a large JSON response
+
+❌ DON'T
+
+Have a single policy to cover everything
+```cs
+var builder = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
+        ShouldHandle = new PredicateBuilder().Handle<HttpRequestException>(),
+        MaxRetryAttempts = ...
+    });
+
+builder.AddTimeout(TimeSpan.FromMinutes(1));
+
+var pipeline = builder.Build();
+await pipeline.ExecuteAsync(async (ct) =>
+{
+    var stream = await httpClient.GetStreamAsync(endpoint, ct);
+    var foo = await JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct);
+    ...
+});
+```
+**Reasoning**:
+- In the previous point it was suggested that _if `X` and `Y` are coupled together then decorate them together_
+  - That advice holds only if they are part of the same failure domain
+  - In other words, a pipeline should cover one failure domain
+
+✅ DO
+
+Define a strategy per failure domain
+```cs
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
+        ShouldHandle = new PredicateBuilder().Handle<HttpRequestException>(),
+        MaxRetryAttempts = ...
+    })
+    .Build();
+
+var stream = await retry.ExecuteAsync((ct) => httpClient.GetStreamAsync(endpoint, ct));
+
+var timeout = new ResiliencePipelineBuilder()
+    .AddTimeout(TimeSpan.FromMinutes(1))
+    .Build();
+
+var foo = await timeout.ExecuteAsync((ct) => JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct));
+```
+**Reasoning**:
+- The network call's failure domain is different from deserialization's
+  - Having dedicated strategies makes the application more robust against different transient failures
+
+### 7 - Cancelling retry in case of a given exception
+
+After you receive a `TimeoutException`, you don't want to perform any more retries
+
+❌ DON'T
+
+Add cancellation logic inside `OnRetry`
+```cs
+...
+.WaitAndRetryAsync(
+var ctsKey = new ResiliencePropertyKey<CancellationTokenSource>("cts");
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        OnRetry = args =>
+        {
+            if(args.Outcome.Exception is TimeoutException)
+            {
+                if(args.Context.Properties.TryGetValue(ctsKey, out var cts))
+                {
+                    cts.Cancel();
+                }
+            }
+            return ValueTask.CompletedTask;
+        },
+        ...
+    })
+    .Build();
+```
+**Reasoning**:
+- The triggering logic/conditions should be placed inside `ShouldHandle`
+- "Jumping out of a strategy" from a user-defined delegate, either via an `Exception` or a `CancellationToken`, complicates the control flow unnecessarily
+
+✅ DO
+
+Define the triggering logic inside `ShouldHandle`
+```cs
+...
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new() {
+        ShouldHandle = args => ValueTask.FromResult(args.Outcome.Exception is not null and not TimeoutException),
+        ...
+    })
+    .Build();
+```
+**Reasoning**:
+- As stated above, use the dedicated place to define the triggering condition
+  - Try to rephrase your original exit condition so that it expresses _when a retry should trigger_

From ef6a9a5b3c09a60f0703a28e4046adf1f25d13f2 Mon Sep 17 00:00:00 2001
From: "peter.csala"
Date: Wed, 20 Sep 2023 17:19:34 +0200
Subject: [PATCH 4/4] Extract code from markdown to snippets file

---
 docs/strategies/retry.md   | 152 ++++++++++++++------
 src/Snippets/Docs/Retry.cs | 287 ++++++++++++++++++++++++++++++++++++-
 2 files changed, 393 insertions(+), 46 deletions(-)

diff --git a/docs/strategies/retry.md b/docs/strategies/retry.md
index cbe32a99a48..e7e50a535b8 100644
--- a/docs/strategies/retry.md
+++ b/docs/strategies/retry.md
@@ -114,19 +114,24 @@ Throughout the years many people have used Polly in so many different ways. Some
 ❌ DON'T
 
 Use more than one `Handle/HandleResult`
+
+<!-- snippet: retry-anti-pattern-1 -->
 ```cs
 var retry = new ResiliencePipelineBuilder()
-    .AddRetry(new() {
+    .AddRetry(new()
+    {
         ShouldHandle = new PredicateBuilder()
             .Handle<SocketException>()
             .Handle<HttpRequestException>()
             .Handle<TimeoutRejectedException>()
             .Handle<BrokenCircuitException>()
             .Handle<RateLimitRejectedException>(),
-        MaxRetryAttempts = ...,
+        MaxRetryAttempts = 3,
     })
     .Build();
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - Even though this builder method signature is quite concise, you repeat the same thing over and over (_please trigger a retry if the to-be-decorated code throws an XYZ exception_).
 - A better approach is to say _please trigger a retry if the to-be-decorated code throws one of the retriable exceptions_.
@@ -134,37 +139,46 @@ var retry = new ResiliencePipelineBuilder() ✅ DO Use collections and simple predicate functions + + ```cs -ImmutableArray networkExceptions = new[] { +ImmutableArray networkExceptions = new[] +{ typeof(SocketException), typeof(HttpRequestException), }.ToImmutableArray(); -ImmutableArray policyExceptions = new[] { +ImmutableArray policyExceptions = new[] +{ typeof(TimeoutRejectedException), typeof(BrokenCircuitException), typeof(RateLimitRejectedException), }.ToImmutableArray(); +ImmutableArray retryableExceptions = networkExceptions.Union(policyExceptions) + .ToImmutableArray(); + var retry = new ResiliencePipelineBuilder() - .AddRetry(new() { - ShouldHandle = ex => new ValueTask( - networkExceptions.Union(policyExceptions).Contains(ex.GetType())), - MaxRetryAttempts = ..., + .AddRetry(new() + { + ShouldHandle = ex => new ValueTask(retryableExceptions.Contains(ex.GetType())), + MaxRetryAttempts = 3, }) .Build(); ``` + + **Reasoning**: - This approach embraces re-usability. - For instance the `networkExceptions` can be reused across many the strategies (retry, circuit breaker, etc..). ### 2 - Using retry as a periodical executor - - ❌ DON'T Define a retry strategy to run forever in a given frequency + + ```cs var retry = new ResiliencePipelineBuilder() .AddRetry(new() @@ -174,6 +188,8 @@ var retry = new ResiliencePipelineBuilder() }) .Build(); ``` + + **Reasoning**: - The sleep period can be blocking or non-blocking depending on how you define your strategy/pipeline. - Even if it is used in a non-blocking manner it consumes (_unnecessarily_) memory which can't be garbage collected. @@ -191,21 +207,26 @@ Use appropriate tool to schedule recurring jobs like *Quartz.Net*, *Hangfire* or ❌ DON'T Mix the ever increasing values with constant ones + + ```cs var retry = new ResiliencePipelineBuilder() - .AddRetry(new() { + .AddRetry(new() + { DelayGenerator = args => { var delay = args.AttemptNumber switch { <= 5 => TimeSpan.FromSeconds(Math.Pow(2, args.AttemptNumber)), - _ => TimeSpan.FromMinutes(3) + _ => TimeSpan.FromMinutes(3) }; return new ValueTask(delay); } }) .Build(); ``` + + Reasoning: - By changing the behaviour based on state we basically created here a state machine - Even though it is a really compact/concise way to express sleep durations there are three main drawbacks @@ -216,14 +237,18 @@ Reasoning: ✅ DO Define two separate retry strategy options and chain them + + ```cs -var slowRetries = new RetryStrategyOptions { +var slowRetries = new RetryStrategyOptions +{ MaxRetryAttempts = 5, Delay = TimeSpan.FromMinutes(3), BackoffType = DelayBackoffType.Constant }; -var quickRetries = new RetryStrategyOptions { +var quickRetries = new RetryStrategyOptions +{ MaxRetryAttempts = 5, Delay = TimeSpan.FromSeconds(1), UseJitter = true, @@ -234,8 +259,9 @@ var retry = new ResiliencePipelineBuilder() .AddRetry(slowRetries) .AddRetry(quickRetries) .Build(); -} ``` + + Reasoning: - Even though this approach is a bit more verbose (compared to the previous one) it is more flexible - You can compose the retry strategies in any order (slower is the outer and quicker is the inner or vice versa) @@ -250,12 +276,16 @@ Lets suppose you have an `HttpClient` and you want to decorate it with a retry o ❌ DON'T Use `ResiliencePipeline.Empty` and `?:` operator + + ```cs var retry = IsRetryable(req.RequestUri) - ? new ResiliencePipelineBuilder().AddRetry(...).Build() + ? 
-    new ResiliencePipelineBuilder().AddRetry(...).Build()
+    new ResiliencePipelineBuilder().AddRetry(new()).Build()
     : ResiliencePipeline.Empty;
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - In this case, the triggering conditions/logic are scattered across multiple places
 - From an extensibility perspective it is also undesirable, since the code can easily become less and less legible as you add more conditions
 
 ✅ DO
 
 Use the `ShouldHandle` clause to define the triggering logic
+
+<!-- snippet: retry-pattern-4 -->
 ```cs
-var retry = new ResiliencePipelineBuilder()
-    .AddRetry(new() {
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
         ShouldHandle = _ => ValueTask.FromResult(IsRetryable(req.RequestUri))
     })
     .Build();
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - The triggering conditions are located in a single, well-known place
 - There is no need to cover _"what to do when the policy shouldn't trigger"_
 
 ### 5 - Calling a given method before/after each retry attempt
 
 ❌ DON'T
 
 Explicitly call a given method before `Execute`/`ExecuteAsync`
+
+<!-- snippet: retry-anti-pattern-5 -->
 ```cs
-var retry = new ResiliencePipelineBuilder()
-    .AddRetry(new() {
-        OnRetry = args => { BeforeEachAttempt(); return ValueTask.CompletedTask; },
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
+        OnRetry = args =>
+        {
+            BeforeEachAttempt();
+            return ValueTask.CompletedTask;
+        },
     })
     .Build();
-...
+
 BeforeEachAttempt();
 await retry.ExecuteAsync(DoSomething);
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - `OnRetry` is called before each **retry** attempt.
   - So, it won't be called before the very first attempt (because that is not a retry)
 - If this strategy is used in multiple places, it is quite likely that you will forget to call `BeforeEachAttempt` before some of the `Execute` calls
 - Here the naming is very explicit, but in a real-world scenario your method might not be prefixed with `Before`
   - So, someone might call it after the `Execute` call, which is not the intended usage
 
 ✅ DO
 
 Decorate the two method calls together
+
+<!-- snippet: retry-pattern-5 -->
 ```cs
-var retry = new ResiliencePipelineBuilder()
-    .AddRetry(new() {...})
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new())
     .Build();
-...
-await retry.ExecuteAsync(ct => {
+
+await retry.ExecuteAsync(ct =>
+{
     BeforeEachAttempt();
     return DoSomething(ct);
 });
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - If `DoSomething` and `BeforeEachAttempt` are coupled together, then decorate them together
   - Or create a simple wrapper and call them in the desired order
 
 ### 6 - Having a single policy to cover multiple failures
 
 Let's suppose we have an `HttpClient` that issues a request and then we try to parse a large JSON response
 
 ❌ DON'T
 
 Have a single policy to cover everything
+
+<!-- snippet: retry-anti-pattern-6 -->
 ```cs
 var builder = new ResiliencePipelineBuilder()
     .AddRetry(new()
     {
         ShouldHandle = new PredicateBuilder().Handle<HttpRequestException>(),
-        MaxRetryAttempts = ...
+        MaxRetryAttempts = 3
     });
 
 builder.AddTimeout(TimeSpan.FromMinutes(1));
 
 var pipeline = builder.Build();
 await pipeline.ExecuteAsync(async (ct) =>
 {
-    var stream = await httpClient.GetStreamAsync(endpoint, ct);
-    var foo = await JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct);
-    ...
+    var stream = await httpClient.GetStreamAsync(new Uri("endpoint"), ct);
+    var foo = await JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct);
 });
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - In the previous point it was suggested that _if `X` and `Y` are coupled together then decorate them together_
   - That advice holds only if they are part of the same failure domain
   - In other words, a pipeline should cover one failure domain
 
 ✅ DO
 
 Define a strategy per failure domain
+
+<!-- snippet: retry-pattern-6 -->
 ```cs
 var retry = new ResiliencePipelineBuilder()
     .AddRetry(new()
     {
         ShouldHandle = new PredicateBuilder().Handle<HttpRequestException>(),
-        MaxRetryAttempts = ...
+        MaxRetryAttempts = 3
     })
     .Build();
 
-var stream = await retry.ExecuteAsync((ct) => httpClient.GetStreamAsync(endpoint, ct));
+var stream = await retry.ExecuteAsync(async (ct) => await httpClient.GetStreamAsync(new Uri("endpoint"), ct));
 
 var timeout = new ResiliencePipelineBuilder()
     .AddTimeout(TimeSpan.FromMinutes(1))
     .Build();
 
 var foo = await timeout.ExecuteAsync((ct) => JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct));
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - The network call's failure domain is different from deserialization's
   - Having dedicated strategies makes the application more robust against different transient failures
 
 ### 7 - Cancelling retry in case of a given exception
 
 After you receive a `TimeoutException`, you don't want to perform any more retries
 
 ❌ DON'T
 
 Add cancellation logic inside `OnRetry`
+
+<!-- snippet: retry-anti-pattern-7 -->
 ```cs
-...
-.WaitAndRetryAsync(
 var ctsKey = new ResiliencePropertyKey<CancellationTokenSource>("cts");
-var retry = new ResiliencePipelineBuilder()
-    .AddRetry(new() {
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
         OnRetry = args =>
         {
-            if(args.Outcome.Exception is TimeoutException)
+            if (args.Outcome.Exception is TimeoutException)
             {
-                if(args.Context.Properties.TryGetValue(ctsKey, out var cts))
+                if (args.Context.Properties.TryGetValue(ctsKey, out var cts))
                 {
                     cts.Cancel();
                 }
             }
+
             return ValueTask.CompletedTask;
-        },
-        ...
+        }
     })
     .Build();
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - The triggering logic/conditions should be placed inside `ShouldHandle`
 - "Jumping out of a strategy" from a user-defined delegate, either via an `Exception` or a `CancellationToken`, complicates the control flow unnecessarily
 
 ✅ DO
 
 Define the triggering logic inside `ShouldHandle`
+
+<!-- snippet: retry-pattern-7 -->
 ```cs
-...
-var retry = new ResiliencePipelineBuilder()
-    .AddRetry(new() {
-        ShouldHandle = args => ValueTask.FromResult(args.Outcome.Exception is not null and not TimeoutException),
-        ...
+var retry = new ResiliencePipelineBuilder()
+    .AddRetry(new()
+    {
+        ShouldHandle = args => ValueTask.FromResult(args.Outcome.Exception is not null and not TimeoutException)
     })
     .Build();
 ```
+<!-- endSnippet -->
+
 **Reasoning**:
 - As stated above, use the dedicated place to define the triggering condition
   - Try to rephrase your original exit condition so that it expresses _when a retry should trigger_
diff --git a/src/Snippets/Docs/Retry.cs b/src/Snippets/Docs/Retry.cs
index e784be442cb..1386d6a93ea 100644
--- a/src/Snippets/Docs/Retry.cs
+++ b/src/Snippets/Docs/Retry.cs
@@ -1,6 +1,12 @@
-using System.Globalization;
+using System.Collections.Immutable;
+using System.Globalization;
 using System.Net.Http;
+using System.Net.Sockets;
+using System.Text.Json;
+using Polly.CircuitBreaker;
+using Polly.RateLimit;
 using Polly.Retry;
+using Polly.Timeout;
 using Snippets.Docs.Utils;
 
 namespace Snippets.Docs;
@@ -102,4 +108,283 @@ private static bool TryGetDelay(HttpResponseMessage response, out TimeSpan delay
         delay = TimeSpan.Zero;
         return false;
     }
+
+    public static void AntiPattern_1()
+    {
+        #region retry-anti-pattern-1
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = new PredicateBuilder()
+                    .Handle<SocketException>()
+                    .Handle<HttpRequestException>()
+                    .Handle<TimeoutRejectedException>()
+                    .Handle<BrokenCircuitException>()
+                    .Handle<RateLimitRejectedException>(),
+                MaxRetryAttempts = 3,
+            })
+            .Build();
+
+        #endregion
+    }
+
+    public static void Pattern_1()
+    {
+        #region retry-pattern-1
+
+        ImmutableArray<Type> networkExceptions = new[]
+        {
+            typeof(SocketException),
+            typeof(HttpRequestException),
+        }.ToImmutableArray();
+
+        ImmutableArray<Type> policyExceptions = new[]
+        {
+            typeof(TimeoutRejectedException),
+            typeof(BrokenCircuitException),
+            typeof(RateLimitRejectedException),
+        }.ToImmutableArray();
+
+        ImmutableArray<Type> retryableExceptions = networkExceptions.Union(policyExceptions)
+            .ToImmutableArray();
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = args => new ValueTask<bool>(args.Outcome.Exception is not null && retryableExceptions.Contains(args.Outcome.Exception.GetType())),
+                MaxRetryAttempts = 3,
+            })
+            .Build();
+
+        #endregion
+    }
+
+    public static void AntiPattern_2()
+    {
+        #region retry-anti-pattern-2
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = _ => ValueTask.FromResult(true),
+                Delay = TimeSpan.FromHours(24),
+            })
+            .Build();
+
+        #endregion
+    }
+
+    public static void AntiPattern_3()
+    {
+        #region retry-anti-pattern-3
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                DelayGenerator = args =>
+                {
+                    var delay = args.AttemptNumber switch
+                    {
+                        <= 5 => TimeSpan.FromSeconds(Math.Pow(2, args.AttemptNumber)),
+                        _ => TimeSpan.FromMinutes(3)
+                    };
+                    return new ValueTask<TimeSpan?>(delay);
+                }
+            })
+            .Build();
+
+        #endregion
+    }
+
+    public static void Pattern_3()
+    {
+        #region retry-pattern-3
+
+        var slowRetries = new RetryStrategyOptions
+        {
+            MaxRetryAttempts = 5,
+            Delay = TimeSpan.FromMinutes(3),
+            BackoffType = DelayBackoffType.Constant
+        };
+
+        var quickRetries = new RetryStrategyOptions
+        {
+            MaxRetryAttempts = 5,
+            Delay = TimeSpan.FromSeconds(1),
+            UseJitter = true,
+            BackoffType = DelayBackoffType.Exponential
+        };
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(slowRetries)
+            .AddRetry(quickRetries)
+            .Build();
+
+        #endregion
+    }
+
+    public static void AntiPattern_4()
+    {
+        HttpRequestMessage req = new HttpRequestMessage(HttpMethod.Get, "");
+        static bool IsRetryable(Uri? uri) => true;
+
+        #region retry-anti-pattern-4
+
+        var retry =
+            IsRetryable(req.RequestUri)
+            ?
+            new ResiliencePipelineBuilder().AddRetry(new()).Build()
+            : ResiliencePipeline.Empty;
+
+        #endregion
+    }
+
+    public static void Pattern_4()
+    {
+        HttpRequestMessage req = new HttpRequestMessage(HttpMethod.Get, "");
+        static bool IsRetryable(Uri? uri) => true;
+
+        #region retry-pattern-4
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = _ => ValueTask.FromResult(IsRetryable(req.RequestUri))
+            })
+            .Build();
+
+        #endregion
+    }
+
+    public static async Task AntiPattern_5()
+    {
+        static void BeforeEachAttempt() => System.Diagnostics.Debug.WriteLine("Before attempt");
+        static ValueTask DoSomething(CancellationToken ct) => ValueTask.CompletedTask;
+
+        #region retry-anti-pattern-5
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                OnRetry = args =>
+                {
+                    BeforeEachAttempt();
+                    return ValueTask.CompletedTask;
+                },
+            })
+            .Build();
+
+        BeforeEachAttempt();
+        await retry.ExecuteAsync(DoSomething);
+
+        #endregion
+    }
+
+    public static async Task Pattern_5()
+    {
+        static void BeforeEachAttempt() => System.Diagnostics.Debug.WriteLine("Before attempt");
+        static ValueTask DoSomething(CancellationToken ct) => ValueTask.CompletedTask;
+
+        #region retry-pattern-5
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new())
+            .Build();
+
+        await retry.ExecuteAsync(ct =>
+        {
+            BeforeEachAttempt();
+            return DoSomething(ct);
+        });
+
+        #endregion
+    }
+
+    private record struct Foo;
+
+    public static async Task AntiPattern_6()
+    {
+        var httpClient = new HttpClient();
+
+        #region retry-anti-pattern-6
+
+        var builder = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = new PredicateBuilder().Handle<HttpRequestException>(),
+                MaxRetryAttempts = 3
+            });
+
+        builder.AddTimeout(TimeSpan.FromMinutes(1));
+
+        var pipeline = builder.Build();
+        await pipeline.ExecuteAsync(async (ct) =>
+        {
+            var stream = await httpClient.GetStreamAsync(new Uri("endpoint"), ct);
+            var foo = await JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct);
+        });
+
+        #endregion
+    }
+
+    public static async Task Pattern_6()
+    {
+        var httpClient = new HttpClient();
+
+        #region retry-pattern-6
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = new PredicateBuilder().Handle<HttpRequestException>(),
+                MaxRetryAttempts = 3
+            })
+            .Build();
+
+        var stream = await retry.ExecuteAsync(async (ct) => await httpClient.GetStreamAsync(new Uri("endpoint"), ct));
+
+        var timeout = new ResiliencePipelineBuilder()
+            .AddTimeout(TimeSpan.FromMinutes(1))
+            .Build();
+
+        var foo = await timeout.ExecuteAsync((ct) => JsonSerializer.DeserializeAsync<Foo>(stream, cancellationToken: ct));
+
+        #endregion
+    }
+
+    public static void AntiPattern_7()
+    {
+        #region retry-anti-pattern-7
+
+        var ctsKey = new ResiliencePropertyKey<CancellationTokenSource>("cts");
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                OnRetry = args =>
+                {
+                    if (args.Outcome.Exception is TimeoutException)
+                    {
+                        if (args.Context.Properties.TryGetValue(ctsKey, out var cts))
+                        {
+                            cts.Cancel();
+                        }
+                    }
+
+                    return ValueTask.CompletedTask;
+                }
+            })
+            .Build();
+
+        #endregion
+    }
+
+    public static void Pattern_7()
+    {
+        #region retry-pattern-7
+
+        var retry = new ResiliencePipelineBuilder()
+            .AddRetry(new()
+            {
+                ShouldHandle = args => ValueTask.FromResult(args.Outcome.Exception is not null and not TimeoutException)
+            })
+            .Build();
+
+        #endregion
+    }
 }
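As a companion to pattern 5 above, the "simple wrapper" mentioned there might look like the following sketch. This is only an illustration: `BeforeEachAttempt`, `DoSomething`, and `retry` are the same placeholders used in the snippets, and the wrapper name is hypothetical.

```cs
// Hypothetical wrapper that fixes the call order once, so every call site
// gets the "before" hook on the initial attempt and on every retry.
static ValueTask DoSomethingWithHook(CancellationToken ct)
{
    BeforeEachAttempt();
    return DoSomething(ct);
}

// The pipeline then decorates the wrapper instead of DoSomething directly.
await retry.ExecuteAsync(DoSomethingWithHook);
```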