From bd644b99c9dfd1d4f1fadc7fe2bed7888d0121ed Mon Sep 17 00:00:00 2001
From: Nathan Johnson <nathan@nathanjohnson.org>
Date: Fri, 15 Jul 2022 21:48:56 -0500
Subject: [PATCH 1/3] Update random picker to use math/rand's Intn function

math/rand's Intn function is about 4x faster than the previous time.Now().UnixNano()-based implementation and, unlike it, works correctly on 32-bit platforms.

fixes #871
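
The portability point refers to 32-bit platforms: the old picker cast the
microsecond timestamp to int *before* taking the modulo, which overflows a
32-bit int and can yield a negative index. A standalone sketch of that
failure mode (int32 stands in for a 32-bit int; the timestamp value is
illustrative, not taken from the patch):

    package main

    import "fmt"

    func main() {
        // On 32-bit platforms int is 32 bits wide; int32 simulates that here.
        // Casting the microsecond timestamp to int before the modulo truncates
        // it and can turn it negative, so "% n" can return a negative index.
        micros := int64(3_000_000_000) // low 32 bits wrap to a negative int32
        n := 10
        fmt.Println(int(int32(micros)) % n) // cast first, then mod: -6
        fmt.Println(int(micros % int64(n))) // mod first, then cast: always in [0, n)
    }

The benchmarks added to route/picker_test.go below compare this (fixed)
time-based variant against rand.Intn.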
---
 route/picker.go      | 14 ++++----------
 route/picker_test.go | 27 +++++++++++++++++++++++++++
 2 files changed, 31 insertions(+), 10 deletions(-)

diff --git a/route/picker.go b/route/picker.go
index fac12cb42..6a18aa3a0 100644
--- a/route/picker.go
+++ b/route/picker.go
@@ -1,8 +1,8 @@
 package route
 
 import (
+	"math/rand"
 	"sync/atomic"
-	"time"
 )
 
 // picker selects a target from a list of targets.
@@ -27,17 +27,11 @@ func rrPicker(r *Route) *Target {
 	return u
 }
 
-// stubbed out for testing
-// we implement the randIntN function using the nanosecond time counter
-// since it is 15x faster than using the pseudo random number generator
-// (12 ns vs 190 ns) Most HW does not seem to provide clocks with ns
-// resolution but seem to be good enough for µs resolution. Since
-// requests are usually handled within several ms we should have enough
-// variation. Within 1 ms we have 1000 µs to distribute among a smaller
-// set of entities (<< 100)
+// math/rand's Intn is now significantly faster (~4x) than the previous implementation based on
+// time.Now().UnixNano(), and it also works correctly on 32-bit platforms.
 var randIntn = func(n int) int {
 	if n == 0 {
 		return 0
 	}
-	return int(time.Now().UnixNano()/int64(time.Microsecond)) % n
+	return rand.Intn(n)
 }
diff --git a/route/picker_test.go b/route/picker_test.go
index c55d77308..42ebd3d01 100644
--- a/route/picker_test.go
+++ b/route/picker_test.go
@@ -4,6 +4,7 @@ import (
 	"net/url"
 	"reflect"
 	"testing"
+	"time"
 )
 
 var (
@@ -56,3 +57,29 @@ func TestRRPicker(t *testing.T) {
 		}
 	}
 }
+
+// oldRandInt is the previous UnixNano-based implementation, adjusted so the benchmark
+// does not overflow on 32-bit platforms: it casts to int after taking the modulo,
+// whereas the original cast to int first, which overflowed.
+var oldRandInt = func(n int) int {
+	if n == 0 {
+		return 0
+	}
+	return int(time.Now().UnixNano()/int64(time.Microsecond) % int64(n))
+}
+
+var result int // prevent compiler optimization
+func BenchmarkOldRandIntn(b *testing.B) {
+	var r int // more shields against compiler optimization
+	for i := 0; i < b.N; i++ {
+		r = oldRandInt(i)
+	}
+	result = r
+}
+func BenchmarkMathRandIntn(b *testing.B) {
+	var r int // more shields against compiler optimization
+	for i := 0; i < b.N; i++ {
+		r = randIntn(i)
+	}
+	result = r
+}

From d7878857ca73dbe7fafedc501f87b00f12844275 Mon Sep 17 00:00:00 2001
From: Nathan Johnson <nathan@nathanjohnson.org>
Date: Fri, 15 Jul 2022 22:04:25 -0500
Subject: [PATCH 2/3] Seed the PRNG when the rnd picker is in use

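Without an explicit seed, math/rand's package-level functions draw from a
source that (in Go releases current at the time of this patch) behaves as if
seeded with 1, so every fabio process would produce the same sequence and
"random" target selection would repeat identically across restarts and
instances. A standalone sketch of that behaviour:

    package main

    import (
        "fmt"
        "math/rand"
    )

    func main() {
        // Two sources with the same seed yield the same sequence; unseeded
        // processes effectively all share seed 1.
        a := rand.New(rand.NewSource(1))
        b := rand.New(rand.NewSource(1))
        fmt.Println(a.Intn(100), b.Intn(100)) // the two values are always equal
    }

Seeding from time.Now().UnixNano() at startup, as in the diff below, is the
conventional fix.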
---
 main.go | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/main.go b/main.go
index e16e18247..b95c0f86b 100644
--- a/main.go
+++ b/main.go
@@ -9,6 +9,7 @@ import (
 	gkm "github.com/go-kit/kit/metrics"
 	"io"
 	"log"
+	"math/rand"
 	"net"
 	"net/http"
 	"os"
@@ -67,6 +68,9 @@ func main() {
 		fmt.Printf("%s %s\n", version, runtime.Version())
 		return
 	}
+	if cfg.Proxy.Strategy == "rnd" {
+		rand.Seed(time.Now().UnixNano())
+	}
 
 	log.Printf("[INFO] Setting log level to %s", logOutput.Level())
 	if !logOutput.SetLevel(cfg.Log.Level) {

From 566d224d5a70b1c09d7669b1b849aab829bd10a0 Mon Sep 17 00:00:00 2001
From: Nathan Johnson <nathan@nathanjohnson.org>
Date: Fri, 15 Jul 2022 22:11:55 -0500
Subject: [PATCH 3/3] Move rand seeding to sync.Once

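Patch 2 seeded the generator in main.go, and only when the configured
strategy was "rnd", which couples main to the picker's internals. Moving the
seed behind a sync.Once inside route/picker.go keeps it next to its consumer
and guarantees it runs exactly once, on first use, regardless of the caller.
A standalone sketch of the lazy-seeding pattern (identifier names here are
illustrative, not the ones in the diff):

    package main

    import (
        "fmt"
        "math/rand"
        "sync"
        "time"
    )

    var seedOnce sync.Once

    func lazyIntn(n int) int {
        // Seed exactly once, the first time any caller asks for a number.
        seedOnce.Do(func() { rand.Seed(time.Now().UnixNano()) })
        return rand.Intn(n)
    }

    func main() {
        fmt.Println(lazyIntn(10))
    }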
---
 main.go         | 4 ----
 route/picker.go | 6 ++++++
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/main.go b/main.go
index b95c0f86b..e16e18247 100644
--- a/main.go
+++ b/main.go
@@ -9,7 +9,6 @@ import (
 	gkm "github.com/go-kit/kit/metrics"
 	"io"
 	"log"
-	"math/rand"
 	"net"
 	"net/http"
 	"os"
@@ -68,9 +67,6 @@ func main() {
 		fmt.Printf("%s %s\n", version, runtime.Version())
 		return
 	}
-	if cfg.Proxy.Strategy == "rnd" {
-		rand.Seed(time.Now().UnixNano())
-	}
 
 	log.Printf("[INFO] Setting log level to %s", logOutput.Level())
 	if !logOutput.SetLevel(cfg.Log.Level) {
diff --git a/route/picker.go b/route/picker.go
index 6a18aa3a0..7828d7ad1 100644
--- a/route/picker.go
+++ b/route/picker.go
@@ -2,7 +2,9 @@ package route
 
 import (
 	"math/rand"
+	"sync"
 	"sync/atomic"
+	"time"
 )
 
 // picker selects a target from a list of targets.
@@ -29,7 +31,11 @@ func rrPicker(r *Route) *Target {
 
 // math/rand's Intn is now significantly faster (~4x) than the previous implementation based on
 // time.Now().UnixNano(), and it also works correctly on 32-bit platforms.
+var rndOnce sync.Once
 var randIntn = func(n int) int {
+	rndOnce.Do(func() {
+		rand.Seed(time.Now().UnixNano())
+	})
 	if n == 0 {
 		return 0
 	}