From c0d1f48687b24a533634758104185cbf771cab8a Mon Sep 17 00:00:00 2001
From: Junchao Lyu
Date: Wed, 15 Jan 2020 15:11:56 -0800
Subject: [PATCH] add local cache stats in the integration test

---
 src/redis/local_cache_stats.go       |  43 ++++++
 src/server/server_impl.go            |  12 +-
 src/service_cmd/runner/runner.go     |  10 +-
 test/common/common.go                |  31 ++++
 test/integration/integration_test.go | 216 +++++++++++++++------------
 test/redis/cache_impl_test.go        |  59 +++++++-
 6 files changed, 263 insertions(+), 108 deletions(-)
 create mode 100644 src/redis/local_cache_stats.go

diff --git a/src/redis/local_cache_stats.go b/src/redis/local_cache_stats.go
new file mode 100644
index 00000000..60a94194
--- /dev/null
+++ b/src/redis/local_cache_stats.go
@@ -0,0 +1,43 @@
+package redis
+
+import (
+	"github.com/coocood/freecache"
+	stats "github.com/lyft/gostats"
+)
+
+type localCacheStats struct {
+	cache             *freecache.Cache
+	evacuateCount     stats.Gauge
+	expiredCount      stats.Gauge
+	entryCount        stats.Gauge
+	averageAccessTime stats.Gauge
+	hitCount          stats.Gauge
+	missCount         stats.Gauge
+	lookupCount       stats.Gauge
+	overwriteCount    stats.Gauge
+}
+
+func NewLocalCacheStats(localCache *freecache.Cache, scope stats.Scope) stats.StatGenerator {
+	return localCacheStats{
+		cache:             localCache,
+		evacuateCount:     scope.NewGauge("evacuateCount"),
+		expiredCount:      scope.NewGauge("expiredCount"),
+		entryCount:        scope.NewGauge("entryCount"),
+		averageAccessTime: scope.NewGauge("averageAccessTime"),
+		hitCount:          scope.NewGauge("hitCount"),
+		missCount:         scope.NewGauge("missCount"),
+		lookupCount:       scope.NewGauge("lookupCount"),
+		overwriteCount:    scope.NewGauge("overwriteCount"),
+	}
+}
+
+func (stats localCacheStats) GenerateStats() {
+	stats.evacuateCount.Set(uint64(stats.cache.EvacuateCount()))
+	stats.expiredCount.Set(uint64(stats.cache.ExpiredCount()))
+	stats.entryCount.Set(uint64(stats.cache.EntryCount()))
+	stats.averageAccessTime.Set(uint64(stats.cache.AverageAccessTime()))
+	stats.hitCount.Set(uint64(stats.cache.HitCount()))
+	stats.missCount.Set(uint64(stats.cache.MissCount()))
+	stats.lookupCount.Set(uint64(stats.cache.LookupCount()))
+	stats.overwriteCount.Set(uint64(stats.cache.OverwriteCount()))
+}
diff --git a/src/server/server_impl.go b/src/server/server_impl.go
index a0803f2a..d49f2d09 100644
--- a/src/server/server_impl.go
+++ b/src/server/server_impl.go
@@ -3,6 +3,7 @@ package server
 import (
 	"expvar"
 	"fmt"
+	"github.com/lyft/ratelimit/src/redis"
 	"io"
 	"net/http"
 	"net/http/pprof"
@@ -14,6 +15,7 @@ import (

 	"net"

+	"github.com/coocood/freecache"
 	"github.com/gorilla/mux"
 	reuseport "github.com/kavu/go_reuseport"
 	"github.com/lyft/goruntime/loader"
@@ -99,11 +101,12 @@ func (server *server) Runtime() loader.IFace {
 	return server.runtime
 }

-func NewServer(name string, store stats.Store, opts ...settings.Option) Server {
-	return newServer(name, store, opts...)
+
+func NewServer(name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) Server {
+	return newServer(name, store, localCache, opts...)
 }

-func newServer(name string, store stats.Store, opts ...settings.Option) *server {
+func newServer(name string, store stats.Store, localCache *freecache.Cache, opts ...settings.Option) *server {
 	s := settings.NewSettings()

 	for _, opt := range opts {
@@ -122,6 +125,9 @@ func newServer(name string, store stats.Store, opts ...settings.Option) *server
 	ret.store = store
 	ret.scope = ret.store.Scope(name)
 	ret.store.AddStatGenerator(stats.NewRuntimeStats(ret.scope.Scope("go")))
+	if localCache != nil {
+		ret.store.AddStatGenerator(redis.NewLocalCacheStats(localCache, ret.scope.Scope("localcache")))
+	}

 	// setup runtime
 	loaderOpts := make([]loader.Option, 0, 1)
diff --git a/src/service_cmd/runner/runner.go b/src/service_cmd/runner/runner.go
index 30cdde73..21c07d47 100644
--- a/src/service_cmd/runner/runner.go
+++ b/src/service_cmd/runner/runner.go
@@ -42,8 +42,12 @@ func (runner *Runner) Run() {
 	} else {
 		logger.SetLevel(logLevel)
 	}
+	var localCache *freecache.Cache
+	if s.LocalCacheSizeInBytes != 0 {
+		localCache = freecache.NewCache(s.LocalCacheSizeInBytes)
+	}

-	srv := server.NewServer("ratelimit", runner.statsStore, settings.GrpcUnaryInterceptor(nil))
+	srv := server.NewServer("ratelimit", runner.statsStore, localCache, settings.GrpcUnaryInterceptor(nil))

 	var perSecondPool redis.Pool
 	if s.RedisPerSecond {
@@ -52,10 +56,6 @@ func (runner *Runner) Run() {
 	var otherPool redis.Pool
 	otherPool = redis.NewPoolImpl(srv.Scope().Scope("redis_pool"), s.RedisTls, s.RedisAuth, s.RedisUrl, s.RedisPoolSize)

-	var localCache *freecache.Cache
-	if s.LocalCacheSizeInBytes != 0 {
-		localCache = freecache.NewCache(s.LocalCacheSizeInBytes)
-	}
 	service := ratelimit.NewService(
 		srv.Runtime(),
 		redis.NewRateLimitCacheImpl(
diff --git a/test/common/common.go b/test/common/common.go
index 549ccb20..ba3e7904 100644
--- a/test/common/common.go
+++ b/test/common/common.go
@@ -1,11 +1,42 @@
 package common

 import (
+	"sync"
+
 	pb_struct "github.com/envoyproxy/go-control-plane/envoy/api/v2/ratelimit"
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
 	pb_legacy "github.com/lyft/ratelimit/proto/ratelimit"
 )

+type TestStatSink struct {
+	sync.Mutex
+	Record map[string]interface{}
+}
+
+func (s *TestStatSink) Clear() {
+	s.Lock()
+	s.Record = map[string]interface{}{}
+	s.Unlock()
+}
+
+func (s *TestStatSink) FlushCounter(name string, value uint64) {
+	s.Lock()
+	s.Record[name] = value
+	s.Unlock()
+}
+
+func (s *TestStatSink) FlushGauge(name string, value uint64) {
+	s.Lock()
+	s.Record[name] = value
+	s.Unlock()
+}
+
+func (s *TestStatSink) FlushTimer(name string, value float64) {
+	s.Lock()
+	s.Record[name] = value
+	s.Unlock()
+}
+
 func NewRateLimitRequest(domain string, descriptors [][][2]string, hitsAddend uint32) *pb.RateLimitRequest {
 	request := &pb.RateLimitRequest{}
 	request.Domain = domain
diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go
index 3f083dad..96958c9f 100644
--- a/test/integration/integration_test.go
+++ b/test/integration/integration_test.go
@@ -112,7 +112,7 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 		os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit")
 		os.Setenv("REDIS_PERSECOND_SOCKET_TYPE", "tcp")
 		os.Setenv("REDIS_SOCKET_TYPE", "tcp")
-		os.Setenv("LOCAL_CACHE_SIZE", local_cache_size)
+		os.Setenv("LOCAL_CACHE_SIZE_IN_BYTES", local_cache_size)
 		local_cache_size_val, _ := strconv.Atoi(local_cache_size)
 		enable_local_cache := local_cache_size_val > 0

@@ -181,6 +181,19 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 			assert.NoError(err)
 			key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache)))
 			assert.Equal(i+1, int(key2HitCounter.Value()))
+			key2OverlimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit", getCacheKey("key2", enable_local_cache)))
+			if i >= 20 {
+				assert.Equal(i-19, int(key2OverlimitCounter.Value()))
+			} else {
+				assert.Equal(0, int(key2OverlimitCounter.Value()))
+			}
+
+			key2LocalCacheOverLimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit_with_local_cache", getCacheKey("key2", enable_local_cache)))
+			if enable_local_cache && i >= 20 {
+				assert.Equal(i-20, int(key2LocalCacheOverLimitCounter.Value()))
+			} else {
+				assert.Equal(0, int(key2LocalCacheOverLimitCounter.Value()))
+			}
 		}

 		// Limit now against 2 keys in the same domain.
@@ -212,123 +225,134 @@ func testBasicBaseConfig(grpcPort, perSecond string, local_cache_size string) fu
 			assert.NoError(err)
 			key2HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key2", enable_local_cache)))
 			assert.Equal(i+26, int(key2HitCounter.Value()))
+			key2OverlimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit", getCacheKey("key2", enable_local_cache)))
+			assert.Equal(5, int(key2OverlimitCounter.Value()))
+			key2LocalCacheOverLimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit_with_local_cache", getCacheKey("key2", enable_local_cache)))
+			if enable_local_cache {
+				assert.Equal(4, int(key2LocalCacheOverLimitCounter.Value()))
+			} else {
+				assert.Equal(0, int(key2LocalCacheOverLimitCounter.Value()))
+			}
 			key3HitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.total_hits", getCacheKey("key3", enable_local_cache)))
 			assert.Equal(i+1, int(key3HitCounter.Value()))
+			key3OverlimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit", getCacheKey("key3", enable_local_cache)))
+			if i >= 10 {
+				assert.Equal(i-9, int(key3OverlimitCounter.Value()))
+			} else {
+				assert.Equal(0, int(key3OverlimitCounter.Value()))
+			}
+			key3LocalCacheOverLimitCounter := runner.GetStatsStore().NewCounter(fmt.Sprintf("ratelimit.service.rate_limit.another.%s.over_limit_with_local_cache", getCacheKey("key3", enable_local_cache)))
+			if enable_local_cache && i >= 10 {
+				assert.Equal(i-10, int(key3LocalCacheOverLimitCounter.Value()))
+			} else {
+				assert.Equal(0, int(key3LocalCacheOverLimitCounter.Value()))
+			}
+
 		}
 	}
 }

 func TestBasicConfigLegacy(t *testing.T) {
-	t.Run("testBasicConfigLegacy", testBasicConfigLegacy("0"))
-	t.Run("testBasicConfigLegacyWithLocalCache", testBasicConfigLegacy("1000"))
-}
-
-func testBasicConfigLegacy(local_cache_size string) func(*testing.T) {
-	return func(t *testing.T) {
-		os.Setenv("PORT", "8082")
-		os.Setenv("GRPC_PORT", "8083")
-		os.Setenv("DEBUG_PORT", "8084")
-		os.Setenv("RUNTIME_ROOT", "runtime/current")
-		os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit")
-
-		os.Setenv("REDIS_PERSECOND_URL", "localhost:6380")
-		os.Setenv("REDIS_URL", "localhost:6379")
-		os.Setenv("REDIS_TLS", "false")
-		os.Setenv("REDIS_AUTH", "")
-		os.Setenv("REDIS_PERSECOND_TLS", "false")
-		os.Setenv("REDIS_PERSECOND_AUTH", "")
-		os.Setenv("LOCAL_CACHE_SIZE", local_cache_size)
-		local_cache_size_val, _ := strconv.Atoi(local_cache_size)
-		enable_local_cache := local_cache_size_val > 0
-
-		runner := runner.NewRunner()
-		go func() {
-			runner.Run()
-		}()
+	os.Setenv("PORT", "8082")
+	os.Setenv("GRPC_PORT", "8083")
+	os.Setenv("DEBUG_PORT", "8084")
+	os.Setenv("RUNTIME_ROOT", "runtime/current")
+	os.Setenv("RUNTIME_SUBDIRECTORY", "ratelimit")

-		// HACK: Wait for the server to come up. Make a hook that we can wait on.
-		time.Sleep(100 * time.Millisecond)
-
-		assert := assert.New(t)
-		conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure())
-		assert.NoError(err)
-		defer conn.Close()
-		c := pb_legacy.NewRateLimitServiceClient(conn)
+	os.Setenv("REDIS_PERSECOND_URL", "localhost:6380")
+	os.Setenv("REDIS_URL", "localhost:6379")
+	os.Setenv("REDIS_TLS", "false")
+	os.Setenv("REDIS_AUTH", "")
+	os.Setenv("REDIS_PERSECOND_TLS", "false")
+	os.Setenv("REDIS_PERSECOND_AUTH", "")

-		response, err := c.ShouldRateLimit(
+	runner := runner.NewRunner()
+	go func() {
+		runner.Run()
+	}()
+
+	// HACK: Wait for the server to come up. Make a hook that we can wait on.
+	time.Sleep(100 * time.Millisecond)
+
+	assert := assert.New(t)
+	conn, err := grpc.Dial("localhost:8083", grpc.WithInsecure())
+	assert.NoError(err)
+	defer conn.Close()
+	c := pb_legacy.NewRateLimitServiceClient(conn)
+
+	response, err := c.ShouldRateLimit(
+		context.Background(),
+		common.NewRateLimitRequestLegacy("foo", [][][2]string{{{"hello", "world"}}}, 1))
+	assert.Equal(
+		&pb_legacy.RateLimitResponse{
+			OverallCode: pb_legacy.RateLimitResponse_OK,
+			Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
+		response)
+	assert.NoError(err)
+
+	response, err = c.ShouldRateLimit(
+		context.Background(),
+		common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{"key1", "foo"}}}, 1))
+	assert.Equal(
+		&pb_legacy.RateLimitResponse{
+			OverallCode: pb_legacy.RateLimitResponse_OK,
+			Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
+				newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimit_SECOND, 49)}},
+		response)
+	assert.NoError(err)
+
+	// Now come up with a random key, and go over limit for a minute limit which should always work.
+	r := rand.New(rand.NewSource(time.Now().UnixNano()))
+	randomInt := r.Int()
+	for i := 0; i < 25; i++ {
+		response, err = c.ShouldRateLimit(
 			context.Background(),
-			common.NewRateLimitRequestLegacy("foo", [][][2]string{{{getCacheKey("hello", enable_local_cache), "world"}}}, 1))
+			common.NewRateLimitRequestLegacy(
+				"another", [][][2]string{{{"key2", strconv.Itoa(randomInt)}}}, 1))
+
+		status := pb_legacy.RateLimitResponse_OK
+		limitRemaining := uint32(20 - (i + 1))
+		if i >= 20 {
+			status = pb_legacy.RateLimitResponse_OVER_LIMIT
+			limitRemaining = 0
+		}
+
 		assert.Equal(
 			&pb_legacy.RateLimitResponse{
-				OverallCode: pb_legacy.RateLimitResponse_OK,
-				Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{{Code: pb_legacy.RateLimitResponse_OK, CurrentLimit: nil, LimitRemaining: 0}}},
+				OverallCode: status,
+				Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
+					newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimit_MINUTE, limitRemaining)}},
 			response)
 		assert.NoError(err)
+	}
+	// Limit now against 2 keys in the same domain.
+	randomInt = r.Int()
+	for i := 0; i < 15; i++ {
 		response, err = c.ShouldRateLimit(
 			context.Background(),
-			common.NewRateLimitRequestLegacy("basic_legacy", [][][2]string{{{getCacheKey("key1", enable_local_cache), "foo"}}}, 1))
+			common.NewRateLimitRequestLegacy(
+				"another_legacy",
+				[][][2]string{
+					{{"key2", strconv.Itoa(randomInt)}},
+					{{"key3", strconv.Itoa(randomInt)}}}, 1))
+
+		status := pb_legacy.RateLimitResponse_OK
+		limitRemaining1 := uint32(20 - (i + 1))
+		limitRemaining2 := uint32(10 - (i + 1))
+		if i >= 10 {
+			status = pb_legacy.RateLimitResponse_OVER_LIMIT
+			limitRemaining2 = 0
+		}
+
 		assert.Equal(
 			&pb_legacy.RateLimitResponse{
-				OverallCode: pb_legacy.RateLimitResponse_OK,
+				OverallCode: status,
 				Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
-					newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 50, pb_legacy.RateLimit_SECOND, 49)}},
+					newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimit_MINUTE, limitRemaining1),
+					newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimit_HOUR, limitRemaining2)}},
 			response)
 		assert.NoError(err)
-
-		// Now come up with a random key, and go over limit for a minute limit which should always work.
-		r := rand.New(rand.NewSource(time.Now().UnixNano()))
-		randomInt := r.Int()
-		for i := 0; i < 25; i++ {
-			response, err = c.ShouldRateLimit(
-				context.Background(),
-				common.NewRateLimitRequestLegacy(
-					"another", [][][2]string{{{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
-
-			status := pb_legacy.RateLimitResponse_OK
-			limitRemaining := uint32(20 - (i + 1))
-			if i >= 20 {
-				status = pb_legacy.RateLimitResponse_OVER_LIMIT
-				limitRemaining = 0
-			}
-
-			assert.Equal(
-				&pb_legacy.RateLimitResponse{
-					OverallCode: status,
-					Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
-						newDescriptorStatusLegacy(status, 20, pb_legacy.RateLimit_MINUTE, limitRemaining)}},
-				response)
-			assert.NoError(err)
-		}
-
-		// Limit now against 2 keys in the same domain.
-		randomInt = r.Int()
-		for i := 0; i < 15; i++ {
-			response, err = c.ShouldRateLimit(
-				context.Background(),
-				common.NewRateLimitRequestLegacy(
-					"another_legacy",
-					[][][2]string{
-						{{getCacheKey("key2", enable_local_cache), strconv.Itoa(randomInt)}},
-						{{getCacheKey("key3", enable_local_cache), strconv.Itoa(randomInt)}}}, 1))
-
-			status := pb_legacy.RateLimitResponse_OK
-			limitRemaining1 := uint32(20 - (i + 1))
-			limitRemaining2 := uint32(10 - (i + 1))
-			if i >= 10 {
-				status = pb_legacy.RateLimitResponse_OVER_LIMIT
-				limitRemaining2 = 0
-			}
-
-			assert.Equal(
-				&pb_legacy.RateLimitResponse{
-					OverallCode: status,
-					Statuses: []*pb_legacy.RateLimitResponse_DescriptorStatus{
-						newDescriptorStatusLegacy(pb_legacy.RateLimitResponse_OK, 20, pb_legacy.RateLimit_MINUTE, limitRemaining1),
-						newDescriptorStatusLegacy(status, 10, pb_legacy.RateLimit_HOUR, limitRemaining2)}},
-				response)
-			assert.NoError(err)
-		}
 	}
 }
diff --git a/test/redis/cache_impl_test.go b/test/redis/cache_impl_test.go
index b168dbb1..57581f38 100644
--- a/test/redis/cache_impl_test.go
+++ b/test/redis/cache_impl_test.go
@@ -1,9 +1,10 @@
 package redis_test

 import (
-	"github.com/coocood/freecache"
 	"testing"

+	"github.com/coocood/freecache"
+
 	pb "github.com/envoyproxy/go-control-plane/envoy/service/ratelimit/v2"
 	stats "github.com/lyft/gostats"
 	"github.com/lyft/ratelimit/src/config"
@@ -20,7 +21,6 @@ import (
 func TestRedis(t *testing.T) {
 	t.Run("WithoutPerSecondRedis", testRedis(false))
 	t.Run("WithPerSecondRedis", testRedis(true))
-
 }

 func testRedis(usePerSecondRedis bool) func(*testing.T) {
@@ -152,6 +152,42 @@ func testRedis(usePerSecondRedis bool) func(*testing.T) {
 	}
 }

+func testLocalCacheStats(localCacheStats stats.StatGenerator, statsStore stats.Store, sink *common.TestStatSink,
+	expectedHitCount int, expectedMissCount int, expectedLookUpCount int, expectedExpiredCount int,
+	expectedEntryCount int) func(*testing.T) {
+	return func(t *testing.T) {
+		localCacheStats.GenerateStats()
+		statsStore.Flush()
+
+		// Check whether all local_cache related stats are available.
+		_, ok := sink.Record["averageAccessTime"]
+		assert.Equal(t, true, ok)
+		hitCount, ok := sink.Record["hitCount"]
+		assert.Equal(t, true, ok)
+		missCount, ok := sink.Record["missCount"]
+		assert.Equal(t, true, ok)
+		lookupCount, ok := sink.Record["lookupCount"]
+		assert.Equal(t, true, ok)
+		_, ok = sink.Record["overwriteCount"]
+		assert.Equal(t, true, ok)
+		_, ok = sink.Record["evacuateCount"]
+		assert.Equal(t, true, ok)
+		expiredCount, ok := sink.Record["expiredCount"]
+		assert.Equal(t, true, ok)
+		entryCount, ok := sink.Record["entryCount"]
+		assert.Equal(t, true, ok)
+
+		// Check the correctness of hitCount, missCount, lookupCount, expiredCount and entryCount
+		assert.Equal(t, expectedHitCount, hitCount.(int))
+		assert.Equal(t, expectedMissCount, missCount.(int))
+		assert.Equal(t, expectedLookUpCount, lookupCount.(int))
+		assert.Equal(t, expectedExpiredCount, expiredCount.(int))
+		assert.Equal(t, expectedEntryCount, entryCount.(int))
+
+		sink.Clear()
+	}
+}
+
 func TestOverLimitWithLocalCache(t *testing.T) {
 	assert := assert.New(t)
 	controller := gomock.NewController(t)
@@ -161,8 +197,11 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	timeSource := mock_redis.NewMockTimeSource(controller)
 	connection := mock_redis.NewMockConnection(controller)
 	response := mock_redis.NewMockResponse(controller)
-	cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, freecache.NewCache(100))
-	statsStore := stats.NewStore(stats.NewNullSink(), false)
+	localCache := freecache.NewCache(100)
+	cache := redis.NewRateLimitCacheImpl(pool, nil, timeSource, rand.New(rand.NewSource(1)), 0, localCache)
+	sink := &common.TestStatSink{}
+	statsStore := stats.NewStore(sink, true)
+	localCacheStats := redis.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))

 	// Test Near Limit Stats. Under Near Limit Ratio
 	pool.EXPECT().Get().Return(connection)
@@ -189,6 +228,9 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value())
 	assert.Equal(uint64(0), limits[0].Stats.NearLimit.Value())

+	// Check the local cache stats.
+	testLocalCacheStats(localCacheStats, statsStore, sink, 0, 1, 1, 0, 0)
+
 	// Test Near Limit Stats. At Near Limit Ratio, still OK
 	pool.EXPECT().Get().Return(connection)
 	timeSource.EXPECT().UnixNow().Return(int64(1000000))
@@ -209,6 +251,9 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value())
 	assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value())

+	// Check the local cache stats.
+	testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 2, 0, 0)
+
 	// Test Over limit stats
 	pool.EXPECT().Get().Return(connection)
 	timeSource.EXPECT().UnixNow().Return(int64(1000000))
@@ -229,6 +274,9 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	assert.Equal(uint64(0), limits[0].Stats.OverLimitWithLocalCache.Value())
 	assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value())

+	// Check the local cache stats.
+	testLocalCacheStats(localCacheStats, statsStore, sink, 0, 2, 3, 0, 1)
+
 	// Test Over limit stats with local cache
 	pool.EXPECT().Get().Return(connection)
 	timeSource.EXPECT().UnixNow().Return(int64(1000000))
@@ -246,6 +294,9 @@ func TestOverLimitWithLocalCache(t *testing.T) {
 	assert.Equal(uint64(2), limits[0].Stats.OverLimit.Value())
 	assert.Equal(uint64(1), limits[0].Stats.OverLimitWithLocalCache.Value())
 	assert.Equal(uint64(1), limits[0].Stats.NearLimit.Value())
+
+	// Check the local cache stats.
+	testLocalCacheStats(localCacheStats, statsStore, sink, 1, 3, 4, 0, 1)
 }

 func TestNearLimit(t *testing.T) {
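Reviewer note (not part of the patch): the sketch below shows one way to exercise the new NewLocalCacheStats generator by hand, using only calls that already appear in this diff (freecache.NewCache, stats.NewStore, Scope, GenerateStats, Flush, and the TestStatSink added to test/common). The tiny cache size, the sample keys, the explicit sink.Clear() before the first flush, and printing the raw sink contents are illustrative assumptions for a standalone program, not behavior the patch itself relies on.

package main

import (
	"fmt"

	"github.com/coocood/freecache"
	stats "github.com/lyft/gostats"

	"github.com/lyft/ratelimit/src/redis"
	"github.com/lyft/ratelimit/test/common"
)

func main() {
	// Cache size is in bytes; 100 is only for illustration (freecache rounds very small sizes up internally).
	localCache := freecache.NewCache(100)

	// Reuse the TestStatSink from this patch so flushed gauges are easy to inspect.
	sink := &common.TestStatSink{}
	sink.Clear() // initializes the Record map before the first flush

	statsStore := stats.NewStore(sink, false)

	// Same wiring as server_impl.go, but driven manually instead of via AddStatGenerator:
	// the generator copies freecache counters into gauges under the "localcache" scope.
	localCacheStats := redis.NewLocalCacheStats(localCache, statsStore.Scope("localcache"))

	// Touch the cache: one miss, then a set and a hit.
	localCache.Get([]byte("key"))                      // miss
	localCache.Set([]byte("key"), []byte("value"), 10) // entry with a 10s TTL
	localCache.Get([]byte("key"))                      // hit

	// Mirror testLocalCacheStats: populate the gauges, flush them into the sink, inspect the result.
	localCacheStats.GenerateStats()
	statsStore.Flush()
	for name, value := range sink.Record {
		fmt.Println(name, value)
	}
}

In the server itself the generator is instead registered with ret.store.AddStatGenerator(...), as shown in the server_impl.go hunk above, so the store can refresh these gauges as part of its normal flush cycle rather than through an explicit GenerateStats call.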