diff --git a/pkg/store/cache.go b/pkg/store/cache.go index 16cad121f5..5acd2c104c 100644 --- a/pkg/store/cache.go +++ b/pkg/store/cache.go @@ -43,6 +43,7 @@ type indexCache struct { added *prometheus.CounterVec current *prometheus.GaugeVec currentSize *prometheus.GaugeVec + overflow *prometheus.CounterVec } // newIndexCache creates a new LRU cache for index entries and ensures the total cache @@ -66,6 +67,11 @@ func newIndexCache(reg prometheus.Registerer, maxBytes uint64) (*indexCache, err Help: "Total number of requests to the cache.", }, []string{"item_type"}) + c.overflow = prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "thanos_store_index_cache_items_overflowed_total", + Help: "Total number of items that could not be added to the cache due to being too big.", + }, []string{"item_type"}) + c.hits = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "thanos_store_index_cache_hits_total", Help: "Total number of requests to the cache that were a hit.", @@ -115,10 +121,16 @@ func newIndexCache(reg prometheus.Registerer, maxBytes uint64) (*indexCache, err return c, nil } -func (c *indexCache) ensureFits(b []byte) { +// ensureFits tries to make sure that the passed slice will fit into the LRU cache. +// Returns true if it will fit. +func (c *indexCache) ensureFits(b []byte) bool { + if uint64(len(b)) > c.maxSize { + return false + } for c.curSize+uint64(len(b)) > c.maxSize { c.lru.RemoveOldest() } + return true } func (c *indexCache) setPostings(b ulid.ULID, l labels.Label, v []byte) { @@ -127,7 +139,10 @@ func (c *indexCache) setPostings(b ulid.ULID, l labels.Label, v []byte) { c.mtx.Lock() defer c.mtx.Unlock() - c.ensureFits(v) + if !c.ensureFits(v) { + c.overflow.WithLabelValues(cacheTypePostings).Inc() + return + } // The caller may be passing in a sub-slice of a huge array. Copy the data // to ensure we don't waste huge amounts of space for something small. 
@@ -158,7 +173,10 @@ func (c *indexCache) setSeries(b ulid.ULID, id uint64, v []byte) { c.mtx.Lock() defer c.mtx.Unlock() - c.ensureFits(v) + if !c.ensureFits(v) { + c.overflow.WithLabelValues(cacheTypeSeries).Inc() + return + } // The caller may be passing in a sub-slice of a huge array. Copy the data // to ensure we don't waste huge amounts of space for something small. diff --git a/pkg/store/cache_test.go b/pkg/store/cache_test.go new file mode 100644 index 0000000000..2846513a0f --- /dev/null +++ b/pkg/store/cache_test.go @@ -0,0 +1,22 @@ +// Tests out the index cache implementation. +package store + +import ( + "testing" + + "github.com/improbable-eng/thanos/pkg/testutil" + "github.com/prometheus/client_golang/prometheus" +) + +// TestIndexCacheEdge tests the index cache edge cases. +func TestIndexCacheEdge(t *testing.T) { + metrics := prometheus.NewRegistry() + cache, err := newIndexCache(metrics, 1) + testutil.Ok(t, err) + + fits := cache.ensureFits([]byte{42, 24}) + testutil.Equals(t, false, fits) + + fits = cache.ensureFits([]byte{42}) + testutil.Equals(t, true, fits) +}