Skip to content

Commit

Permalink
Get hasher from pool in the caller and pass it in, instead of inside the helper function
Browse files Browse the repository at this point in the history
Signed-off-by: Pedro Tanaka <[email protected]>
  • Loading branch information
pedro-stanaka committed Sep 22, 2022
1 parent 2701f11 commit b82dbed
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 11 deletions.
20 changes: 10 additions & 10 deletions pkg/store/bucket.go
Original file line number Diff line number Diff line change
Expand Up @@ -885,12 +885,15 @@ func blockSeries(
}

func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Aggr, save func([]byte) ([]byte, error), calculateChecksum bool) error {
hasher := hashPool.Get().(hash.Hash64)
defer hashPool.Put(hasher)

if in.Encoding() == chunkenc.EncXOR {
b, err := save(in.Bytes())
if err != nil {
return err
}
out.Raw = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(b, calculateChecksum)}
out.Raw = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(hasher, b, calculateChecksum)}
return nil
}
if in.Encoding() != downsample.ChunkEncAggr {
Expand All @@ -910,7 +913,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag
if err != nil {
return err
}
out.Count = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(b, calculateChecksum)}
out.Count = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(hasher, b, calculateChecksum)}
case storepb.Aggr_SUM:
x, err := ac.Get(downsample.AggrSum)
if err != nil {
Expand All @@ -920,7 +923,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag
if err != nil {
return err
}
out.Sum = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(b, calculateChecksum)}
out.Sum = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(hasher, b, calculateChecksum)}
case storepb.Aggr_MIN:
x, err := ac.Get(downsample.AggrMin)
if err != nil {
Expand All @@ -930,7 +933,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag
if err != nil {
return err
}
out.Min = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(b, calculateChecksum)}
out.Min = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(hasher, b, calculateChecksum)}
case storepb.Aggr_MAX:
x, err := ac.Get(downsample.AggrMax)
if err != nil {
Expand All @@ -940,7 +943,7 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag
if err != nil {
return err
}
out.Max = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(b, calculateChecksum)}
out.Max = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(hasher, b, calculateChecksum)}
case storepb.Aggr_COUNTER:
x, err := ac.Get(downsample.AggrCounter)
if err != nil {
Expand All @@ -950,16 +953,13 @@ func populateChunk(out *storepb.AggrChunk, in chunkenc.Chunk, aggrs []storepb.Ag
if err != nil {
return err
}
out.Counter = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(b, calculateChecksum)}
out.Counter = &storepb.Chunk{Type: storepb.Chunk_XOR, Data: b, Hash: hashChunk(hasher, b, calculateChecksum)}
}
}
return nil
}

func hashChunk(b []byte, doHash bool) uint64 {
hasher := hashPool.Get().(hash.Hash64)
defer hashPool.Put(hasher)

func hashChunk(hasher hash.Hash64, b []byte, doHash bool) uint64 {
if !doHash {
return 0
}
Expand Down
5 changes: 4 additions & 1 deletion pkg/store/tsdb.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ package store

import (
"context"
"hash"
"io"
"math"
"sort"
Expand Down Expand Up @@ -160,6 +161,8 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer

shardMatcher := r.ShardInfo.Matcher(&s.buffers)
defer shardMatcher.Close()
hasher := hashPool.Get().(hash.Hash64)
defer hashPool.Put(hasher)
// Stream at most one series per frame; series may be split over multiple frames according to maxBytesInFrame.
for set.Next() {
series := set.At()
Expand Down Expand Up @@ -197,7 +200,7 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer
Raw: &storepb.Chunk{
Type: storepb.Chunk_Encoding(chk.Chunk.Encoding() - 1), // Proto chunk encoding is one off to TSDB one.
Data: chk.Chunk.Bytes(),
Hash: hashChunk(chk.Chunk.Bytes(), r.CalculateChunkChecksums),
Hash: hashChunk(hasher, chk.Chunk.Bytes(), r.CalculateChunkChecksums),
},
}
frameBytesLeft -= c.Size()
Expand Down

0 comments on commit b82dbed

Please sign in to comment.