From fa4e1c9f7d5a18695ea46250becdd6baa0939598 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 6 Jun 2023 20:16:51 +0530 Subject: [PATCH 01/75] Update protos for streaming Signed-off-by: Ganesh Vernekar --- pkg/storegateway/storepb/custom.go | 17 + pkg/storegateway/storepb/rpc.pb.go | 430 +++++++++++++++++--- pkg/storegateway/storepb/rpc.proto | 9 + pkg/storegateway/storepb/types.pb.go | 584 +++++++++++++++++++++++++-- pkg/storegateway/storepb/types.proto | 9 + 5 files changed, 957 insertions(+), 92 deletions(-) diff --git a/pkg/storegateway/storepb/custom.go b/pkg/storegateway/storepb/custom.go index bf3eb29f2ab..90b82985e33 100644 --- a/pkg/storegateway/storepb/custom.go +++ b/pkg/storegateway/storepb/custom.go @@ -39,6 +39,23 @@ func NewStatsResponse(indexBytesFetched int) *SeriesResponse { } } +func NewStreamSeriesResponse(series *StreamSeriesBatch, endOfStream bool) *SeriesResponse { + return &SeriesResponse{ + Result: &SeriesResponse_StreamingSeries{ + StreamingSeries: series, + }, + IsEndOfSeriesStream: endOfStream, + } +} + +func NewStreamSeriesChunksResponse(series *StreamSeriesChunks) *SeriesResponse { + return &SeriesResponse{ + Result: &SeriesResponse_StreamingSeriesChunks{ + StreamingSeriesChunks: series, + }, + } +} + type emptySeriesSet struct{} func (emptySeriesSet) Next() bool { return false } diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index 2c6c900684a..4b5ee4eddae 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -40,6 +40,8 @@ type SeriesRequest struct { // The content of this field and whether it's supported depends on the // implementation of a specific store. Hints *types.Any `protobuf:"bytes,9,opt,name=hints,proto3" json:"hints,omitempty"` + // Why 100? TBD, it is just copied from ingesters and need to see if we still need 100 here. + StreamingChunksBatchSize uint64 `protobuf:"varint,100,opt,name=streaming_chunks_batch_size,json=streamingChunksBatchSize,proto3" json:"streaming_chunks_batch_size,omitempty"` } func (m *SeriesRequest) Reset() { *m = SeriesRequest{} } @@ -117,7 +119,10 @@ type SeriesResponse struct { // *SeriesResponse_Warning // *SeriesResponse_Hints // *SeriesResponse_Stats - Result isSeriesResponse_Result `protobuf_oneof:"result"` + // *SeriesResponse_StreamingSeries + // *SeriesResponse_StreamingSeriesChunks + Result isSeriesResponse_Result `protobuf_oneof:"result"` + IsEndOfSeriesStream bool `protobuf:"varint,7,opt,name=is_end_of_series_stream,json=isEndOfSeriesStream,proto3" json:"is_end_of_series_stream,omitempty"` } func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } @@ -171,11 +176,19 @@ type SeriesResponse_Hints struct { type SeriesResponse_Stats struct { Stats *Stats `protobuf:"bytes,4,opt,name=stats,proto3,oneof"` } +type SeriesResponse_StreamingSeries struct { + StreamingSeries *StreamSeriesBatch `protobuf:"bytes,5,opt,name=streaming_series,json=streamingSeries,proto3,oneof"` +} +type SeriesResponse_StreamingSeriesChunks struct { + StreamingSeriesChunks *StreamSeriesChunks `protobuf:"bytes,6,opt,name=streaming_series_chunks,json=streamingSeriesChunks,proto3,oneof"` +} -func (*SeriesResponse_Series) isSeriesResponse_Result() {} -func (*SeriesResponse_Warning) isSeriesResponse_Result() {} -func (*SeriesResponse_Hints) isSeriesResponse_Result() {} -func (*SeriesResponse_Stats) isSeriesResponse_Result() {} +func (*SeriesResponse_Series) isSeriesResponse_Result() {} +func (*SeriesResponse_Warning) isSeriesResponse_Result() {} +func (*SeriesResponse_Hints) isSeriesResponse_Result() {} +func (*SeriesResponse_Stats) isSeriesResponse_Result() {} +func (*SeriesResponse_StreamingSeries) isSeriesResponse_Result() {} +func (*SeriesResponse_StreamingSeriesChunks) isSeriesResponse_Result() {} func (m *SeriesResponse) GetResult() isSeriesResponse_Result { if m != nil { @@ -212,6 +225,20 @@ func (m *SeriesResponse) GetStats() *Stats { return nil } +func (m *SeriesResponse) GetStreamingSeries() *StreamSeriesBatch { + if x, ok := m.GetResult().(*SeriesResponse_StreamingSeries); ok { + return x.StreamingSeries + } + return nil +} + +func (m *SeriesResponse) GetStreamingSeriesChunks() *StreamSeriesChunks { + if x, ok := m.GetResult().(*SeriesResponse_StreamingSeriesChunks); ok { + return x.StreamingSeriesChunks + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*SeriesResponse) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -219,6 +246,8 @@ func (*SeriesResponse) XXX_OneofWrappers() []interface{} { (*SeriesResponse_Warning)(nil), (*SeriesResponse_Hints)(nil), (*SeriesResponse_Stats)(nil), + (*SeriesResponse_StreamingSeries)(nil), + (*SeriesResponse_StreamingSeriesChunks)(nil), } } @@ -402,51 +431,58 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 692 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0x31, 0x6f, 0xd3, 0x40, - 0x14, 0xc7, 0x7d, 0xf1, 0xd9, 0xb9, 0x5c, 0x9a, 0xca, 0xbd, 0x96, 0xca, 0x35, 0xd2, 0x35, 0xb2, - 0x84, 0x14, 0x21, 0x70, 0x51, 0x91, 0x40, 0x8c, 0x4d, 0x25, 0x54, 0x2c, 0x60, 0x70, 0x11, 0x03, - 0x4b, 0xe4, 0xa4, 0xd7, 0xc4, 0x6a, 0x62, 0x07, 0x9f, 0x0d, 0xcd, 0xc6, 0x47, 0xe0, 0x63, 0x20, - 0x31, 0xf3, 0x05, 0x98, 0xba, 0xd1, 0xb1, 0x13, 0x22, 0xee, 0xc2, 0xd8, 0x99, 0x09, 0xf9, 0xce, - 0x69, 0x6a, 0x14, 0x54, 0x2a, 0xb1, 0xe5, 0xfd, 0xff, 0x2f, 0x77, 0xef, 0xff, 0x7b, 0x67, 0x5c, - 0x8b, 0xc7, 0x3d, 0x67, 0x1c, 0x47, 0x49, 0x44, 0xf4, 0x64, 0xe0, 0x87, 0x11, 0xb7, 0xea, 0xc9, - 0x64, 0xcc, 0xb8, 0x14, 0xad, 0xfb, 0xfd, 0x20, 0x19, 0xa4, 0x5d, 0xa7, 0x17, 0x8d, 0xb6, 0xfa, - 0x51, 0x3f, 0xda, 0x12, 0x72, 0x37, 0x3d, 0x14, 0x95, 0x28, 0xc4, 0xaf, 0xa2, 0x7d, 0xa3, 0x1f, - 0x45, 0xfd, 0x21, 0x9b, 0x77, 0xf9, 0xe1, 0x44, 0x5a, 0xf6, 0x2f, 0x80, 0x1b, 0xfb, 0x2c, 0x0e, - 0x18, 0xf7, 0xd8, 0xdb, 0x94, 0xf1, 0x84, 0x6c, 0x60, 0x34, 0x0a, 0xc2, 0x4e, 0x12, 0x8c, 0x98, - 0x09, 0x9a, 0xa0, 0xa5, 0x7a, 0xd5, 0x51, 0x10, 0xbe, 0x0a, 0x46, 0x4c, 0x58, 0xfe, 0xb1, 0xb4, - 0x2a, 0x85, 0xe5, 0x1f, 0x0b, 0xeb, 0x51, 0x6e, 0x25, 0xbd, 0x01, 0x8b, 0xb9, 0xa9, 0x36, 0xd5, - 0x56, 0x7d, 0x7b, 0xcd, 0x91, 0x93, 0x3b, 0xcf, 0xfd, 0x2e, 0x1b, 0xbe, 0x90, 0x66, 0x1b, 0x9e, - 0x7c, 0xdf, 0x54, 0xbc, 0xcb, 0x5e, 0xb2, 0x89, 0xeb, 0xfc, 0x28, 0x18, 0x77, 0x7a, 0x83, 0x34, - 0x3c, 0xe2, 0x26, 0x6a, 0x82, 0x16, 0xf2, 0x70, 0x2e, 0xed, 0x0a, 0x85, 0xdc, 0xc5, 0xda, 0x20, - 0x08, 0x13, 0x6e, 0xd6, 0x9a, 0x40, 0x9c, 0x2a, 0xb3, 0x38, 0xb3, 0x2c, 0xce, 0x4e, 0x38, 0xf1, - 0x64, 0x8b, 0x0b, 0x11, 0x34, 0x34, 0x17, 0x22, 0xcd, 0xd0, 0x5d, 0x88, 0x74, 0xa3, 0xea, 0x42, - 0x54, 0x35, 0x90, 0x0b, 0x11, 0x36, 0xea, 0x2e, 0x44, 0x75, 0x63, 0xc9, 0x85, 0x68, 0xc9, 0x68, - 0xb8, 0x10, 0x35, 0x8c, 0x65, 0xfb, 0x31, 0xd6, 0xf6, 0x13, 0x3f, 0xe1, 0xc4, 0xc1, 0xab, 0x87, - 0x2c, 0x9f, 0xe8, 0xa0, 0x13, 0x84, 0x07, 0xec, 0xb8, 0xd3, 0x9d, 0x24, 0x8c, 0x8b, 0xf8, 0xd0, - 0x5b, 0x29, 0xac, 0x67, 0xb9, 0xd3, 0xce, 0x0d, 0xfb, 0x0b, 0xc0, 0xcb, 0x33, 0x6a, 0x7c, 0x1c, - 0x85, 0x9c, 0x91, 0x16, 0xd6, 0xb9, 0x50, 0xc4, 0xbf, 0xea, 0xdb, 0xcb, 0xb3, 0xf8, 0xb2, 0x6f, - 0x4f, 0xf1, 0x0a, 0x9f, 0x58, 0xb8, 0xfa, 0xde, 0x8f, 0xc3, 0x20, 0xec, 0x0b, 0x88, 0xb5, 0x3d, - 0xc5, 0x9b, 0x09, 0xe4, 0xde, 0x2c, 0xad, 0xfa, 0xf7, 0xb4, 0x7b, 0x4a, 0x91, 0x97, 0xdc, 0xc1, - 0x1a, 0xcf, 0xe7, 0x37, 0xa1, 0xe8, 0x6e, 0x5c, 0x5e, 0x99, 0x8b, 0x79, 0x9b, 0x70, 0xdb, 0x08, - 0xeb, 0x31, 0xe3, 0xe9, 0x30, 0xb1, 0x3f, 0x03, 0xbc, 0x22, 0xd6, 0xf1, 0xd2, 0x1f, 0xcd, 0x37, - 0xbe, 0x26, 0x8e, 0x89, 0x13, 0x71, 0xa9, 0xea, 0xc9, 0x82, 0x18, 0x58, 0x65, 0xe1, 0x81, 0x38, - 0x5a, 0xf5, 0xf2, 0x9f, 0xf3, 0x55, 0x68, 0xd7, 0xae, 0xa2, 0xf4, 0x1e, 0xf4, 0x7f, 0x7f, 0x0f, - 0x2e, 0x44, 0xc0, 0xa8, 0xb8, 0x10, 0x55, 0x0c, 0xd5, 0x8e, 0x31, 0xb9, 0x3a, 0x6c, 0x01, 0x7a, - 0x0d, 0x6b, 0x61, 0x2e, 0x98, 0xa0, 0xa9, 0xb6, 0x6a, 0x9e, 0x2c, 0x88, 0x85, 0x51, 0xc1, 0x90, - 0x9b, 0x15, 0x61, 0x5c, 0xd6, 0xf3, 0xb9, 0xd5, 0x6b, 0xe7, 0xb6, 0xbf, 0x82, 0xe2, 0xd2, 0xd7, - 0xfe, 0x30, 0x2d, 0x21, 0x1a, 0xe6, 0xaa, 0x58, 0x6e, 0xcd, 0x93, 0xc5, 0x1c, 0x1c, 0x5c, 0x00, - 0x4e, 0x5b, 0x00, 0x4e, 0xbf, 0x19, 0xb8, 0xea, 0x8d, 0xc0, 0x55, 0x0c, 0xd5, 0x85, 0x48, 0x35, - 0xa0, 0x9d, 0xe2, 0xd5, 0x52, 0x86, 0x82, 0xdc, 0x3a, 0xd6, 0xdf, 0x09, 0xa5, 0x40, 0x57, 0x54, - 0xff, 0x8b, 0xdd, 0xf6, 0x37, 0x90, 0x7f, 0x4f, 0x51, 0xcc, 0xc8, 0x13, 0xac, 0xcb, 0x67, 0x4f, - 0x6e, 0x95, 0x3f, 0x83, 0x82, 0xa7, 0xb5, 0xfe, 0xa7, 0x2c, 0x47, 0x7c, 0x00, 0xc8, 0x2e, 0xc6, - 0xf3, 0xa5, 0x93, 0x8d, 0x52, 0xf6, 0xab, 0xaf, 0xd6, 0xb2, 0x16, 0x59, 0x45, 0xd2, 0xa7, 0xb8, - 0x7e, 0x05, 0x00, 0x29, 0xb7, 0x96, 0x36, 0x6b, 0xdd, 0x5e, 0xe8, 0xc9, 0x73, 0xda, 0x3b, 0x27, - 0x53, 0xaa, 0x9c, 0x4e, 0xa9, 0x72, 0x36, 0xa5, 0xca, 0xc5, 0x94, 0x82, 0x0f, 0x19, 0x05, 0x9f, - 0x32, 0x0a, 0x4e, 0x32, 0x0a, 0x4e, 0x33, 0x0a, 0x7e, 0x64, 0x14, 0xfc, 0xcc, 0xa8, 0x72, 0x91, - 0x51, 0xf0, 0xf1, 0x9c, 0x2a, 0xa7, 0xe7, 0x54, 0x39, 0x3b, 0xa7, 0xca, 0x9b, 0x2a, 0xcf, 0x41, - 0x8c, 0xbb, 0x5d, 0x5d, 0x90, 0x7a, 0xf8, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x55, 0x20, 0x90, - 0xd3, 0x05, 0x00, 0x00, + // 814 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xc7, 0x3d, 0xf1, 0xd8, 0x99, 0x4c, 0x2e, 0x8b, 0x6f, 0x76, 0xef, 0xce, 0xf1, 0x49, 0xbe, + 0x28, 0x12, 0x52, 0x84, 0x20, 0x87, 0x16, 0x04, 0xa2, 0xa0, 0xb8, 0x9c, 0x38, 0x05, 0x8b, 0x1f, + 0x92, 0xf7, 0x44, 0x41, 0x63, 0x39, 0xc9, 0x24, 0xb1, 0x2e, 0x19, 0x07, 0x8f, 0x03, 0x9b, 0xab, + 0x68, 0xe9, 0xf8, 0x33, 0x90, 0xf8, 0x0b, 0x68, 0xa9, 0xb6, 0x63, 0xcb, 0xab, 0x10, 0xc9, 0x36, + 0x94, 0xfb, 0x27, 0xa0, 0xf9, 0xe1, 0x38, 0x59, 0x05, 0x2d, 0x2b, 0xd1, 0xf9, 0x7d, 0xbf, 0x6f, + 0xde, 0xbc, 0xf9, 0xcc, 0x1b, 0xe3, 0x5a, 0xb6, 0x18, 0x76, 0x17, 0x59, 0x9a, 0xa7, 0xc4, 0xce, + 0xa7, 0x31, 0x4b, 0xb9, 0x57, 0xcf, 0x57, 0x0b, 0xca, 0x95, 0xe8, 0xbd, 0x37, 0x49, 0xf2, 0xe9, + 0x72, 0xd0, 0x1d, 0xa6, 0xf3, 0xa7, 0x93, 0x74, 0x92, 0x3e, 0x95, 0xf2, 0x60, 0x39, 0x96, 0x91, + 0x0c, 0xe4, 0x97, 0x4e, 0x6f, 0x4e, 0xd2, 0x74, 0x32, 0xa3, 0x65, 0x56, 0xcc, 0x56, 0xca, 0x6a, + 0xff, 0x56, 0xc1, 0x8d, 0x33, 0x9a, 0x25, 0x94, 0x87, 0xf4, 0xbb, 0x25, 0xe5, 0x39, 0x69, 0x62, + 0x34, 0x4f, 0x58, 0x94, 0x27, 0x73, 0xea, 0x82, 0x16, 0xe8, 0x98, 0x61, 0x75, 0x9e, 0xb0, 0x97, + 0xc9, 0x9c, 0x4a, 0x2b, 0x3e, 0x57, 0x56, 0x45, 0x5b, 0xf1, 0xb9, 0xb4, 0x3e, 0x12, 0x56, 0x3e, + 0x9c, 0xd2, 0x8c, 0xbb, 0x66, 0xcb, 0xec, 0xd4, 0x4f, 0x4f, 0xba, 0xaa, 0xf3, 0xee, 0x17, 0xf1, + 0x80, 0xce, 0xbe, 0x54, 0x66, 0x0f, 0x5e, 0xfc, 0xf9, 0xc4, 0x08, 0xb7, 0xb9, 0xe4, 0x09, 0xae, + 0xf3, 0x57, 0xc9, 0x22, 0x1a, 0x4e, 0x97, 0xec, 0x15, 0x77, 0x51, 0x0b, 0x74, 0x50, 0x88, 0x85, + 0xf4, 0x5c, 0x2a, 0xe4, 0x1d, 0x6c, 0x4d, 0x13, 0x96, 0x73, 0xb7, 0xd6, 0x02, 0xb2, 0xaa, 0x3a, + 0x4b, 0xb7, 0x38, 0x4b, 0xf7, 0x19, 0x5b, 0x85, 0x2a, 0x85, 0x7c, 0x8a, 0x1f, 0xf3, 0x3c, 0xa3, + 0xf1, 0x3c, 0x61, 0x13, 0x5d, 0x31, 0x1a, 0x88, 0x9d, 0x22, 0x9e, 0xbc, 0xa6, 0xee, 0xa8, 0x05, + 0x3a, 0x30, 0x74, 0xb7, 0x29, 0x6a, 0x87, 0x9e, 0x48, 0x38, 0x4b, 0x5e, 0xd3, 0x00, 0x22, 0xe8, + 0x58, 0x01, 0x44, 0x96, 0x63, 0x07, 0x10, 0xd9, 0x4e, 0x35, 0x80, 0xa8, 0xea, 0xa0, 0x00, 0x22, + 0xec, 0xd4, 0x03, 0x88, 0xea, 0xce, 0xbd, 0x00, 0xa2, 0x7b, 0x4e, 0x23, 0x80, 0xa8, 0xe1, 0x1c, + 0xb5, 0x3f, 0xc6, 0xd6, 0x59, 0x1e, 0xe7, 0x9c, 0x74, 0xf1, 0xf1, 0x98, 0x8a, 0x03, 0x8d, 0xa2, + 0x84, 0x8d, 0xe8, 0x79, 0x34, 0x58, 0xe5, 0x94, 0x4b, 0x7a, 0x30, 0xbc, 0xaf, 0xad, 0xcf, 0x85, + 0xd3, 0x13, 0x46, 0xfb, 0x27, 0x13, 0x1f, 0x15, 0xd0, 0xf9, 0x22, 0x65, 0x9c, 0x92, 0x0e, 0xb6, + 0xb9, 0x54, 0xe4, 0xaa, 0xfa, 0xe9, 0x51, 0x41, 0x4f, 0xe5, 0xf5, 0x8d, 0x50, 0xfb, 0xc4, 0xc3, + 0xd5, 0x1f, 0xe2, 0x8c, 0x25, 0x6c, 0x22, 0xef, 0xa0, 0xd6, 0x37, 0xc2, 0x42, 0x20, 0xef, 0x16, + 0xb0, 0xcc, 0x7f, 0x87, 0xd5, 0x37, 0x0a, 0x5c, 0x6f, 0x63, 0x8b, 0x8b, 0xfe, 0x5d, 0x28, 0xb3, + 0x1b, 0xdb, 0x2d, 0x85, 0x28, 0xd2, 0xa4, 0x4b, 0x5e, 0x60, 0xa7, 0xa4, 0xaa, 0x9b, 0xb4, 0xe4, + 0x8a, 0x66, 0xb9, 0x42, 0xf8, 0xaa, 0x55, 0xc9, 0xb3, 0x6f, 0x84, 0x6f, 0x6d, 0x17, 0x29, 0x9d, + 0xbc, 0xc4, 0x8f, 0x6e, 0xd6, 0x29, 0xae, 0xdd, 0x96, 0xe5, 0xbc, 0x43, 0xe5, 0xd4, 0x25, 0xf5, + 0x8d, 0xf0, 0xc1, 0x8d, 0x7a, 0x7a, 0x3e, 0x3e, 0xc4, 0x8f, 0x12, 0x1e, 0x51, 0x36, 0x8a, 0xd2, + 0x71, 0x51, 0x55, 0x65, 0xba, 0x55, 0x39, 0x4c, 0xc7, 0x09, 0xff, 0x8c, 0x8d, 0xbe, 0x1e, 0xab, + 0x55, 0xaa, 0x74, 0x0f, 0x61, 0x3b, 0xa3, 0x7c, 0x39, 0xcb, 0xdb, 0xbf, 0x02, 0x7c, 0x5f, 0x4e, + 0xe8, 0x57, 0xf1, 0xbc, 0x7c, 0x04, 0x27, 0x12, 0x4d, 0x96, 0x4b, 0x90, 0x66, 0xa8, 0x02, 0xe2, + 0x60, 0x93, 0xb2, 0x91, 0xc4, 0x65, 0x86, 0xe2, 0xb3, 0x9c, 0x4e, 0xeb, 0xf6, 0xe9, 0xdc, 0x7d, + 0x22, 0xf6, 0x7f, 0x7f, 0x22, 0x01, 0x44, 0xc0, 0xa9, 0x04, 0x10, 0x55, 0x1c, 0xb3, 0x9d, 0x61, + 0xb2, 0xdb, 0xac, 0x1e, 0x9e, 0x13, 0x6c, 0x31, 0x21, 0xb8, 0xa0, 0x65, 0x76, 0x6a, 0xa1, 0x0a, + 0x88, 0x87, 0x91, 0x9e, 0x0b, 0xee, 0x56, 0xa4, 0xb1, 0x8d, 0xcb, 0xbe, 0xcd, 0x5b, 0xfb, 0x6e, + 0xff, 0x0e, 0xf4, 0xa6, 0xdf, 0xc4, 0xb3, 0xe5, 0x1e, 0xa2, 0x99, 0x50, 0xe5, 0xc0, 0xd6, 0x42, + 0x15, 0x94, 0xe0, 0xe0, 0x01, 0x70, 0xd6, 0x01, 0x70, 0xf6, 0xdd, 0xc0, 0x55, 0xef, 0x04, 0xae, + 0xe2, 0x98, 0x01, 0x44, 0xa6, 0x03, 0xdb, 0x4b, 0x7c, 0xbc, 0x77, 0x06, 0x4d, 0xee, 0x21, 0xb6, + 0xbf, 0x97, 0x8a, 0x46, 0xa7, 0xa3, 0xff, 0x8b, 0xdd, 0xe9, 0x1f, 0x40, 0xfc, 0x23, 0xd2, 0x8c, + 0x92, 0x4f, 0xb0, 0xad, 0xdf, 0xc1, 0x83, 0xfd, 0xa7, 0xad, 0x79, 0x7a, 0x0f, 0x6f, 0xca, 0xaa, + 0xc5, 0xf7, 0x01, 0x79, 0x8e, 0x71, 0x79, 0xe9, 0xa4, 0xb9, 0x77, 0xf6, 0xdd, 0xa9, 0xf5, 0xbc, + 0x43, 0x96, 0x3e, 0xe9, 0x0b, 0x5c, 0xdf, 0x01, 0x40, 0xf6, 0x53, 0xf7, 0x6e, 0xd6, 0x7b, 0x7c, + 0xd0, 0x53, 0x75, 0x7a, 0xcf, 0x2e, 0xd6, 0xbe, 0x71, 0xb9, 0xf6, 0x8d, 0x37, 0x6b, 0xdf, 0xb8, + 0x5e, 0xfb, 0xe0, 0xc7, 0x8d, 0x0f, 0x7e, 0xd9, 0xf8, 0xe0, 0x62, 0xe3, 0x83, 0xcb, 0x8d, 0x0f, + 0xfe, 0xda, 0xf8, 0xe0, 0xef, 0x8d, 0x6f, 0x5c, 0x6f, 0x7c, 0xf0, 0xf3, 0x95, 0x6f, 0x5c, 0x5e, + 0xf9, 0xc6, 0x9b, 0x2b, 0xdf, 0xf8, 0xb6, 0xca, 0x05, 0x88, 0xc5, 0x60, 0x60, 0x4b, 0x52, 0x1f, + 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xa4, 0x20, 0xe0, 0xe6, 0x06, 0x00, 0x00, } func (this *SeriesRequest) Equal(that interface{}) bool { @@ -488,6 +524,9 @@ func (this *SeriesRequest) Equal(that interface{}) bool { if !this.Hints.Equal(that1.Hints) { return false } + if this.StreamingChunksBatchSize != that1.StreamingChunksBatchSize { + return false + } return true } func (this *Stats) Equal(that interface{}) bool { @@ -542,6 +581,9 @@ func (this *SeriesResponse) Equal(that interface{}) bool { } else if !this.Result.Equal(that1.Result) { return false } + if this.IsEndOfSeriesStream != that1.IsEndOfSeriesStream { + return false + } return true } func (this *SeriesResponse_Series) Equal(that interface{}) bool { @@ -640,6 +682,54 @@ func (this *SeriesResponse_Stats) Equal(that interface{}) bool { } return true } +func (this *SeriesResponse_StreamingSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_StreamingSeries) + if !ok { + that2, ok := that.(SeriesResponse_StreamingSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamingSeries.Equal(that1.StreamingSeries) { + return false + } + return true +} +func (this *SeriesResponse_StreamingSeriesChunks) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SeriesResponse_StreamingSeriesChunks) + if !ok { + that2, ok := that.(SeriesResponse_StreamingSeriesChunks) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StreamingSeriesChunks.Equal(that1.StreamingSeriesChunks) { + return false + } + return true +} func (this *LabelNamesRequest) Equal(that interface{}) bool { if that == nil { return this == nil @@ -803,7 +893,7 @@ func (this *SeriesRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&storepb.SeriesRequest{") s = append(s, "MinTime: "+fmt.Sprintf("%#v", this.MinTime)+",\n") s = append(s, "MaxTime: "+fmt.Sprintf("%#v", this.MaxTime)+",\n") @@ -818,6 +908,7 @@ func (this *SeriesRequest) GoString() string { if this.Hints != nil { s = append(s, "Hints: "+fmt.Sprintf("%#v", this.Hints)+",\n") } + s = append(s, "StreamingChunksBatchSize: "+fmt.Sprintf("%#v", this.StreamingChunksBatchSize)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -835,11 +926,12 @@ func (this *SeriesResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 11) s = append(s, "&storepb.SeriesResponse{") if this.Result != nil { s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") } + s = append(s, "IsEndOfSeriesStream: "+fmt.Sprintf("%#v", this.IsEndOfSeriesStream)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -875,6 +967,22 @@ func (this *SeriesResponse_Stats) GoString() string { `Stats:` + fmt.Sprintf("%#v", this.Stats) + `}`}, ", ") return s } +func (this *SeriesResponse_StreamingSeries) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&storepb.SeriesResponse_StreamingSeries{` + + `StreamingSeries:` + fmt.Sprintf("%#v", this.StreamingSeries) + `}`}, ", ") + return s +} +func (this *SeriesResponse_StreamingSeriesChunks) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&storepb.SeriesResponse_StreamingSeriesChunks{` + + `StreamingSeriesChunks:` + fmt.Sprintf("%#v", this.StreamingSeriesChunks) + `}`}, ", ") + return s +} func (this *LabelNamesRequest) GoString() string { if this == nil { return "nil" @@ -1177,6 +1285,13 @@ func (m *SeriesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StreamingChunksBatchSize != 0 { + i = encodeVarintRpc(dAtA, i, uint64(m.StreamingChunksBatchSize)) + i-- + dAtA[i] = 0x6 + i-- + dAtA[i] = 0xa0 + } if m.Hints != nil { { size, err := m.Hints.MarshalToSizedBuffer(dAtA[:i]) @@ -1274,6 +1389,16 @@ func (m *SeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.IsEndOfSeriesStream { + i-- + if m.IsEndOfSeriesStream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } if m.Result != nil { { size := m.Result.Size() @@ -1359,6 +1484,46 @@ func (m *SeriesResponse_Stats) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *SeriesResponse_StreamingSeries) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *SeriesResponse_StreamingSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingSeries != nil { + { + size, err := m.StreamingSeries.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + return len(dAtA) - i, nil +} +func (m *SeriesResponse_StreamingSeriesChunks) MarshalTo(dAtA []byte) (int, error) { + return m.MarshalToSizedBuffer(dAtA[:m.Size()]) +} + +func (m *SeriesResponse_StreamingSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.StreamingSeriesChunks != nil { + { + size, err := m.StreamingSeriesChunks.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintRpc(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} func (m *LabelNamesRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1626,6 +1791,9 @@ func (m *SeriesRequest) Size() (n int) { l = m.Hints.Size() n += 1 + l + sovRpc(uint64(l)) } + if m.StreamingChunksBatchSize != 0 { + n += 2 + sovRpc(uint64(m.StreamingChunksBatchSize)) + } return n } @@ -1650,6 +1818,9 @@ func (m *SeriesResponse) Size() (n int) { if m.Result != nil { n += m.Result.Size() } + if m.IsEndOfSeriesStream { + n += 2 + } return n } @@ -1699,6 +1870,30 @@ func (m *SeriesResponse_Stats) Size() (n int) { } return n } +func (m *SeriesResponse_StreamingSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingSeries != nil { + l = m.StreamingSeries.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} +func (m *SeriesResponse_StreamingSeriesChunks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.StreamingSeriesChunks != nil { + l = m.StreamingSeriesChunks.Size() + n += 1 + l + sovRpc(uint64(l)) + } + return n +} func (m *LabelNamesRequest) Size() (n int) { if m == nil { return 0 @@ -1824,6 +2019,7 @@ func (this *SeriesRequest) String() string { `Matchers:` + repeatedStringForMatchers + `,`, `SkipChunks:` + fmt.Sprintf("%v", this.SkipChunks) + `,`, `Hints:` + strings.Replace(fmt.Sprintf("%v", this.Hints), "Any", "types.Any", 1) + `,`, + `StreamingChunksBatchSize:` + fmt.Sprintf("%v", this.StreamingChunksBatchSize) + `,`, `}`, }, "") return s @@ -1844,6 +2040,7 @@ func (this *SeriesResponse) String() string { } s := strings.Join([]string{`&SeriesResponse{`, `Result:` + fmt.Sprintf("%v", this.Result) + `,`, + `IsEndOfSeriesStream:` + fmt.Sprintf("%v", this.IsEndOfSeriesStream) + `,`, `}`, }, "") return s @@ -1888,6 +2085,26 @@ func (this *SeriesResponse_Stats) String() string { }, "") return s } +func (this *SeriesResponse_StreamingSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeriesResponse_StreamingSeries{`, + `StreamingSeries:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeries), "StreamSeriesBatch", "StreamSeriesBatch", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SeriesResponse_StreamingSeriesChunks) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SeriesResponse_StreamingSeriesChunks{`, + `StreamingSeriesChunks:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeriesChunks), "StreamSeriesChunks", "StreamSeriesChunks", 1) + `,`, + `}`, + }, "") + return s +} func (this *LabelNamesRequest) String() string { if this == nil { return "nil" @@ -2114,6 +2331,25 @@ func (m *SeriesRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 100: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunksBatchSize", wireType) + } + m.StreamingChunksBatchSize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.StreamingChunksBatchSize |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) @@ -2376,6 +2612,96 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { } m.Result = &SeriesResponse_Stats{v} iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeries", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamSeriesBatch{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &SeriesResponse_StreamingSeries{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeriesChunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthRpc + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthRpc + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &StreamSeriesChunks{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Result = &SeriesResponse_StreamingSeriesChunks{v} + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsEndOfSeriesStream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRpc + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsEndOfSeriesStream = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index 915753cacde..4a28a836fbf 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -79,6 +79,9 @@ message SeriesRequest { // Thanos shard_info. reserved 13; + + // Why 100? TBD, it is just copied from ingesters and need to see if we still need 100 here. + uint64 streaming_chunks_batch_size = 100; } message Stats { @@ -105,7 +108,13 @@ message SeriesResponse { /// stats is a object containing stats for a series response from the store-gateways so that we can collect stats /// related to the processing the series response on store-gateways did available to the querier and query-frontends. Stats stats = 4; + + StreamSeriesBatch streaming_series = 5; + + StreamSeriesChunks streaming_series_chunks = 6; } + + bool is_end_of_series_stream = 7; } message LabelNamesRequest { diff --git a/pkg/storegateway/storepb/types.pb.go b/pkg/storegateway/storepb/types.pb.go index 4a53330df89..c4169a385b1 100644 --- a/pkg/storegateway/storepb/types.pb.go +++ b/pkg/storegateway/storepb/types.pb.go @@ -76,7 +76,7 @@ var LabelMatcher_Type_value = map[string]int32{ } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3, 0} + return fileDescriptor_d938547f84707355, []int{5, 0} } type Chunk struct { @@ -153,6 +153,79 @@ func (m *Series) XXX_DiscardUnknown() { var xxx_messageInfo_Series proto.InternalMessageInfo +type StreamSeriesBatch struct { + Series []*Series `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` +} + +func (m *StreamSeriesBatch) Reset() { *m = StreamSeriesBatch{} } +func (*StreamSeriesBatch) ProtoMessage() {} +func (*StreamSeriesBatch) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} +func (m *StreamSeriesBatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamSeriesBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamSeriesBatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamSeriesBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamSeriesBatch.Merge(m, src) +} +func (m *StreamSeriesBatch) XXX_Size() int { + return m.Size() +} +func (m *StreamSeriesBatch) XXX_DiscardUnknown() { + xxx_messageInfo_StreamSeriesBatch.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamSeriesBatch proto.InternalMessageInfo + +type StreamSeriesChunks struct { + SeriesIndex uint64 `protobuf:"varint,1,opt,name=series_index,json=seriesIndex,proto3" json:"series_index,omitempty"` + Chunks []AggrChunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` +} + +func (m *StreamSeriesChunks) Reset() { *m = StreamSeriesChunks{} } +func (*StreamSeriesChunks) ProtoMessage() {} +func (*StreamSeriesChunks) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{3} +} +func (m *StreamSeriesChunks) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamSeriesChunks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamSeriesChunks.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamSeriesChunks) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamSeriesChunks.Merge(m, src) +} +func (m *StreamSeriesChunks) XXX_Size() int { + return m.Size() +} +func (m *StreamSeriesChunks) XXX_DiscardUnknown() { + xxx_messageInfo_StreamSeriesChunks.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamSeriesChunks proto.InternalMessageInfo + type AggrChunk struct { MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` @@ -162,7 +235,7 @@ type AggrChunk struct { func (m *AggrChunk) Reset() { *m = AggrChunk{} } func (*AggrChunk) ProtoMessage() {} func (*AggrChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{2} + return fileDescriptor_d938547f84707355, []int{4} } func (m *AggrChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -201,7 +274,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{5} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -235,6 +308,8 @@ func init() { proto.RegisterEnum("thanos.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterType((*Chunk)(nil), "thanos.Chunk") proto.RegisterType((*Series)(nil), "thanos.Series") + proto.RegisterType((*StreamSeriesBatch)(nil), "thanos.StreamSeriesBatch") + proto.RegisterType((*StreamSeriesChunks)(nil), "thanos.StreamSeriesChunks") proto.RegisterType((*AggrChunk)(nil), "thanos.AggrChunk") proto.RegisterType((*LabelMatcher)(nil), "thanos.LabelMatcher") } @@ -242,43 +317,46 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 567 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xf6, 0x26, 0x8e, 0xe3, 0x6c, 0x5b, 0x58, 0xb6, 0x15, 0x72, 0x7b, 0xd8, 0x46, 0x3e, 0x45, - 0x48, 0x75, 0xa0, 0x70, 0x41, 0xe2, 0xd2, 0xa0, 0xa0, 0x2a, 0xe2, 0xa7, 0x75, 0x8b, 0x84, 0x10, - 0x52, 0xb5, 0x76, 0x37, 0xce, 0xaa, 0xf1, 0x8f, 0xd6, 0x1b, 0x68, 0x6f, 0x7d, 0x04, 0x5e, 0x81, - 0x1b, 0x2f, 0x82, 0x94, 0x1b, 0x39, 0x56, 0x1c, 0x2a, 0xe2, 0x5c, 0x38, 0xf6, 0x11, 0x90, 0xd7, - 0x0e, 0x24, 0xea, 0xa5, 0x27, 0xcf, 0xcc, 0xf7, 0xcd, 0x7c, 0x9f, 0x67, 0x6c, 0xb8, 0x22, 0x2f, - 0x12, 0x96, 0x3a, 0x89, 0x88, 0x65, 0x8c, 0x0d, 0x39, 0xa0, 0x51, 0x9c, 0x6e, 0xed, 0x04, 0x5c, - 0x0e, 0x46, 0x9e, 0xe3, 0xc7, 0x61, 0x3b, 0x88, 0x83, 0xb8, 0xad, 0x60, 0x6f, 0xd4, 0x57, 0x99, - 0x4a, 0x54, 0x54, 0xb4, 0x6d, 0x3d, 0x5e, 0xa4, 0x0b, 0xda, 0xa7, 0x11, 0x6d, 0x87, 0x3c, 0xe4, - 0xa2, 0x9d, 0x9c, 0x05, 0x45, 0x94, 0x78, 0xc5, 0xb3, 0xe8, 0xb0, 0x7f, 0x02, 0x58, 0x7b, 0x39, - 0x18, 0x45, 0x67, 0xf8, 0x11, 0xd4, 0x73, 0x07, 0x16, 0x68, 0x82, 0xd6, 0xbd, 0xdd, 0x87, 0x4e, - 0xe1, 0xc0, 0x51, 0xa0, 0xd3, 0x8d, 0xfc, 0xf8, 0x94, 0x47, 0x81, 0xab, 0x38, 0xf8, 0x00, 0xea, - 0xa7, 0x54, 0x52, 0xab, 0xd2, 0x04, 0xad, 0xd5, 0xce, 0x8b, 0xf1, 0xf5, 0xb6, 0xf6, 0xeb, 0x7a, - 0xfb, 0xd9, 0x5d, 0xd4, 0x9d, 0xf7, 0x51, 0x4a, 0xfb, 0xac, 0x73, 0x21, 0xd9, 0xd1, 0x90, 0xfb, - 0xcc, 0x55, 0x93, 0xec, 0x7d, 0x68, 0xce, 0x35, 0xf0, 0x1a, 0x6c, 0x28, 0xd5, 0x93, 0x0f, 0xef, - 0x5c, 0xa4, 0xe1, 0x75, 0x78, 0xbf, 0x48, 0xf7, 0x79, 0x2a, 0xe3, 0x40, 0xd0, 0x10, 0x01, 0x6c, - 0xc1, 0x8d, 0xa2, 0xf8, 0x6a, 0x18, 0x53, 0xf9, 0x1f, 0xa9, 0xd8, 0xdf, 0x00, 0x34, 0x8e, 0x98, - 0xe0, 0x2c, 0xc5, 0x7d, 0x68, 0x0c, 0xa9, 0xc7, 0x86, 0xa9, 0x05, 0x9a, 0xd5, 0xd6, 0xca, 0xee, - 0xba, 0xe3, 0xc7, 0x42, 0xb2, 0xf3, 0xc4, 0x73, 0x5e, 0xe7, 0xf5, 0x03, 0xca, 0x45, 0xe7, 0x79, - 0xe9, 0xfe, 0xc9, 0x9d, 0xdc, 0xab, 0xbe, 0xbd, 0x53, 0x9a, 0x48, 0x26, 0xdc, 0x72, 0x3a, 0x6e, - 0x43, 0xc3, 0xcf, 0xcd, 0xa4, 0x56, 0x45, 0xe9, 0x3c, 0x98, 0x2f, 0x6f, 0x2f, 0x08, 0x84, 0xb2, - 0xd9, 0xd1, 0x73, 0x15, 0xb7, 0xa4, 0xd9, 0x97, 0x00, 0x36, 0xfe, 0x61, 0x78, 0x13, 0x9a, 0x21, - 0x8f, 0x4e, 0x24, 0x0f, 0x8b, 0xed, 0x57, 0xdd, 0x7a, 0xc8, 0xa3, 0x63, 0x1e, 0x32, 0x05, 0xd1, - 0xf3, 0x02, 0xaa, 0x94, 0x10, 0x3d, 0x57, 0xd0, 0x36, 0xac, 0x0a, 0xfa, 0xc5, 0xaa, 0x36, 0x41, - 0x6b, 0x65, 0x77, 0x6d, 0xe9, 0x5c, 0x6e, 0x8e, 0xf4, 0x74, 0x53, 0x47, 0xb5, 0x9e, 0x6e, 0xd6, - 0x90, 0xd1, 0xd3, 0x4d, 0x03, 0xd5, 0x7b, 0xba, 0x59, 0x47, 0x66, 0x4f, 0x37, 0x4d, 0xd4, 0xb0, - 0x7f, 0x00, 0xb8, 0xaa, 0x5e, 0xe6, 0x0d, 0x95, 0xfe, 0x80, 0x09, 0xbc, 0xb3, 0x74, 0xff, 0xcd, - 0xf9, 0xc0, 0x45, 0x8e, 0x73, 0x7c, 0x91, 0xb0, 0xf2, 0x13, 0xc0, 0x50, 0x8f, 0x68, 0xe9, 0xaa, - 0xe1, 0xaa, 0x18, 0x6f, 0xc0, 0xda, 0x67, 0x3a, 0x1c, 0x31, 0x65, 0xaa, 0xe1, 0x16, 0x89, 0xfd, - 0x09, 0xea, 0x79, 0x5f, 0x7e, 0xc7, 0xc5, 0x61, 0x27, 0xdd, 0x43, 0xa4, 0xe1, 0x0d, 0x88, 0x96, - 0x8a, 0x6f, 0xbb, 0x87, 0x08, 0xdc, 0xa2, 0xba, 0x5d, 0x54, 0xb9, 0x4d, 0x75, 0xbb, 0xa8, 0xda, - 0xd9, 0x1b, 0x4f, 0x89, 0x36, 0x99, 0x12, 0xed, 0x6a, 0x4a, 0xb4, 0x9b, 0x29, 0x01, 0x97, 0x19, - 0x01, 0xdf, 0x33, 0x02, 0xc6, 0x19, 0x01, 0x93, 0x8c, 0x80, 0xdf, 0x19, 0x01, 0x7f, 0x32, 0xa2, - 0xdd, 0x64, 0x04, 0x7c, 0x9d, 0x11, 0x6d, 0x32, 0x23, 0xda, 0xd5, 0x8c, 0x68, 0x1f, 0xeb, 0xa9, - 0x8c, 0x05, 0x4b, 0x3c, 0xcf, 0x50, 0xbf, 0xc2, 0xd3, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x08, - 0x77, 0xfb, 0x5f, 0x82, 0x03, 0x00, 0x00, + // 624 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0xf5, 0x24, 0x8e, 0xe3, 0x4c, 0xda, 0x7e, 0xee, 0xb4, 0xfa, 0x94, 0x76, 0x31, 0x0d, 0x5e, + 0xa0, 0x08, 0xa9, 0x0e, 0x14, 0x36, 0x08, 0x36, 0x0d, 0x0a, 0x2a, 0x11, 0x3f, 0xad, 0x5b, 0x24, + 0x84, 0x90, 0xa2, 0x71, 0x32, 0x71, 0x46, 0x8d, 0x7f, 0x34, 0x9e, 0x40, 0xba, 0xeb, 0x23, 0xf0, + 0x0a, 0xec, 0x78, 0x11, 0xa4, 0xee, 0xe8, 0xb2, 0x62, 0x51, 0x11, 0x77, 0xc3, 0xb2, 0x8f, 0x80, + 0x3c, 0xe3, 0x40, 0xa2, 0x6e, 0xca, 0xca, 0xf7, 0xde, 0x73, 0xee, 0xb9, 0x47, 0xd7, 0x77, 0x60, + 0x55, 0x9c, 0xc4, 0x34, 0x71, 0x62, 0x1e, 0x89, 0x08, 0x19, 0x62, 0x48, 0xc2, 0x28, 0xd9, 0xdc, + 0xf6, 0x99, 0x18, 0x8e, 0x3d, 0xa7, 0x17, 0x05, 0x4d, 0x3f, 0xf2, 0xa3, 0xa6, 0x84, 0xbd, 0xf1, + 0x40, 0x66, 0x32, 0x91, 0x91, 0x6a, 0xdb, 0xbc, 0x3f, 0x4f, 0xe7, 0x64, 0x40, 0x42, 0xd2, 0x0c, + 0x58, 0xc0, 0x78, 0x33, 0x3e, 0xf6, 0x55, 0x14, 0x7b, 0xea, 0xab, 0x3a, 0xec, 0xef, 0x00, 0x96, + 0x9e, 0x0d, 0xc7, 0xe1, 0x31, 0xba, 0x07, 0xf5, 0xcc, 0x41, 0x0d, 0xd4, 0x41, 0x63, 0x65, 0xe7, + 0x7f, 0x47, 0x39, 0x70, 0x24, 0xe8, 0xb4, 0xc3, 0x5e, 0xd4, 0x67, 0xa1, 0xef, 0x4a, 0x0e, 0xda, + 0x87, 0x7a, 0x9f, 0x08, 0x52, 0x2b, 0xd4, 0x41, 0x63, 0xa9, 0xf5, 0xf4, 0xec, 0x72, 0x4b, 0xfb, + 0x71, 0xb9, 0xf5, 0xe8, 0x36, 0xd3, 0x9d, 0xb7, 0x61, 0x42, 0x06, 0xb4, 0x75, 0x22, 0xe8, 0xe1, + 0x88, 0xf5, 0xa8, 0x2b, 0x95, 0xec, 0x3d, 0x68, 0xce, 0x66, 0xa0, 0x65, 0x58, 0x91, 0x53, 0xbb, + 0xef, 0xde, 0xb8, 0x96, 0x86, 0xd6, 0xe0, 0x7f, 0x2a, 0xdd, 0x63, 0x89, 0x88, 0x7c, 0x4e, 0x02, + 0x0b, 0xa0, 0x1a, 0x5c, 0x57, 0xc5, 0xe7, 0xa3, 0x88, 0x88, 0xbf, 0x48, 0xc1, 0xfe, 0x02, 0xa0, + 0x71, 0x48, 0x39, 0xa3, 0x09, 0x1a, 0x40, 0x63, 0x44, 0x3c, 0x3a, 0x4a, 0x6a, 0xa0, 0x5e, 0x6c, + 0x54, 0x77, 0xd6, 0x9c, 0x5e, 0xc4, 0x05, 0x9d, 0xc4, 0x9e, 0xf3, 0x32, 0xab, 0xef, 0x13, 0xc6, + 0x5b, 0x8f, 0x73, 0xf7, 0x0f, 0x6e, 0xe5, 0x5e, 0xf6, 0xed, 0xf6, 0x49, 0x2c, 0x28, 0x77, 0x73, + 0x75, 0xd4, 0x84, 0x46, 0x2f, 0x33, 0x93, 0xd4, 0x0a, 0x72, 0xce, 0xea, 0x6c, 0x79, 0xbb, 0xbe, + 0xcf, 0xa5, 0xcd, 0x96, 0x9e, 0x4d, 0x71, 0x73, 0x9a, 0xfd, 0x04, 0xae, 0x1e, 0x0a, 0x4e, 0x49, + 0xa0, 0x8c, 0xb6, 0x88, 0xe8, 0x0d, 0xd1, 0x5d, 0x68, 0x24, 0x32, 0xcd, 0xdd, 0xae, 0xcc, 0x54, + 0x14, 0xc9, 0xcd, 0x51, 0x7b, 0x08, 0xd1, 0x7c, 0xb3, 0xd4, 0x4f, 0xd0, 0x1d, 0xb8, 0xa4, 0xf0, + 0x2e, 0x0b, 0xfb, 0x74, 0x22, 0x7f, 0xa3, 0xee, 0x56, 0x55, 0xed, 0x45, 0x56, 0xfa, 0x77, 0x9b, + 0xa7, 0x00, 0x56, 0xfe, 0x60, 0x68, 0x03, 0x9a, 0x01, 0x0b, 0xbb, 0x82, 0x05, 0xea, 0x48, 0x8a, + 0x6e, 0x39, 0x60, 0xe1, 0x11, 0x0b, 0xa8, 0x84, 0xc8, 0x44, 0x41, 0x85, 0x1c, 0x22, 0x13, 0x09, + 0x6d, 0xc1, 0x22, 0x27, 0x9f, 0x6a, 0xc5, 0x3a, 0x68, 0x54, 0x77, 0x96, 0x17, 0xae, 0xca, 0xcd, + 0x90, 0x8e, 0x6e, 0xea, 0x56, 0xa9, 0xa3, 0x9b, 0x25, 0xcb, 0xe8, 0xe8, 0xa6, 0x61, 0x95, 0x3b, + 0xba, 0x59, 0xb6, 0xcc, 0x8e, 0x6e, 0x9a, 0x56, 0xc5, 0xfe, 0x06, 0xe0, 0x92, 0xdc, 0xf9, 0xab, + 0x6c, 0x47, 0x94, 0xa3, 0xed, 0x85, 0x33, 0xdd, 0x98, 0x09, 0xce, 0x73, 0x9c, 0xa3, 0x93, 0x98, + 0xe6, 0x97, 0x8a, 0xa0, 0x1e, 0x92, 0xdc, 0x55, 0xc5, 0x95, 0x31, 0x5a, 0x87, 0xa5, 0x8f, 0x64, + 0x34, 0xa6, 0xd2, 0x54, 0xc5, 0x55, 0x89, 0xfd, 0x01, 0xea, 0x59, 0x5f, 0x76, 0x6e, 0xf3, 0x62, + 0xdd, 0xf6, 0x81, 0xa5, 0xa1, 0x75, 0x68, 0x2d, 0x14, 0x5f, 0xb7, 0x0f, 0x2c, 0x70, 0x83, 0xea, + 0xb6, 0xad, 0xc2, 0x4d, 0xaa, 0xdb, 0xb6, 0x8a, 0xad, 0xdd, 0xb3, 0x29, 0xd6, 0xce, 0xa7, 0x58, + 0xbb, 0x98, 0x62, 0xed, 0x7a, 0x8a, 0xc1, 0x69, 0x8a, 0xc1, 0xd7, 0x14, 0x83, 0xb3, 0x14, 0x83, + 0xf3, 0x14, 0x83, 0x9f, 0x29, 0x06, 0xbf, 0x52, 0xac, 0x5d, 0xa7, 0x18, 0x7c, 0xbe, 0xc2, 0xda, + 0xf9, 0x15, 0xd6, 0x2e, 0xae, 0xb0, 0xf6, 0xbe, 0x9c, 0x88, 0x88, 0xd3, 0xd8, 0xf3, 0x0c, 0xf9, + 0x62, 0x1f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xa5, 0xbc, 0xe5, 0x17, 0x29, 0x04, 0x00, 0x00, } func (x Chunk_Encoding) String() string { @@ -359,6 +437,67 @@ func (this *Series) Equal(that interface{}) bool { } return true } +func (this *StreamSeriesBatch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamSeriesBatch) + if !ok { + that2, ok := that.(StreamSeriesBatch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Series) != len(that1.Series) { + return false + } + for i := range this.Series { + if !this.Series[i].Equal(that1.Series[i]) { + return false + } + } + return true +} +func (this *StreamSeriesChunks) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamSeriesChunks) + if !ok { + that2, ok := that.(StreamSeriesChunks) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.SeriesIndex != that1.SeriesIndex { + return false + } + if len(this.Chunks) != len(that1.Chunks) { + return false + } + for i := range this.Chunks { + if !this.Chunks[i].Equal(&that1.Chunks[i]) { + return false + } + } + return true +} func (this *AggrChunk) Equal(that interface{}) bool { if that == nil { return this == nil @@ -447,6 +586,35 @@ func (this *Series) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *StreamSeriesBatch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&storepb.StreamSeriesBatch{") + if this.Series != nil { + s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StreamSeriesChunks) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&storepb.StreamSeriesChunks{") + s = append(s, "SeriesIndex: "+fmt.Sprintf("%#v", this.SeriesIndex)+",\n") + if this.Chunks != nil { + vs := make([]*AggrChunk, len(this.Chunks)) + for i := range vs { + vs[i] = &this.Chunks[i] + } + s = append(s, "Chunks: "+fmt.Sprintf("%#v", vs)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *AggrChunk) GoString() string { if this == nil { return "nil" @@ -570,6 +738,85 @@ func (m *Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StreamSeriesBatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamSeriesBatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamSeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Series) > 0 { + for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Series[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StreamSeriesChunks) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamSeriesChunks) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.SeriesIndex != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.SeriesIndex)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *AggrChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -703,6 +950,39 @@ func (m *Series) Size() (n int) { return n } +func (m *StreamSeriesBatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Series) > 0 { + for _, e := range m.Series { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + +func (m *StreamSeriesChunks) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SeriesIndex != 0 { + n += 1 + sovTypes(uint64(m.SeriesIndex)) + } + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *AggrChunk) Size() (n int) { if m == nil { return 0 @@ -775,6 +1055,37 @@ func (this *Series) String() string { }, "") return s } +func (this *StreamSeriesBatch) String() string { + if this == nil { + return "nil" + } + repeatedStringForSeries := "[]*Series{" + for _, f := range this.Series { + repeatedStringForSeries += strings.Replace(f.String(), "Series", "Series", 1) + "," + } + repeatedStringForSeries += "}" + s := strings.Join([]string{`&StreamSeriesBatch{`, + `Series:` + repeatedStringForSeries + `,`, + `}`, + }, "") + return s +} +func (this *StreamSeriesChunks) String() string { + if this == nil { + return "nil" + } + repeatedStringForChunks := "[]AggrChunk{" + for _, f := range this.Chunks { + repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "AggrChunk", "AggrChunk", 1), `&`, ``, 1) + "," + } + repeatedStringForChunks += "}" + s := strings.Join([]string{`&StreamSeriesChunks{`, + `SeriesIndex:` + fmt.Sprintf("%v", this.SeriesIndex) + `,`, + `Chunks:` + repeatedStringForChunks + `,`, + `}`, + }, "") + return s +} func (this *AggrChunk) String() string { if this == nil { return "nil" @@ -1033,6 +1344,199 @@ func (m *Series) Unmarshal(dAtA []byte) error { } return nil } +func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamSeriesBatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamSeriesBatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Series = append(m.Series, &Series{}) + if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StreamSeriesChunks) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamSeriesChunks: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamSeriesChunks: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesIndex", wireType) + } + m.SeriesIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SeriesIndex |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, AggrChunk{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AggrChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index b9b2e4d5098..6922eb3c483 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -37,6 +37,15 @@ message Series { repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; } +message StreamSeriesBatch { + repeated Series series = 1; // Only labels are populated here. +} + +message StreamSeriesChunks { + uint64 series_index = 1; // Index into list of all series previously sent with QueryStreamSeries messages by this ingester during this query response. + repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; +} + message AggrChunk { int64 min_time = 1; int64 max_time = 2; From 1d8f18ba7ef4ded3142936d71bc5502d971beb44 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 6 Jun 2023 20:28:42 +0530 Subject: [PATCH 02/75] Send streaming chunks from store-gateway Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 124 +++++++++++++++++++++++++++++++------ 1 file changed, 104 insertions(+), 20 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 2012307b01c..acad16a8961 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -615,6 +615,72 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie readers = newChunkReaders(chunkReaders) } + if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { + // The streaming feature is enabled where we stream the series labels first, followed + // by the chunks later. Fetch only the labels here. + req.SkipChunks = true + + // TODO: what to do with hints here? + seriesSet, _, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) + if err != nil { + return err + } + + // TODO: should we pool the seriesBuffer/seriesBatch? + seriesBuffer := make([]*storepb.Series, req.StreamingChunksBatchSize) + for i := range seriesBuffer { + seriesBuffer[i] = &storepb.Series{} + } + seriesBatch := &storepb.StreamSeriesBatch{ + Series: seriesBuffer[:0], + } + for seriesSet.Next() { + var lset labels.Labels + // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle + // because the subsequent call to seriesSet.Next() may release it. + // TODO: check if it is safe to hold the lset. + lset, _ = seriesSet.At() + + // We are re-using the slice for every batch this way. + seriesBatch.Series = seriesBatch.Series[:len(seriesBatch.Series)+1] + seriesBatch.Series[len(seriesBatch.Series)-1].Labels = mimirpb.FromLabelsToLabelAdapters(lset) + + // TODO: Add relevant trace spans and timers. + + if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { + msg := &grpc.PreparedMsg{} + if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch, false)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) + } + + // Send the message. + if err = srv.SendMsg(msg); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) + } + + seriesBatch.Series = seriesBatch.Series[:0] + } + } + + // Send any remaining series and signal that there are no more series. + msg := &grpc.PreparedMsg{} + if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch, true)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) + } + // Send the message. + if err = srv.SendMsg(msg); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) + } + + if seriesSet.Err() != nil { + return errors.Wrap(seriesSet.Err(), "expand series set") + } + + req.SkipChunks = false + } + + // TODO: if streaming is enabled, we don't need to fetch the labels again; we just need to fetch the chunk references. + // TODO: re-use the postings from above streamingSeriesSetForBlocks if not already being re-used. seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) if err != nil { return err @@ -646,36 +712,54 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie }) for seriesSet.Next() { - var lset labels.Labels - var series storepb.Series - - seriesCount++ - // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle // because the subsequent call to seriesSet.Next() may release it. - if req.SkipChunks { - lset, _ = seriesSet.At() - } else { - lset, series.Chunks = seriesSet.At() + lset, chks := seriesSet.At() + seriesCount++ + msg := &grpc.PreparedMsg{} + if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { + // We only need to stream chunks here because the series labels have already + // been sent above. + // TODO: is the 'is end of stream' parameter required here? + streamingChunks := storepb.StreamSeriesChunks{ + SeriesIndex: uint64(seriesCount - 1), + Chunks: chks, + } - chunksCount += len(series.Chunks) - s.metrics.chunkSizeBytes.Observe(float64(chunksSize(series.Chunks))) + // Encode the message. We encode it ourselves into a PreparedMsg in order to measure + // the time it takes. + encodeBegin := time.Now() + if err = msg.Encode(srv, storepb.NewStreamSeriesChunksResponse(&streamingChunks)); err != nil { + err = status.Error(codes.Internal, errors.Wrap(err, "encode streaming chunks response").Error()) + return + } + encodeDuration += time.Since(encodeBegin) + } else { + var series storepb.Series + if !req.SkipChunks { + series.Chunks = chks + } + series.Labels = mimirpb.FromLabelsToLabelAdapters(lset) + + // Encode the message. We encode it ourselves into a PreparedMsg in order to measure + // the time it takes. + encodeBegin := time.Now() + if err = msg.Encode(srv, storepb.NewSeriesResponse(&series)); err != nil { + err = status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) + return + } + encodeDuration += time.Since(encodeBegin) } - series.Labels = mimirpb.FromLabelsToLabelAdapters(lset) - // Encode the message. We encode it ourselves into a PreparedMsg in order to measure - // the time it takes. - encodeBegin := time.Now() - msg := &grpc.PreparedMsg{} - if err = msg.Encode(srv, storepb.NewSeriesResponse(&series)); err != nil { - err = status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) - return + if !req.SkipChunks { + chunksCount += len(chks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) } - encodeDuration += time.Since(encodeBegin) // Send the message. sendBegin := time.Now() if err = srv.SendMsg(msg); err != nil { + // TODO: set the right error wrapper message. err = status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) return } From d8a29b7e9ea14656553fc424d6dd20e6f2ba7a82 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 7 Jun 2023 16:00:21 +0530 Subject: [PATCH 03/75] Reuse postings Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 39 +++++++++++++++++++++++---------- pkg/storegateway/series_refs.go | 18 ++++++++++----- 2 files changed, 40 insertions(+), 17 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index acad16a8961..baa67ddf32a 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -615,13 +615,18 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie readers = newChunkReaders(chunkReaders) } + var reusePostings [][]storage.SeriesRef + var reusePendingMatchers [][]*labels.Matcher if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { // The streaming feature is enabled where we stream the series labels first, followed // by the chunks later. Fetch only the labels here. req.SkipChunks = true + reusePostings = make([][]storage.SeriesRef, len(blocks)) + reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) + // TODO: what to do with hints here? - seriesSet, _, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) + seriesSet, _, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err } @@ -680,8 +685,9 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } // TODO: if streaming is enabled, we don't need to fetch the labels again; we just need to fetch the chunk references. - // TODO: re-use the postings from above streamingSeriesSetForBlocks if not already being re-used. - seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) + // But we need labels to merge the series from blocks. Find other way of caching the resultant series refs (maybe final ordered + // list of series IDs and block IDs). + seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err } @@ -815,6 +821,8 @@ func (s *BucketStore) streamingSeriesSetForBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, + reusePostings [][]storage.SeriesRef, + reusePendingMatchers [][]*labels.Matcher, ) (storepb.SeriesSet, *hintspb.SeriesResponseHints, error) { var ( resHints = &hintspb.SeriesResponseHints{} @@ -824,8 +832,9 @@ func (s *BucketStore) streamingSeriesSetForBlocks( begin = time.Now() ) - for _, b := range blocks { + for i, b := range blocks { b := b + i := i // Keep track of queried blocks. resHints.AddQueriedBlock(b.meta.ULID) @@ -837,13 +846,13 @@ func (s *BucketStore) streamingSeriesSetForBlocks( if shardSelector != nil { blockSeriesHashCache = s.seriesHashCache.GetBlockCache(b.meta.ULID.String()) } + var ps []storage.SeriesRef + var pendingMatchers []*labels.Matcher + if len(reusePostings) > 0 { + ps, pendingMatchers = reusePostings[i], reusePendingMatchers[i] + } g.Go(func() error { - var ( - part seriesChunkRefsSetIterator - err error - ) - - part, err = openBlockSeriesChunkRefsSetsIterator( + part, newPs, newPendingMatchers, err := openBlockSeriesChunkRefsSetsIterator( ctx, s.maxSeriesPerBatch, s.userID, @@ -857,12 +866,19 @@ func (s *BucketStore) streamingSeriesSetForBlocks( req.MinTime, req.MaxTime, s.numChunksRangesPerSeries, stats, + ps, + pendingMatchers, s.logger, ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) } + if len(reusePostings) > 0 { + reusePostings[i] = newPs + reusePendingMatchers[i] = newPendingMatchers + } + mtx.Lock() batches = append(batches, part) mtx.Unlock() @@ -1138,7 +1154,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] // We ignore request's min/max time and query the entire block to make the result cacheable. minTime, maxTime := indexr.block.meta.MinTime, indexr.block.meta.MaxTime - seriesSetsIterator, err := openBlockSeriesChunkRefsSetsIterator( + seriesSetsIterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, seriesPerBatch, indexr.block.userID, @@ -1152,6 +1168,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] minTime, maxTime, 1, // we skip chunks, so this doesn't make any difference stats, + nil, nil, logger, ) if err != nil { diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 66c91ecea7f..b70564a0694 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -746,15 +746,21 @@ func openBlockSeriesChunkRefsSetsIterator( minTime, maxTime int64, // Series must have data in this time range to be returned (ignored if skipChunks=true). chunkRangesPerSeries int, stats *safeQueryStats, + ps []storage.SeriesRef, // If this is not empty, these posting are used as it as without fetching new ones. + pendingMatchers []*labels.Matcher, // This is used in conjunction with 'ps'. logger log.Logger, -) (seriesChunkRefsSetIterator, error) { +) (seriesChunkRefsSetIterator, []storage.SeriesRef, []*labels.Matcher, error) { if batchSize <= 0 { - return nil, errors.New("set size must be a positive number") + return nil, nil, nil, errors.New("set size must be a positive number") } - ps, pendingMatchers, err := indexr.ExpandedPostings(ctx, matchers, stats) - if err != nil { - return nil, errors.Wrap(err, "expanded matching postings") + // TODO: cache the filtered postings instead later. + if len(ps) == 0 { + var err error + ps, pendingMatchers, err = indexr.ExpandedPostings(ctx, matchers, stats) + if err != nil { + return nil, nil, nil, errors.Wrap(err, "expanded matching postings") + } } var iterator seriesChunkRefsSetIterator @@ -778,7 +784,7 @@ func openBlockSeriesChunkRefsSetsIterator( iterator = newFilteringSeriesChunkRefsSetIterator(pendingMatchers, iterator, stats) } - return seriesStreamingFetchRefsDurationIterator(iterator, stats), nil + return seriesStreamingFetchRefsDurationIterator(iterator, stats), ps, pendingMatchers, nil } // seriesStreamingFetchRefsDurationIterator tracks the time spent loading series and chunk refs. From 8488231b87918b86328233efcd0d93a01330050b Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 8 Jun 2023 17:40:12 +0530 Subject: [PATCH 04/75] Read streaming chunks from storegateway in queriers Signed-off-by: Ganesh Vernekar --- pkg/querier/block.go | 28 +- pkg/querier/block_streaming.go | 109 ++++++ .../blocks_store_querable_streaming.go | 171 +++++++++ pkg/querier/blocks_store_queryable.go | 71 +++- pkg/storegateway/bucket.go | 4 +- pkg/storegateway/storepb/types.pb.go | 324 +++++++++++++++--- pkg/storegateway/storepb/types.proto | 6 +- 7 files changed, 632 insertions(+), 81 deletions(-) create mode 100644 pkg/querier/block_streaming.go create mode 100644 pkg/querier/blocks_store_querable_streaming.go diff --git a/pkg/querier/block.go b/pkg/querier/block.go index f0a267e73ef..01d28ac232f 100644 --- a/pkg/querier/block.go +++ b/pkg/querier/block.go @@ -46,8 +46,7 @@ func convertMatchersToLabelMatcher(matchers []*labels.Matcher) []storepb.LabelMa // Implementation of storage.SeriesSet, based on individual responses from store client. type blockQuerierSeriesSet struct { - series []*storepb.Series - warnings storage.Warnings + series []*storepb.Series // next response to process next int @@ -88,7 +87,7 @@ func (bqss *blockQuerierSeriesSet) Err() error { } func (bqss *blockQuerierSeriesSet) Warnings() storage.Warnings { - return bqss.warnings + return nil } // newBlockQuerierSeries makes a new blockQuerierSeries. Input labels must be already sorted by name. @@ -115,9 +114,18 @@ func (bqs *blockQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { return series.NewErrIterator(errors.New("no chunks")) } - its := make([]iteratorWithMaxTime, 0, len(bqs.chunks)) + it, err := newBlockQuerierSeriesIterator(bqs.Labels(), bqs.chunks) + if err != nil { + return series.NewErrIterator(err) + } + + return it +} + +func newBlockQuerierSeriesIterator(labels labels.Labels, chunks []storepb.AggrChunk) (*blockQuerierSeriesIterator, error) { + its := make([]iteratorWithMaxTime, 0, len(chunks)) - for _, c := range bqs.chunks { + for _, c := range chunks { var ( ch chunkenc.Chunk err error @@ -130,22 +138,18 @@ func (bqs *blockQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { case storepb.Chunk_FloatHistogram: ch, err = chunkenc.FromData(chunkenc.EncFloatHistogram, c.Raw.Data) default: - return series.NewErrIterator(errors.Wrapf(err, "failed to initialize chunk from unknown type (%v) encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, bqs.Labels(), c.MinTime, c.MaxTime)) + return nil, errors.Wrapf(err, "failed to initialize chunk from unknown type (%v) encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, labels, c.MinTime, c.MaxTime) } if err != nil { - return series.NewErrIterator(errors.Wrapf(err, "failed to initialize chunk from %v type encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, bqs.Labels(), c.MinTime, c.MaxTime)) + return nil, errors.Wrapf(err, "failed to initialize chunk from %v type encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, labels, c.MinTime, c.MaxTime) } it := ch.Iterator(nil) its = append(its, iteratorWithMaxTime{it, c.MaxTime}) } - return newBlockQuerierSeriesIterator(bqs.Labels(), its) -} - -func newBlockQuerierSeriesIterator(labels labels.Labels, its []iteratorWithMaxTime) *blockQuerierSeriesIterator { - return &blockQuerierSeriesIterator{labels: labels, iterators: its, lastT: math.MinInt64} + return &blockQuerierSeriesIterator{labels: labels, iterators: its, lastT: math.MinInt64}, nil } // iteratorWithMaxTime is an iterator which is aware of the maxT of its embedded iterator. diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go new file mode 100644 index 00000000000..565eb6bda5d --- /dev/null +++ b/pkg/querier/block_streaming.go @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "sort" + + "github.com/pkg/errors" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/storage/series" + "github.com/grafana/mimir/pkg/storegateway/storepb" +) + +// Implementation of storage.SeriesSet, based on individual responses from store client. +type blockStreamingQuerierSeriesSet struct { + series []*storepb.StreamingSeries + streamReader *SeriesChunksStreamReader + + // next response to process + next int + + currSeries storage.Series +} + +func (bqss *blockStreamingQuerierSeriesSet) Next() bool { + bqss.currSeries = nil + + if bqss.next >= len(bqss.series) { + return false + } + + currLabels := mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels) + seriesIdxStart, seriesIdxEnd := bqss.next, bqss.next + bqss.next++ + + // Merge chunks for current series. Chunks may come in multiple responses, but as soon + // as the response has chunks for a new series, we can stop searching. Series are sorted. + // See documentation for StoreClient.Series call for details. + for bqss.next < len(bqss.series) && labels.Compare(currLabels, mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels)) == 0 { + bqss.next++ + } + seriesIdxEnd = bqss.next - 1 + + bqss.currSeries = newBlockStreamingQuerierSeries(currLabels, seriesIdxStart, seriesIdxEnd, bqss.streamReader) + return true +} + +func (bqss *blockStreamingQuerierSeriesSet) At() storage.Series { + return bqss.currSeries +} + +func (bqss *blockStreamingQuerierSeriesSet) Err() error { + return nil +} + +func (bqss *blockStreamingQuerierSeriesSet) Warnings() storage.Warnings { + return nil +} + +// newBlockStreamingQuerierSeries makes a new blockQuerierSeries. Input labels must be already sorted by name. +func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesIdxEnd int, streamReader *SeriesChunksStreamReader) *blockStreamingQuerierSeries { + return &blockStreamingQuerierSeries{ + labels: lbls, + seriesIdxStart: seriesIdxStart, + seriesIdxEnd: seriesIdxEnd, + streamReader: streamReader, + } +} + +type blockStreamingQuerierSeries struct { + labels labels.Labels + seriesIdxStart, seriesIdxEnd int + streamReader *SeriesChunksStreamReader +} + +func (bqs *blockStreamingQuerierSeries) Labels() labels.Labels { + return bqs.labels +} + +func (bqs *blockStreamingQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { + // Fetch the chunks from the stream. + var allChunks []storepb.AggrChunk + for i := bqs.seriesIdxStart; i <= bqs.seriesIdxEnd; i++ { + chks, err := bqs.streamReader.GetChunks(uint64(i)) + if err != nil { + return series.NewErrIterator(err) + } + allChunks = append(allChunks, chks...) + } + if len(allChunks) == 0 { + // should not happen in practice, but we have a unit test for it + return series.NewErrIterator(errors.New("no chunks")) + } + + sort.Slice(allChunks, func(i, j int) bool { + return allChunks[i].MinTime < allChunks[j].MinTime + }) + + it, err := newBlockQuerierSeriesIterator(bqs.Labels(), allChunks) + if err != nil { + return series.NewErrIterator(err) + } + + return it +} diff --git a/pkg/querier/blocks_store_querable_streaming.go b/pkg/querier/blocks_store_querable_streaming.go new file mode 100644 index 00000000000..da675b53fc7 --- /dev/null +++ b/pkg/querier/blocks_store_querable_streaming.go @@ -0,0 +1,171 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "errors" + "fmt" + "io" + + "github.com/go-kit/log" + "github.com/go-kit/log/level" + + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/querier/stats" + "github.com/grafana/mimir/pkg/storegateway/storegatewaypb" + "github.com/grafana/mimir/pkg/storegateway/storepb" + "github.com/grafana/mimir/pkg/util/limiter" +) + +type StreamingSeries struct { + Labels []mimirpb.LabelAdapter + Source StreamingSeriesSource +} + +// StreamingSeriesSource holds the relationship between a stream of chunks from a SeriesChunksStreamReader +// and the expected position of a series' chunks in that stream. +type StreamingSeriesSource struct { + StreamReader *SeriesChunksStreamReader + SeriesIndex uint64 +} + +// SeriesChunksStreamReader is responsible for managing the streaming of chunks from an ingester and buffering +// chunks in memory until they are consumed by the PromQL engine. +type SeriesChunksStreamReader struct { + client storegatewaypb.StoreGateway_SeriesClient + expectedSeriesCount int + queryLimiter *limiter.QueryLimiter + stats *stats.Stats + log log.Logger + + seriesCunksChan chan *storepb.StreamSeriesChunks + errorChan chan error +} + +func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *SeriesChunksStreamReader { + return &SeriesChunksStreamReader{ + client: client, + expectedSeriesCount: expectedSeriesCount, + queryLimiter: queryLimiter, + stats: stats, + log: log, + } +} + +// Close cleans up all resources associated with this SeriesChunksStreamReader. +// This method should only be called if StartBuffering is not called. +func (s *SeriesChunksStreamReader) Close() { + if err := s.client.CloseSend(); err != nil { + level.Warn(s.log).Log("msg", "closing ingester client stream failed", "err", err) + } +} + +// StartBuffering begins streaming series' chunks from the store gateway associated with +// this SeriesChunksStreamReader. Once all series have been consumed with GetChunks, all resources +// associated with this SeriesChunksStreamReader are cleaned up. +// If an error occurs while streaming, a subsequent call to GetChunks will return an error. +// To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. +func (s *SeriesChunksStreamReader) StartBuffering() { + s.seriesCunksChan = make(chan *storepb.StreamSeriesChunks, 30) // TODO: increase or reduce the channel size. + + // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. + s.errorChan = make(chan error, 1) + ctxDone := s.client.Context().Done() + + go func() { + defer func() { + if err := s.client.CloseSend(); err != nil { + level.Warn(s.log).Log("msg", "closing ingester client stream failed", "err", err) + } + + close(s.seriesCunksChan) + close(s.errorChan) + }() + + totalSeries := 0 + + for { + msg, err := s.client.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + if totalSeries < s.expectedSeriesCount { + s.errorChan <- fmt.Errorf("expected to receive %v series, but got EOF after receiving %v series", s.expectedSeriesCount, totalSeries) + } + } else { + s.errorChan <- err + } + + return + } + + c := msg.GetStreamingSeriesChunks() + if c == nil { + s.errorChan <- fmt.Errorf("expected to receive StreamingSeriesChunks, but got something else") + return + } + + totalSeries++ + if totalSeries > s.expectedSeriesCount { + s.errorChan <- fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries) + return + } + + if err := s.queryLimiter.AddChunks(len(c.Chunks)); err != nil { + s.errorChan <- err + return + } + + chunkBytes := 0 + for _, ch := range c.Chunks { + chunkBytes += ch.Size() + } + if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { + s.errorChan <- err + return + } + + s.stats.AddFetchedChunks(uint64(len(c.Chunks))) + s.stats.AddFetchedChunkBytes(uint64(chunkBytes)) + + select { + case <-ctxDone: + // Why do we abort if the context is done? + // We want to make sure that this goroutine is never leaked. + // This goroutine could be leaked if nothing is reading from the buffer, but this method is still trying to send + // more series to a full buffer: it would block forever. + // So, here, we try to send the series to the buffer if we can, but if the context is cancelled, then we give up. + // This only works correctly if the context is cancelled when the query request is complete or cancelled, + // which is true at the time of writing. + s.errorChan <- s.client.Context().Err() + return + case s.seriesCunksChan <- c: + // Batch enqueued successfully, nothing else to do for this batch. + } + } + }() +} + +// GetChunks returns the chunks for the series with index seriesIndex. +// This method must be called with monotonically increasing values of seriesIndex. +func (s *SeriesChunksStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { + chks, haveChunks := <-s.seriesCunksChan + + if !haveChunks { + // If there's an error, report it. + select { + case err, haveError := <-s.errorChan: + if haveError { + return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has failed: %w", seriesIndex, err) + } + default: + } + + return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has already been exhausted", seriesIndex) + } + + if chks.SeriesIndex != seriesIndex { + return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has series with index %v", seriesIndex, chks.SeriesIndex) + } + + return chks.Chunks, nil +} diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index ad812ad02f8..05299bb8e7a 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -687,6 +687,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor spanLog = spanlogger.FromContext(ctx, q.logger) queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) reqStats = stats.FromContext(ctx) + streamReaders []*SeriesChunksStreamReader ) // Concurrently fetch series from all clients. @@ -717,7 +718,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return nil } + // A storegateway client will only fill either of mySeries or myStreamingSeries, and not both. mySeries := []*storepb.Series(nil) + myStreamingSeries := []*storepb.StreamingSeries(nil) myWarnings := storage.Warnings(nil) myQueriedBlocks := []ulid.ULID(nil) indexBytesFetched := uint64(0) @@ -742,7 +745,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return nil } - // Response may either contain series, warning or hints. + // Response may either contain series, streaming series, warning or hints. if s := resp.GetSeries(); s != nil { mySeries = append(mySeries, s) @@ -761,6 +764,17 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } } + if ss := resp.GetStreamingSeries(); ss != nil { + for _, s := range ss.Series { + // Add series fingerprint to query limiter; will return error if we are over the limit + limitErr := queryLimiter.AddSeries(s.Labels) + if limitErr != nil { + return validation.LimitError(limitErr.Error()) + } + } + myStreamingSeries = append(myStreamingSeries, ss.Series...) + } + if w := resp.GetWarning(); w != "" { myWarnings = append(myWarnings, errors.New(w)) } @@ -784,26 +798,42 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } } - numSeries := len(mySeries) - chunksFetched, chunkBytes := countChunksAndBytes(mySeries...) - - reqStats.AddFetchedSeries(uint64(numSeries)) - reqStats.AddFetchedChunkBytes(uint64(chunkBytes)) - reqStats.AddFetchedChunks(uint64(chunksFetched)) reqStats.AddFetchedIndexBytes(indexBytesFetched) - - level.Debug(spanLog).Log("msg", "received series from store-gateway", - "instance", c.RemoteAddress(), - "fetched series", numSeries, - "fetched chunk bytes", chunkBytes, - "fetched chunks", chunksFetched, - "fetched index bytes", indexBytesFetched, - "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), - "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) + var streamReader *SeriesChunksStreamReader + if len(mySeries) > 0 { + chunksFetched, chunkBytes := countChunksAndBytes(mySeries...) + + reqStats.AddFetchedSeries(uint64(len(mySeries))) + reqStats.AddFetchedChunkBytes(uint64(chunkBytes)) + reqStats.AddFetchedChunks(uint64(chunksFetched)) + + level.Debug(spanLog).Log("msg", "received series from store-gateway", + "instance", c.RemoteAddress(), + "fetched series", len(mySeries), + "fetched chunk bytes", chunkBytes, + "fetched chunks", chunksFetched, + "fetched index bytes", indexBytesFetched, + "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), + "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) + } else if len(myStreamingSeries) > 0 { + reqStats.AddFetchedSeries(uint64(len(mySeries))) + streamReader = NewSeriesChunksStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) + level.Debug(spanLog).Log("msg", "received streaming series from store-gateway", + "instance", c.RemoteAddress(), + "fetched series", len(mySeries), + "fetched index bytes", indexBytesFetched, + "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), + "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) + } // Store the result. mtx.Lock() - seriesSets = append(seriesSets, &blockQuerierSeriesSet{series: mySeries}) + if len(mySeries) > 0 { + seriesSets = append(seriesSets, &blockQuerierSeriesSet{series: mySeries}) + } else if len(myStreamingSeries) > 0 { + seriesSets = append(seriesSets, &blockStreamingQuerierSeriesSet{series: myStreamingSeries, streamReader: streamReader}) + streamReaders = append(streamReaders, streamReader) + } warnings = append(warnings, myWarnings...) queriedBlocks = append(queriedBlocks, myQueriedBlocks...) mtx.Unlock() @@ -814,9 +844,16 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor // Wait until all client requests complete. if err := g.Wait(); err != nil { + for _, sr := range streamReaders { + sr.Close() + } return nil, nil, nil, err } + for _, sr := range streamReaders { + sr.StartBuffering() + } + return seriesSets, queriedBlocks, warnings, nil } diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index baa67ddf32a..e90dff7a6d8 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -632,9 +632,9 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } // TODO: should we pool the seriesBuffer/seriesBatch? - seriesBuffer := make([]*storepb.Series, req.StreamingChunksBatchSize) + seriesBuffer := make([]*storepb.StreamingSeries, req.StreamingChunksBatchSize) for i := range seriesBuffer { - seriesBuffer[i] = &storepb.Series{} + seriesBuffer[i] = &storepb.StreamingSeries{} } seriesBatch := &storepb.StreamSeriesBatch{ Series: seriesBuffer[:0], diff --git a/pkg/storegateway/storepb/types.pb.go b/pkg/storegateway/storepb/types.pb.go index c4169a385b1..4c2fc79aae7 100644 --- a/pkg/storegateway/storepb/types.pb.go +++ b/pkg/storegateway/storepb/types.pb.go @@ -76,7 +76,7 @@ var LabelMatcher_Type_value = map[string]int32{ } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5, 0} + return fileDescriptor_d938547f84707355, []int{6, 0} } type Chunk struct { @@ -153,14 +153,50 @@ func (m *Series) XXX_DiscardUnknown() { var xxx_messageInfo_Series proto.InternalMessageInfo +type StreamingSeries struct { + Labels []github_com_grafana_mimir_pkg_mimirpb.LabelAdapter `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/grafana/mimir/pkg/mimirpb.LabelAdapter" json:"labels"` +} + +func (m *StreamingSeries) Reset() { *m = StreamingSeries{} } +func (*StreamingSeries) ProtoMessage() {} +func (*StreamingSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{2} +} +func (m *StreamingSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamingSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamingSeries.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamingSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamingSeries.Merge(m, src) +} +func (m *StreamingSeries) XXX_Size() int { + return m.Size() +} +func (m *StreamingSeries) XXX_DiscardUnknown() { + xxx_messageInfo_StreamingSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamingSeries proto.InternalMessageInfo + type StreamSeriesBatch struct { - Series []*Series `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` + Series []*StreamingSeries `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` } func (m *StreamSeriesBatch) Reset() { *m = StreamSeriesBatch{} } func (*StreamSeriesBatch) ProtoMessage() {} func (*StreamSeriesBatch) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{2} + return fileDescriptor_d938547f84707355, []int{3} } func (m *StreamSeriesBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -197,7 +233,7 @@ type StreamSeriesChunks struct { func (m *StreamSeriesChunks) Reset() { *m = StreamSeriesChunks{} } func (*StreamSeriesChunks) ProtoMessage() {} func (*StreamSeriesChunks) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{3} + return fileDescriptor_d938547f84707355, []int{4} } func (m *StreamSeriesChunks) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -235,7 +271,7 @@ type AggrChunk struct { func (m *AggrChunk) Reset() { *m = AggrChunk{} } func (*AggrChunk) ProtoMessage() {} func (*AggrChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{4} + return fileDescriptor_d938547f84707355, []int{5} } func (m *AggrChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -274,7 +310,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{6} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -308,6 +344,7 @@ func init() { proto.RegisterEnum("thanos.LabelMatcher_Type", LabelMatcher_Type_name, LabelMatcher_Type_value) proto.RegisterType((*Chunk)(nil), "thanos.Chunk") proto.RegisterType((*Series)(nil), "thanos.Series") + proto.RegisterType((*StreamingSeries)(nil), "thanos.StreamingSeries") proto.RegisterType((*StreamSeriesBatch)(nil), "thanos.StreamSeriesBatch") proto.RegisterType((*StreamSeriesChunks)(nil), "thanos.StreamSeriesChunks") proto.RegisterType((*AggrChunk)(nil), "thanos.AggrChunk") @@ -317,46 +354,47 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 624 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x4c, - 0x14, 0xf5, 0x24, 0x8e, 0xe3, 0x4c, 0xda, 0x7e, 0xee, 0xb4, 0xfa, 0x94, 0x76, 0x31, 0x0d, 0x5e, - 0xa0, 0x08, 0xa9, 0x0e, 0x14, 0x36, 0x08, 0x36, 0x0d, 0x0a, 0x2a, 0x11, 0x3f, 0xad, 0x5b, 0x24, - 0x84, 0x90, 0xa2, 0x71, 0x32, 0x71, 0x46, 0x8d, 0x7f, 0x34, 0x9e, 0x40, 0xba, 0xeb, 0x23, 0xf0, - 0x0a, 0xec, 0x78, 0x11, 0xa4, 0xee, 0xe8, 0xb2, 0x62, 0x51, 0x11, 0x77, 0xc3, 0xb2, 0x8f, 0x80, - 0x3c, 0xe3, 0x40, 0xa2, 0x6e, 0xca, 0xca, 0xf7, 0xde, 0x73, 0xee, 0xb9, 0x47, 0xd7, 0x77, 0x60, - 0x55, 0x9c, 0xc4, 0x34, 0x71, 0x62, 0x1e, 0x89, 0x08, 0x19, 0x62, 0x48, 0xc2, 0x28, 0xd9, 0xdc, - 0xf6, 0x99, 0x18, 0x8e, 0x3d, 0xa7, 0x17, 0x05, 0x4d, 0x3f, 0xf2, 0xa3, 0xa6, 0x84, 0xbd, 0xf1, - 0x40, 0x66, 0x32, 0x91, 0x91, 0x6a, 0xdb, 0xbc, 0x3f, 0x4f, 0xe7, 0x64, 0x40, 0x42, 0xd2, 0x0c, - 0x58, 0xc0, 0x78, 0x33, 0x3e, 0xf6, 0x55, 0x14, 0x7b, 0xea, 0xab, 0x3a, 0xec, 0xef, 0x00, 0x96, - 0x9e, 0x0d, 0xc7, 0xe1, 0x31, 0xba, 0x07, 0xf5, 0xcc, 0x41, 0x0d, 0xd4, 0x41, 0x63, 0x65, 0xe7, - 0x7f, 0x47, 0x39, 0x70, 0x24, 0xe8, 0xb4, 0xc3, 0x5e, 0xd4, 0x67, 0xa1, 0xef, 0x4a, 0x0e, 0xda, - 0x87, 0x7a, 0x9f, 0x08, 0x52, 0x2b, 0xd4, 0x41, 0x63, 0xa9, 0xf5, 0xf4, 0xec, 0x72, 0x4b, 0xfb, - 0x71, 0xb9, 0xf5, 0xe8, 0x36, 0xd3, 0x9d, 0xb7, 0x61, 0x42, 0x06, 0xb4, 0x75, 0x22, 0xe8, 0xe1, - 0x88, 0xf5, 0xa8, 0x2b, 0x95, 0xec, 0x3d, 0x68, 0xce, 0x66, 0xa0, 0x65, 0x58, 0x91, 0x53, 0xbb, - 0xef, 0xde, 0xb8, 0x96, 0x86, 0xd6, 0xe0, 0x7f, 0x2a, 0xdd, 0x63, 0x89, 0x88, 0x7c, 0x4e, 0x02, - 0x0b, 0xa0, 0x1a, 0x5c, 0x57, 0xc5, 0xe7, 0xa3, 0x88, 0x88, 0xbf, 0x48, 0xc1, 0xfe, 0x02, 0xa0, - 0x71, 0x48, 0x39, 0xa3, 0x09, 0x1a, 0x40, 0x63, 0x44, 0x3c, 0x3a, 0x4a, 0x6a, 0xa0, 0x5e, 0x6c, - 0x54, 0x77, 0xd6, 0x9c, 0x5e, 0xc4, 0x05, 0x9d, 0xc4, 0x9e, 0xf3, 0x32, 0xab, 0xef, 0x13, 0xc6, - 0x5b, 0x8f, 0x73, 0xf7, 0x0f, 0x6e, 0xe5, 0x5e, 0xf6, 0xed, 0xf6, 0x49, 0x2c, 0x28, 0x77, 0x73, - 0x75, 0xd4, 0x84, 0x46, 0x2f, 0x33, 0x93, 0xd4, 0x0a, 0x72, 0xce, 0xea, 0x6c, 0x79, 0xbb, 0xbe, - 0xcf, 0xa5, 0xcd, 0x96, 0x9e, 0x4d, 0x71, 0x73, 0x9a, 0xfd, 0x04, 0xae, 0x1e, 0x0a, 0x4e, 0x49, - 0xa0, 0x8c, 0xb6, 0x88, 0xe8, 0x0d, 0xd1, 0x5d, 0x68, 0x24, 0x32, 0xcd, 0xdd, 0xae, 0xcc, 0x54, - 0x14, 0xc9, 0xcd, 0x51, 0x7b, 0x08, 0xd1, 0x7c, 0xb3, 0xd4, 0x4f, 0xd0, 0x1d, 0xb8, 0xa4, 0xf0, - 0x2e, 0x0b, 0xfb, 0x74, 0x22, 0x7f, 0xa3, 0xee, 0x56, 0x55, 0xed, 0x45, 0x56, 0xfa, 0x77, 0x9b, - 0xa7, 0x00, 0x56, 0xfe, 0x60, 0x68, 0x03, 0x9a, 0x01, 0x0b, 0xbb, 0x82, 0x05, 0xea, 0x48, 0x8a, - 0x6e, 0x39, 0x60, 0xe1, 0x11, 0x0b, 0xa8, 0x84, 0xc8, 0x44, 0x41, 0x85, 0x1c, 0x22, 0x13, 0x09, - 0x6d, 0xc1, 0x22, 0x27, 0x9f, 0x6a, 0xc5, 0x3a, 0x68, 0x54, 0x77, 0x96, 0x17, 0xae, 0xca, 0xcd, - 0x90, 0x8e, 0x6e, 0xea, 0x56, 0xa9, 0xa3, 0x9b, 0x25, 0xcb, 0xe8, 0xe8, 0xa6, 0x61, 0x95, 0x3b, - 0xba, 0x59, 0xb6, 0xcc, 0x8e, 0x6e, 0x9a, 0x56, 0xc5, 0xfe, 0x06, 0xe0, 0x92, 0xdc, 0xf9, 0xab, - 0x6c, 0x47, 0x94, 0xa3, 0xed, 0x85, 0x33, 0xdd, 0x98, 0x09, 0xce, 0x73, 0x9c, 0xa3, 0x93, 0x98, - 0xe6, 0x97, 0x8a, 0xa0, 0x1e, 0x92, 0xdc, 0x55, 0xc5, 0x95, 0x31, 0x5a, 0x87, 0xa5, 0x8f, 0x64, - 0x34, 0xa6, 0xd2, 0x54, 0xc5, 0x55, 0x89, 0xfd, 0x01, 0xea, 0x59, 0x5f, 0x76, 0x6e, 0xf3, 0x62, - 0xdd, 0xf6, 0x81, 0xa5, 0xa1, 0x75, 0x68, 0x2d, 0x14, 0x5f, 0xb7, 0x0f, 0x2c, 0x70, 0x83, 0xea, - 0xb6, 0xad, 0xc2, 0x4d, 0xaa, 0xdb, 0xb6, 0x8a, 0xad, 0xdd, 0xb3, 0x29, 0xd6, 0xce, 0xa7, 0x58, - 0xbb, 0x98, 0x62, 0xed, 0x7a, 0x8a, 0xc1, 0x69, 0x8a, 0xc1, 0xd7, 0x14, 0x83, 0xb3, 0x14, 0x83, - 0xf3, 0x14, 0x83, 0x9f, 0x29, 0x06, 0xbf, 0x52, 0xac, 0x5d, 0xa7, 0x18, 0x7c, 0xbe, 0xc2, 0xda, - 0xf9, 0x15, 0xd6, 0x2e, 0xae, 0xb0, 0xf6, 0xbe, 0x9c, 0x88, 0x88, 0xd3, 0xd8, 0xf3, 0x0c, 0xf9, - 0x62, 0x1f, 0xfe, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xa5, 0xbc, 0xe5, 0x17, 0x29, 0x04, 0x00, 0x00, + // 638 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0xf6, 0x24, 0x8e, 0xe3, 0x4c, 0xda, 0x5b, 0x77, 0x5a, 0xdd, 0x9b, 0x76, 0x31, 0xcd, 0xf5, + 0x2a, 0xba, 0x52, 0x9d, 0x4b, 0x61, 0x83, 0xc4, 0xa6, 0x81, 0xa0, 0x12, 0xf1, 0xd3, 0xba, 0x45, + 0x42, 0x08, 0x29, 0x1a, 0x27, 0x13, 0x67, 0xd4, 0xf8, 0x47, 0xe3, 0x09, 0x24, 0xbb, 0x3e, 0x02, + 0xaf, 0xc0, 0x8e, 0x17, 0x41, 0xea, 0x8e, 0x2e, 0x2b, 0x16, 0x15, 0x71, 0x37, 0x2c, 0xfb, 0x08, + 0xc8, 0x33, 0x0e, 0xa4, 0x74, 0x53, 0x36, 0xac, 0x3c, 0xe7, 0x7c, 0xdf, 0x39, 0xdf, 0x37, 0x47, + 0x67, 0x0c, 0xab, 0x62, 0x1a, 0xd3, 0xc4, 0x89, 0x79, 0x24, 0x22, 0x64, 0x88, 0x21, 0x09, 0xa3, + 0x64, 0x73, 0xdb, 0x67, 0x62, 0x38, 0xf6, 0x9c, 0x5e, 0x14, 0x34, 0xfd, 0xc8, 0x8f, 0x9a, 0x12, + 0xf6, 0xc6, 0x03, 0x19, 0xc9, 0x40, 0x9e, 0x54, 0xd9, 0xe6, 0xff, 0x8b, 0x74, 0x4e, 0x06, 0x24, + 0x24, 0xcd, 0x80, 0x05, 0x8c, 0x37, 0xe3, 0x63, 0x5f, 0x9d, 0x62, 0x4f, 0x7d, 0x55, 0x85, 0xfd, + 0x19, 0xc0, 0xd2, 0xc3, 0xe1, 0x38, 0x3c, 0x46, 0xff, 0x41, 0x3d, 0x73, 0x50, 0x03, 0x75, 0xd0, + 0xf8, 0x6b, 0xe7, 0x6f, 0x47, 0x39, 0x70, 0x24, 0xe8, 0xb4, 0xc3, 0x5e, 0xd4, 0x67, 0xa1, 0xef, + 0x4a, 0x0e, 0xda, 0x87, 0x7a, 0x9f, 0x08, 0x52, 0x2b, 0xd4, 0x41, 0x63, 0xa9, 0xf5, 0xe0, 0xf4, + 0x62, 0x4b, 0xfb, 0x72, 0xb1, 0x75, 0xef, 0x36, 0xea, 0xce, 0xcb, 0x30, 0x21, 0x03, 0xda, 0x9a, + 0x0a, 0x7a, 0x38, 0x62, 0x3d, 0xea, 0xca, 0x4e, 0xf6, 0x1e, 0x34, 0xe7, 0x1a, 0x68, 0x19, 0x56, + 0xa4, 0x6a, 0xf7, 0xd5, 0x0b, 0xd7, 0xd2, 0xd0, 0x1a, 0x5c, 0x51, 0xe1, 0x1e, 0x4b, 0x44, 0xe4, + 0x73, 0x12, 0x58, 0x00, 0xd5, 0xe0, 0xba, 0x4a, 0x3e, 0x1e, 0x45, 0x44, 0xfc, 0x44, 0x0a, 0xf6, + 0x07, 0x00, 0x8d, 0x43, 0xca, 0x19, 0x4d, 0xd0, 0x00, 0x1a, 0x23, 0xe2, 0xd1, 0x51, 0x52, 0x03, + 0xf5, 0x62, 0xa3, 0xba, 0xb3, 0xe6, 0xf4, 0x22, 0x2e, 0xe8, 0x24, 0xf6, 0x9c, 0xa7, 0x59, 0x7e, + 0x9f, 0x30, 0xde, 0xba, 0x9f, 0xbb, 0xbf, 0x73, 0x2b, 0xf7, 0xb2, 0x6e, 0xb7, 0x4f, 0x62, 0x41, + 0xb9, 0x9b, 0x77, 0x47, 0x4d, 0x68, 0xf4, 0x32, 0x33, 0x49, 0xad, 0x20, 0x75, 0x56, 0xe7, 0xc3, + 0xdb, 0xf5, 0x7d, 0x2e, 0x6d, 0xb6, 0xf4, 0x4c, 0xc5, 0xcd, 0x69, 0xf6, 0x14, 0xae, 0x1c, 0x0a, + 0x4e, 0x49, 0xc0, 0x42, 0xff, 0xcf, 0x7a, 0xb5, 0x1f, 0xc1, 0x55, 0x25, 0xad, 0x74, 0x5b, 0x44, + 0xf4, 0x86, 0xd9, 0x05, 0x12, 0x19, 0xe6, 0xe2, 0xff, 0xcc, 0x2f, 0xf0, 0x8b, 0x4b, 0x37, 0xa7, + 0xd9, 0x43, 0x88, 0x16, 0xbb, 0xc8, 0x3b, 0x26, 0xe8, 0x5f, 0xb8, 0xa4, 0xf0, 0x2e, 0x0b, 0xfb, + 0x74, 0x22, 0x57, 0x49, 0x77, 0xab, 0x2a, 0xf7, 0x24, 0x4b, 0xfd, 0xfe, 0xa8, 0x4e, 0x00, 0xac, + 0xfc, 0xc0, 0xd0, 0x06, 0x34, 0x03, 0x16, 0x76, 0x05, 0x0b, 0xd4, 0xa2, 0x16, 0xdd, 0x72, 0xc0, + 0xc2, 0x23, 0x16, 0x50, 0x09, 0x91, 0x89, 0x82, 0x0a, 0x39, 0x44, 0x26, 0x12, 0xda, 0x82, 0x45, + 0x4e, 0xde, 0xd5, 0x8a, 0x75, 0xd0, 0xa8, 0xee, 0x2c, 0x5f, 0xdb, 0x6c, 0x37, 0x43, 0x3a, 0xba, + 0xa9, 0x5b, 0xa5, 0x8e, 0x6e, 0x96, 0x2c, 0xa3, 0xa3, 0x9b, 0x86, 0x55, 0xee, 0xe8, 0x66, 0xd9, + 0x32, 0x3b, 0xba, 0x69, 0x5a, 0x15, 0xfb, 0x13, 0x80, 0x4b, 0x72, 0x96, 0xcf, 0xb2, 0x61, 0x51, + 0x8e, 0xb6, 0xaf, 0x3d, 0x95, 0x8d, 0x79, 0xc3, 0x45, 0x8e, 0x73, 0x34, 0x8d, 0x69, 0xfe, 0x5a, + 0x10, 0xd4, 0x43, 0x92, 0xbb, 0xaa, 0xb8, 0xf2, 0x8c, 0xd6, 0x61, 0xe9, 0x2d, 0x19, 0x8d, 0xa9, + 0x34, 0x55, 0x71, 0x55, 0x60, 0xbf, 0x81, 0x7a, 0x56, 0x97, 0xad, 0xfc, 0x62, 0xb3, 0x6e, 0xfb, + 0xc0, 0xd2, 0xd0, 0x3a, 0xb4, 0xae, 0x25, 0x9f, 0xb7, 0x0f, 0x2c, 0x70, 0x83, 0xea, 0xb6, 0xad, + 0xc2, 0x4d, 0xaa, 0xdb, 0xb6, 0x8a, 0xad, 0xdd, 0xd3, 0x19, 0xd6, 0xce, 0x66, 0x58, 0x3b, 0x9f, + 0x61, 0xed, 0x6a, 0x86, 0xc1, 0x49, 0x8a, 0xc1, 0xc7, 0x14, 0x83, 0xd3, 0x14, 0x83, 0xb3, 0x14, + 0x83, 0xaf, 0x29, 0x06, 0xdf, 0x52, 0xac, 0x5d, 0xa5, 0x18, 0xbc, 0xbf, 0xc4, 0xda, 0xd9, 0x25, + 0xd6, 0xce, 0x2f, 0xb1, 0xf6, 0xba, 0x9c, 0x88, 0x88, 0xd3, 0xd8, 0xf3, 0x0c, 0xf9, 0xd7, 0xb8, + 0xfb, 0x3d, 0x00, 0x00, 0xff, 0xff, 0x18, 0xad, 0xe5, 0x96, 0xad, 0x04, 0x00, 0x00, } func (x Chunk_Encoding) String() string { @@ -437,6 +475,35 @@ func (this *Series) Equal(that interface{}) bool { } return true } +func (this *StreamingSeries) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamingSeries) + if !ok { + that2, ok := that.(StreamingSeries) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Labels) != len(that1.Labels) { + return false + } + for i := range this.Labels { + if !this.Labels[i].Equal(that1.Labels[i]) { + return false + } + } + return true +} func (this *StreamSeriesBatch) Equal(that interface{}) bool { if that == nil { return this == nil @@ -586,6 +653,16 @@ func (this *Series) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *StreamingSeries) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&storepb.StreamingSeries{") + s = append(s, "Labels: "+fmt.Sprintf("%#v", this.Labels)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} func (this *StreamSeriesBatch) GoString() string { if this == nil { return "nil" @@ -738,6 +815,43 @@ func (m *Series) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StreamingSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamingSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamingSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Labels) > 0 { + for iNdEx := len(m.Labels) - 1; iNdEx >= 0; iNdEx-- { + { + size := m.Labels[iNdEx].Size() + i -= size + if _, err := m.Labels[iNdEx].MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *StreamSeriesBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -950,6 +1064,21 @@ func (m *Series) Size() (n int) { return n } +func (m *StreamingSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Labels) > 0 { + for _, e := range m.Labels { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *StreamSeriesBatch) Size() (n int) { if m == nil { return 0 @@ -1055,13 +1184,23 @@ func (this *Series) String() string { }, "") return s } +func (this *StreamingSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StreamingSeries{`, + `Labels:` + fmt.Sprintf("%v", this.Labels) + `,`, + `}`, + }, "") + return s +} func (this *StreamSeriesBatch) String() string { if this == nil { return "nil" } - repeatedStringForSeries := "[]*Series{" + repeatedStringForSeries := "[]*StreamingSeries{" for _, f := range this.Series { - repeatedStringForSeries += strings.Replace(f.String(), "Series", "Series", 1) + "," + repeatedStringForSeries += strings.Replace(f.String(), "StreamingSeries", "StreamingSeries", 1) + "," } repeatedStringForSeries += "}" s := strings.Join([]string{`&StreamSeriesBatch{`, @@ -1344,6 +1483,93 @@ func (m *Series) Unmarshal(dAtA []byte) error { } return nil } +func (m *StreamingSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamingSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamingSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Labels = append(m.Labels, github_com_grafana_mimir_pkg_mimirpb.LabelAdapter{}) + if err := m.Labels[len(m.Labels)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -1402,7 +1628,7 @@ func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Series = append(m.Series, &Series{}) + m.Series = append(m.Series, &StreamingSeries{}) if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index 6922eb3c483..f04259cec40 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -37,8 +37,12 @@ message Series { repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; } +message StreamingSeries { + repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/mimir/pkg/mimirpb.LabelAdapter"]; +} + message StreamSeriesBatch { - repeated Series series = 1; // Only labels are populated here. + repeated StreamingSeries series = 1; // Only labels are populated here. } message StreamSeriesChunks { From e2d189a40f42053c87f06590b7452c131e856bb7 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 8 Jun 2023 18:02:36 +0530 Subject: [PATCH 05/75] Fix lint Signed-off-by: Ganesh Vernekar --- pkg/querier/block_streaming.go | 5 ++--- pkg/storegateway/bucket_chunk_reader_test.go | 3 ++- pkg/storegateway/series_refs_test.go | 15 ++++++++++----- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index 565eb6bda5d..abe73b6f82f 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -34,7 +34,7 @@ func (bqss *blockStreamingQuerierSeriesSet) Next() bool { } currLabels := mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels) - seriesIdxStart, seriesIdxEnd := bqss.next, bqss.next + seriesIdxStart := bqss.next bqss.next++ // Merge chunks for current series. Chunks may come in multiple responses, but as soon @@ -43,9 +43,8 @@ func (bqss *blockStreamingQuerierSeriesSet) Next() bool { for bqss.next < len(bqss.series) && labels.Compare(currLabels, mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels)) == 0 { bqss.next++ } - seriesIdxEnd = bqss.next - 1 - bqss.currSeries = newBlockStreamingQuerierSeries(currLabels, seriesIdxStart, seriesIdxEnd, bqss.streamReader) + bqss.currSeries = newBlockStreamingQuerierSeries(currLabels, seriesIdxStart, bqss.next-1, bqss.streamReader) return true } diff --git a/pkg/storegateway/bucket_chunk_reader_test.go b/pkg/storegateway/bucket_chunk_reader_test.go index 14d637fcbeb..034f4c78e12 100644 --- a/pkg/storegateway/bucket_chunk_reader_test.go +++ b/pkg/storegateway/bucket_chunk_reader_test.go @@ -28,7 +28,7 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { newTestBucketBlock := prepareTestBlock(test.NewTB(t), appendTestSeries(1000)) block := newTestBucketBlock() - seriesRefsIterator, err := openBlockSeriesChunkRefsSetsIterator( + seriesRefsIterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, 5000, "tenant-1", @@ -43,6 +43,7 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { block.meta.MaxTime, 2, newSafeQueryStats(), + nil, nil, log.NewNopLogger(), ) require.NoError(t, err) diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 6055c49835b..fc3654dd6c3 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1662,7 +1662,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { maxT = testCase.maxT } - iterator, err := openBlockSeriesChunkRefsSetsIterator( + iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, testCase.batchSize, "", @@ -1677,6 +1677,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { maxT, 2, newSafeQueryStats(), + nil, nil, nil, ) require.NoError(t, err) @@ -1763,7 +1764,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { block.pendingReaders.Add(2) // this is hacky, but can be replaced only block.indexReade() accepts a strategy querySeries := func(indexReader *bucketIndexReader) []seriesChunkRefsSet { hashCache := hashcache.NewSeriesHashCache(1024 * 1024).GetBlockCache(block.meta.ULID.String()) - iterator, err := openBlockSeriesChunkRefsSetsIterator( + iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, testCase.batchSize, "", @@ -1778,6 +1779,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { block.meta.MaxTime, 2, newSafeQueryStats(), + nil, nil, nil, ) require.NoError(t, err) @@ -1826,7 +1828,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { - iterator, err := openBlockSeriesChunkRefsSetsIterator( + iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, 5000, "", @@ -1841,6 +1843,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { block.meta.MaxTime, 2, newSafeQueryStats(), + nil, nil, nil, ) require.NoError(b, err) @@ -2374,7 +2377,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { // All test cases have a single matcher, so the strategy wouldn't really make a difference. // Pending matchers are tested in other tests. indexReader := b.indexReader(selectAllStrategy{}) - ss, err := openBlockSeriesChunkRefsSetsIterator( + ss, _, _, err := openBlockSeriesChunkRefsSetsIterator( context.Background(), batchSize, "", @@ -2389,6 +2392,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { b.meta.MaxTime, 1, statsColdCache, + nil, nil, log.NewNopLogger(), ) @@ -2405,7 +2409,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { } statsWarnCache := newSafeQueryStats() - ss, err = openBlockSeriesChunkRefsSetsIterator( + ss, _, _, err = openBlockSeriesChunkRefsSetsIterator( context.Background(), batchSize, "", @@ -2420,6 +2424,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { b.meta.MaxTime, 1, statsWarnCache, + nil, nil, log.NewNopLogger(), ) require.NoError(t, err) From 8bc1d1257c2d791819c2147ff666cc2be43ada84 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 8 Jun 2023 19:01:15 +0530 Subject: [PATCH 06/75] Test if streaming works by making it default Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 05299bb8e7a..c80c9c8b5f1 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -707,7 +707,8 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor if err != nil { return errors.Wrapf(err, "failed to create series request") } - + + req.StreamingChunksBatchSize = 128 stream, err := c.Series(gCtx, req) if err != nil { if shouldStopQueryFunc(err) { From 40f475350aa280c0c5220c30dd16254a3efa2b63 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 8 Jun 2023 19:17:12 +0530 Subject: [PATCH 07/75] Do not ignore IsEndOfSeriesStream Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 7 +- pkg/storegateway/bucket.go | 5 +- pkg/storegateway/storepb/custom.go | 3 +- pkg/storegateway/storepb/rpc.pb.go | 145 +++++++++----------------- pkg/storegateway/storepb/rpc.proto | 2 - pkg/storegateway/storepb/types.pb.go | 125 ++++++++++++++-------- pkg/storegateway/storepb/types.proto | 3 +- 7 files changed, 146 insertions(+), 144 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index c80c9c8b5f1..de73842234f 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -707,8 +707,8 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor if err != nil { return errors.Wrapf(err, "failed to create series request") } - - req.StreamingChunksBatchSize = 128 + + req.StreamingChunksBatchSize = 10 stream, err := c.Series(gCtx, req) if err != nil { if shouldStopQueryFunc(err) { @@ -774,6 +774,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } } myStreamingSeries = append(myStreamingSeries, ss.Series...) + if ss.IsEndOfSeriesStream { + break + } } if w := resp.GetWarning(); w != "" { diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index e90dff7a6d8..b56291a7905 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -654,7 +654,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { msg := &grpc.PreparedMsg{} - if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch, false)); err != nil { + if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) } @@ -669,7 +669,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie // Send any remaining series and signal that there are no more series. msg := &grpc.PreparedMsg{} - if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch, true)); err != nil { + seriesBatch.IsEndOfSeriesStream = true + if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) } // Send the message. diff --git a/pkg/storegateway/storepb/custom.go b/pkg/storegateway/storepb/custom.go index 90b82985e33..a974d87415a 100644 --- a/pkg/storegateway/storepb/custom.go +++ b/pkg/storegateway/storepb/custom.go @@ -39,12 +39,11 @@ func NewStatsResponse(indexBytesFetched int) *SeriesResponse { } } -func NewStreamSeriesResponse(series *StreamSeriesBatch, endOfStream bool) *SeriesResponse { +func NewStreamSeriesResponse(series *StreamSeriesBatch) *SeriesResponse { return &SeriesResponse{ Result: &SeriesResponse_StreamingSeries{ StreamingSeries: series, }, - IsEndOfSeriesStream: endOfStream, } } diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index 4b5ee4eddae..f019d6a3a2f 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -121,8 +121,7 @@ type SeriesResponse struct { // *SeriesResponse_Stats // *SeriesResponse_StreamingSeries // *SeriesResponse_StreamingSeriesChunks - Result isSeriesResponse_Result `protobuf_oneof:"result"` - IsEndOfSeriesStream bool `protobuf:"varint,7,opt,name=is_end_of_series_stream,json=isEndOfSeriesStream,proto3" json:"is_end_of_series_stream,omitempty"` + Result isSeriesResponse_Result `protobuf_oneof:"result"` } func (m *SeriesResponse) Reset() { *m = SeriesResponse{} } @@ -431,58 +430,56 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 814 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x94, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xc7, 0x3d, 0xf1, 0xd8, 0x99, 0x4c, 0x2e, 0x8b, 0x6f, 0x76, 0xef, 0xce, 0xf1, 0x49, 0xbe, - 0x28, 0x12, 0x52, 0x84, 0x20, 0x87, 0x16, 0x04, 0xa2, 0xa0, 0xb8, 0x9c, 0x38, 0x05, 0x8b, 0x1f, - 0x92, 0xf7, 0x44, 0x41, 0x63, 0x39, 0xc9, 0x24, 0xb1, 0x2e, 0x19, 0x07, 0x8f, 0x03, 0x9b, 0xab, - 0x68, 0xe9, 0xf8, 0x33, 0x90, 0xf8, 0x0b, 0x68, 0xa9, 0xb6, 0x63, 0xcb, 0xab, 0x10, 0xc9, 0x36, - 0x94, 0xfb, 0x27, 0xa0, 0xf9, 0xe1, 0x38, 0x59, 0x05, 0x2d, 0x2b, 0xd1, 0xf9, 0x7d, 0xbf, 0x6f, - 0xde, 0xbc, 0xf9, 0xcc, 0x1b, 0xe3, 0x5a, 0xb6, 0x18, 0x76, 0x17, 0x59, 0x9a, 0xa7, 0xc4, 0xce, - 0xa7, 0x31, 0x4b, 0xb9, 0x57, 0xcf, 0x57, 0x0b, 0xca, 0x95, 0xe8, 0xbd, 0x37, 0x49, 0xf2, 0xe9, - 0x72, 0xd0, 0x1d, 0xa6, 0xf3, 0xa7, 0x93, 0x74, 0x92, 0x3e, 0x95, 0xf2, 0x60, 0x39, 0x96, 0x91, - 0x0c, 0xe4, 0x97, 0x4e, 0x6f, 0x4e, 0xd2, 0x74, 0x32, 0xa3, 0x65, 0x56, 0xcc, 0x56, 0xca, 0x6a, - 0xff, 0x56, 0xc1, 0x8d, 0x33, 0x9a, 0x25, 0x94, 0x87, 0xf4, 0xbb, 0x25, 0xe5, 0x39, 0x69, 0x62, - 0x34, 0x4f, 0x58, 0x94, 0x27, 0x73, 0xea, 0x82, 0x16, 0xe8, 0x98, 0x61, 0x75, 0x9e, 0xb0, 0x97, - 0xc9, 0x9c, 0x4a, 0x2b, 0x3e, 0x57, 0x56, 0x45, 0x5b, 0xf1, 0xb9, 0xb4, 0x3e, 0x12, 0x56, 0x3e, - 0x9c, 0xd2, 0x8c, 0xbb, 0x66, 0xcb, 0xec, 0xd4, 0x4f, 0x4f, 0xba, 0xaa, 0xf3, 0xee, 0x17, 0xf1, - 0x80, 0xce, 0xbe, 0x54, 0x66, 0x0f, 0x5e, 0xfc, 0xf9, 0xc4, 0x08, 0xb7, 0xb9, 0xe4, 0x09, 0xae, - 0xf3, 0x57, 0xc9, 0x22, 0x1a, 0x4e, 0x97, 0xec, 0x15, 0x77, 0x51, 0x0b, 0x74, 0x50, 0x88, 0x85, - 0xf4, 0x5c, 0x2a, 0xe4, 0x1d, 0x6c, 0x4d, 0x13, 0x96, 0x73, 0xb7, 0xd6, 0x02, 0xb2, 0xaa, 0x3a, - 0x4b, 0xb7, 0x38, 0x4b, 0xf7, 0x19, 0x5b, 0x85, 0x2a, 0x85, 0x7c, 0x8a, 0x1f, 0xf3, 0x3c, 0xa3, - 0xf1, 0x3c, 0x61, 0x13, 0x5d, 0x31, 0x1a, 0x88, 0x9d, 0x22, 0x9e, 0xbc, 0xa6, 0xee, 0xa8, 0x05, - 0x3a, 0x30, 0x74, 0xb7, 0x29, 0x6a, 0x87, 0x9e, 0x48, 0x38, 0x4b, 0x5e, 0xd3, 0x00, 0x22, 0xe8, - 0x58, 0x01, 0x44, 0x96, 0x63, 0x07, 0x10, 0xd9, 0x4e, 0x35, 0x80, 0xa8, 0xea, 0xa0, 0x00, 0x22, - 0xec, 0xd4, 0x03, 0x88, 0xea, 0xce, 0xbd, 0x00, 0xa2, 0x7b, 0x4e, 0x23, 0x80, 0xa8, 0xe1, 0x1c, - 0xb5, 0x3f, 0xc6, 0xd6, 0x59, 0x1e, 0xe7, 0x9c, 0x74, 0xf1, 0xf1, 0x98, 0x8a, 0x03, 0x8d, 0xa2, - 0x84, 0x8d, 0xe8, 0x79, 0x34, 0x58, 0xe5, 0x94, 0x4b, 0x7a, 0x30, 0xbc, 0xaf, 0xad, 0xcf, 0x85, - 0xd3, 0x13, 0x46, 0xfb, 0x27, 0x13, 0x1f, 0x15, 0xd0, 0xf9, 0x22, 0x65, 0x9c, 0x92, 0x0e, 0xb6, - 0xb9, 0x54, 0xe4, 0xaa, 0xfa, 0xe9, 0x51, 0x41, 0x4f, 0xe5, 0xf5, 0x8d, 0x50, 0xfb, 0xc4, 0xc3, - 0xd5, 0x1f, 0xe2, 0x8c, 0x25, 0x6c, 0x22, 0xef, 0xa0, 0xd6, 0x37, 0xc2, 0x42, 0x20, 0xef, 0x16, - 0xb0, 0xcc, 0x7f, 0x87, 0xd5, 0x37, 0x0a, 0x5c, 0x6f, 0x63, 0x8b, 0x8b, 0xfe, 0x5d, 0x28, 0xb3, - 0x1b, 0xdb, 0x2d, 0x85, 0x28, 0xd2, 0xa4, 0x4b, 0x5e, 0x60, 0xa7, 0xa4, 0xaa, 0x9b, 0xb4, 0xe4, - 0x8a, 0x66, 0xb9, 0x42, 0xf8, 0xaa, 0x55, 0xc9, 0xb3, 0x6f, 0x84, 0x6f, 0x6d, 0x17, 0x29, 0x9d, - 0xbc, 0xc4, 0x8f, 0x6e, 0xd6, 0x29, 0xae, 0xdd, 0x96, 0xe5, 0xbc, 0x43, 0xe5, 0xd4, 0x25, 0xf5, - 0x8d, 0xf0, 0xc1, 0x8d, 0x7a, 0x7a, 0x3e, 0x3e, 0xc4, 0x8f, 0x12, 0x1e, 0x51, 0x36, 0x8a, 0xd2, - 0x71, 0x51, 0x55, 0x65, 0xba, 0x55, 0x39, 0x4c, 0xc7, 0x09, 0xff, 0x8c, 0x8d, 0xbe, 0x1e, 0xab, - 0x55, 0xaa, 0x74, 0x0f, 0x61, 0x3b, 0xa3, 0x7c, 0x39, 0xcb, 0xdb, 0xbf, 0x02, 0x7c, 0x5f, 0x4e, - 0xe8, 0x57, 0xf1, 0xbc, 0x7c, 0x04, 0x27, 0x12, 0x4d, 0x96, 0x4b, 0x90, 0x66, 0xa8, 0x02, 0xe2, - 0x60, 0x93, 0xb2, 0x91, 0xc4, 0x65, 0x86, 0xe2, 0xb3, 0x9c, 0x4e, 0xeb, 0xf6, 0xe9, 0xdc, 0x7d, - 0x22, 0xf6, 0x7f, 0x7f, 0x22, 0x01, 0x44, 0xc0, 0xa9, 0x04, 0x10, 0x55, 0x1c, 0xb3, 0x9d, 0x61, - 0xb2, 0xdb, 0xac, 0x1e, 0x9e, 0x13, 0x6c, 0x31, 0x21, 0xb8, 0xa0, 0x65, 0x76, 0x6a, 0xa1, 0x0a, - 0x88, 0x87, 0x91, 0x9e, 0x0b, 0xee, 0x56, 0xa4, 0xb1, 0x8d, 0xcb, 0xbe, 0xcd, 0x5b, 0xfb, 0x6e, - 0xff, 0x0e, 0xf4, 0xa6, 0xdf, 0xc4, 0xb3, 0xe5, 0x1e, 0xa2, 0x99, 0x50, 0xe5, 0xc0, 0xd6, 0x42, - 0x15, 0x94, 0xe0, 0xe0, 0x01, 0x70, 0xd6, 0x01, 0x70, 0xf6, 0xdd, 0xc0, 0x55, 0xef, 0x04, 0xae, - 0xe2, 0x98, 0x01, 0x44, 0xa6, 0x03, 0xdb, 0x4b, 0x7c, 0xbc, 0x77, 0x06, 0x4d, 0xee, 0x21, 0xb6, - 0xbf, 0x97, 0x8a, 0x46, 0xa7, 0xa3, 0xff, 0x8b, 0xdd, 0xe9, 0x1f, 0x40, 0xfc, 0x23, 0xd2, 0x8c, - 0x92, 0x4f, 0xb0, 0xad, 0xdf, 0xc1, 0x83, 0xfd, 0xa7, 0xad, 0x79, 0x7a, 0x0f, 0x6f, 0xca, 0xaa, - 0xc5, 0xf7, 0x01, 0x79, 0x8e, 0x71, 0x79, 0xe9, 0xa4, 0xb9, 0x77, 0xf6, 0xdd, 0xa9, 0xf5, 0xbc, - 0x43, 0x96, 0x3e, 0xe9, 0x0b, 0x5c, 0xdf, 0x01, 0x40, 0xf6, 0x53, 0xf7, 0x6e, 0xd6, 0x7b, 0x7c, - 0xd0, 0x53, 0x75, 0x7a, 0xcf, 0x2e, 0xd6, 0xbe, 0x71, 0xb9, 0xf6, 0x8d, 0x37, 0x6b, 0xdf, 0xb8, - 0x5e, 0xfb, 0xe0, 0xc7, 0x8d, 0x0f, 0x7e, 0xd9, 0xf8, 0xe0, 0x62, 0xe3, 0x83, 0xcb, 0x8d, 0x0f, - 0xfe, 0xda, 0xf8, 0xe0, 0xef, 0x8d, 0x6f, 0x5c, 0x6f, 0x7c, 0xf0, 0xf3, 0x95, 0x6f, 0x5c, 0x5e, - 0xf9, 0xc6, 0x9b, 0x2b, 0xdf, 0xf8, 0xb6, 0xca, 0x05, 0x88, 0xc5, 0x60, 0x60, 0x4b, 0x52, 0x1f, - 0xfc, 0x13, 0x00, 0x00, 0xff, 0xff, 0x5f, 0xa4, 0x20, 0xe0, 0xe6, 0x06, 0x00, 0x00, + // 784 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcf, 0x6f, 0xe2, 0x46, + 0x14, 0xc7, 0x3d, 0x78, 0x6c, 0x86, 0x21, 0xa4, 0xce, 0xe4, 0x47, 0x8d, 0x23, 0x39, 0x08, 0xa9, + 0x12, 0xaa, 0x5a, 0x52, 0xa5, 0x52, 0xab, 0x1e, 0x7a, 0x08, 0x91, 0x22, 0x6a, 0xb5, 0x3d, 0x38, + 0x51, 0x0f, 0xbd, 0x20, 0x03, 0x13, 0xb0, 0x02, 0x36, 0xf5, 0x98, 0x36, 0xe4, 0xd4, 0x3f, 0xa1, + 0x7f, 0x46, 0xa5, 0xfd, 0x0b, 0xf6, 0xba, 0xa7, 0xdc, 0x36, 0xd2, 0x5e, 0x72, 0x5a, 0x2d, 0xe4, + 0xb2, 0xc7, 0xfc, 0x09, 0xab, 0xf9, 0x01, 0x86, 0x88, 0x55, 0x36, 0xd2, 0xde, 0x3c, 0xdf, 0xef, + 0x9b, 0x37, 0xef, 0x7d, 0xde, 0x33, 0x2e, 0x24, 0xa3, 0x4e, 0x7d, 0x94, 0xc4, 0x69, 0x4c, 0xcc, + 0xb4, 0x1f, 0x44, 0x31, 0x73, 0x8a, 0xe9, 0x64, 0x44, 0x99, 0x14, 0x9d, 0x6f, 0x7b, 0x61, 0xda, + 0x1f, 0xb7, 0xeb, 0x9d, 0x78, 0x78, 0xd8, 0x8b, 0x7b, 0xf1, 0xa1, 0x90, 0xdb, 0xe3, 0x0b, 0x71, + 0x12, 0x07, 0xf1, 0xa5, 0xc2, 0xcb, 0xbd, 0x38, 0xee, 0x0d, 0x68, 0x16, 0x15, 0x44, 0x13, 0x69, + 0x55, 0x5f, 0xe6, 0x70, 0xe9, 0x8c, 0x26, 0x21, 0x65, 0x3e, 0xfd, 0x6b, 0x4c, 0x59, 0x4a, 0xca, + 0x18, 0x0d, 0xc3, 0xa8, 0x95, 0x86, 0x43, 0x6a, 0x83, 0x0a, 0xa8, 0xe9, 0x7e, 0x7e, 0x18, 0x46, + 0xe7, 0xe1, 0x90, 0x0a, 0x2b, 0xb8, 0x92, 0x56, 0x4e, 0x59, 0xc1, 0x95, 0xb0, 0x7e, 0xe0, 0x56, + 0xda, 0xe9, 0xd3, 0x84, 0xd9, 0x7a, 0x45, 0xaf, 0x15, 0x8f, 0x76, 0xea, 0xb2, 0xf2, 0xfa, 0xaf, + 0x41, 0x9b, 0x0e, 0x7e, 0x93, 0x66, 0x03, 0xde, 0xbc, 0x3d, 0xd0, 0xfc, 0x45, 0x2c, 0x39, 0xc0, + 0x45, 0x76, 0x19, 0x8e, 0x5a, 0x9d, 0xfe, 0x38, 0xba, 0x64, 0x36, 0xaa, 0x80, 0x1a, 0xf2, 0x31, + 0x97, 0x4e, 0x84, 0x42, 0xbe, 0xc6, 0x46, 0x3f, 0x8c, 0x52, 0x66, 0x17, 0x2a, 0x40, 0x64, 0x95, + 0xbd, 0xd4, 0xe7, 0xbd, 0xd4, 0x8f, 0xa3, 0x89, 0x2f, 0x43, 0xc8, 0xcf, 0x78, 0x9f, 0xa5, 0x09, + 0x0d, 0x86, 0x61, 0xd4, 0x53, 0x19, 0x5b, 0x6d, 0xfe, 0x52, 0x8b, 0x85, 0xd7, 0xd4, 0xee, 0x56, + 0x40, 0x0d, 0xfa, 0xf6, 0x22, 0x44, 0xbe, 0xd0, 0xe0, 0x01, 0x67, 0xe1, 0x35, 0xf5, 0x20, 0x82, + 0x96, 0xe1, 0x41, 0x64, 0x58, 0xa6, 0x07, 0x91, 0x69, 0xe5, 0x3d, 0x88, 0xf2, 0x16, 0xf2, 0x20, + 0xc2, 0x56, 0xd1, 0x83, 0xa8, 0x68, 0x6d, 0x78, 0x10, 0x6d, 0x58, 0x25, 0x0f, 0xa2, 0x92, 0xb5, + 0x59, 0xfd, 0x11, 0x1b, 0x67, 0x69, 0x90, 0x32, 0x52, 0xc7, 0xdb, 0x17, 0x94, 0x37, 0xd4, 0x6d, + 0x85, 0x51, 0x97, 0x5e, 0xb5, 0xda, 0x93, 0x94, 0x32, 0x41, 0x0f, 0xfa, 0x5b, 0xca, 0xfa, 0x85, + 0x3b, 0x0d, 0x6e, 0x54, 0xdf, 0xe4, 0xf0, 0xe6, 0x1c, 0x3a, 0x1b, 0xc5, 0x11, 0xa3, 0xa4, 0x86, + 0x4d, 0x26, 0x14, 0x71, 0xab, 0x78, 0xb4, 0x39, 0xa7, 0x27, 0xe3, 0x9a, 0x9a, 0xaf, 0x7c, 0xe2, + 0xe0, 0xfc, 0x3f, 0x41, 0x12, 0x85, 0x51, 0x4f, 0xcc, 0xa0, 0xd0, 0xd4, 0xfc, 0xb9, 0x40, 0xbe, + 0x99, 0xc3, 0xd2, 0x3f, 0x0e, 0xab, 0xa9, 0xcd, 0x71, 0x7d, 0x85, 0x0d, 0xc6, 0xeb, 0xb7, 0xa1, + 0x88, 0x2e, 0x2d, 0x9e, 0xe4, 0x22, 0x0f, 0x13, 0x2e, 0x39, 0xc5, 0x56, 0x46, 0x55, 0x15, 0x69, + 0x88, 0x1b, 0xe5, 0xec, 0x06, 0xf7, 0x65, 0xa9, 0x82, 0x67, 0x53, 0xf3, 0xbf, 0x58, 0x5c, 0x92, + 0x3a, 0x39, 0xc7, 0x5f, 0x3e, 0xce, 0x33, 0x1f, 0xbb, 0x29, 0xd2, 0x39, 0xeb, 0xd2, 0xc9, 0x21, + 0x35, 0x35, 0x7f, 0xf7, 0x51, 0x3e, 0x35, 0x3d, 0x84, 0xcd, 0x84, 0xb2, 0xf1, 0x20, 0xad, 0xbe, + 0x00, 0x78, 0x4b, 0xec, 0xda, 0xef, 0xc1, 0x30, 0x5b, 0xe7, 0x1d, 0xd1, 0x64, 0x92, 0x0a, 0x24, + 0xba, 0x2f, 0x0f, 0xc4, 0xc2, 0x3a, 0x8d, 0xba, 0xa2, 0x71, 0xdd, 0xe7, 0x9f, 0xd9, 0x9e, 0x19, + 0x4f, 0xef, 0xd9, 0xf2, 0xb2, 0x9b, 0x9f, 0xbe, 0xec, 0x1e, 0x44, 0xc0, 0xca, 0x79, 0x10, 0xe5, + 0x2c, 0xbd, 0x9a, 0x60, 0xb2, 0x5c, 0xac, 0x5a, 0x83, 0x1d, 0x6c, 0x44, 0x5c, 0xb0, 0x41, 0x45, + 0xaf, 0x15, 0x7c, 0x79, 0x20, 0x0e, 0x46, 0x6a, 0xc2, 0xcc, 0xce, 0x09, 0x63, 0x71, 0xce, 0xea, + 0xd6, 0x9f, 0xac, 0xbb, 0xfa, 0x0a, 0xa8, 0x47, 0xff, 0x08, 0x06, 0xe3, 0x15, 0x44, 0x03, 0xae, + 0x8a, 0xd5, 0x2b, 0xf8, 0xf2, 0x90, 0x81, 0x83, 0x6b, 0xc0, 0x19, 0x6b, 0xc0, 0x99, 0xcf, 0x03, + 0x97, 0x7f, 0x16, 0xb8, 0x9c, 0xa5, 0x7b, 0x10, 0xe9, 0x16, 0xac, 0x8e, 0xf1, 0xf6, 0x4a, 0x0f, + 0x8a, 0xdc, 0x1e, 0x36, 0xff, 0x16, 0x8a, 0x42, 0xa7, 0x4e, 0x9f, 0x8b, 0xdd, 0xd1, 0x6b, 0xc0, + 0xff, 0xf6, 0x38, 0xa1, 0xe4, 0x27, 0x6c, 0xaa, 0x8d, 0xde, 0x5d, 0xfd, 0x49, 0x15, 0x4f, 0x67, + 0xef, 0xb1, 0x2c, 0x4b, 0xfc, 0x0e, 0x90, 0x13, 0x8c, 0xb3, 0xa1, 0x93, 0xf2, 0x4a, 0xef, 0xcb, + 0x5b, 0xeb, 0x38, 0xeb, 0x2c, 0xd5, 0xe9, 0x29, 0x2e, 0x2e, 0x01, 0x20, 0xab, 0xa1, 0x2b, 0x93, + 0x75, 0xf6, 0xd7, 0x7a, 0x32, 0x4f, 0xe3, 0xf8, 0x66, 0xea, 0x6a, 0xb7, 0x53, 0x57, 0xbb, 0x9b, + 0xba, 0xda, 0xc3, 0xd4, 0x05, 0xff, 0xce, 0x5c, 0xf0, 0xff, 0xcc, 0x05, 0x37, 0x33, 0x17, 0xdc, + 0xce, 0x5c, 0xf0, 0x6e, 0xe6, 0x82, 0xf7, 0x33, 0x57, 0x7b, 0x98, 0xb9, 0xe0, 0xbf, 0x7b, 0x57, + 0xbb, 0xbd, 0x77, 0xb5, 0xbb, 0x7b, 0x57, 0xfb, 0x33, 0xcf, 0x38, 0x88, 0x51, 0xbb, 0x6d, 0x0a, + 0x52, 0xdf, 0x7f, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x72, 0x44, 0x83, 0xb0, 0x06, 0x00, 0x00, } func (this *SeriesRequest) Equal(that interface{}) bool { @@ -581,9 +578,6 @@ func (this *SeriesResponse) Equal(that interface{}) bool { } else if !this.Result.Equal(that1.Result) { return false } - if this.IsEndOfSeriesStream != that1.IsEndOfSeriesStream { - return false - } return true } func (this *SeriesResponse_Series) Equal(that interface{}) bool { @@ -926,12 +920,11 @@ func (this *SeriesResponse) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 11) + s := make([]string, 0, 10) s = append(s, "&storepb.SeriesResponse{") if this.Result != nil { s = append(s, "Result: "+fmt.Sprintf("%#v", this.Result)+",\n") } - s = append(s, "IsEndOfSeriesStream: "+fmt.Sprintf("%#v", this.IsEndOfSeriesStream)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -1389,16 +1382,6 @@ func (m *SeriesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l - if m.IsEndOfSeriesStream { - i-- - if m.IsEndOfSeriesStream { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } if m.Result != nil { { size := m.Result.Size() @@ -1818,9 +1801,6 @@ func (m *SeriesResponse) Size() (n int) { if m.Result != nil { n += m.Result.Size() } - if m.IsEndOfSeriesStream { - n += 2 - } return n } @@ -2040,7 +2020,6 @@ func (this *SeriesResponse) String() string { } s := strings.Join([]string{`&SeriesResponse{`, `Result:` + fmt.Sprintf("%v", this.Result) + `,`, - `IsEndOfSeriesStream:` + fmt.Sprintf("%v", this.IsEndOfSeriesStream) + `,`, `}`, }, "") return s @@ -2682,26 +2661,6 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { } m.Result = &SeriesResponse_StreamingSeriesChunks{v} iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IsEndOfSeriesStream", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRpc - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.IsEndOfSeriesStream = bool(v != 0) default: iNdEx = preIndex skippy, err := skipRpc(dAtA[iNdEx:]) diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index 4a28a836fbf..688f013c340 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -113,8 +113,6 @@ message SeriesResponse { StreamSeriesChunks streaming_series_chunks = 6; } - - bool is_end_of_series_stream = 7; } message LabelNamesRequest { diff --git a/pkg/storegateway/storepb/types.pb.go b/pkg/storegateway/storepb/types.pb.go index 4c2fc79aae7..93491a92a81 100644 --- a/pkg/storegateway/storepb/types.pb.go +++ b/pkg/storegateway/storepb/types.pb.go @@ -190,7 +190,8 @@ func (m *StreamingSeries) XXX_DiscardUnknown() { var xxx_messageInfo_StreamingSeries proto.InternalMessageInfo type StreamSeriesBatch struct { - Series []*StreamingSeries `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` + Series []*StreamingSeries `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` + IsEndOfSeriesStream bool `protobuf:"varint,2,opt,name=is_end_of_series_stream,json=isEndOfSeriesStream,proto3" json:"is_end_of_series_stream,omitempty"` } func (m *StreamSeriesBatch) Reset() { *m = StreamSeriesBatch{} } @@ -354,47 +355,49 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 638 bytes of a gzipped FileDescriptorProto + // 668 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xf6, 0x24, 0x8e, 0xe3, 0x4c, 0xda, 0x5b, 0x77, 0x5a, 0xdd, 0x9b, 0x76, 0x31, 0xcd, 0xf5, - 0x2a, 0xba, 0x52, 0x9d, 0x4b, 0x61, 0x83, 0xc4, 0xa6, 0x81, 0xa0, 0x12, 0xf1, 0xd3, 0xba, 0x45, - 0x42, 0x08, 0x29, 0x1a, 0x27, 0x13, 0x67, 0xd4, 0xf8, 0x47, 0xe3, 0x09, 0x24, 0xbb, 0x3e, 0x02, - 0xaf, 0xc0, 0x8e, 0x17, 0x41, 0xea, 0x8e, 0x2e, 0x2b, 0x16, 0x15, 0x71, 0x37, 0x2c, 0xfb, 0x08, - 0xc8, 0x33, 0x0e, 0xa4, 0x74, 0x53, 0x36, 0xac, 0x3c, 0xe7, 0x7c, 0xdf, 0x39, 0xdf, 0x37, 0x47, - 0x67, 0x0c, 0xab, 0x62, 0x1a, 0xd3, 0xc4, 0x89, 0x79, 0x24, 0x22, 0x64, 0x88, 0x21, 0x09, 0xa3, - 0x64, 0x73, 0xdb, 0x67, 0x62, 0x38, 0xf6, 0x9c, 0x5e, 0x14, 0x34, 0xfd, 0xc8, 0x8f, 0x9a, 0x12, - 0xf6, 0xc6, 0x03, 0x19, 0xc9, 0x40, 0x9e, 0x54, 0xd9, 0xe6, 0xff, 0x8b, 0x74, 0x4e, 0x06, 0x24, - 0x24, 0xcd, 0x80, 0x05, 0x8c, 0x37, 0xe3, 0x63, 0x5f, 0x9d, 0x62, 0x4f, 0x7d, 0x55, 0x85, 0xfd, - 0x19, 0xc0, 0xd2, 0xc3, 0xe1, 0x38, 0x3c, 0x46, 0xff, 0x41, 0x3d, 0x73, 0x50, 0x03, 0x75, 0xd0, - 0xf8, 0x6b, 0xe7, 0x6f, 0x47, 0x39, 0x70, 0x24, 0xe8, 0xb4, 0xc3, 0x5e, 0xd4, 0x67, 0xa1, 0xef, - 0x4a, 0x0e, 0xda, 0x87, 0x7a, 0x9f, 0x08, 0x52, 0x2b, 0xd4, 0x41, 0x63, 0xa9, 0xf5, 0xe0, 0xf4, - 0x62, 0x4b, 0xfb, 0x72, 0xb1, 0x75, 0xef, 0x36, 0xea, 0xce, 0xcb, 0x30, 0x21, 0x03, 0xda, 0x9a, - 0x0a, 0x7a, 0x38, 0x62, 0x3d, 0xea, 0xca, 0x4e, 0xf6, 0x1e, 0x34, 0xe7, 0x1a, 0x68, 0x19, 0x56, - 0xa4, 0x6a, 0xf7, 0xd5, 0x0b, 0xd7, 0xd2, 0xd0, 0x1a, 0x5c, 0x51, 0xe1, 0x1e, 0x4b, 0x44, 0xe4, - 0x73, 0x12, 0x58, 0x00, 0xd5, 0xe0, 0xba, 0x4a, 0x3e, 0x1e, 0x45, 0x44, 0xfc, 0x44, 0x0a, 0xf6, - 0x07, 0x00, 0x8d, 0x43, 0xca, 0x19, 0x4d, 0xd0, 0x00, 0x1a, 0x23, 0xe2, 0xd1, 0x51, 0x52, 0x03, - 0xf5, 0x62, 0xa3, 0xba, 0xb3, 0xe6, 0xf4, 0x22, 0x2e, 0xe8, 0x24, 0xf6, 0x9c, 0xa7, 0x59, 0x7e, - 0x9f, 0x30, 0xde, 0xba, 0x9f, 0xbb, 0xbf, 0x73, 0x2b, 0xf7, 0xb2, 0x6e, 0xb7, 0x4f, 0x62, 0x41, - 0xb9, 0x9b, 0x77, 0x47, 0x4d, 0x68, 0xf4, 0x32, 0x33, 0x49, 0xad, 0x20, 0x75, 0x56, 0xe7, 0xc3, - 0xdb, 0xf5, 0x7d, 0x2e, 0x6d, 0xb6, 0xf4, 0x4c, 0xc5, 0xcd, 0x69, 0xf6, 0x14, 0xae, 0x1c, 0x0a, - 0x4e, 0x49, 0xc0, 0x42, 0xff, 0xcf, 0x7a, 0xb5, 0x1f, 0xc1, 0x55, 0x25, 0xad, 0x74, 0x5b, 0x44, - 0xf4, 0x86, 0xd9, 0x05, 0x12, 0x19, 0xe6, 0xe2, 0xff, 0xcc, 0x2f, 0xf0, 0x8b, 0x4b, 0x37, 0xa7, - 0xd9, 0x43, 0x88, 0x16, 0xbb, 0xc8, 0x3b, 0x26, 0xe8, 0x5f, 0xb8, 0xa4, 0xf0, 0x2e, 0x0b, 0xfb, - 0x74, 0x22, 0x57, 0x49, 0x77, 0xab, 0x2a, 0xf7, 0x24, 0x4b, 0xfd, 0xfe, 0xa8, 0x4e, 0x00, 0xac, - 0xfc, 0xc0, 0xd0, 0x06, 0x34, 0x03, 0x16, 0x76, 0x05, 0x0b, 0xd4, 0xa2, 0x16, 0xdd, 0x72, 0xc0, - 0xc2, 0x23, 0x16, 0x50, 0x09, 0x91, 0x89, 0x82, 0x0a, 0x39, 0x44, 0x26, 0x12, 0xda, 0x82, 0x45, - 0x4e, 0xde, 0xd5, 0x8a, 0x75, 0xd0, 0xa8, 0xee, 0x2c, 0x5f, 0xdb, 0x6c, 0x37, 0x43, 0x3a, 0xba, - 0xa9, 0x5b, 0xa5, 0x8e, 0x6e, 0x96, 0x2c, 0xa3, 0xa3, 0x9b, 0x86, 0x55, 0xee, 0xe8, 0x66, 0xd9, - 0x32, 0x3b, 0xba, 0x69, 0x5a, 0x15, 0xfb, 0x13, 0x80, 0x4b, 0x72, 0x96, 0xcf, 0xb2, 0x61, 0x51, - 0x8e, 0xb6, 0xaf, 0x3d, 0x95, 0x8d, 0x79, 0xc3, 0x45, 0x8e, 0x73, 0x34, 0x8d, 0x69, 0xfe, 0x5a, - 0x10, 0xd4, 0x43, 0x92, 0xbb, 0xaa, 0xb8, 0xf2, 0x8c, 0xd6, 0x61, 0xe9, 0x2d, 0x19, 0x8d, 0xa9, - 0x34, 0x55, 0x71, 0x55, 0x60, 0xbf, 0x81, 0x7a, 0x56, 0x97, 0xad, 0xfc, 0x62, 0xb3, 0x6e, 0xfb, - 0xc0, 0xd2, 0xd0, 0x3a, 0xb4, 0xae, 0x25, 0x9f, 0xb7, 0x0f, 0x2c, 0x70, 0x83, 0xea, 0xb6, 0xad, - 0xc2, 0x4d, 0xaa, 0xdb, 0xb6, 0x8a, 0xad, 0xdd, 0xd3, 0x19, 0xd6, 0xce, 0x66, 0x58, 0x3b, 0x9f, - 0x61, 0xed, 0x6a, 0x86, 0xc1, 0x49, 0x8a, 0xc1, 0xc7, 0x14, 0x83, 0xd3, 0x14, 0x83, 0xb3, 0x14, - 0x83, 0xaf, 0x29, 0x06, 0xdf, 0x52, 0xac, 0x5d, 0xa5, 0x18, 0xbc, 0xbf, 0xc4, 0xda, 0xd9, 0x25, - 0xd6, 0xce, 0x2f, 0xb1, 0xf6, 0xba, 0x9c, 0x88, 0x88, 0xd3, 0xd8, 0xf3, 0x0c, 0xf9, 0xd7, 0xb8, - 0xfb, 0x3d, 0x00, 0x00, 0xff, 0xff, 0x18, 0xad, 0xe5, 0x96, 0xad, 0x04, 0x00, 0x00, + 0x14, 0xf6, 0x24, 0x4e, 0xe2, 0x4c, 0xda, 0x5b, 0x77, 0x5a, 0xdd, 0xa6, 0x5d, 0x4c, 0x73, 0xbd, + 0x8a, 0xae, 0x54, 0x07, 0x4a, 0x37, 0x48, 0x6c, 0x1a, 0x14, 0x54, 0x22, 0xa0, 0xed, 0xb4, 0x48, + 0x08, 0x21, 0x45, 0xe3, 0x64, 0xe2, 0x8c, 0x1a, 0xff, 0xc8, 0x9e, 0x40, 0xc2, 0xaa, 0x8f, 0xc0, + 0x2b, 0xb0, 0xe3, 0x45, 0x90, 0xba, 0xa3, 0xcb, 0x8a, 0x45, 0x45, 0xdc, 0x0d, 0xcb, 0x3e, 0x02, + 0xf2, 0x8c, 0x03, 0x29, 0xdd, 0x94, 0x0d, 0xab, 0xcc, 0x39, 0xdf, 0x77, 0xce, 0xf7, 0xcd, 0xc9, + 0x1c, 0xc3, 0x8a, 0x98, 0x84, 0x2c, 0xb6, 0xc3, 0x28, 0x10, 0x01, 0x2a, 0x8a, 0x01, 0xf5, 0x83, + 0x78, 0x63, 0xcb, 0xe5, 0x62, 0x30, 0x72, 0xec, 0x6e, 0xe0, 0x35, 0xdc, 0xc0, 0x0d, 0x1a, 0x12, + 0x76, 0x46, 0x7d, 0x19, 0xc9, 0x40, 0x9e, 0x54, 0xd9, 0xc6, 0xbd, 0x79, 0x7a, 0x44, 0xfb, 0xd4, + 0xa7, 0x0d, 0x8f, 0x7b, 0x3c, 0x6a, 0x84, 0x27, 0xae, 0x3a, 0x85, 0x8e, 0xfa, 0x55, 0x15, 0xd6, + 0x17, 0x00, 0x0b, 0x8f, 0x07, 0x23, 0xff, 0x04, 0xfd, 0x0f, 0xf5, 0xd4, 0x41, 0x15, 0xd4, 0x40, + 0xfd, 0x9f, 0xed, 0x7f, 0x6d, 0xe5, 0xc0, 0x96, 0xa0, 0xdd, 0xf2, 0xbb, 0x41, 0x8f, 0xfb, 0x2e, + 0x91, 0x1c, 0x74, 0x00, 0xf5, 0x1e, 0x15, 0xb4, 0x9a, 0xab, 0x81, 0xfa, 0x42, 0xf3, 0xd1, 0xd9, + 0xe5, 0xa6, 0xf6, 0xf5, 0x72, 0x73, 0xe7, 0x2e, 0xea, 0xf6, 0x4b, 0x3f, 0xa6, 0x7d, 0xd6, 0x9c, + 0x08, 0x76, 0x34, 0xe4, 0x5d, 0x46, 0x64, 0x27, 0x6b, 0x0f, 0x1a, 0x33, 0x0d, 0xb4, 0x08, 0xcb, + 0x52, 0xb5, 0xf3, 0x6a, 0x9f, 0x98, 0x1a, 0x5a, 0x81, 0x4b, 0x2a, 0xdc, 0xe3, 0xb1, 0x08, 0xdc, + 0x88, 0x7a, 0x26, 0x40, 0x55, 0xb8, 0xaa, 0x92, 0x4f, 0x86, 0x01, 0x15, 0xbf, 0x90, 0x9c, 0xf5, + 0x11, 0xc0, 0xe2, 0x11, 0x8b, 0x38, 0x8b, 0x51, 0x1f, 0x16, 0x87, 0xd4, 0x61, 0xc3, 0xb8, 0x0a, + 0x6a, 0xf9, 0x7a, 0x65, 0x7b, 0xc5, 0xee, 0x06, 0x91, 0x60, 0xe3, 0xd0, 0xb1, 0x9f, 0xa5, 0xf9, + 0x03, 0xca, 0xa3, 0xe6, 0xc3, 0xcc, 0xfd, 0xfd, 0x3b, 0xb9, 0x97, 0x75, 0xbb, 0x3d, 0x1a, 0x0a, + 0x16, 0x91, 0xac, 0x3b, 0x6a, 0xc0, 0x62, 0x37, 0x35, 0x13, 0x57, 0x73, 0x52, 0x67, 0x79, 0x36, + 0xbc, 0x5d, 0xd7, 0x8d, 0xa4, 0xcd, 0xa6, 0x9e, 0xaa, 0x90, 0x8c, 0x66, 0x4d, 0xe0, 0xd2, 0x91, + 0x88, 0x18, 0xf5, 0xb8, 0xef, 0xfe, 0x5d, 0xaf, 0xd6, 0x7b, 0xb8, 0xac, 0xa4, 0x95, 0x6e, 0x93, + 0x8a, 0xee, 0x20, 0xbd, 0x40, 0x2c, 0xc3, 0x4c, 0x7c, 0x6d, 0x76, 0x81, 0xdf, 0x5c, 0x92, 0x8c, + 0x86, 0x76, 0xe0, 0x1a, 0x8f, 0x3b, 0xcc, 0xef, 0x75, 0x82, 0x7e, 0x47, 0xe5, 0x3a, 0xb1, 0xe4, + 0xca, 0x37, 0x61, 0x90, 0x15, 0x1e, 0xb7, 0xfc, 0xde, 0x7e, 0x5f, 0xd5, 0xa9, 0x36, 0xd6, 0x00, + 0xa2, 0x79, 0x6d, 0x39, 0x99, 0x18, 0xfd, 0x07, 0x17, 0xb2, 0x0e, 0xdc, 0xef, 0xb1, 0xb1, 0x7c, + 0x80, 0x3a, 0xa9, 0xa8, 0xdc, 0xd3, 0x34, 0xf5, 0xe7, 0x03, 0x3e, 0x05, 0xb0, 0xfc, 0x13, 0x43, + 0xeb, 0xd0, 0xf0, 0xb8, 0xdf, 0x11, 0xdc, 0x53, 0xcf, 0x3b, 0x4f, 0x4a, 0x1e, 0xf7, 0x8f, 0xb9, + 0xc7, 0x24, 0x44, 0xc7, 0x0a, 0xca, 0x65, 0x10, 0x1d, 0x4b, 0x68, 0x13, 0xe6, 0x23, 0xfa, 0xae, + 0x9a, 0xaf, 0x81, 0x7a, 0x65, 0x7b, 0xf1, 0xc6, 0x3e, 0x90, 0x14, 0x69, 0xeb, 0x86, 0x6e, 0x16, + 0xda, 0xba, 0x51, 0x30, 0x8b, 0x6d, 0xdd, 0x28, 0x9a, 0xa5, 0xb6, 0x6e, 0x94, 0x4c, 0xa3, 0xad, + 0x1b, 0x86, 0x59, 0xb6, 0x3e, 0x03, 0xb8, 0x20, 0xff, 0x81, 0xe7, 0xe9, 0x88, 0x59, 0x84, 0xb6, + 0x6e, 0x2c, 0xd8, 0xfa, 0xac, 0xe1, 0x3c, 0xc7, 0x3e, 0x9e, 0x84, 0x2c, 0xdb, 0x31, 0x04, 0x75, + 0x9f, 0x66, 0xae, 0xca, 0x44, 0x9e, 0xd1, 0x2a, 0x2c, 0xbc, 0xa5, 0xc3, 0x11, 0x93, 0xa6, 0xca, + 0x44, 0x05, 0xd6, 0x1b, 0xa8, 0xa7, 0x75, 0xe9, 0xa2, 0xcc, 0x37, 0xeb, 0xb4, 0x0e, 0x4d, 0x0d, + 0xad, 0x42, 0xf3, 0x46, 0xf2, 0x45, 0xeb, 0xd0, 0x04, 0xb7, 0xa8, 0xa4, 0x65, 0xe6, 0x6e, 0x53, + 0x49, 0xcb, 0xcc, 0x37, 0x77, 0xcf, 0xa6, 0x58, 0x3b, 0x9f, 0x62, 0xed, 0x62, 0x8a, 0xb5, 0xeb, + 0x29, 0x06, 0xa7, 0x09, 0x06, 0x9f, 0x12, 0x0c, 0xce, 0x12, 0x0c, 0xce, 0x13, 0x0c, 0xbe, 0x25, + 0x18, 0x7c, 0x4f, 0xb0, 0x76, 0x9d, 0x60, 0xf0, 0xe1, 0x0a, 0x6b, 0xe7, 0x57, 0x58, 0xbb, 0xb8, + 0xc2, 0xda, 0xeb, 0x52, 0x2c, 0x82, 0x88, 0x85, 0x8e, 0x53, 0x94, 0xdf, 0x9a, 0x07, 0x3f, 0x02, + 0x00, 0x00, 0xff, 0xff, 0xb3, 0x55, 0x72, 0x7c, 0xe3, 0x04, 0x00, 0x00, } func (x Chunk_Encoding) String() string { @@ -531,6 +534,9 @@ func (this *StreamSeriesBatch) Equal(that interface{}) bool { return false } } + if this.IsEndOfSeriesStream != that1.IsEndOfSeriesStream { + return false + } return true } func (this *StreamSeriesChunks) Equal(that interface{}) bool { @@ -667,11 +673,12 @@ func (this *StreamSeriesBatch) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&storepb.StreamSeriesBatch{") if this.Series != nil { s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") } + s = append(s, "IsEndOfSeriesStream: "+fmt.Sprintf("%#v", this.IsEndOfSeriesStream)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -872,6 +879,16 @@ func (m *StreamSeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.IsEndOfSeriesStream { + i-- + if m.IsEndOfSeriesStream { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } if len(m.Series) > 0 { for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- { { @@ -1091,6 +1108,9 @@ func (m *StreamSeriesBatch) Size() (n int) { n += 1 + l + sovTypes(uint64(l)) } } + if m.IsEndOfSeriesStream { + n += 2 + } return n } @@ -1205,6 +1225,7 @@ func (this *StreamSeriesBatch) String() string { repeatedStringForSeries += "}" s := strings.Join([]string{`&StreamSeriesBatch{`, `Series:` + repeatedStringForSeries + `,`, + `IsEndOfSeriesStream:` + fmt.Sprintf("%v", this.IsEndOfSeriesStream) + `,`, `}`, }, "") return s @@ -1633,6 +1654,26 @@ func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IsEndOfSeriesStream", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.IsEndOfSeriesStream = bool(v != 0) default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index f04259cec40..055a0d0e999 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -42,7 +42,8 @@ message StreamingSeries { } message StreamSeriesBatch { - repeated StreamingSeries series = 1; // Only labels are populated here. + repeated StreamingSeries series = 1; + bool is_end_of_series_stream = 2; } message StreamSeriesChunks { From c321c2e268c5920b6d1bfd958aaa760b37c1a583 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 8 Jun 2023 19:43:25 +0530 Subject: [PATCH 08/75] Remove the default streaming Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index de73842234f..f52774e02d0 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -708,7 +708,6 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return errors.Wrapf(err, "failed to create series request") } - req.StreamingChunksBatchSize = 10 stream, err := c.Series(gCtx, req) if err != nil { if shouldStopQueryFunc(err) { From 32d4bce2b787d52ff6a3a811131b87c556da0960 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Fri, 9 Jun 2023 16:01:11 +0530 Subject: [PATCH 09/75] Extend testBucketStore_e2e to test streaming Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_e2e_test.go | 27 +++--- pkg/storegateway/bucket_store_server_test.go | 88 +++++++++++++++++++- 2 files changed, 100 insertions(+), 15 deletions(-) diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 55a14b09c84..35416241c7e 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -417,19 +417,22 @@ func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite) { }, }, } { - if ok := t.Run(fmt.Sprint(i), func(t *testing.T) { - seriesSet, _, _, err := srv.Series(context.Background(), tcase.req) - require.NoError(t, err) - - assert.Equal(t, len(tcase.expected), len(seriesSet)) - - for i, s := range seriesSet { - assert.Equal(t, tcase.expected[i], s.Labels) - assert.Equal(t, tcase.expectedChunkLen, len(s.Chunks)) + for _, streamingBatchSize := range []uint64{0, 1, 10} { + if ok := t.Run(fmt.Sprintf("%d,streamingBatchSize=%d", i, streamingBatchSize), func(t *testing.T) { + tcase.req.StreamingChunksBatchSize = streamingBatchSize + seriesSet, _, _, err := srv.Series(context.Background(), tcase.req) + require.NoError(t, err) + + assert.Equal(t, len(tcase.expected), len(seriesSet)) + + for i, s := range seriesSet { + assert.Equal(t, tcase.expected[i], s.Labels) + assert.Equal(t, tcase.expectedChunkLen, len(s.Chunks)) + } + assertQueryStatsMetricsRecorded(t, len(tcase.expected), tcase.expectedChunkLen, s.metricsRegistry) + }); !ok { + return } - assertQueryStatsMetricsRecorded(t, len(tcase.expected), tcase.expectedChunkLen, s.metricsRegistry) - }); !ok { - return } } } diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index f0bb5545ff5..b40025495a3 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -95,9 +95,10 @@ func newStoreGatewayTestServer(t testing.TB, store storegatewaypb.StoreGatewaySe // via the gRPC stream. func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest) (seriesSet []*storepb.Series, warnings storage.Warnings, hints hintspb.SeriesResponseHints, err error) { var ( - conn *grpc.ClientConn - stream storepb.Store_SeriesClient - res *storepb.SeriesResponse + conn *grpc.ClientConn + stream storepb.Store_SeriesClient + res *storepb.SeriesResponse + streamingSeriesSet []*storepb.StreamSeriesBatch ) // Create a gRPC connection to the server. @@ -143,6 +144,10 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } if recvSeries := res.GetSeries(); recvSeries != nil { + if !req.SkipChunks && req.StreamingChunksBatchSize > 0 { + err = errors.New("got a normal series when streaming was enabled") + return + } var recvSeriesData []byte // We use a pool for the chunks and may use other pools in the future. @@ -163,6 +168,83 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest seriesSet = append(seriesSet, copiedSeries) } + + if recvSeries := res.GetStreamingSeries(); recvSeries != nil { + if req.StreamingChunksBatchSize == 0 || req.SkipChunks { + err = errors.New("got a streaming series when streaming was disabled") + return + } + + var recvSeriesData []byte + + // We prefer to stay on the safest side at this stage + // so we do a marshal+unmarshal to copy the whole series. + recvSeriesData, err = recvSeries.Marshal() + if err != nil { + err = errors.Wrap(err, "marshal received series") + return + } + + copiedSeries := &storepb.StreamSeriesBatch{} + if err = copiedSeries.Unmarshal(recvSeriesData); err != nil { + err = errors.Wrap(err, "unmarshal received series") + return + } + + streamingSeriesSet = append(streamingSeriesSet, copiedSeries) + + if recvSeries.IsEndOfSeriesStream { + break + } + } + } + if req.StreamingChunksBatchSize > 0 { + // Get the streaming chunks. + idx := -1 + for _, batch := range streamingSeriesSet { + for _, s := range batch.Series { + idx++ + // We don't expect EOF errors here. + res, err = stream.Recv() + if err != nil { + return + } + + chks := res.GetStreamingSeriesChunks() + if chks == nil { + continue + } + if chks.SeriesIndex != uint64(idx) { + err = errors.Errorf("mismatch in series ref when getting streaming chunks, exp %d, got %d", idx, chks.SeriesIndex) + return + } + + // We prefer to stay on the safest side at this stage + // so we do a marshal+unmarshal to copy the whole chunks. + var data []byte + data, err = chks.Marshal() + if err != nil { + err = errors.Wrap(err, "marshal received series") + return + } + + copiedChunks := &storepb.StreamSeriesChunks{} + if err = copiedChunks.Unmarshal(data); err != nil { + err = errors.Wrap(err, "unmarshal received series") + return + } + + seriesSet = append(seriesSet, &storepb.Series{ + Labels: s.Labels, + Chunks: copiedChunks.Chunks, + }) + } + } + + res, err = stream.Recv() + if errors.Is(err, io.EOF) { + err = nil + } } return From 3054be1f8d0e127176cde89087949965c66615fe Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Fri, 9 Jun 2023 19:22:43 +0530 Subject: [PATCH 10/75] Fix the case of wrong series in the 1st pass of streaming Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 35 ++++--- pkg/storegateway/bucket_chunk_reader_test.go | 2 +- pkg/storegateway/bucket_e2e_test.go | 96 +++++++++++++++++--- pkg/storegateway/bucket_index_reader.go | 4 +- pkg/storegateway/bucket_store_server_test.go | 13 ++- pkg/storegateway/bucket_test.go | 2 +- pkg/storegateway/series_refs.go | 36 ++++---- pkg/storegateway/series_refs_test.go | 10 +- 8 files changed, 149 insertions(+), 49 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index b56291a7905..9abc66b480b 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -532,6 +532,9 @@ type seriesChunks struct { // Series implements the storepb.StoreServer interface. func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error) { + if req.SkipChunks { + req.StreamingChunksBatchSize = 0 + } defer func() { if err == nil { return @@ -832,7 +835,6 @@ func (s *BucketStore) streamingSeriesSetForBlocks( g, _ = errgroup.WithContext(ctx) begin = time.Now() ) - for i, b := range blocks { b := b i := i @@ -864,6 +866,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( shardSelector, cachedSeriesHasher{blockSeriesHashCache}, req.SkipChunks, + req.StreamingChunksBatchSize > 0, req.MinTime, req.MaxTime, s.numChunksRangesPerSeries, stats, @@ -1165,7 +1168,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] matchers, nil, cachedSeriesHasher{nil}, - true, + true, false, minTime, maxTime, 1, // we skip chunks, so this doesn't make any difference stats, @@ -1385,7 +1388,7 @@ func labelValuesFromSeries(ctx context.Context, labelName string, seriesPerBatch b.meta, nil, nil, - true, + true, false, b.meta.MinTime, b.meta.MaxTime, b.userID, @@ -1742,7 +1745,7 @@ type symbolizedLabel struct { // decodeSeries decodes a series entry from the given byte slice decoding all chunk metas of the series. // If skipChunks is specified decodeSeries does not return any chunks, but only labels and only if at least single chunk is within time range. // decodeSeries returns false, when there are no series data for given time range. -func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]chunks.Meta, skipChunks bool) (ok bool, lset []symbolizedLabel, err error) { +func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]chunks.Meta, resMint, resMaxt int64, skipChunks, streamingSeries bool) (ok bool, lset []symbolizedLabel, err error) { *chks = (*chks)[:0] @@ -1777,16 +1780,24 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch // Found a chunk. if skipChunks { - // We are not interested in chunks and we know there is at least one, that's enough to return series. - return true, lset, nil + if streamingSeries { + // We are not interested in chunks, but we want the series to overlap with the query mint-maxt. + if maxt >= resMint && mint <= resMaxt { + // Chunk overlaps. + return true, lset, nil + } + } else { + // We are not interested in chunks and we know there is at least one, that's enough to return series. + return true, lset, nil + } + } else { + *chks = append(*chks, chunks.Meta{ + Ref: chunks.ChunkRef(ref), + MinTime: mint, + MaxTime: maxt, + }) } - *chks = append(*chks, chunks.Meta{ - Ref: chunks.ChunkRef(ref), - MinTime: mint, - MaxTime: maxt, - }) - mint = maxt } return len(*chks) > 0, lset, d.Err() diff --git a/pkg/storegateway/bucket_chunk_reader_test.go b/pkg/storegateway/bucket_chunk_reader_test.go index 034f4c78e12..023d1f8a6d0 100644 --- a/pkg/storegateway/bucket_chunk_reader_test.go +++ b/pkg/storegateway/bucket_chunk_reader_test.go @@ -38,7 +38,7 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "j", "foo")}, nil, nil, - false, + false, false, block.meta.MinTime, block.meta.MaxTime, 2, diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 35416241c7e..11b2f7675ba 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -67,7 +67,7 @@ type storeSuite struct { } func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt objstore.Bucket, - series []labels.Labels, extLset labels.Labels) (minTime, maxTime int64) { + series []labels.Labels, extLset labels.Labels, differentBlockTimes bool) (minTime, maxTime int64) { ctx := context.Background() logger := log.NewNopLogger() @@ -85,6 +85,13 @@ func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt o // gets created each. This way we can easily verify we got 10 chunks per series below. id1, err := block.CreateBlock(ctx, dir, series[:4], 10, mint, maxt, extLset) assert.NoError(t, err) + if differentBlockTimes { + // This shifts the 2nd block ahead by 2hrs. This way the first and the + // last blocks created have no overlapping blocks. + mint = maxt + maxt = timestamp.FromTime(now.Add(2 * time.Hour)) + maxTime = maxt + } id2, err := block.CreateBlock(ctx, dir, series[4:], 10, mint, maxt, extLset) assert.NoError(t, err) @@ -117,6 +124,7 @@ type prepareStoreConfig struct { chunksCache chunkscache.Cache metricsRegistry *prometheus.Registry postingsStrategy postingsSelectionStrategy + differentBlockTime bool } func (c *prepareStoreConfig) apply(opts ...prepareStoreConfigOption) *prepareStoreConfig { @@ -164,7 +172,7 @@ func withManyParts() prepareStoreConfigOption { func prepareStoreWithTestBlocks(t testing.TB, bkt objstore.Bucket, cfg *prepareStoreConfig) *storeSuite { extLset := labels.FromStrings("ext1", "value1") - minTime, maxTime := prepareTestBlocks(t, time.Now(), 3, cfg.tempDir, bkt, cfg.series, extLset) + minTime, maxTime := prepareTestBlocks(t, time.Now(), 3, cfg.tempDir, bkt, cfg.series, extLset, cfg.differentBlockTime) s := &storeSuite{ logger: log.NewNopLogger(), @@ -218,10 +226,16 @@ func prepareStoreWithTestBlocks(t testing.TB, bkt objstore.Bucket, cfg *prepareS return s } +type testBucketStoreCase struct { + req *storepb.SeriesRequest + expected [][]mimirpb.LabelAdapter + expectedChunkLen int +} + // TODO(bwplotka): Benchmark Series. // //nolint:revive -func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite) { +func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite, additionalCases ...testBucketStoreCase) { t.Helper() mint, maxt := s.store.TimeRange() @@ -239,11 +253,7 @@ func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite) { srv := newBucketStoreTestServer(t, s.store) // TODO(bwplotka): Add those test cases to TSDB querier_test.go as well, there are no tests for matching. - for i, tcase := range []struct { - req *storepb.SeriesRequest - expected [][]mimirpb.LabelAdapter - expectedChunkLen int - }{ + testCases := []testBucketStoreCase{ { req: &storepb.SeriesRequest{ Matchers: []storepb.LabelMatcher{ @@ -416,10 +426,11 @@ func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite) { {{Name: "a", Value: "1"}, {Name: "c", Value: "2"}}, }, }, - } { - for _, streamingBatchSize := range []uint64{0, 1, 10} { + } + for i, tcase := range append(testCases, additionalCases...) { + for _, streamingBatchSize := range []int{0, 1, 2, 10} { if ok := t.Run(fmt.Sprintf("%d,streamingBatchSize=%d", i, streamingBatchSize), func(t *testing.T) { - tcase.req.StreamingChunksBatchSize = streamingBatchSize + tcase.req.StreamingChunksBatchSize = uint64(streamingBatchSize) seriesSet, _, _, err := srv.Series(context.Background(), tcase.req) require.NoError(t, err) @@ -509,6 +520,69 @@ func TestBucketStore_e2e(t *testing.T) { }) } +func TestBucketStore_e2e_StreamingEdgeCases(t *testing.T) { + foreachStore(t, func(t *testing.T, newSuite suiteFactory) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + s := newSuite(func(config *prepareStoreConfig) { + config.differentBlockTime = true + }) + + _, maxt := s.store.TimeRange() + additionalCases := []testBucketStoreCase{ + { // This tests if the first phase of streaming that sends only the series is filtering the series by chunk time range. + // The request time range overlaps with 2 blocks with 4 timeseries each, but only the 2nd block + // has some overlapping data that should be returned. + req: &storepb.SeriesRequest{ + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: "a", Value: "1|2"}, + }, + MinTime: maxt - 121*int64(time.Minute/time.Millisecond), + MaxTime: maxt, + }, + expectedChunkLen: 1, + expected: [][]mimirpb.LabelAdapter{ + {{Name: "a", Value: "1"}, {Name: "c", Value: "1"}}, + {{Name: "a", Value: "1"}, {Name: "c", Value: "2"}}, + {{Name: "a", Value: "2"}, {Name: "c", Value: "1"}}, + {{Name: "a", Value: "2"}, {Name: "c", Value: "2"}}, + }, + }, + } + + if ok := t.Run("no caches", func(t *testing.T) { + s.cache.SwapIndexCacheWith(noopCache{}) + s.cache.SwapChunksCacheWith(chunkscache.NoopCache{}) + testBucketStore_e2e(t, ctx, s, additionalCases...) + }); !ok { + return + } + + if ok := t.Run("with large, sufficient index cache", func(t *testing.T) { + indexCache, err := indexcache.NewInMemoryIndexCacheWithConfig(s.logger, nil, indexcache.InMemoryIndexCacheConfig{ + MaxItemSize: 1e5, + MaxSize: 2e5, + }) + assert.NoError(t, err) + s.cache.SwapIndexCacheWith(indexCache) + testBucketStore_e2e(t, ctx, s, additionalCases...) + }); !ok { + return + } + + t.Run("with small index cache", func(t *testing.T) { + indexCache2, err := indexcache.NewInMemoryIndexCacheWithConfig(s.logger, nil, indexcache.InMemoryIndexCacheConfig{ + MaxItemSize: 50, + MaxSize: 100, + }) + assert.NoError(t, err) + s.cache.SwapIndexCacheWith(indexCache2) + testBucketStore_e2e(t, ctx, s, additionalCases...) + }) + }) +} + type naivePartitioner struct{} func (g naivePartitioner) Partition(length int, rng func(int) (uint64, uint64)) (parts []Part) { diff --git a/pkg/storegateway/bucket_index_reader.go b/pkg/storegateway/bucket_index_reader.go index a2a0b107a25..970f6c463d4 100644 --- a/pkg/storegateway/bucket_index_reader.go +++ b/pkg/storegateway/bucket_index_reader.go @@ -755,12 +755,12 @@ func (l *bucketIndexLoadedSeries) addSeries(ref storage.SeriesRef, data []byte) // Error is returned on decoding error or if the reference does not resolve to a known series. // // It's NOT safe to call this function concurrently with addSeries(). -func (l *bucketIndexLoadedSeries) unsafeLoadSeries(ref storage.SeriesRef, chks *[]chunks.Meta, skipChunks bool, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) (ok bool, _ []symbolizedLabel, err error) { +func (l *bucketIndexLoadedSeries) unsafeLoadSeries(ref storage.SeriesRef, chks *[]chunks.Meta, mint, maxt int64, skipChunks, streamingSeries bool, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) (ok bool, _ []symbolizedLabel, err error) { b, ok := l.series[ref] if !ok { return false, nil, errors.Errorf("series %d not found", ref) } stats.seriesProcessed++ stats.seriesProcessedSizeSum += len(b) - return decodeSeries(b, lsetPool, chks, skipChunks) + return decodeSeries(b, lsetPool, chks, mint, maxt, skipChunks, streamingSeries) } diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index b40025495a3..aa1a725873d 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -198,7 +198,8 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } } } - if req.StreamingChunksBatchSize > 0 { + + if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { // Get the streaming chunks. idx := -1 for _, batch := range streamingSeriesSet { @@ -212,7 +213,8 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest chks := res.GetStreamingSeriesChunks() if chks == nil { - continue + err = errors.Errorf("expected streaming chunks, got something else") + return } if chks.SeriesIndex != uint64(idx) { err = errors.Errorf("mismatch in series ref when getting streaming chunks, exp %d, got %d", idx, chks.SeriesIndex) @@ -242,6 +244,13 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } res, err = stream.Recv() + for err != nil { + if res.GetHints() == nil && res.GetStats() == nil { + err = errors.Errorf("got unexpected response type") + break + } + res, err = stream.Recv() + } if errors.Is(err, io.EOF) { err = nil } diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 9a4584076c1..d20100af3ef 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1164,7 +1164,7 @@ func loadSeries(ctx context.Context, tb test.TB, postings []storage.SeriesRef, i indexr.block.meta, nil, nil, - true, + true, false, 0, 0, "", diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index b70564a0694..12248462d96 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -712,19 +712,19 @@ func (l *limitingSeriesChunkRefsSetIterator) Err() error { } type loadingSeriesChunkRefsSetIterator struct { - ctx context.Context - postingsSetIterator *postingsSetsIterator - indexr *bucketIndexReader - indexCache indexcache.IndexCache - stats *safeQueryStats - blockID ulid.ULID - shard *sharding.ShardSelector - seriesHasher seriesHasher - skipChunks bool - minTime, maxTime int64 - tenantID string - chunkRangesPerSeries int - logger log.Logger + ctx context.Context + postingsSetIterator *postingsSetsIterator + indexr *bucketIndexReader + indexCache indexcache.IndexCache + stats *safeQueryStats + blockID ulid.ULID + shard *sharding.ShardSelector + seriesHasher seriesHasher + skipChunks, streamingSeries bool + minTime, maxTime int64 + tenantID string + chunkRangesPerSeries int + logger log.Logger chunkMetasBuffer []chunks.Meta @@ -743,6 +743,7 @@ func openBlockSeriesChunkRefsSetsIterator( shard *sharding.ShardSelector, // Shard selector. seriesHasher seriesHasher, skipChunks bool, // If true chunks are not loaded and minTime/maxTime are ignored. + streamingSeries bool, // If true, along with skipChunks=true, the series returned overlap with query mint and maxt. minTime, maxTime int64, // Series must have data in this time range to be returned (ignored if skipChunks=true). chunkRangesPerSeries int, stats *safeQueryStats, @@ -774,6 +775,7 @@ func openBlockSeriesChunkRefsSetsIterator( shard, seriesHasher, skipChunks, + streamingSeries, minTime, maxTime, tenantID, @@ -806,13 +808,14 @@ func newLoadingSeriesChunkRefsSetIterator( shard *sharding.ShardSelector, seriesHasher seriesHasher, skipChunks bool, + streamingSeries bool, minTime int64, maxTime int64, tenantID string, chunkRangesPerSeries int, logger log.Logger, ) *loadingSeriesChunkRefsSetIterator { - if skipChunks { + if skipChunks && !streamingSeries { minTime, maxTime = blockMeta.MinTime, blockMeta.MaxTime } @@ -826,6 +829,7 @@ func newLoadingSeriesChunkRefsSetIterator( shard: shard, seriesHasher: seriesHasher, skipChunks: skipChunks, + streamingSeries: streamingSeries, minTime: minTime, maxTime: maxTime, tenantID: tenantID, @@ -847,7 +851,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Next() bool { nextPostings := s.postingsSetIterator.At() var cachedSeriesID cachedSeriesForPostingsID - if s.skipChunks { + if s.skipChunks && !s.streamingSeries { var err error // Calculate the cache ID before we filter out anything from the postings, // so that the key doesn't depend on the series hash cache or any other filtering we do on the postings list. @@ -1118,7 +1122,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Err() error { // loadSeries returns a for chunks. It is not safe to use the returned []chunks.Meta after calling loadSeries again func (s *loadingSeriesChunkRefsSetIterator) loadSeries(ref storage.SeriesRef, loadedSeries *bucketIndexLoadedSeries, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) ([]symbolizedLabel, []chunks.Meta, error) { - ok, lbls, err := loadedSeries.unsafeLoadSeries(ref, &s.chunkMetasBuffer, s.skipChunks, stats, lsetPool) + ok, lbls, err := loadedSeries.unsafeLoadSeries(ref, &s.chunkMetasBuffer, s.minTime, s.maxTime, s.skipChunks, s.streamingSeries, stats, lsetPool) if !ok || err != nil { return nil, nil, errors.Wrap(err, "loadSeries") } diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index fc3654dd6c3..5a9e48bd026 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1340,7 +1340,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { block.meta, testCase.shard, hasher, - testCase.skipChunks, + testCase.skipChunks, false, testCase.minT, testCase.maxT, "t1", @@ -1672,7 +1672,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { []*labels.Matcher{testCase.matcher}, nil, cachedSeriesHasher{hashCache}, - testCase.skipChunks, + testCase.skipChunks, false, minT, maxT, 2, @@ -1775,6 +1775,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { nil, cachedSeriesHasher{hashCache}, true, // skip chunks since we are testing labels filtering + false, block.meta.MinTime, block.meta.MaxTime, 2, @@ -1839,6 +1840,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { nil, cachedSeriesHasher{hashCache}, false, // we don't skip chunks, so we can measure impact in loading chunk refs too + false, block.meta.MinTime, block.meta.MaxTime, 2, @@ -2387,7 +2389,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { testCase.matchers, testCase.shard, seriesHasher, - true, + true, false, b.meta.MinTime, b.meta.MaxTime, 1, @@ -2419,7 +2421,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { testCase.matchers, testCase.shard, seriesHasher, - true, + true, false, b.meta.MinTime, b.meta.MaxTime, 1, From 42c4e0857aabcdd742be65911fa6cdb2f4fab2a2 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 13:17:42 +0530 Subject: [PATCH 11/75] Use the streaming config for storegateway Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 84 ++++++++++++---------- pkg/querier/blocks_store_queryable_test.go | 2 +- 2 files changed, 48 insertions(+), 38 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index f52774e02d0..674a69f8b2e 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -143,13 +143,14 @@ func newBlocksStoreQueryableMetrics(reg prometheus.Registerer) *blocksStoreQuery type BlocksStoreQueryable struct { services.Service - stores BlocksStoreSet - finder BlocksFinder - consistency *BlocksConsistencyChecker - logger log.Logger - queryStoreAfter time.Duration - metrics *blocksStoreQueryableMetrics - limits BlocksStoreLimits + stores BlocksStoreSet + finder BlocksFinder + consistency *BlocksConsistencyChecker + logger log.Logger + queryStoreAfter time.Duration + metrics *blocksStoreQueryableMetrics + limits BlocksStoreLimits + streamingChunksBatchSize uint64 // Subservices manager. subservices *services.Manager @@ -162,6 +163,7 @@ func NewBlocksStoreQueryable( consistency *BlocksConsistencyChecker, limits BlocksStoreLimits, queryStoreAfter time.Duration, + streamingChunksBatchSize uint64, logger log.Logger, reg prometheus.Registerer, ) (*BlocksStoreQueryable, error) { @@ -171,15 +173,16 @@ func NewBlocksStoreQueryable( } q := &BlocksStoreQueryable{ - stores: stores, - finder: finder, - consistency: consistency, - queryStoreAfter: queryStoreAfter, - logger: logger, - subservices: manager, - subservicesWatcher: services.NewFailureWatcher(), - metrics: newBlocksStoreQueryableMetrics(reg), - limits: limits, + stores: stores, + finder: finder, + consistency: consistency, + queryStoreAfter: queryStoreAfter, + logger: logger, + subservices: manager, + subservicesWatcher: services.NewFailureWatcher(), + metrics: newBlocksStoreQueryableMetrics(reg), + limits: limits, + streamingChunksBatchSize: streamingChunksBatchSize, } q.Service = services.NewBasicService(q.starting, q.running, q.stopping) @@ -261,7 +264,12 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa reg, ) - return NewBlocksStoreQueryable(stores, finder, consistency, limits, querierCfg.QueryStoreAfter, logger, reg) + streamingBufferSize := querierCfg.StreamingChunksPerIngesterSeriesBufferSize + if !querierCfg.PreferStreamingChunks { + streamingBufferSize = 0 + } + + return NewBlocksStoreQueryable(stores, finder, consistency, limits, querierCfg.QueryStoreAfter, streamingBufferSize, logger, reg) } func (q *BlocksStoreQueryable) starting(ctx context.Context) error { @@ -301,30 +309,32 @@ func (q *BlocksStoreQueryable) Querier(ctx context.Context, mint, maxt int64) (s } return &blocksStoreQuerier{ - ctx: ctx, - minT: mint, - maxT: maxt, - userID: userID, - finder: q.finder, - stores: q.stores, - metrics: q.metrics, - limits: q.limits, - consistency: q.consistency, - logger: q.logger, - queryStoreAfter: q.queryStoreAfter, + ctx: ctx, + minT: mint, + maxT: maxt, + userID: userID, + finder: q.finder, + stores: q.stores, + metrics: q.metrics, + limits: q.limits, + streamingChunksBatchSize: q.streamingChunksBatchSize, + consistency: q.consistency, + logger: q.logger, + queryStoreAfter: q.queryStoreAfter, }, nil } type blocksStoreQuerier struct { - ctx context.Context - minT, maxT int64 - userID string - finder BlocksFinder - stores BlocksStoreSet - metrics *blocksStoreQueryableMetrics - consistency *BlocksConsistencyChecker - limits BlocksStoreLimits - logger log.Logger + ctx context.Context + minT, maxT int64 + userID string + finder BlocksFinder + stores BlocksStoreSet + metrics *blocksStoreQueryableMetrics + consistency *BlocksConsistencyChecker + limits BlocksStoreLimits + streamingChunksBatchSize uint64 + logger log.Logger // If set, the querier manipulates the max time to not be greater than // "now - queryStoreAfter" so that most recent blocks are not queried. diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index da599741a5e..e337520f462 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -1731,7 +1731,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { // Instantiate the querier that will be executed to run the query. logger := log.NewNopLogger() - queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, logger, nil) + queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, 0, logger, nil) require.NoError(t, err) require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck From 52d9c04c08832216372d827b42e6fd519e3be3c8 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 14:58:44 +0530 Subject: [PATCH 12/75] Updated unit tests for storegateway Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_e2e_test.go | 12 +-- pkg/storegateway/series_refs_test.go | 142 ++++++++++++++++++--------- 2 files changed, 100 insertions(+), 54 deletions(-) diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 11b2f7675ba..6ba65aa95f2 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -67,7 +67,7 @@ type storeSuite struct { } func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt objstore.Bucket, - series []labels.Labels, extLset labels.Labels, differentBlockTimes bool) (minTime, maxTime int64) { + series []labels.Labels, extLset labels.Labels, shiftedBlocks bool) (minTime, maxTime int64) { ctx := context.Background() logger := log.NewNopLogger() @@ -85,7 +85,7 @@ func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt o // gets created each. This way we can easily verify we got 10 chunks per series below. id1, err := block.CreateBlock(ctx, dir, series[:4], 10, mint, maxt, extLset) assert.NoError(t, err) - if differentBlockTimes { + if shiftedBlocks { // This shifts the 2nd block ahead by 2hrs. This way the first and the // last blocks created have no overlapping blocks. mint = maxt @@ -124,7 +124,7 @@ type prepareStoreConfig struct { chunksCache chunkscache.Cache metricsRegistry *prometheus.Registry postingsStrategy postingsSelectionStrategy - differentBlockTime bool + shiftedBlocks bool } func (c *prepareStoreConfig) apply(opts ...prepareStoreConfigOption) *prepareStoreConfig { @@ -172,7 +172,7 @@ func withManyParts() prepareStoreConfigOption { func prepareStoreWithTestBlocks(t testing.TB, bkt objstore.Bucket, cfg *prepareStoreConfig) *storeSuite { extLset := labels.FromStrings("ext1", "value1") - minTime, maxTime := prepareTestBlocks(t, time.Now(), 3, cfg.tempDir, bkt, cfg.series, extLset, cfg.differentBlockTime) + minTime, maxTime := prepareTestBlocks(t, time.Now(), 3, cfg.tempDir, bkt, cfg.series, extLset, cfg.shiftedBlocks) s := &storeSuite{ logger: log.NewNopLogger(), @@ -428,7 +428,7 @@ func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite, addit }, } for i, tcase := range append(testCases, additionalCases...) { - for _, streamingBatchSize := range []int{0, 1, 2, 10} { + for _, streamingBatchSize := range []int{0, 1, 2, 10, 256} { if ok := t.Run(fmt.Sprintf("%d,streamingBatchSize=%d", i, streamingBatchSize), func(t *testing.T) { tcase.req.StreamingChunksBatchSize = uint64(streamingBatchSize) seriesSet, _, _, err := srv.Series(context.Background(), tcase.req) @@ -526,7 +526,7 @@ func TestBucketStore_e2e_StreamingEdgeCases(t *testing.T) { defer cancel() s := newSuite(func(config *prepareStoreConfig) { - config.differentBlockTime = true + config.shiftedBlocks = true }) _, maxt := s.store.TimeRange() diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 5a9e48bd026..696e389188d 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1116,17 +1116,19 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { assert.NoError(t, appender.Commit()) }) - testCases := map[string]struct { - blockFactory func() *bucketBlock // if nil, defaultTestBlockFactory is used - shard *sharding.ShardSelector - matchers []*labels.Matcher - seriesHasher seriesHasher - skipChunks bool - minT, maxT int64 - batchSize int + type testCase struct { + blockFactory func() *bucketBlock // if nil, defaultTestBlockFactory is used + shard *sharding.ShardSelector + matchers []*labels.Matcher + seriesHasher seriesHasher + skipChunks, streamingSeries bool + minT, maxT int64 + batchSize int expectedSets []seriesChunkRefsSet - }{ + } + + testCases := map[string]testCase{ "loads one batch": { minT: 0, maxT: 10000, @@ -1307,51 +1309,95 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { return sets }(), }, + "skip chunks with streaming on 1": { + minT: 0, + maxT: 25, + batchSize: 100, + skipChunks: true, + streamingSeries: true, // mint and maxt is considered. + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, + expectedSets: []seriesChunkRefsSet{ + {series: []seriesChunkRefs{ + {lset: labels.FromStrings("l1", "v1")}, + {lset: labels.FromStrings("l1", "v2")}, + }}, + }, + }, + "skip chunks with streaming on 2": { + minT: 15, + maxT: 35, + batchSize: 100, + skipChunks: true, + streamingSeries: true, // mint and maxt is considered. + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, + expectedSets: []seriesChunkRefsSet{ + {series: []seriesChunkRefs{ + {lset: labels.FromStrings("l1", "v2")}, + {lset: labels.FromStrings("l1", "v3")}, + }}, + }, + }, + } + + runTest := func(tc testCase) { + // Setup + blockFactory := defaultTestBlockFactory + if tc.blockFactory != nil { + blockFactory = tc.blockFactory + } + block := blockFactory() + indexr := block.indexReader(selectAllStrategy{}) + postings, _, err := indexr.ExpandedPostings(context.Background(), tc.matchers, newSafeQueryStats()) + require.NoError(t, err) + postingsIterator := newPostingsSetsIterator( + postings, + tc.batchSize, + ) + hasher := tc.seriesHasher + if hasher == nil { + hasher = cachedSeriesHasher{hashcache.NewSeriesHashCache(100).GetBlockCache("")} + } + loadingIterator := newLoadingSeriesChunkRefsSetIterator( + context.Background(), + postingsIterator, + indexr, + noopCache{}, + newSafeQueryStats(), + block.meta, + tc.shard, + hasher, + tc.skipChunks, + tc.streamingSeries, + tc.minT, + tc.maxT, + "t1", + 1, + log.NewNopLogger(), + ) + + // Tests + sets := readAllSeriesChunkRefsSet(loadingIterator) + assert.NoError(t, loadingIterator.Err()) + assertSeriesChunkRefsSetsEqual(t, block.meta.ULID, tc.expectedSets, sets) } for testName, testCase := range testCases { - testName, testCase := testName, testCase + testName, tc := testName, testCase t.Run(testName, func(t *testing.T) { t.Parallel() - - // Setup - blockFactory := defaultTestBlockFactory - if testCase.blockFactory != nil { - blockFactory = testCase.blockFactory - } - block := blockFactory() - indexr := block.indexReader(selectAllStrategy{}) - postings, _, err := indexr.ExpandedPostings(context.Background(), testCase.matchers, newSafeQueryStats()) - require.NoError(t, err) - postingsIterator := newPostingsSetsIterator( - postings, - testCase.batchSize, - ) - hasher := testCase.seriesHasher - if hasher == nil { - hasher = cachedSeriesHasher{hashcache.NewSeriesHashCache(100).GetBlockCache("")} + if tc.skipChunks { + runTest(tc) + } else { + // We test with both streaming on and off when we are fetching chunks. + for _, streaming := range []bool{true, false} { + tcCopy := tc + t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { + t.Parallel() + tcCopy.streamingSeries = streaming + runTest(tcCopy) + }) + } } - loadingIterator := newLoadingSeriesChunkRefsSetIterator( - context.Background(), - postingsIterator, - indexr, - noopCache{}, - newSafeQueryStats(), - block.meta, - testCase.shard, - hasher, - testCase.skipChunks, false, - testCase.minT, - testCase.maxT, - "t1", - 1, - log.NewNopLogger(), - ) - - // Tests - sets := readAllSeriesChunkRefsSet(loadingIterator) - assert.NoError(t, loadingIterator.Err()) - assertSeriesChunkRefsSetsEqual(t, block.meta.ULID, testCase.expectedSets, sets) }) } } From 254b4e207ba3ca8900cfd6810fe92b7e122bf26d Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 18:50:42 +0530 Subject: [PATCH 13/75] Send hints and stats back in right order. Unit tests in queriers. Signed-off-by: Ganesh Vernekar --- .../blocks_store_querable_streaming.go | 22 +- pkg/querier/blocks_store_queryable_test.go | 219 +++++++++++++----- pkg/storegateway/bucket.go | 48 ++-- 3 files changed, 203 insertions(+), 86 deletions(-) diff --git a/pkg/querier/blocks_store_querable_streaming.go b/pkg/querier/blocks_store_querable_streaming.go index da675b53fc7..889598f382d 100644 --- a/pkg/querier/blocks_store_querable_streaming.go +++ b/pkg/querier/blocks_store_querable_streaming.go @@ -3,20 +3,23 @@ package querier import ( - "errors" "fmt" "io" "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/pkg/errors" "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storegateway/storegatewaypb" "github.com/grafana/mimir/pkg/storegateway/storepb" "github.com/grafana/mimir/pkg/util/limiter" + "github.com/grafana/mimir/pkg/util/validation" ) +// StreamingSeries holds the labels of the streaming series and the source to get the chunks +// for the series. type StreamingSeries struct { Labels []mimirpb.LabelAdapter Source StreamingSeriesSource @@ -29,7 +32,7 @@ type StreamingSeriesSource struct { SeriesIndex uint64 } -// SeriesChunksStreamReader is responsible for managing the streaming of chunks from an ingester and buffering +// SeriesChunksStreamReader is responsible for managing the streaming of chunks from a storegateway and buffering // chunks in memory until they are consumed by the PromQL engine. type SeriesChunksStreamReader struct { client storegatewaypb.StoreGateway_SeriesClient @@ -56,11 +59,11 @@ func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient // This method should only be called if StartBuffering is not called. func (s *SeriesChunksStreamReader) Close() { if err := s.client.CloseSend(); err != nil { - level.Warn(s.log).Log("msg", "closing ingester client stream failed", "err", err) + level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) } } -// StartBuffering begins streaming series' chunks from the store gateway associated with +// StartBuffering begins streaming series' chunks from the storegateway associated with // this SeriesChunksStreamReader. Once all series have been consumed with GetChunks, all resources // associated with this SeriesChunksStreamReader are cleaned up. // If an error occurs while streaming, a subsequent call to GetChunks will return an error. @@ -75,7 +78,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { go func() { defer func() { if err := s.client.CloseSend(); err != nil { - level.Warn(s.log).Log("msg", "closing ingester client stream failed", "err", err) + level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) } close(s.seriesCunksChan) @@ -111,7 +114,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { } if err := s.queryLimiter.AddChunks(len(c.Chunks)); err != nil { - s.errorChan <- err + s.errorChan <- validation.LimitError(err.Error()) return } @@ -120,7 +123,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { chunkBytes += ch.Size() } if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { - s.errorChan <- err + s.errorChan <- validation.LimitError(err.Error()) return } @@ -155,7 +158,10 @@ func (s *SeriesChunksStreamReader) GetChunks(seriesIndex uint64) ([]storepb.Aggr select { case err, haveError := <-s.errorChan: if haveError { - return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has failed: %w", seriesIndex, err) + if _, ok := err.(validation.LimitError); ok { + return nil, err + } + return nil, errors.Wrapf(err, "attempted to read series at index %v from stream, but the stream has failed", seriesIndex) } default: } diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index e337520f462..ee3cd3804e9 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -32,6 +32,7 @@ import ( "github.com/weaveworks/common/user" "golang.org/x/exp/slices" "google.golang.org/grpc" + "google.golang.org/grpc/metadata" "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/storage/sharding" @@ -763,77 +764,133 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - ctx := limiter.AddQueryLimiterToContext(context.Background(), testData.queryLimiter) - reg := prometheus.NewPedanticRegistry() - stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} - finder := &blocksFinderMock{} - finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) - - q := &blocksStoreQuerier{ - ctx: ctx, - minT: minT, - maxT: maxT, - userID: "user-1", - finder: finder, - stores: stores, - consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), - logger: log.NewNopLogger(), - metrics: newBlocksStoreQueryableMetrics(reg), - limits: testData.limits, - } + for _, streaming := range []bool{false, true} { + t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { + ctx := limiter.AddQueryLimiterToContext(context.Background(), testData.queryLimiter) + reg := prometheus.NewPedanticRegistry() + + if streaming { + // Convert the storegateway response to streaming response. + for _, res := range testData.storeSetResponses { + m, ok := res.(map[BlocksStoreClient][]ulid.ULID) + if ok { + for k, _ := range m { + mockClient := k.(*storeGatewayClientMock) + var seriesResponses []*storepb.Series + var newResponses []*storepb.SeriesResponse + for i, mr := range mockClient.mockedSeriesResponses { + s := mr.GetSeries() + if s != nil { + seriesResponses = append(seriesResponses, s) + continue + } + for _, s := range seriesResponses { + newResponses = append(newResponses, mockStreamingSeriesBatchResponse(false, s.Labels)) + } + newResponses = append(newResponses, mockClient.mockedSeriesResponses[i:]...) + newResponses = append(newResponses, mockStreamingSeriesBatchResponse(true)) + for idx, s := range seriesResponses { + newResponses = append(newResponses, mockStreamingSeriesChunksResponse(uint64(idx), s.Chunks)) + } + break + } + mockClient.mockedSeriesResponses = newResponses + } + } + } + } - matchers := []*labels.Matcher{ - labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, metricName), - } - if testData.queryShardID != "" { - matchers = append(matchers, labels.MustNewMatcher(labels.MatchEqual, sharding.ShardLabel, testData.queryShardID)) - } + stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} + finder := &blocksFinderMock{} + finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) + + q := &blocksStoreQuerier{ + ctx: ctx, + minT: minT, + maxT: maxT, + userID: "user-1", + finder: finder, + stores: stores, + consistency: NewBlocksConsistencyChecker(0, 0, log.NewNopLogger(), nil), + logger: log.NewNopLogger(), + metrics: newBlocksStoreQueryableMetrics(reg), + limits: testData.limits, + } - sp := &storage.SelectHints{Start: minT, End: maxT} - set := q.Select(true, sp, matchers...) - if testData.expectedErr != nil { - assert.ErrorContains(t, set.Err(), testData.expectedErr.Error()) - assert.IsType(t, set.Err(), testData.expectedErr) - assert.False(t, set.Next()) - assert.Nil(t, set.Warnings()) - return - } + matchers := []*labels.Matcher{ + labels.MustNewMatcher(labels.MatchEqual, labels.MetricName, metricName), + } + if testData.queryShardID != "" { + matchers = append(matchers, labels.MustNewMatcher(labels.MatchEqual, sharding.ShardLabel, testData.queryShardID)) + } - require.NoError(t, set.Err()) - assert.Len(t, set.Warnings(), 0) - - // Read all returned series and their values. - var actualSeries []seriesResult - var it chunkenc.Iterator - for set.Next() { - var actualValues []valueResult - - it = set.At().Iterator(it) - for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() { - assert.Equal(t, valType, chunkenc.ValFloat) - t, v := it.At() - actualValues = append(actualValues, valueResult{ - t: t, - v: v, - }) - } + sp := &storage.SelectHints{Start: minT, End: maxT} + set := q.Select(true, sp, matchers...) + if testData.expectedErr != nil { + if streaming && set.Err() == nil { + // In case of streaming, the error can happen during iteration. + foundErr := false + for set.Next() { + it := set.At().Iterator(nil) + for it.Next() != chunkenc.ValNone { + } + err := it.Err() + if err != nil { + assert.ErrorContains(t, err, testData.expectedErr.Error()) + // TODO: it is non-trivial to match the type here. The error + // gets wrapping multiple times. Is it necessary to return the exact type? + //assert.IsType(t, testData.expectedErr, err) + foundErr = true + break + } + } + assert.True(t, foundErr) + } else { + assert.ErrorContains(t, set.Err(), testData.expectedErr.Error()) + assert.IsType(t, set.Err(), testData.expectedErr) + assert.False(t, set.Next()) + assert.Nil(t, set.Warnings()) + } + return + } - require.NoError(t, it.Err()) + require.NoError(t, set.Err()) + assert.Len(t, set.Warnings(), 0) + + // Read all returned series and their values. + var actualSeries []seriesResult + var it chunkenc.Iterator + for set.Next() { + var actualValues []valueResult + + it = set.At().Iterator(it) + for valType := it.Next(); valType != chunkenc.ValNone; valType = it.Next() { + assert.Equal(t, valType, chunkenc.ValFloat) + t, v := it.At() + actualValues = append(actualValues, valueResult{ + t: t, + v: v, + }) + } + + require.NoError(t, it.Err()) + + actualSeries = append(actualSeries, seriesResult{ + lbls: set.At().Labels(), + values: actualValues, + }) + } + require.NoError(t, set.Err()) + assert.Equal(t, testData.expectedSeries, actualSeries) - actualSeries = append(actualSeries, seriesResult{ - lbls: set.At().Labels(), - values: actualValues, + // Assert on metrics (optional, only for test cases defining it). + if testData.expectedMetrics != "" { + assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(testData.expectedMetrics), + "cortex_querier_storegateway_instances_hit_per_query", "cortex_querier_storegateway_refetches_per_query", + "cortex_querier_blocks_found_total", "cortex_querier_blocks_queried_total", "cortex_querier_blocks_with_compactor_shard_but_incompatible_query_shard_total")) + } }) } - require.NoError(t, set.Err()) - assert.Equal(t, testData.expectedSeries, actualSeries) - - // Assert on metrics (optional, only for test cases defining it). - if testData.expectedMetrics != "" { - assert.NoError(t, testutil.GatherAndCompare(reg, strings.NewReader(testData.expectedMetrics), - "cortex_querier_storegateway_instances_hit_per_query", "cortex_querier_storegateway_refetches_per_query", - "cortex_querier_blocks_found_total", "cortex_querier_blocks_queried_total", "cortex_querier_blocks_with_compactor_shard_but_incompatible_query_shard_total")) - } }) } } @@ -1905,6 +1962,7 @@ type storeGatewayClientMock struct { func (m *storeGatewayClientMock) Series(context.Context, *storepb.SeriesRequest, ...grpc.CallOption) (storegatewaypb.StoreGateway_SeriesClient, error) { seriesClient := &storeGatewaySeriesClientMock{ + ClientStream: grpcClientStreamMock{}, mockedResponses: m.mockedSeriesResponses, } @@ -1942,6 +2000,15 @@ func (m *storeGatewaySeriesClientMock) Recv() (*storepb.SeriesResponse, error) { return res, nil } +type grpcClientStreamMock struct{} + +func (grpcClientStreamMock) Header() (metadata.MD, error) { return nil, nil } +func (grpcClientStreamMock) Trailer() metadata.MD { return nil } +func (grpcClientStreamMock) CloseSend() error { return nil } +func (grpcClientStreamMock) Context() context.Context { return context.Background() } +func (grpcClientStreamMock) SendMsg(m interface{}) error { return nil } +func (grpcClientStreamMock) RecvMsg(m interface{}) error { return nil } + type cancelerStoreGatewaySeriesClientMock struct { storeGatewaySeriesClientMock ctx context.Context @@ -2034,6 +2101,30 @@ func mockSeriesResponseWithChunks(lbls labels.Labels, chunks ...storepb.AggrChun } } +func mockStreamingSeriesBatchResponse(endOfStream bool, lbls ...[]mimirpb.LabelAdapter) *storepb.SeriesResponse { + res := &storepb.StreamSeriesBatch{} + for _, l := range lbls { + res.Series = append(res.Series, &storepb.StreamingSeries{Labels: l}) + } + res.IsEndOfSeriesStream = endOfStream + return &storepb.SeriesResponse{ + Result: &storepb.SeriesResponse_StreamingSeries{ + StreamingSeries: res, + }, + } +} + +func mockStreamingSeriesChunksResponse(index uint64, chks []storepb.AggrChunk) *storepb.SeriesResponse { + return &storepb.SeriesResponse{ + Result: &storepb.SeriesResponse_StreamingSeriesChunks{ + StreamingSeriesChunks: &storepb.StreamSeriesChunks{ + SeriesIndex: index, + Chunks: chks, + }, + }, + } +} + func mockStatsResponse(fetchedIndexBytes int) *storepb.SeriesResponse { return &storepb.SeriesResponse{ Result: &storepb.SeriesResponse_Stats{ diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 9abc66b480b..e18d6fcfc2a 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -629,7 +629,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) // TODO: what to do with hints here? - seriesSet, _, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err } @@ -642,6 +642,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie seriesBatch := &storepb.StreamSeriesBatch{ Series: seriesBuffer[:0], } + // TODO: can we send this in parallel while we start fetching the chunks below? for seriesSet.Next() { var lset labels.Labels // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle @@ -670,6 +671,22 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } } + // We need to send hints and stats before sending the chunks. + // Also, these need to be sent before we send IsEndOfSeriesStream=true. + var anyHints *types.Any + if anyHints, err = types.MarshalAny(resHints); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) + } + + if err := srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) + } + + unsafeStats := stats.export() + if err := srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) + } + // Send any remaining series and signal that there are no more series. msg := &grpc.PreparedMsg{} seriesBatch.IsEndOfSeriesStream = true @@ -787,21 +804,24 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return } - var anyHints *types.Any - if anyHints, err = types.MarshalAny(resHints); err != nil { - err = status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) - return - } + if req.StreamingChunksBatchSize == 0 || req.SkipChunks { + // Hints and stats were not sent before, so send it now. + var anyHints *types.Any + if anyHints, err = types.MarshalAny(resHints); err != nil { + err = status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) + return + } - if err = srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { - err = status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) - return - } + if err = srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { + err = status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) + return + } - unsafeStats := stats.export() - if err = srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { - err = status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) - return + unsafeStats := stats.export() + if err = srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { + err = status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) + return + } } return err From 0ca67b94aa4d2a4af45fdb090526222ef923572f Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 19:23:17 +0530 Subject: [PATCH 14/75] Use streaming in TestBlocksStoreQuerier_PromQLExecution Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable_test.go | 144 +++++++++++---------- 1 file changed, 79 insertions(+), 65 deletions(-) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index ee3cd3804e9..ebfda2e7987 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -764,6 +764,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { + // Non-streaming should be tested first because in the streaming case, + // the below code changes the testData in-place. for _, streaming := range []bool{false, true} { t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { ctx := limiter.AddQueryLimiterToContext(context.Background(), testData.queryLimiter) @@ -776,25 +778,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { if ok { for k, _ := range m { mockClient := k.(*storeGatewayClientMock) - var seriesResponses []*storepb.Series - var newResponses []*storepb.SeriesResponse - for i, mr := range mockClient.mockedSeriesResponses { - s := mr.GetSeries() - if s != nil { - seriesResponses = append(seriesResponses, s) - continue - } - for _, s := range seriesResponses { - newResponses = append(newResponses, mockStreamingSeriesBatchResponse(false, s.Labels)) - } - newResponses = append(newResponses, mockClient.mockedSeriesResponses[i:]...) - newResponses = append(newResponses, mockStreamingSeriesBatchResponse(true)) - for idx, s := range seriesResponses { - newResponses = append(newResponses, mockStreamingSeriesChunksResponse(uint64(idx), s.Chunks)) - } - break - } - mockClient.mockedSeriesResponses = newResponses + mockClient.mockedSeriesResponses = generateStreamingResponses(mockClient.mockedSeriesResponses) } } } @@ -895,6 +879,26 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { } } +func generateStreamingResponses(seriesResponses []*storepb.SeriesResponse) []*storepb.SeriesResponse { + var series, chunks, others []*storepb.SeriesResponse + for i, mr := range seriesResponses { + s := mr.GetSeries() + if s != nil { + series = append(series, mockStreamingSeriesBatchResponse(false, s.Labels)) + chunks = append(chunks, mockStreamingSeriesChunksResponse(uint64(len(series)-1), s.Chunks)) + continue + } + others = seriesResponses[i:] + break + } + + seriesResponses = append(seriesResponses[:0], series...) + seriesResponses = append(seriesResponses, others...) + seriesResponses = append(seriesResponses, mockStreamingSeriesBatchResponse(true)) + seriesResponses = append(seriesResponses, chunks...) + return seriesResponses +} + func TestBlocksStoreQuerier_Select_cancelledContext(t *testing.T) { const ( metricName = "test_metric" @@ -1760,57 +1764,67 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - block1 := ulid.MustNew(1, nil) - block2 := ulid.MustNew(2, nil) - - // Mock the finder to simulate we need to query two blocks. - finder := &blocksFinderMock{ - Service: services.NewIdleService(nil, nil), - } - finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ - {ID: block1}, - {ID: block2}, - }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) - - // Mock the store-gateway response, to simulate the case each block is queried from a different gateway. - gateway1 := &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: append(testData.storeGateway1Responses, mockHintsResponse(block1))} - gateway2 := &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: append(testData.storeGateway2Responses, mockHintsResponse(block2))} - - stores := &blocksStoreSetMock{ - Service: services.NewIdleService(nil, nil), - mockedResponses: []interface{}{ - map[BlocksStoreClient][]ulid.ULID{ - gateway1: {block1}, - gateway2: {block2}, - }, - }, - } + // Non-streaming should be tested first because in the streaming case, + // the below code changes the testData in-place. + for _, streaming := range []bool{false, true} { + t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { + block1 := ulid.MustNew(1, nil) + block2 := ulid.MustNew(2, nil) - // Instantiate the querier that will be executed to run the query. - logger := log.NewNopLogger() - queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, 0, logger, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) - defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck + // Mock the finder to simulate we need to query two blocks. + finder := &blocksFinderMock{ + Service: services.NewIdleService(nil, nil), + } + finder.On("GetBlocks", mock.Anything, "user-1", mock.Anything, mock.Anything).Return(bucketindex.Blocks{ + {ID: block1}, + {ID: block2}, + }, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), error(nil)) + + // Mock the store-gateway response, to simulate the case each block is queried from a different gateway. + gateway1 := &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: append(testData.storeGateway1Responses, mockHintsResponse(block1))} + gateway2 := &storeGatewayClientMock{remoteAddr: "2.2.2.2", mockedSeriesResponses: append(testData.storeGateway2Responses, mockHintsResponse(block2))} + if streaming { + gateway1.mockedSeriesResponses = generateStreamingResponses(gateway1.mockedSeriesResponses) + gateway2.mockedSeriesResponses = generateStreamingResponses(gateway2.mockedSeriesResponses) + } - engine := promql.NewEngine(promql.EngineOpts{ - Logger: logger, - Timeout: 10 * time.Second, - MaxSamples: 1e6, - }) + stores := &blocksStoreSetMock{ + Service: services.NewIdleService(nil, nil), + mockedResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + gateway1: {block1}, + gateway2: {block2}, + }, + }, + } - // Query metrics. - ctx := user.InjectOrgID(context.Background(), "user-1") - q, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, queryStart, queryEnd, 15*time.Second) - require.NoError(t, err) + // Instantiate the querier that will be executed to run the query. + logger := log.NewNopLogger() + queryable, err := NewBlocksStoreQueryable(stores, finder, NewBlocksConsistencyChecker(0, 0, logger, nil), &blocksStoreLimitsMock{}, 0, 0, logger, nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), queryable)) + defer services.StopAndAwaitTerminated(context.Background(), queryable) // nolint:errcheck + + engine := promql.NewEngine(promql.EngineOpts{ + Logger: logger, + Timeout: 10 * time.Second, + MaxSamples: 1e6, + }) + + // Query metrics. + ctx := user.InjectOrgID(context.Background(), "user-1") + q, err := engine.NewRangeQuery(ctx, queryable, nil, testData.query, queryStart, queryEnd, 15*time.Second) + require.NoError(t, err) - res := q.Exec(ctx) - require.NoError(t, err) - require.NoError(t, res.Err) + res := q.Exec(ctx) + require.NoError(t, err) + require.NoError(t, res.Err) - matrix, err := res.Matrix() - require.NoError(t, err) - assert.Equal(t, testData.expected, matrix) + matrix, err := res.Matrix() + require.NoError(t, err) + assert.Equal(t, testData.expected, matrix) + }) + } }) } } From deafaa55e4eab2d3228ef16babc810b34c6996ac Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 19:26:56 +0530 Subject: [PATCH 15/75] Lint Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable_test.go | 6 +++--- pkg/storegateway/series_refs_test.go | 1 + 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index ebfda2e7987..03a2ab8a951 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -776,7 +776,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { for _, res := range testData.storeSetResponses { m, ok := res.(map[BlocksStoreClient][]ulid.ULID) if ok { - for k, _ := range m { + for k := range m { mockClient := k.(*storeGatewayClientMock) mockClient.mockedSeriesResponses = generateStreamingResponses(mockClient.mockedSeriesResponses) } @@ -2020,8 +2020,8 @@ func (grpcClientStreamMock) Header() (metadata.MD, error) { return nil, nil } func (grpcClientStreamMock) Trailer() metadata.MD { return nil } func (grpcClientStreamMock) CloseSend() error { return nil } func (grpcClientStreamMock) Context() context.Context { return context.Background() } -func (grpcClientStreamMock) SendMsg(m interface{}) error { return nil } -func (grpcClientStreamMock) RecvMsg(m interface{}) error { return nil } +func (grpcClientStreamMock) SendMsg(interface{}) error { return nil } +func (grpcClientStreamMock) RecvMsg(interface{}) error { return nil } type cancelerStoreGatewaySeriesClientMock struct { storeGatewaySeriesClientMock diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 696e389188d..48726540a0d 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1391,6 +1391,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { // We test with both streaming on and off when we are fetching chunks. for _, streaming := range []bool{true, false} { tcCopy := tc + streaming := streaming t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { t.Parallel() tcCopy.streamingSeries = streaming From 6ea14a402e3b9a34db5fa6c55061b857662df18b Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 19:30:49 +0530 Subject: [PATCH 16/75] Fix tests Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_e2e_test.go | 2 +- pkg/storegateway/bucket_store_server_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 6ba65aa95f2..3f571232553 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -428,7 +428,7 @@ func testBucketStore_e2e(t *testing.T, ctx context.Context, s *storeSuite, addit }, } for i, tcase := range append(testCases, additionalCases...) { - for _, streamingBatchSize := range []int{0, 1, 2, 10, 256} { + for _, streamingBatchSize := range []int{0, 1, 5, 256} { if ok := t.Run(fmt.Sprintf("%d,streamingBatchSize=%d", i, streamingBatchSize), func(t *testing.T) { tcase.req.StreamingChunksBatchSize = uint64(streamingBatchSize) seriesSet, _, _, err := srv.Series(context.Background(), tcase.req) diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index aa1a725873d..84904b219bc 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -244,7 +244,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } res, err = stream.Recv() - for err != nil { + for err == nil { if res.GetHints() == nil && res.GetStats() == nil { err = errors.Errorf("got unexpected response type") break From 05b92831b8b14cb92bd4be413582411ff17f259e Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 19:44:01 +0530 Subject: [PATCH 17/75] lint Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 03a2ab8a951..95f1bb87f6b 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -816,7 +816,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { foundErr := false for set.Next() { it := set.At().Iterator(nil) - for it.Next() != chunkenc.ValNone { + for it.Next() != chunkenc.ValNone { // nolint } err := it.Err() if err != nil { From 719b2549a1cc2ad3b4401f34b02099c181d6ec7f Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 12 Jun 2023 21:18:28 +0530 Subject: [PATCH 18/75] Actually pass down streaming batch size from querier to storegateway Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 674a69f8b2e..4b9dc04ba38 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -713,7 +713,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor // But this is an acceptable workaround for now. skipChunks := sp != nil && sp.Func == "series" - req, err := createSeriesRequest(minT, maxT, convertedMatchers, skipChunks, blockIDs) + req, err := createSeriesRequest(minT, maxT, convertedMatchers, skipChunks, blockIDs, q.streamingChunksBatchSize) if err != nil { return errors.Wrapf(err, "failed to create series request") } @@ -1049,7 +1049,7 @@ func (q *blocksStoreQuerier) fetchLabelValuesFromStore( return valueSets, warnings, queriedBlocks, nil } -func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skipChunks bool, blockIDs []ulid.ULID) (*storepb.SeriesRequest, error) { +func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skipChunks bool, blockIDs []ulid.ULID, streamingBatchSize uint64) (*storepb.SeriesRequest, error) { // Selectively query only specific blocks. hints := &hintspb.SeriesRequestHints{ BlockMatchers: []storepb.LabelMatcher{ @@ -1067,11 +1067,12 @@ func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skip } return &storepb.SeriesRequest{ - MinTime: minT, - MaxTime: maxT, - Matchers: matchers, - Hints: anyHints, - SkipChunks: skipChunks, + MinTime: minT, + MaxTime: maxT, + Matchers: matchers, + Hints: anyHints, + SkipChunks: skipChunks, + StreamingChunksBatchSize: streamingBatchSize, }, nil } From df0a7ea985a730149eeb4ce9766659520e65cec9 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 13 Jun 2023 15:41:45 +0530 Subject: [PATCH 19/75] Add unit tests. Quick self review. Signed-off-by: Ganesh Vernekar --- pkg/querier/block_streaming.go | 18 +- pkg/querier/block_streaming_test.go | 201 +++++++++++++++++++++ pkg/querier/blocks_store_queryable.go | 29 +-- pkg/querier/blocks_store_queryable_test.go | 3 +- pkg/storegateway/bucket.go | 8 +- pkg/storegateway/bucket_e2e_test.go | 1 + pkg/storegateway/series_refs.go | 2 +- pkg/storegateway/storepb/types.proto | 2 +- 8 files changed, 237 insertions(+), 27 deletions(-) create mode 100644 pkg/querier/block_streaming_test.go diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index abe73b6f82f..e4e2a298d2a 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -18,7 +18,7 @@ import ( // Implementation of storage.SeriesSet, based on individual responses from store client. type blockStreamingQuerierSeriesSet struct { series []*storepb.StreamingSeries - streamReader *SeriesChunksStreamReader + streamReader chunkStreamer // next response to process next int @@ -26,6 +26,10 @@ type blockStreamingQuerierSeriesSet struct { currSeries storage.Series } +type chunkStreamer interface { + GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) +} + func (bqss *blockStreamingQuerierSeriesSet) Next() bool { bqss.currSeries = nil @@ -34,12 +38,12 @@ func (bqss *blockStreamingQuerierSeriesSet) Next() bool { } currLabels := mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels) - seriesIdxStart := bqss.next + seriesIdxStart := bqss.next // First series in this group. We might merge with more below. bqss.next++ - // Merge chunks for current series. Chunks may come in multiple responses, but as soon - // as the response has chunks for a new series, we can stop searching. Series are sorted. - // See documentation for StoreClient.Series call for details. + // Chunks may come in multiple responses, but as soon as the response has chunks for a new series, + // we can stop searching. Series are sorted. See documentation for StoreClient.Series call for details. + // The actualy merging of chunks happens in the Iterator() call where chunks are fetched. for bqss.next < len(bqss.series) && labels.Compare(currLabels, mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels)) == 0 { bqss.next++ } @@ -61,7 +65,7 @@ func (bqss *blockStreamingQuerierSeriesSet) Warnings() storage.Warnings { } // newBlockStreamingQuerierSeries makes a new blockQuerierSeries. Input labels must be already sorted by name. -func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesIdxEnd int, streamReader *SeriesChunksStreamReader) *blockStreamingQuerierSeries { +func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesIdxEnd int, streamReader chunkStreamer) *blockStreamingQuerierSeries { return &blockStreamingQuerierSeries{ labels: lbls, seriesIdxStart: seriesIdxStart, @@ -73,7 +77,7 @@ func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesId type blockStreamingQuerierSeries struct { labels labels.Labels seriesIdxStart, seriesIdxEnd int - streamReader *SeriesChunksStreamReader + streamReader chunkStreamer } func (bqs *blockStreamingQuerierSeries) Labels() labels.Labels { diff --git a/pkg/querier/block_streaming_test.go b/pkg/querier/block_streaming_test.go new file mode 100644 index 00000000000..97ff63586bc --- /dev/null +++ b/pkg/querier/block_streaming_test.go @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "fmt" + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/storegateway/storepb" + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/stretchr/testify/require" + "math" + "testing" +) + +func TestBlockStreamingQuerierSeriesSet(t *testing.T) { + + cases := map[string]struct { + input []testSeries + expResult []testSeries + }{ + "simple case of one series": { + input: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar"), + values: []testSample{{1, 1}}, + }, + }, + expResult: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar"), + values: []testSample{{1, 1}}, + }, + }, + }, + "multiple unique series": { + input: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}}, + }, + { + lbls: labels.FromStrings("foo", "bar2"), + values: []testSample{{2, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}}, + }, + }, + expResult: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}}, + }, + { + lbls: labels.FromStrings("foo", "bar2"), + values: []testSample{{2, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}}, + }, + }, + }, + "multiple entries of the same series": { + input: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}}, + }, + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{6, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}}, + }, + }, + expResult: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}, {6, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}}, + }, + }, + }, + "multiple entries of the same series again": { + input: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}}, + }, + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{6, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{4, 3}, {5, 3}, {6, 3}}, + }, + }, + expResult: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}, {6, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}, {4, 3}, {5, 3}, {6, 3}}, + }, + }, + }, + } + + for name, c := range cases { + t.Run(name, func(t *testing.T) { + ss := &blockStreamingQuerierSeriesSet{streamReader: &mockChunkStreamer{series: c.input}} + for _, s := range c.input { + ss.series = append(ss.series, &storepb.StreamingSeries{ + Labels: mimirpb.FromLabelsToLabelAdapters(s.lbls), + }) + } + idx := 0 + for ss.Next() { + s := ss.At() + require.Equal(t, c.expResult[idx].lbls, s.Labels()) + it := s.Iterator(nil) + var actSamples []testSample + for it.Next() != chunkenc.ValNone { + ts, val := it.At() + actSamples = append(actSamples, testSample{t: ts, v: val}) + } + require.Equal(t, c.expResult[idx].values, actSamples) + require.NoError(t, it.Err()) + idx++ + } + require.NoError(t, ss.Err()) + require.Equal(t, len(c.expResult), idx) + }) + } +} + +type testSeries struct { + lbls labels.Labels + values []testSample +} + +type testSample struct { + t int64 + v float64 +} + +type mockChunkStreamer struct { + series []testSeries + next int +} + +func (m *mockChunkStreamer) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { + if m.next >= len(m.series) { + return nil, fmt.Errorf("out of chunks") + } + + if uint64(m.next) != seriesIndex { + return nil, fmt.Errorf("asked for the wrong series, exp: %d, got %d", m.next, seriesIndex) + } + + chk := chunkenc.NewXORChunk() + app, err := chk.Appender() + if err != nil { + return nil, err + } + + samples := m.series[m.next].values + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + for _, s := range samples { + app.Append(s.t, s.v) + if s.t < mint { + mint = s.t + } + if s.t > maxt { + maxt = s.t + } + } + + m.next++ + + return []storepb.AggrChunk{{ + MinTime: mint, + MaxTime: maxt, + Raw: &storepb.Chunk{Data: chk.Bytes()}, + }}, nil +} diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 4b9dc04ba38..5d1d0996ea3 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -774,20 +774,6 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } } - if ss := resp.GetStreamingSeries(); ss != nil { - for _, s := range ss.Series { - // Add series fingerprint to query limiter; will return error if we are over the limit - limitErr := queryLimiter.AddSeries(s.Labels) - if limitErr != nil { - return validation.LimitError(limitErr.Error()) - } - } - myStreamingSeries = append(myStreamingSeries, ss.Series...) - if ss.IsEndOfSeriesStream { - break - } - } - if w := resp.GetWarning(); w != "" { myWarnings = append(myWarnings, errors.New(w)) } @@ -809,6 +795,21 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor if s := resp.GetStats(); s != nil { indexBytesFetched += s.FetchedIndexBytes } + + if ss := resp.GetStreamingSeries(); ss != nil { + for _, s := range ss.Series { + // Add series fingerprint to query limiter; will return error if we are over the limit + limitErr := queryLimiter.AddSeries(s.Labels) + if limitErr != nil { + return validation.LimitError(limitErr.Error()) + } + } + myStreamingSeries = append(myStreamingSeries, ss.Series...) + if ss.IsEndOfSeriesStream { + // We expect "end of stream" to be sent after the hints and the stats have been sent. + break + } + } } reqStats.AddFetchedIndexBytes(indexBytesFetched) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 95f1bb87f6b..0e28471e09d 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -894,6 +894,7 @@ func generateStreamingResponses(seriesResponses []*storepb.SeriesResponse) []*st seriesResponses = append(seriesResponses[:0], series...) seriesResponses = append(seriesResponses, others...) + // End of stream response goes after the hints and stats. seriesResponses = append(seriesResponses, mockStreamingSeriesBatchResponse(true)) seriesResponses = append(seriesResponses, chunks...) return seriesResponses @@ -1976,7 +1977,7 @@ type storeGatewayClientMock struct { func (m *storeGatewayClientMock) Series(context.Context, *storepb.SeriesRequest, ...grpc.CallOption) (storegatewaypb.StoreGateway_SeriesClient, error) { seriesClient := &storeGatewaySeriesClientMock{ - ClientStream: grpcClientStreamMock{}, + ClientStream: grpcClientStreamMock{}, // Required to not panic. mockedResponses: m.mockedSeriesResponses, } diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index e18d6fcfc2a..8b662369783 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -618,6 +618,9 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie readers = newChunkReaders(chunkReaders) } + // If we are streaming the series labels and chunks separately, we don't need to fetch the postings + // twice. So we use these slices to re-use it. + // Each reusePostings[i] and reusePendingMatchers[i] corresponds to a single block. var reusePostings [][]storage.SeriesRef var reusePendingMatchers [][]*labels.Matcher if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { @@ -628,7 +631,6 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie reusePostings = make([][]storage.SeriesRef, len(blocks)) reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) - // TODO: what to do with hints here? seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err @@ -845,8 +847,8 @@ func (s *BucketStore) streamingSeriesSetForBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, - reusePostings [][]storage.SeriesRef, - reusePendingMatchers [][]*labels.Matcher, + reusePostings [][]storage.SeriesRef, // Used if not empty. + reusePendingMatchers [][]*labels.Matcher, // Used if not empty. ) (storepb.SeriesSet, *hintspb.SeriesResponseHints, error) { var ( resHints = &hintspb.SeriesResponseHints{} diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 3f571232553..c298434214b 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -88,6 +88,7 @@ func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt o if shiftedBlocks { // This shifts the 2nd block ahead by 2hrs. This way the first and the // last blocks created have no overlapping blocks. + // This is used to test some case with streaming series. mint = maxt maxt = timestamp.FromTime(now.Add(2 * time.Hour)) maxTime = maxt diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 12248462d96..d3cf44cb098 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -747,7 +747,7 @@ func openBlockSeriesChunkRefsSetsIterator( minTime, maxTime int64, // Series must have data in this time range to be returned (ignored if skipChunks=true). chunkRangesPerSeries int, stats *safeQueryStats, - ps []storage.SeriesRef, // If this is not empty, these posting are used as it as without fetching new ones. + ps []storage.SeriesRef, // If this is not empty, these posting are used as it is without fetching new ones. pendingMatchers []*labels.Matcher, // This is used in conjunction with 'ps'. logger log.Logger, ) (seriesChunkRefsSetIterator, []storage.SeriesRef, []*labels.Matcher, error) { diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index 055a0d0e999..3bd1d76ec6a 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -47,7 +47,7 @@ message StreamSeriesBatch { } message StreamSeriesChunks { - uint64 series_index = 1; // Index into list of all series previously sent with QueryStreamSeries messages by this ingester during this query response. + uint64 series_index = 1; // Index into list of all series previously sent with SeriesResponse messages by this storegateway during this query response. repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; } From 0f8c53c6653c4d92390fcb0ee92deacbd204f338 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 13 Jun 2023 16:32:38 +0530 Subject: [PATCH 20/75] Refactor Series function to make it smaller Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 340 ++++++++++++++++++++----------------- 1 file changed, 182 insertions(+), 158 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 8b662369783..e4bc781c0a7 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -625,7 +625,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie var reusePendingMatchers [][]*labels.Matcher if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { // The streaming feature is enabled where we stream the series labels first, followed - // by the chunks later. Fetch only the labels here. + // by the chunks later. Send only the labels here. req.SkipChunks = true reusePostings = make([][]storage.SeriesRef, len(blocks)) @@ -636,72 +636,10 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - // TODO: should we pool the seriesBuffer/seriesBatch? - seriesBuffer := make([]*storepb.StreamingSeries, req.StreamingChunksBatchSize) - for i := range seriesBuffer { - seriesBuffer[i] = &storepb.StreamingSeries{} - } - seriesBatch := &storepb.StreamSeriesBatch{ - Series: seriesBuffer[:0], - } - // TODO: can we send this in parallel while we start fetching the chunks below? - for seriesSet.Next() { - var lset labels.Labels - // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle - // because the subsequent call to seriesSet.Next() may release it. - // TODO: check if it is safe to hold the lset. - lset, _ = seriesSet.At() - - // We are re-using the slice for every batch this way. - seriesBatch.Series = seriesBatch.Series[:len(seriesBatch.Series)+1] - seriesBatch.Series[len(seriesBatch.Series)-1].Labels = mimirpb.FromLabelsToLabelAdapters(lset) - - // TODO: Add relevant trace spans and timers. - - if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { - msg := &grpc.PreparedMsg{} - if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) - } - - // Send the message. - if err = srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) - } - - seriesBatch.Series = seriesBatch.Series[:0] - } - } - - // We need to send hints and stats before sending the chunks. - // Also, these need to be sent before we send IsEndOfSeriesStream=true. - var anyHints *types.Any - if anyHints, err = types.MarshalAny(resHints); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) - } - - if err := srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) - } - - unsafeStats := stats.export() - if err := srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) - } - - // Send any remaining series and signal that there are no more series. - msg := &grpc.PreparedMsg{} - seriesBatch.IsEndOfSeriesStream = true - if err = msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) - } - // Send the message. - if err = srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) - } - - if seriesSet.Err() != nil { - return errors.Wrap(seriesSet.Err(), "expand series set") + // This also sends the hints and the stats. + err = s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) + if err != nil { + return err } req.SkipChunks = false @@ -717,116 +655,202 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie // Merge the sub-results from each selected block. tracing.DoWithSpan(ctx, "bucket_store_merge_all", func(ctx context.Context, _ tracing.Span) { - var ( - iterationBegin = time.Now() - encodeDuration = time.Duration(0) - sendDuration = time.Duration(0) - seriesCount int - chunksCount int - ) - - // Once the iteration is done we will update the stats. - defer stats.update(func(stats *queryStats) { - stats.mergedSeriesCount += seriesCount - stats.mergedChunksCount += chunksCount - - // The time spent iterating over the series set is the - // actual time spent fetching series and chunks, encoding and sending them to the client. - // We split the timings to have a better view over how time is spent. - stats.streamingSeriesFetchSeriesAndChunksDuration += stats.streamingSeriesWaitBatchLoadedDuration - stats.streamingSeriesEncodeResponseDuration += encodeDuration - stats.streamingSeriesSendResponseDuration += sendDuration - stats.streamingSeriesOtherDuration += time.Duration(util_math.Max(0, int64(time.Since(iterationBegin)- - stats.streamingSeriesFetchSeriesAndChunksDuration-encodeDuration-sendDuration))) - }) + err = s.sendSeriesChunks(req, srv, seriesSet, stats) + if err != nil { + return + } + }) - for seriesSet.Next() { - // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle - // because the subsequent call to seriesSet.Next() may release it. - lset, chks := seriesSet.At() - seriesCount++ - msg := &grpc.PreparedMsg{} - if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { - // We only need to stream chunks here because the series labels have already - // been sent above. - // TODO: is the 'is end of stream' parameter required here? - streamingChunks := storepb.StreamSeriesChunks{ - SeriesIndex: uint64(seriesCount - 1), - Chunks: chks, - } + if err != nil { + return + } - // Encode the message. We encode it ourselves into a PreparedMsg in order to measure - // the time it takes. - encodeBegin := time.Now() - if err = msg.Encode(srv, storepb.NewStreamSeriesChunksResponse(&streamingChunks)); err != nil { - err = status.Error(codes.Internal, errors.Wrap(err, "encode streaming chunks response").Error()) - return - } - encodeDuration += time.Since(encodeBegin) - } else { - var series storepb.Series - if !req.SkipChunks { - series.Chunks = chks - } - series.Labels = mimirpb.FromLabelsToLabelAdapters(lset) - - // Encode the message. We encode it ourselves into a PreparedMsg in order to measure - // the time it takes. - encodeBegin := time.Now() - if err = msg.Encode(srv, storepb.NewSeriesResponse(&series)); err != nil { - err = status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) - return - } - encodeDuration += time.Since(encodeBegin) - } + if req.StreamingChunksBatchSize == 0 || req.SkipChunks { + // Hints and stats were not sent before, so send it now. + return s.sendHintsAndStats(srv, resHints, stats) + } - if !req.SkipChunks { - chunksCount += len(chks) - s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) + return nil +} + +// sendStreamingSeriesLabelsHintsStats sends the labels of the streaming series. +// Since hints and stats need to be sent before the "end of stream" streaming series message, +// this function also sends the hints and the stats. +func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( + req *storepb.SeriesRequest, + srv storepb.Store_SeriesServer, + stats *safeQueryStats, + seriesSet storepb.SeriesSet, + resHints *hintspb.SeriesResponseHints, +) error { + // TODO: should we pool the seriesBuffer/seriesBatch? + seriesBuffer := make([]*storepb.StreamingSeries, req.StreamingChunksBatchSize) + for i := range seriesBuffer { + seriesBuffer[i] = &storepb.StreamingSeries{} + } + seriesBatch := &storepb.StreamSeriesBatch{ + Series: seriesBuffer[:0], + } + // TODO: can we send this in parallel while we start fetching the chunks below? + for seriesSet.Next() { + var lset labels.Labels + // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle + // because the subsequent call to seriesSet.Next() may release it. + // TODO: check if it is safe to hold the lset. + lset, _ = seriesSet.At() + + // We are re-using the slice for every batch this way. + seriesBatch.Series = seriesBatch.Series[:len(seriesBatch.Series)+1] + seriesBatch.Series[len(seriesBatch.Series)-1].Labels = mimirpb.FromLabelsToLabelAdapters(lset) + + // TODO: Add relevant trace spans and timers. + + if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { + msg := &grpc.PreparedMsg{} + if err := msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) } // Send the message. - sendBegin := time.Now() - if err = srv.SendMsg(msg); err != nil { - // TODO: set the right error wrapper message. - err = status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) - return + if err := srv.SendMsg(msg); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) } - sendDuration += time.Since(sendBegin) - } - if seriesSet.Err() != nil { - err = errors.Wrap(seriesSet.Err(), "expand series set") - return + + seriesBatch.Series = seriesBatch.Series[:0] } + } + if seriesSet.Err() != nil { + return errors.Wrap(seriesSet.Err(), "expand series set") + } - err = nil - }) + // We need to send hints and stats before sending the chunks. + // Also, these need to be sent before we send IsEndOfSeriesStream=true. + if err := s.sendHintsAndStats(srv, resHints, stats); err != nil { + return err + } - if err != nil { - return + // Send any remaining series and signal that there are no more series. + msg := &grpc.PreparedMsg{} + seriesBatch.IsEndOfSeriesStream = true + if err := msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) + } + // Send the message. + if err := srv.SendMsg(msg); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) } - if req.StreamingChunksBatchSize == 0 || req.SkipChunks { - // Hints and stats were not sent before, so send it now. - var anyHints *types.Any - if anyHints, err = types.MarshalAny(resHints); err != nil { - err = status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) - return + if seriesSet.Err() != nil { + return errors.Wrap(seriesSet.Err(), "expand series set") + } + + return nil +} + +func (s *BucketStore) sendSeriesChunks( + req *storepb.SeriesRequest, + srv storepb.Store_SeriesServer, + seriesSet storepb.SeriesSet, + stats *safeQueryStats, +) (err error) { + var ( + iterationBegin = time.Now() + encodeDuration = time.Duration(0) + sendDuration = time.Duration(0) + seriesCount int + chunksCount int + ) + + // Once the iteration is done we will update the stats. + defer stats.update(func(stats *queryStats) { + stats.mergedSeriesCount += seriesCount + stats.mergedChunksCount += chunksCount + + // The time spent iterating over the series set is the + // actual time spent fetching series and chunks, encoding and sending them to the client. + // We split the timings to have a better view over how time is spent. + stats.streamingSeriesFetchSeriesAndChunksDuration += stats.streamingSeriesWaitBatchLoadedDuration + stats.streamingSeriesEncodeResponseDuration += encodeDuration + stats.streamingSeriesSendResponseDuration += sendDuration + stats.streamingSeriesOtherDuration += time.Duration(util_math.Max(0, int64(time.Since(iterationBegin)- + stats.streamingSeriesFetchSeriesAndChunksDuration-encodeDuration-sendDuration))) + }) + + for seriesSet.Next() { + // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle + // because the subsequent call to seriesSet.Next() may release it. + lset, chks := seriesSet.At() + seriesCount++ + msg := &grpc.PreparedMsg{} + if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { + // We only need to stream chunks here because the series labels have already + // been sent above. + // TODO: is the 'is end of stream' parameter required here? + streamingChunks := storepb.StreamSeriesChunks{ + SeriesIndex: uint64(seriesCount - 1), + Chunks: chks, + } + + // Encode the message. We encode it ourselves into a PreparedMsg in order to measure + // the time it takes. + encodeBegin := time.Now() + if err := msg.Encode(srv, storepb.NewStreamSeriesChunksResponse(&streamingChunks)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode streaming chunks response").Error()) + } + encodeDuration += time.Since(encodeBegin) + } else { + var series storepb.Series + if !req.SkipChunks { + series.Chunks = chks + } + series.Labels = mimirpb.FromLabelsToLabelAdapters(lset) + + // Encode the message. We encode it ourselves into a PreparedMsg in order to measure + // the time it takes. + encodeBegin := time.Now() + if err := msg.Encode(srv, storepb.NewSeriesResponse(&series)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) + } + encodeDuration += time.Since(encodeBegin) } - if err = srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { - err = status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) - return + if !req.SkipChunks { + chunksCount += len(chks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) } - unsafeStats := stats.export() - if err = srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { - err = status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) - return + // Send the message. + sendBegin := time.Now() + if err := srv.SendMsg(msg); err != nil { + // TODO: set the right error wrapper message. + return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) } + sendDuration += time.Since(sendBegin) + } + if seriesSet.Err() != nil { + return errors.Wrap(seriesSet.Err(), "expand series set") } - return err + return nil +} + +func (s *BucketStore) sendHintsAndStats(srv storepb.Store_SeriesServer, resHints *hintspb.SeriesResponseHints, stats *safeQueryStats) error { + var anyHints *types.Any + var err error + if anyHints, err = types.MarshalAny(resHints); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) + } + + if err := srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) + } + + unsafeStats := stats.export() + if err := srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) + } + + return nil } func chunksSize(chks []storepb.AggrChunk) (size int) { From 513c99b305ba2ff735814f69d8c53a355885add5 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 13 Jun 2023 16:47:11 +0530 Subject: [PATCH 21/75] Fix lint Signed-off-by: Ganesh Vernekar --- pkg/querier/block_streaming.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index e4e2a298d2a..7af36ca22b6 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -43,7 +43,7 @@ func (bqss *blockStreamingQuerierSeriesSet) Next() bool { // Chunks may come in multiple responses, but as soon as the response has chunks for a new series, // we can stop searching. Series are sorted. See documentation for StoreClient.Series call for details. - // The actualy merging of chunks happens in the Iterator() call where chunks are fetched. + // The actually merging of chunks happens in the Iterator() call where chunks are fetched. for bqss.next < len(bqss.series) && labels.Compare(currLabels, mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels)) == 0 { bqss.next++ } From 766e46fdc7cc5d0773cd3fbaca822f1d2e22e190 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 13 Jun 2023 17:55:43 +0530 Subject: [PATCH 22/75] Refactor to send of batches of chunks at a time Signed-off-by: Ganesh Vernekar --- .../blocks_store_querable_streaming.go | 40 ++- pkg/querier/blocks_store_queryable_test.go | 10 +- pkg/storegateway/bucket.go | 90 +++-- pkg/storegateway/bucket_store_server_test.go | 26 +- pkg/storegateway/storepb/custom.go | 2 +- pkg/storegateway/storepb/rpc.pb.go | 108 +++--- pkg/storegateway/storepb/rpc.proto | 2 +- pkg/storegateway/storepb/types.pb.go | 325 +++++++++++++++--- pkg/storegateway/storepb/types.proto | 5 + 9 files changed, 449 insertions(+), 159 deletions(-) diff --git a/pkg/querier/blocks_store_querable_streaming.go b/pkg/querier/blocks_store_querable_streaming.go index 889598f382d..11871c66cca 100644 --- a/pkg/querier/blocks_store_querable_streaming.go +++ b/pkg/querier/blocks_store_querable_streaming.go @@ -41,7 +41,8 @@ type SeriesChunksStreamReader struct { stats *stats.Stats log log.Logger - seriesCunksChan chan *storepb.StreamSeriesChunks + seriesCunksChan chan *storepb.StreamSeriesChunksBatch + chunksBatch []*storepb.StreamSeriesChunks errorChan chan error } @@ -69,7 +70,7 @@ func (s *SeriesChunksStreamReader) Close() { // If an error occurs while streaming, a subsequent call to GetChunks will return an error. // To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. func (s *SeriesChunksStreamReader) StartBuffering() { - s.seriesCunksChan = make(chan *storepb.StreamSeriesChunks, 30) // TODO: increase or reduce the channel size. + s.seriesCunksChan = make(chan *storepb.StreamSeriesChunksBatch, 1) // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. s.errorChan = make(chan error, 1) @@ -151,22 +152,33 @@ func (s *SeriesChunksStreamReader) StartBuffering() { // GetChunks returns the chunks for the series with index seriesIndex. // This method must be called with monotonically increasing values of seriesIndex. func (s *SeriesChunksStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { - chks, haveChunks := <-s.seriesCunksChan - - if !haveChunks { - // If there's an error, report it. - select { - case err, haveError := <-s.errorChan: - if haveError { - if _, ok := err.(validation.LimitError); ok { - return nil, err + if len(s.chunksBatch) == 0 { + chks, haveChunks := <-s.seriesCunksChan + + if !haveChunks { + // If there's an error, report it. + select { + case err, haveError := <-s.errorChan: + if haveError { + if _, ok := err.(validation.LimitError); ok { + return nil, err + } + return nil, errors.Wrapf(err, "attempted to read series at index %v from stream, but the stream has failed", seriesIndex) } - return nil, errors.Wrapf(err, "attempted to read series at index %v from stream, but the stream has failed", seriesIndex) + default: } - default: + + return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has already been exhausted", seriesIndex) } - return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has already been exhausted", seriesIndex) + s.chunksBatch = chks.Chunks + } + + chks := s.chunksBatch[0] + if len(s.chunksBatch) > 1 { + s.chunksBatch = s.chunksBatch[1:] + } else { + s.chunksBatch = nil } if chks.SeriesIndex != seriesIndex { diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 0e28471e09d..1a7da2ce69e 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -2132,9 +2132,13 @@ func mockStreamingSeriesBatchResponse(endOfStream bool, lbls ...[]mimirpb.LabelA func mockStreamingSeriesChunksResponse(index uint64, chks []storepb.AggrChunk) *storepb.SeriesResponse { return &storepb.SeriesResponse{ Result: &storepb.SeriesResponse_StreamingSeriesChunks{ - StreamingSeriesChunks: &storepb.StreamSeriesChunks{ - SeriesIndex: index, - Chunks: chks, + StreamingSeriesChunks: &storepb.StreamSeriesChunksBatch{ + Chunks: []*storepb.StreamSeriesChunks{ + { + SeriesIndex: index, + Chunks: chks, + }, + }, }, }, } diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index e4bc781c0a7..590f98957b1 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -68,6 +68,8 @@ const ( // Labels for metrics. labelEncode = "encode" labelDecode = "decode" + + queryStreamBatchMessageSize = 1 * 1024 * 1024 ) type BucketStoreStats struct { @@ -754,11 +756,12 @@ func (s *BucketStore) sendSeriesChunks( stats *safeQueryStats, ) (err error) { var ( - iterationBegin = time.Now() - encodeDuration = time.Duration(0) - sendDuration = time.Duration(0) - seriesCount int - chunksCount int + iterationBegin = time.Now() + encodeDuration = time.Duration(0) + sendDuration = time.Duration(0) + seriesCount int + chunksCount int + streamingChunks = req.StreamingChunksBatchSize > 0 && !req.SkipChunks ) // Once the iteration is done we will update the stats. @@ -776,28 +779,35 @@ func (s *BucketStore) sendSeriesChunks( stats.streamingSeriesFetchSeriesAndChunksDuration-encodeDuration-sendDuration))) }) + var batchSizeBytes int + var chunksBuffer []*storepb.StreamSeriesChunks + if streamingChunks { + chunksBuffer = make([]*storepb.StreamSeriesChunks, req.StreamingChunksBatchSize) + for i := range chunksBuffer { + chunksBuffer[i] = &storepb.StreamSeriesChunks{} + } + } + chunksBatch := &storepb.StreamSeriesChunksBatch{ + Chunks: chunksBuffer[:0], + } for seriesSet.Next() { // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle // because the subsequent call to seriesSet.Next() may release it. lset, chks := seriesSet.At() seriesCount++ - msg := &grpc.PreparedMsg{} - if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { + var response interface{} + if streamingChunks { // We only need to stream chunks here because the series labels have already // been sent above. - // TODO: is the 'is end of stream' parameter required here? - streamingChunks := storepb.StreamSeriesChunks{ - SeriesIndex: uint64(seriesCount - 1), - Chunks: chks, - } - - // Encode the message. We encode it ourselves into a PreparedMsg in order to measure - // the time it takes. - encodeBegin := time.Now() - if err := msg.Encode(srv, storepb.NewStreamSeriesChunksResponse(&streamingChunks)); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode streaming chunks response").Error()) + chunksBatch.Chunks = chunksBatch.Chunks[:len(chunksBatch.Chunks)+1] + last := chunksBatch.Chunks[len(chunksBatch.Chunks)-1] + last.Chunks = chks + last.SeriesIndex = uint64(seriesCount - 1) + + batchSizeBytes += last.Size() + if (batchSizeBytes > 0 && batchSizeBytes > queryStreamBatchMessageSize) || len(chunksBatch.Chunks) >= int(req.StreamingChunksBatchSize) { + response = storepb.NewStreamSeriesChunksResponse(chunksBatch) } - encodeDuration += time.Since(encodeBegin) } else { var series storepb.Series if !req.SkipChunks { @@ -805,31 +815,57 @@ func (s *BucketStore) sendSeriesChunks( } series.Labels = mimirpb.FromLabelsToLabelAdapters(lset) + response = storepb.NewSeriesResponse(&series) + } + + if !req.SkipChunks { + chunksCount += len(chks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) + } + + if response != nil { // Encode the message. We encode it ourselves into a PreparedMsg in order to measure // the time it takes. encodeBegin := time.Now() - if err := msg.Encode(srv, storepb.NewSeriesResponse(&series)); err != nil { + msg := &grpc.PreparedMsg{} + if err := msg.Encode(srv, response); err != nil { return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) } encodeDuration += time.Since(encodeBegin) + + // Send the message. + sendBegin := time.Now() + if err := srv.SendMsg(msg); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + } + sendDuration += time.Since(sendBegin) + + if streamingChunks { + chunksBatch.Chunks = chunksBatch.Chunks[:0] + batchSizeBytes = 0 + } } + } + if seriesSet.Err() != nil { + return errors.Wrap(seriesSet.Err(), "expand series set") + } - if !req.SkipChunks { - chunksCount += len(chks) - s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) + if streamingChunks && len(chunksBatch.Chunks) > 0 { + // Still some chunks left to send. + encodeBegin := time.Now() + msg := &grpc.PreparedMsg{} + if err := msg.Encode(srv, storepb.NewStreamSeriesChunksResponse(chunksBatch)); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) } + encodeDuration += time.Since(encodeBegin) // Send the message. sendBegin := time.Now() if err := srv.SendMsg(msg); err != nil { - // TODO: set the right error wrapper message. return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) } sendDuration += time.Since(sendBegin) } - if seriesSet.Err() != nil { - return errors.Wrap(seriesSet.Err(), "expand series set") - } return nil } diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index 84904b219bc..80b4424d338 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -98,7 +98,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest conn *grpc.ClientConn stream storepb.Store_SeriesClient res *storepb.SeriesResponse - streamingSeriesSet []*storepb.StreamSeriesBatch + streamingSeriesSet []*storepb.StreamingSeries ) // Create a gRPC connection to the server. @@ -191,7 +191,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest return } - streamingSeriesSet = append(streamingSeriesSet, copiedSeries) + streamingSeriesSet = append(streamingSeriesSet, copiedSeries.Series...) if recvSeries.IsEndOfSeriesStream { break @@ -202,17 +202,17 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { // Get the streaming chunks. idx := -1 - for _, batch := range streamingSeriesSet { - for _, s := range batch.Series { - idx++ - // We don't expect EOF errors here. - res, err = stream.Recv() - if err != nil { - return - } + for idx < len(streamingSeriesSet)-1 { + // We don't expect EOF errors here. + res, err = stream.Recv() + if err != nil { + return + } - chks := res.GetStreamingSeriesChunks() - if chks == nil { + chksBatch := res.GetStreamingSeriesChunks() + for _, chks := range chksBatch.Chunks { + idx++ + if chksBatch == nil { err = errors.Errorf("expected streaming chunks, got something else") return } @@ -237,7 +237,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } seriesSet = append(seriesSet, &storepb.Series{ - Labels: s.Labels, + Labels: streamingSeriesSet[idx].Labels, Chunks: copiedChunks.Chunks, }) } diff --git a/pkg/storegateway/storepb/custom.go b/pkg/storegateway/storepb/custom.go index a974d87415a..5ed4208fbcd 100644 --- a/pkg/storegateway/storepb/custom.go +++ b/pkg/storegateway/storepb/custom.go @@ -47,7 +47,7 @@ func NewStreamSeriesResponse(series *StreamSeriesBatch) *SeriesResponse { } } -func NewStreamSeriesChunksResponse(series *StreamSeriesChunks) *SeriesResponse { +func NewStreamSeriesChunksResponse(series *StreamSeriesChunksBatch) *SeriesResponse { return &SeriesResponse{ Result: &SeriesResponse_StreamingSeriesChunks{ StreamingSeriesChunks: series, diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index f019d6a3a2f..f52a5a64d3e 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -179,7 +179,7 @@ type SeriesResponse_StreamingSeries struct { StreamingSeries *StreamSeriesBatch `protobuf:"bytes,5,opt,name=streaming_series,json=streamingSeries,proto3,oneof"` } type SeriesResponse_StreamingSeriesChunks struct { - StreamingSeriesChunks *StreamSeriesChunks `protobuf:"bytes,6,opt,name=streaming_series_chunks,json=streamingSeriesChunks,proto3,oneof"` + StreamingSeriesChunks *StreamSeriesChunksBatch `protobuf:"bytes,6,opt,name=streaming_series_chunks,json=streamingSeriesChunks,proto3,oneof"` } func (*SeriesResponse_Series) isSeriesResponse_Result() {} @@ -231,7 +231,7 @@ func (m *SeriesResponse) GetStreamingSeries() *StreamSeriesBatch { return nil } -func (m *SeriesResponse) GetStreamingSeriesChunks() *StreamSeriesChunks { +func (m *SeriesResponse) GetStreamingSeriesChunks() *StreamSeriesChunksBatch { if x, ok := m.GetResult().(*SeriesResponse_StreamingSeriesChunks); ok { return x.StreamingSeriesChunks } @@ -430,56 +430,56 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 784 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcf, 0x6f, 0xe2, 0x46, - 0x14, 0xc7, 0x3d, 0x78, 0x6c, 0x86, 0x21, 0xa4, 0xce, 0xe4, 0x47, 0x8d, 0x23, 0x39, 0x08, 0xa9, - 0x12, 0xaa, 0x5a, 0x52, 0xa5, 0x52, 0xab, 0x1e, 0x7a, 0x08, 0x91, 0x22, 0x6a, 0xb5, 0x3d, 0x38, - 0x51, 0x0f, 0xbd, 0x20, 0x03, 0x13, 0xb0, 0x02, 0x36, 0xf5, 0x98, 0x36, 0xe4, 0xd4, 0x3f, 0xa1, - 0x7f, 0x46, 0xa5, 0xfd, 0x0b, 0xf6, 0xba, 0xa7, 0xdc, 0x36, 0xd2, 0x5e, 0x72, 0x5a, 0x2d, 0xe4, - 0xb2, 0xc7, 0xfc, 0x09, 0xab, 0xf9, 0x01, 0x86, 0x88, 0x55, 0x36, 0xd2, 0xde, 0x3c, 0xdf, 0xef, - 0x9b, 0x37, 0xef, 0x7d, 0xde, 0x33, 0x2e, 0x24, 0xa3, 0x4e, 0x7d, 0x94, 0xc4, 0x69, 0x4c, 0xcc, - 0xb4, 0x1f, 0x44, 0x31, 0x73, 0x8a, 0xe9, 0x64, 0x44, 0x99, 0x14, 0x9d, 0x6f, 0x7b, 0x61, 0xda, - 0x1f, 0xb7, 0xeb, 0x9d, 0x78, 0x78, 0xd8, 0x8b, 0x7b, 0xf1, 0xa1, 0x90, 0xdb, 0xe3, 0x0b, 0x71, - 0x12, 0x07, 0xf1, 0xa5, 0xc2, 0xcb, 0xbd, 0x38, 0xee, 0x0d, 0x68, 0x16, 0x15, 0x44, 0x13, 0x69, - 0x55, 0x5f, 0xe6, 0x70, 0xe9, 0x8c, 0x26, 0x21, 0x65, 0x3e, 0xfd, 0x6b, 0x4c, 0x59, 0x4a, 0xca, - 0x18, 0x0d, 0xc3, 0xa8, 0x95, 0x86, 0x43, 0x6a, 0x83, 0x0a, 0xa8, 0xe9, 0x7e, 0x7e, 0x18, 0x46, - 0xe7, 0xe1, 0x90, 0x0a, 0x2b, 0xb8, 0x92, 0x56, 0x4e, 0x59, 0xc1, 0x95, 0xb0, 0x7e, 0xe0, 0x56, - 0xda, 0xe9, 0xd3, 0x84, 0xd9, 0x7a, 0x45, 0xaf, 0x15, 0x8f, 0x76, 0xea, 0xb2, 0xf2, 0xfa, 0xaf, - 0x41, 0x9b, 0x0e, 0x7e, 0x93, 0x66, 0x03, 0xde, 0xbc, 0x3d, 0xd0, 0xfc, 0x45, 0x2c, 0x39, 0xc0, - 0x45, 0x76, 0x19, 0x8e, 0x5a, 0x9d, 0xfe, 0x38, 0xba, 0x64, 0x36, 0xaa, 0x80, 0x1a, 0xf2, 0x31, - 0x97, 0x4e, 0x84, 0x42, 0xbe, 0xc6, 0x46, 0x3f, 0x8c, 0x52, 0x66, 0x17, 0x2a, 0x40, 0x64, 0x95, - 0xbd, 0xd4, 0xe7, 0xbd, 0xd4, 0x8f, 0xa3, 0x89, 0x2f, 0x43, 0xc8, 0xcf, 0x78, 0x9f, 0xa5, 0x09, - 0x0d, 0x86, 0x61, 0xd4, 0x53, 0x19, 0x5b, 0x6d, 0xfe, 0x52, 0x8b, 0x85, 0xd7, 0xd4, 0xee, 0x56, - 0x40, 0x0d, 0xfa, 0xf6, 0x22, 0x44, 0xbe, 0xd0, 0xe0, 0x01, 0x67, 0xe1, 0x35, 0xf5, 0x20, 0x82, - 0x96, 0xe1, 0x41, 0x64, 0x58, 0xa6, 0x07, 0x91, 0x69, 0xe5, 0x3d, 0x88, 0xf2, 0x16, 0xf2, 0x20, - 0xc2, 0x56, 0xd1, 0x83, 0xa8, 0x68, 0x6d, 0x78, 0x10, 0x6d, 0x58, 0x25, 0x0f, 0xa2, 0x92, 0xb5, - 0x59, 0xfd, 0x11, 0x1b, 0x67, 0x69, 0x90, 0x32, 0x52, 0xc7, 0xdb, 0x17, 0x94, 0x37, 0xd4, 0x6d, - 0x85, 0x51, 0x97, 0x5e, 0xb5, 0xda, 0x93, 0x94, 0x32, 0x41, 0x0f, 0xfa, 0x5b, 0xca, 0xfa, 0x85, - 0x3b, 0x0d, 0x6e, 0x54, 0xdf, 0xe4, 0xf0, 0xe6, 0x1c, 0x3a, 0x1b, 0xc5, 0x11, 0xa3, 0xa4, 0x86, - 0x4d, 0x26, 0x14, 0x71, 0xab, 0x78, 0xb4, 0x39, 0xa7, 0x27, 0xe3, 0x9a, 0x9a, 0xaf, 0x7c, 0xe2, - 0xe0, 0xfc, 0x3f, 0x41, 0x12, 0x85, 0x51, 0x4f, 0xcc, 0xa0, 0xd0, 0xd4, 0xfc, 0xb9, 0x40, 0xbe, - 0x99, 0xc3, 0xd2, 0x3f, 0x0e, 0xab, 0xa9, 0xcd, 0x71, 0x7d, 0x85, 0x0d, 0xc6, 0xeb, 0xb7, 0xa1, - 0x88, 0x2e, 0x2d, 0x9e, 0xe4, 0x22, 0x0f, 0x13, 0x2e, 0x39, 0xc5, 0x56, 0x46, 0x55, 0x15, 0x69, - 0x88, 0x1b, 0xe5, 0xec, 0x06, 0xf7, 0x65, 0xa9, 0x82, 0x67, 0x53, 0xf3, 0xbf, 0x58, 0x5c, 0x92, - 0x3a, 0x39, 0xc7, 0x5f, 0x3e, 0xce, 0x33, 0x1f, 0xbb, 0x29, 0xd2, 0x39, 0xeb, 0xd2, 0xc9, 0x21, - 0x35, 0x35, 0x7f, 0xf7, 0x51, 0x3e, 0x35, 0x3d, 0x84, 0xcd, 0x84, 0xb2, 0xf1, 0x20, 0xad, 0xbe, - 0x00, 0x78, 0x4b, 0xec, 0xda, 0xef, 0xc1, 0x30, 0x5b, 0xe7, 0x1d, 0xd1, 0x64, 0x92, 0x0a, 0x24, - 0xba, 0x2f, 0x0f, 0xc4, 0xc2, 0x3a, 0x8d, 0xba, 0xa2, 0x71, 0xdd, 0xe7, 0x9f, 0xd9, 0x9e, 0x19, - 0x4f, 0xef, 0xd9, 0xf2, 0xb2, 0x9b, 0x9f, 0xbe, 0xec, 0x1e, 0x44, 0xc0, 0xca, 0x79, 0x10, 0xe5, - 0x2c, 0xbd, 0x9a, 0x60, 0xb2, 0x5c, 0xac, 0x5a, 0x83, 0x1d, 0x6c, 0x44, 0x5c, 0xb0, 0x41, 0x45, - 0xaf, 0x15, 0x7c, 0x79, 0x20, 0x0e, 0x46, 0x6a, 0xc2, 0xcc, 0xce, 0x09, 0x63, 0x71, 0xce, 0xea, - 0xd6, 0x9f, 0xac, 0xbb, 0xfa, 0x0a, 0xa8, 0x47, 0xff, 0x08, 0x06, 0xe3, 0x15, 0x44, 0x03, 0xae, - 0x8a, 0xd5, 0x2b, 0xf8, 0xf2, 0x90, 0x81, 0x83, 0x6b, 0xc0, 0x19, 0x6b, 0xc0, 0x99, 0xcf, 0x03, - 0x97, 0x7f, 0x16, 0xb8, 0x9c, 0xa5, 0x7b, 0x10, 0xe9, 0x16, 0xac, 0x8e, 0xf1, 0xf6, 0x4a, 0x0f, - 0x8a, 0xdc, 0x1e, 0x36, 0xff, 0x16, 0x8a, 0x42, 0xa7, 0x4e, 0x9f, 0x8b, 0xdd, 0xd1, 0x6b, 0xc0, - 0xff, 0xf6, 0x38, 0xa1, 0xe4, 0x27, 0x6c, 0xaa, 0x8d, 0xde, 0x5d, 0xfd, 0x49, 0x15, 0x4f, 0x67, - 0xef, 0xb1, 0x2c, 0x4b, 0xfc, 0x0e, 0x90, 0x13, 0x8c, 0xb3, 0xa1, 0x93, 0xf2, 0x4a, 0xef, 0xcb, - 0x5b, 0xeb, 0x38, 0xeb, 0x2c, 0xd5, 0xe9, 0x29, 0x2e, 0x2e, 0x01, 0x20, 0xab, 0xa1, 0x2b, 0x93, - 0x75, 0xf6, 0xd7, 0x7a, 0x32, 0x4f, 0xe3, 0xf8, 0x66, 0xea, 0x6a, 0xb7, 0x53, 0x57, 0xbb, 0x9b, - 0xba, 0xda, 0xc3, 0xd4, 0x05, 0xff, 0xce, 0x5c, 0xf0, 0xff, 0xcc, 0x05, 0x37, 0x33, 0x17, 0xdc, - 0xce, 0x5c, 0xf0, 0x6e, 0xe6, 0x82, 0xf7, 0x33, 0x57, 0x7b, 0x98, 0xb9, 0xe0, 0xbf, 0x7b, 0x57, - 0xbb, 0xbd, 0x77, 0xb5, 0xbb, 0x7b, 0x57, 0xfb, 0x33, 0xcf, 0x38, 0x88, 0x51, 0xbb, 0x6d, 0x0a, - 0x52, 0xdf, 0x7f, 0x08, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x72, 0x44, 0x83, 0xb0, 0x06, 0x00, 0x00, + // 783 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0xc7, 0xb9, 0xe2, 0x92, 0x5a, 0xad, 0x2c, 0x97, 0x5e, 0x7f, 0x94, 0xa2, 0x01, 0x5a, 0x10, + 0x50, 0x40, 0x28, 0x5a, 0xb9, 0x70, 0x81, 0x16, 0x3d, 0xf4, 0x60, 0x19, 0x30, 0x54, 0xa2, 0xed, + 0x81, 0x2e, 0x0a, 0xb4, 0x17, 0x81, 0x92, 0xd6, 0x12, 0x61, 0x89, 0x54, 0xb9, 0x54, 0x63, 0xf9, + 0x94, 0x47, 0xc8, 0x63, 0x04, 0xc8, 0x13, 0xe4, 0x9a, 0x93, 0x6f, 0xf1, 0xd1, 0xa7, 0x20, 0x92, + 0x2f, 0x39, 0xfa, 0x11, 0x82, 0xfd, 0x90, 0x28, 0x19, 0x0a, 0x1c, 0x03, 0xb9, 0x71, 0xfe, 0xff, + 0xd9, 0xd9, 0x99, 0xdf, 0x0e, 0x71, 0x21, 0x19, 0x75, 0xea, 0xa3, 0x24, 0x4e, 0x63, 0x62, 0xa6, + 0xfd, 0x20, 0x8a, 0x99, 0x53, 0x4c, 0x27, 0x23, 0xca, 0xa4, 0xe8, 0x7c, 0xdf, 0x0b, 0xd3, 0xfe, + 0xb8, 0x5d, 0xef, 0xc4, 0xc3, 0xc3, 0x5e, 0xdc, 0x8b, 0x0f, 0x85, 0xdc, 0x1e, 0x9f, 0x8b, 0x48, + 0x04, 0xe2, 0x4b, 0xa5, 0x97, 0x7b, 0x71, 0xdc, 0x1b, 0xd0, 0x2c, 0x2b, 0x88, 0x26, 0xd2, 0xaa, + 0xbe, 0xce, 0xe1, 0xd2, 0x19, 0x4d, 0x42, 0xca, 0x7c, 0xfa, 0xdf, 0x98, 0xb2, 0x94, 0x94, 0x31, + 0x1a, 0x86, 0x51, 0x2b, 0x0d, 0x87, 0xd4, 0x06, 0x15, 0x50, 0xd3, 0xfd, 0xfc, 0x30, 0x8c, 0xfe, + 0x0a, 0x87, 0x54, 0x58, 0xc1, 0xa5, 0xb4, 0x72, 0xca, 0x0a, 0x2e, 0x85, 0xf5, 0x13, 0xb7, 0xd2, + 0x4e, 0x9f, 0x26, 0xcc, 0xd6, 0x2b, 0x7a, 0xad, 0x78, 0xb4, 0x53, 0x97, 0x9d, 0xd7, 0x7f, 0x0f, + 0xda, 0x74, 0xf0, 0x87, 0x34, 0x1b, 0xf0, 0xfa, 0xdd, 0x81, 0xe6, 0x2f, 0x72, 0xc9, 0x01, 0x2e, + 0xb2, 0x8b, 0x70, 0xd4, 0xea, 0xf4, 0xc7, 0xd1, 0x05, 0xb3, 0x51, 0x05, 0xd4, 0x90, 0x8f, 0xb9, + 0x74, 0x22, 0x14, 0xf2, 0x2d, 0x36, 0xfa, 0x61, 0x94, 0x32, 0xbb, 0x50, 0x01, 0xa2, 0xaa, 0x9c, + 0xa5, 0x3e, 0x9f, 0xa5, 0x7e, 0x1c, 0x4d, 0x7c, 0x99, 0x42, 0x7e, 0xc5, 0xfb, 0x2c, 0x4d, 0x68, + 0x30, 0x0c, 0xa3, 0x9e, 0xaa, 0xd8, 0x6a, 0xf3, 0x9b, 0x5a, 0x2c, 0xbc, 0xa2, 0x76, 0xb7, 0x02, + 0x6a, 0xd0, 0xb7, 0x17, 0x29, 0xf2, 0x86, 0x06, 0x4f, 0x38, 0x0b, 0xaf, 0xa8, 0x07, 0x11, 0xb4, + 0x0c, 0x0f, 0x22, 0xc3, 0x32, 0x3d, 0x88, 0x4c, 0x2b, 0xef, 0x41, 0x94, 0xb7, 0x90, 0x07, 0x11, + 0xb6, 0x8a, 0x1e, 0x44, 0x45, 0x6b, 0xc3, 0x83, 0x68, 0xc3, 0x2a, 0x79, 0x10, 0x95, 0xac, 0xcd, + 0xea, 0xcf, 0xd8, 0x38, 0x4b, 0x83, 0x94, 0x91, 0x3a, 0xde, 0x3e, 0xa7, 0x7c, 0xa0, 0x6e, 0x2b, + 0x8c, 0xba, 0xf4, 0xb2, 0xd5, 0x9e, 0xa4, 0x94, 0x09, 0x7a, 0xd0, 0xdf, 0x52, 0xd6, 0x6f, 0xdc, + 0x69, 0x70, 0xa3, 0x7a, 0x9b, 0xc3, 0x9b, 0x73, 0xe8, 0x6c, 0x14, 0x47, 0x8c, 0x92, 0x1a, 0x36, + 0x99, 0x50, 0xc4, 0xa9, 0xe2, 0xd1, 0xe6, 0x9c, 0x9e, 0xcc, 0x6b, 0x6a, 0xbe, 0xf2, 0x89, 0x83, + 0xf3, 0xcf, 0x82, 0x24, 0x0a, 0xa3, 0x9e, 0x78, 0x83, 0x42, 0x53, 0xf3, 0xe7, 0x02, 0xf9, 0x6e, + 0x0e, 0x4b, 0xff, 0x34, 0xac, 0xa6, 0x36, 0xc7, 0xf5, 0x0d, 0x36, 0x18, 0xef, 0xdf, 0x86, 0x22, + 0xbb, 0xb4, 0xb8, 0x92, 0x8b, 0x3c, 0x4d, 0xb8, 0xe4, 0x14, 0x5b, 0x19, 0x55, 0xd5, 0xa4, 0x21, + 0x4e, 0x94, 0xb3, 0x13, 0xdc, 0x97, 0xad, 0x0a, 0x9e, 0x4d, 0xcd, 0xff, 0x6a, 0x71, 0x48, 0xea, + 0xe4, 0x1f, 0xfc, 0xf5, 0xc3, 0x3a, 0xf3, 0x67, 0x37, 0x45, 0xb9, 0x83, 0x75, 0xe5, 0x96, 0x1e, + 0xa9, 0xa9, 0xf9, 0xbb, 0x0f, 0x8a, 0x2a, 0x17, 0x61, 0x33, 0xa1, 0x6c, 0x3c, 0x48, 0xab, 0xaf, + 0x00, 0xde, 0x12, 0x0b, 0xf7, 0x67, 0x30, 0xcc, 0x76, 0x7a, 0x47, 0x4c, 0x9a, 0xa4, 0x82, 0x8b, + 0xee, 0xcb, 0x80, 0x58, 0x58, 0xa7, 0x51, 0x57, 0x4c, 0xaf, 0xfb, 0xfc, 0x33, 0x5b, 0x36, 0xe3, + 0xf1, 0x65, 0x5b, 0xde, 0x78, 0xf3, 0xf3, 0x37, 0xde, 0x83, 0x08, 0x58, 0x39, 0x0f, 0xa2, 0x9c, + 0xa5, 0x57, 0x13, 0x4c, 0x96, 0x9b, 0x55, 0xbb, 0xb0, 0x83, 0x8d, 0x88, 0x0b, 0x36, 0xa8, 0xe8, + 0xb5, 0x82, 0x2f, 0x03, 0xe2, 0x60, 0xa4, 0x9e, 0x99, 0xd9, 0x39, 0x61, 0x2c, 0xe2, 0xac, 0x6f, + 0xfd, 0xd1, 0xbe, 0xab, 0x6f, 0x80, 0xba, 0xf4, 0xef, 0x60, 0x30, 0x5e, 0x41, 0x34, 0xe0, 0xaa, + 0xd8, 0xbf, 0x82, 0x2f, 0x83, 0x0c, 0x1c, 0x5c, 0x03, 0xce, 0x58, 0x03, 0xce, 0x7c, 0x1a, 0xb8, + 0xfc, 0x93, 0xc0, 0xe5, 0x2c, 0xdd, 0x83, 0x48, 0xb7, 0x60, 0x75, 0x8c, 0xb7, 0x57, 0x66, 0x50, + 0xe4, 0xf6, 0xb0, 0xf9, 0xbf, 0x50, 0x14, 0x3a, 0x15, 0x7d, 0x29, 0x76, 0x47, 0x6f, 0x01, 0xff, + 0xe5, 0xe3, 0x84, 0x92, 0x5f, 0xb0, 0xa9, 0xd6, 0x7a, 0x77, 0xf5, 0x4f, 0x55, 0x3c, 0x9d, 0xbd, + 0x87, 0xb2, 0x6c, 0xf1, 0x07, 0x40, 0x4e, 0x30, 0xce, 0x1e, 0x9d, 0x94, 0x57, 0x66, 0x5f, 0xde, + 0x5a, 0xc7, 0x59, 0x67, 0xa9, 0x49, 0x4f, 0x71, 0x71, 0x09, 0x00, 0x59, 0x4d, 0x5d, 0x79, 0x59, + 0x67, 0x7f, 0xad, 0x27, 0xeb, 0x34, 0x8e, 0xaf, 0xa7, 0xae, 0x76, 0x33, 0x75, 0xb5, 0xdb, 0xa9, + 0xab, 0xdd, 0x4f, 0x5d, 0xf0, 0x7c, 0xe6, 0x82, 0x97, 0x33, 0x17, 0x5c, 0xcf, 0x5c, 0x70, 0x33, + 0x73, 0xc1, 0xfb, 0x99, 0x0b, 0x3e, 0xcc, 0x5c, 0xed, 0x7e, 0xe6, 0x82, 0x17, 0x77, 0xae, 0x76, + 0x73, 0xe7, 0x6a, 0xb7, 0x77, 0xae, 0xf6, 0x6f, 0x9e, 0x71, 0x10, 0xa3, 0x76, 0xdb, 0x14, 0xa4, + 0x7e, 0xfc, 0x18, 0x00, 0x00, 0xff, 0xff, 0xb4, 0xb0, 0xa8, 0x21, 0xb5, 0x06, 0x00, 0x00, } func (this *SeriesRequest) Equal(that interface{}) bool { @@ -2079,7 +2079,7 @@ func (this *SeriesResponse_StreamingSeriesChunks) String() string { return "nil" } s := strings.Join([]string{`&SeriesResponse_StreamingSeriesChunks{`, - `StreamingSeriesChunks:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeriesChunks), "StreamSeriesChunks", "StreamSeriesChunks", 1) + `,`, + `StreamingSeriesChunks:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeriesChunks), "StreamSeriesChunksBatch", "StreamSeriesChunksBatch", 1) + `,`, `}`, }, "") return s @@ -2655,7 +2655,7 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &StreamSeriesChunks{} + v := &StreamSeriesChunksBatch{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index 688f013c340..f2239a8a11f 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -111,7 +111,7 @@ message SeriesResponse { StreamSeriesBatch streaming_series = 5; - StreamSeriesChunks streaming_series_chunks = 6; + StreamSeriesChunksBatch streaming_series_chunks = 6; } } diff --git a/pkg/storegateway/storepb/types.pb.go b/pkg/storegateway/storepb/types.pb.go index 93491a92a81..a7f65a4df9e 100644 --- a/pkg/storegateway/storepb/types.pb.go +++ b/pkg/storegateway/storepb/types.pb.go @@ -76,7 +76,7 @@ var LabelMatcher_Type_value = map[string]int32{ } func (LabelMatcher_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6, 0} + return fileDescriptor_d938547f84707355, []int{7, 0} } type Chunk struct { @@ -263,6 +263,42 @@ func (m *StreamSeriesChunks) XXX_DiscardUnknown() { var xxx_messageInfo_StreamSeriesChunks proto.InternalMessageInfo +type StreamSeriesChunksBatch struct { + Chunks []*StreamSeriesChunks `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` +} + +func (m *StreamSeriesChunksBatch) Reset() { *m = StreamSeriesChunksBatch{} } +func (*StreamSeriesChunksBatch) ProtoMessage() {} +func (*StreamSeriesChunksBatch) Descriptor() ([]byte, []int) { + return fileDescriptor_d938547f84707355, []int{5} +} +func (m *StreamSeriesChunksBatch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StreamSeriesChunksBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StreamSeriesChunksBatch.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StreamSeriesChunksBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamSeriesChunksBatch.Merge(m, src) +} +func (m *StreamSeriesChunksBatch) XXX_Size() int { + return m.Size() +} +func (m *StreamSeriesChunksBatch) XXX_DiscardUnknown() { + xxx_messageInfo_StreamSeriesChunksBatch.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamSeriesChunksBatch proto.InternalMessageInfo + type AggrChunk struct { MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` MaxTime int64 `protobuf:"varint,2,opt,name=max_time,json=maxTime,proto3" json:"max_time,omitempty"` @@ -272,7 +308,7 @@ type AggrChunk struct { func (m *AggrChunk) Reset() { *m = AggrChunk{} } func (*AggrChunk) ProtoMessage() {} func (*AggrChunk) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{5} + return fileDescriptor_d938547f84707355, []int{6} } func (m *AggrChunk) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -311,7 +347,7 @@ type LabelMatcher struct { func (m *LabelMatcher) Reset() { *m = LabelMatcher{} } func (*LabelMatcher) ProtoMessage() {} func (*LabelMatcher) Descriptor() ([]byte, []int) { - return fileDescriptor_d938547f84707355, []int{6} + return fileDescriptor_d938547f84707355, []int{7} } func (m *LabelMatcher) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -348,6 +384,7 @@ func init() { proto.RegisterType((*StreamingSeries)(nil), "thanos.StreamingSeries") proto.RegisterType((*StreamSeriesBatch)(nil), "thanos.StreamSeriesBatch") proto.RegisterType((*StreamSeriesChunks)(nil), "thanos.StreamSeriesChunks") + proto.RegisterType((*StreamSeriesChunksBatch)(nil), "thanos.StreamSeriesChunksBatch") proto.RegisterType((*AggrChunk)(nil), "thanos.AggrChunk") proto.RegisterType((*LabelMatcher)(nil), "thanos.LabelMatcher") } @@ -355,49 +392,50 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 668 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0xf6, 0x24, 0x4e, 0xe2, 0x4c, 0xda, 0x5b, 0x77, 0x5a, 0xdd, 0xa6, 0x5d, 0x4c, 0x73, 0xbd, - 0x8a, 0xae, 0x54, 0x07, 0x4a, 0x37, 0x48, 0x6c, 0x1a, 0x14, 0x54, 0x22, 0xa0, 0xed, 0xb4, 0x48, - 0x08, 0x21, 0x45, 0xe3, 0x64, 0xe2, 0x8c, 0x1a, 0xff, 0xc8, 0x9e, 0x40, 0xc2, 0xaa, 0x8f, 0xc0, - 0x2b, 0xb0, 0xe3, 0x45, 0x90, 0xba, 0xa3, 0xcb, 0x8a, 0x45, 0x45, 0xdc, 0x0d, 0xcb, 0x3e, 0x02, - 0xf2, 0x8c, 0x03, 0x29, 0xdd, 0x94, 0x0d, 0xab, 0xcc, 0x39, 0xdf, 0x77, 0xce, 0xf7, 0xcd, 0xc9, - 0x1c, 0xc3, 0x8a, 0x98, 0x84, 0x2c, 0xb6, 0xc3, 0x28, 0x10, 0x01, 0x2a, 0x8a, 0x01, 0xf5, 0x83, - 0x78, 0x63, 0xcb, 0xe5, 0x62, 0x30, 0x72, 0xec, 0x6e, 0xe0, 0x35, 0xdc, 0xc0, 0x0d, 0x1a, 0x12, - 0x76, 0x46, 0x7d, 0x19, 0xc9, 0x40, 0x9e, 0x54, 0xd9, 0xc6, 0xbd, 0x79, 0x7a, 0x44, 0xfb, 0xd4, - 0xa7, 0x0d, 0x8f, 0x7b, 0x3c, 0x6a, 0x84, 0x27, 0xae, 0x3a, 0x85, 0x8e, 0xfa, 0x55, 0x15, 0xd6, - 0x17, 0x00, 0x0b, 0x8f, 0x07, 0x23, 0xff, 0x04, 0xfd, 0x0f, 0xf5, 0xd4, 0x41, 0x15, 0xd4, 0x40, - 0xfd, 0x9f, 0xed, 0x7f, 0x6d, 0xe5, 0xc0, 0x96, 0xa0, 0xdd, 0xf2, 0xbb, 0x41, 0x8f, 0xfb, 0x2e, - 0x91, 0x1c, 0x74, 0x00, 0xf5, 0x1e, 0x15, 0xb4, 0x9a, 0xab, 0x81, 0xfa, 0x42, 0xf3, 0xd1, 0xd9, - 0xe5, 0xa6, 0xf6, 0xf5, 0x72, 0x73, 0xe7, 0x2e, 0xea, 0xf6, 0x4b, 0x3f, 0xa6, 0x7d, 0xd6, 0x9c, - 0x08, 0x76, 0x34, 0xe4, 0x5d, 0x46, 0x64, 0x27, 0x6b, 0x0f, 0x1a, 0x33, 0x0d, 0xb4, 0x08, 0xcb, - 0x52, 0xb5, 0xf3, 0x6a, 0x9f, 0x98, 0x1a, 0x5a, 0x81, 0x4b, 0x2a, 0xdc, 0xe3, 0xb1, 0x08, 0xdc, - 0x88, 0x7a, 0x26, 0x40, 0x55, 0xb8, 0xaa, 0x92, 0x4f, 0x86, 0x01, 0x15, 0xbf, 0x90, 0x9c, 0xf5, - 0x11, 0xc0, 0xe2, 0x11, 0x8b, 0x38, 0x8b, 0x51, 0x1f, 0x16, 0x87, 0xd4, 0x61, 0xc3, 0xb8, 0x0a, - 0x6a, 0xf9, 0x7a, 0x65, 0x7b, 0xc5, 0xee, 0x06, 0x91, 0x60, 0xe3, 0xd0, 0xb1, 0x9f, 0xa5, 0xf9, - 0x03, 0xca, 0xa3, 0xe6, 0xc3, 0xcc, 0xfd, 0xfd, 0x3b, 0xb9, 0x97, 0x75, 0xbb, 0x3d, 0x1a, 0x0a, - 0x16, 0x91, 0xac, 0x3b, 0x6a, 0xc0, 0x62, 0x37, 0x35, 0x13, 0x57, 0x73, 0x52, 0x67, 0x79, 0x36, - 0xbc, 0x5d, 0xd7, 0x8d, 0xa4, 0xcd, 0xa6, 0x9e, 0xaa, 0x90, 0x8c, 0x66, 0x4d, 0xe0, 0xd2, 0x91, - 0x88, 0x18, 0xf5, 0xb8, 0xef, 0xfe, 0x5d, 0xaf, 0xd6, 0x7b, 0xb8, 0xac, 0xa4, 0x95, 0x6e, 0x93, - 0x8a, 0xee, 0x20, 0xbd, 0x40, 0x2c, 0xc3, 0x4c, 0x7c, 0x6d, 0x76, 0x81, 0xdf, 0x5c, 0x92, 0x8c, - 0x86, 0x76, 0xe0, 0x1a, 0x8f, 0x3b, 0xcc, 0xef, 0x75, 0x82, 0x7e, 0x47, 0xe5, 0x3a, 0xb1, 0xe4, - 0xca, 0x37, 0x61, 0x90, 0x15, 0x1e, 0xb7, 0xfc, 0xde, 0x7e, 0x5f, 0xd5, 0xa9, 0x36, 0xd6, 0x00, - 0xa2, 0x79, 0x6d, 0x39, 0x99, 0x18, 0xfd, 0x07, 0x17, 0xb2, 0x0e, 0xdc, 0xef, 0xb1, 0xb1, 0x7c, - 0x80, 0x3a, 0xa9, 0xa8, 0xdc, 0xd3, 0x34, 0xf5, 0xe7, 0x03, 0x3e, 0x05, 0xb0, 0xfc, 0x13, 0x43, - 0xeb, 0xd0, 0xf0, 0xb8, 0xdf, 0x11, 0xdc, 0x53, 0xcf, 0x3b, 0x4f, 0x4a, 0x1e, 0xf7, 0x8f, 0xb9, - 0xc7, 0x24, 0x44, 0xc7, 0x0a, 0xca, 0x65, 0x10, 0x1d, 0x4b, 0x68, 0x13, 0xe6, 0x23, 0xfa, 0xae, - 0x9a, 0xaf, 0x81, 0x7a, 0x65, 0x7b, 0xf1, 0xc6, 0x3e, 0x90, 0x14, 0x69, 0xeb, 0x86, 0x6e, 0x16, - 0xda, 0xba, 0x51, 0x30, 0x8b, 0x6d, 0xdd, 0x28, 0x9a, 0xa5, 0xb6, 0x6e, 0x94, 0x4c, 0xa3, 0xad, - 0x1b, 0x86, 0x59, 0xb6, 0x3e, 0x03, 0xb8, 0x20, 0xff, 0x81, 0xe7, 0xe9, 0x88, 0x59, 0x84, 0xb6, - 0x6e, 0x2c, 0xd8, 0xfa, 0xac, 0xe1, 0x3c, 0xc7, 0x3e, 0x9e, 0x84, 0x2c, 0xdb, 0x31, 0x04, 0x75, - 0x9f, 0x66, 0xae, 0xca, 0x44, 0x9e, 0xd1, 0x2a, 0x2c, 0xbc, 0xa5, 0xc3, 0x11, 0x93, 0xa6, 0xca, - 0x44, 0x05, 0xd6, 0x1b, 0xa8, 0xa7, 0x75, 0xe9, 0xa2, 0xcc, 0x37, 0xeb, 0xb4, 0x0e, 0x4d, 0x0d, - 0xad, 0x42, 0xf3, 0x46, 0xf2, 0x45, 0xeb, 0xd0, 0x04, 0xb7, 0xa8, 0xa4, 0x65, 0xe6, 0x6e, 0x53, - 0x49, 0xcb, 0xcc, 0x37, 0x77, 0xcf, 0xa6, 0x58, 0x3b, 0x9f, 0x62, 0xed, 0x62, 0x8a, 0xb5, 0xeb, - 0x29, 0x06, 0xa7, 0x09, 0x06, 0x9f, 0x12, 0x0c, 0xce, 0x12, 0x0c, 0xce, 0x13, 0x0c, 0xbe, 0x25, - 0x18, 0x7c, 0x4f, 0xb0, 0x76, 0x9d, 0x60, 0xf0, 0xe1, 0x0a, 0x6b, 0xe7, 0x57, 0x58, 0xbb, 0xb8, - 0xc2, 0xda, 0xeb, 0x52, 0x2c, 0x82, 0x88, 0x85, 0x8e, 0x53, 0x94, 0xdf, 0x9a, 0x07, 0x3f, 0x02, - 0x00, 0x00, 0xff, 0xff, 0xb3, 0x55, 0x72, 0x7c, 0xe3, 0x04, 0x00, 0x00, + // 685 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4f, 0x6f, 0x12, 0x41, + 0x14, 0xdf, 0x81, 0x05, 0x96, 0xa1, 0xb5, 0xdb, 0x69, 0x63, 0x69, 0x0f, 0x53, 0xdc, 0x13, 0x31, + 0xe9, 0xa2, 0xd8, 0x8b, 0x89, 0x97, 0x62, 0x30, 0x95, 0x58, 0xdb, 0x4e, 0x6b, 0x62, 0x8c, 0x09, + 0x19, 0x60, 0x58, 0x26, 0x65, 0xff, 0x64, 0x77, 0x50, 0xf0, 0xd4, 0x8f, 0xe0, 0x57, 0xf0, 0xe6, + 0x17, 0x31, 0xe9, 0xcd, 0x1e, 0x1b, 0x0f, 0x8d, 0xd0, 0x8b, 0xc7, 0x7e, 0x04, 0xb3, 0x33, 0x4b, + 0x05, 0x7b, 0xa9, 0x17, 0x4f, 0xcc, 0x7b, 0xef, 0xf7, 0x7e, 0xbf, 0xdf, 0x1b, 0xe6, 0x2d, 0x2c, + 0x88, 0x51, 0xc0, 0x22, 0x3b, 0x08, 0x7d, 0xe1, 0xa3, 0xac, 0xe8, 0x51, 0xcf, 0x8f, 0x36, 0xb6, + 0x1c, 0x2e, 0x7a, 0x83, 0x96, 0xdd, 0xf6, 0xdd, 0x8a, 0xe3, 0x3b, 0x7e, 0x45, 0x96, 0x5b, 0x83, + 0xae, 0x8c, 0x64, 0x20, 0x4f, 0xaa, 0x6d, 0xe3, 0xd1, 0x2c, 0x3c, 0xa4, 0x5d, 0xea, 0xd1, 0x8a, + 0xcb, 0x5d, 0x1e, 0x56, 0x82, 0x13, 0x47, 0x9d, 0x82, 0x96, 0xfa, 0x55, 0x1d, 0xd6, 0x77, 0x00, + 0x33, 0xcf, 0x7b, 0x03, 0xef, 0x04, 0x3d, 0x84, 0x7a, 0xec, 0xa0, 0x08, 0x4a, 0xa0, 0x7c, 0xaf, + 0x7a, 0xdf, 0x56, 0x0e, 0x6c, 0x59, 0xb4, 0xeb, 0x5e, 0xdb, 0xef, 0x70, 0xcf, 0x21, 0x12, 0x83, + 0x0e, 0xa0, 0xde, 0xa1, 0x82, 0x16, 0x53, 0x25, 0x50, 0x5e, 0xa8, 0x3d, 0x3b, 0xbb, 0xdc, 0xd4, + 0x7e, 0x5c, 0x6e, 0x6e, 0xdf, 0x45, 0xdd, 0x7e, 0xe3, 0x45, 0xb4, 0xcb, 0x6a, 0x23, 0xc1, 0x8e, + 0xfa, 0xbc, 0xcd, 0x88, 0x64, 0xb2, 0x76, 0xa1, 0x31, 0xd5, 0x40, 0x8b, 0x30, 0x2f, 0x55, 0x9b, + 0x6f, 0xf7, 0x89, 0xa9, 0xa1, 0x15, 0xb8, 0xa4, 0xc2, 0x5d, 0x1e, 0x09, 0xdf, 0x09, 0xa9, 0x6b, + 0x02, 0x54, 0x84, 0xab, 0x2a, 0xf9, 0xa2, 0xef, 0x53, 0xf1, 0xa7, 0x92, 0xb2, 0xbe, 0x00, 0x98, + 0x3d, 0x62, 0x21, 0x67, 0x11, 0xea, 0xc2, 0x6c, 0x9f, 0xb6, 0x58, 0x3f, 0x2a, 0x82, 0x52, 0xba, + 0x5c, 0xa8, 0xae, 0xd8, 0x6d, 0x3f, 0x14, 0x6c, 0x18, 0xb4, 0xec, 0x57, 0x71, 0xfe, 0x80, 0xf2, + 0xb0, 0xf6, 0x34, 0x71, 0xff, 0xf8, 0x4e, 0xee, 0x65, 0xdf, 0x4e, 0x87, 0x06, 0x82, 0x85, 0x24, + 0x61, 0x47, 0x15, 0x98, 0x6d, 0xc7, 0x66, 0xa2, 0x62, 0x4a, 0xea, 0x2c, 0x4f, 0x2f, 0x6f, 0xc7, + 0x71, 0x42, 0x69, 0xb3, 0xa6, 0xc7, 0x2a, 0x24, 0x81, 0x59, 0x23, 0xb8, 0x74, 0x24, 0x42, 0x46, + 0x5d, 0xee, 0x39, 0xff, 0xd7, 0xab, 0xf5, 0x09, 0x2e, 0x2b, 0x69, 0xa5, 0x5b, 0xa3, 0xa2, 0xdd, + 0x8b, 0x07, 0x88, 0x64, 0x98, 0x88, 0xaf, 0x4d, 0x07, 0xf8, 0xcb, 0x25, 0x49, 0x60, 0x68, 0x1b, + 0xae, 0xf1, 0xa8, 0xc9, 0xbc, 0x4e, 0xd3, 0xef, 0x36, 0x55, 0xae, 0x19, 0x49, 0xac, 0x7c, 0x13, + 0x06, 0x59, 0xe1, 0x51, 0xdd, 0xeb, 0xec, 0x77, 0x55, 0x9f, 0xa2, 0xb1, 0x7a, 0x10, 0xcd, 0x6a, + 0xcb, 0x9b, 0x89, 0xd0, 0x03, 0xb8, 0x90, 0x30, 0x70, 0xaf, 0xc3, 0x86, 0xf2, 0x01, 0xea, 0xa4, + 0xa0, 0x72, 0x2f, 0xe3, 0xd4, 0xbf, 0x5f, 0xf0, 0x1e, 0x5c, 0xbb, 0xad, 0xa4, 0x66, 0xad, 0xde, + 0x70, 0xa9, 0x59, 0x37, 0xe6, 0x67, 0x9d, 0x6d, 0xb8, 0xa1, 0x3b, 0x05, 0x30, 0x7f, 0x23, 0x85, + 0xd6, 0xa1, 0xe1, 0x72, 0xaf, 0x29, 0xb8, 0xab, 0xb6, 0x25, 0x4d, 0x72, 0x2e, 0xf7, 0x8e, 0xb9, + 0xcb, 0x64, 0x89, 0x0e, 0x55, 0x29, 0x95, 0x94, 0xe8, 0x50, 0x96, 0x36, 0x61, 0x3a, 0xa4, 0x1f, + 0x8b, 0xe9, 0x12, 0x28, 0x17, 0xaa, 0x8b, 0x73, 0xeb, 0x45, 0xe2, 0x4a, 0x43, 0x37, 0x74, 0x33, + 0xd3, 0xd0, 0x8d, 0x8c, 0x99, 0x6d, 0xe8, 0x46, 0xd6, 0xcc, 0x35, 0x74, 0x23, 0x67, 0x1a, 0x0d, + 0xdd, 0x30, 0xcc, 0xbc, 0xf5, 0x0d, 0xc0, 0x05, 0xf9, 0x87, 0xee, 0xc5, 0x53, 0xb0, 0x10, 0x6d, + 0xcd, 0xed, 0xeb, 0xfa, 0x94, 0x70, 0x16, 0x63, 0x1f, 0x8f, 0x02, 0x96, 0xac, 0x2c, 0x82, 0xba, + 0x47, 0x13, 0x57, 0x79, 0x22, 0xcf, 0x68, 0x15, 0x66, 0x3e, 0xd0, 0xfe, 0x80, 0x49, 0x53, 0x79, + 0xa2, 0x02, 0xeb, 0x3d, 0xd4, 0xe3, 0xbe, 0x78, 0xef, 0x66, 0xc9, 0x9a, 0xf5, 0x43, 0x53, 0x43, + 0xab, 0xd0, 0x9c, 0x4b, 0xbe, 0xae, 0x1f, 0x9a, 0xe0, 0x16, 0x94, 0xd4, 0xcd, 0xd4, 0x6d, 0x28, + 0xa9, 0x9b, 0xe9, 0xda, 0xce, 0xd9, 0x18, 0x6b, 0xe7, 0x63, 0xac, 0x5d, 0x8c, 0xb1, 0x76, 0x3d, + 0xc6, 0xe0, 0x74, 0x82, 0xc1, 0xd7, 0x09, 0x06, 0x67, 0x13, 0x0c, 0xce, 0x27, 0x18, 0xfc, 0x9c, + 0x60, 0xf0, 0x6b, 0x82, 0xb5, 0xeb, 0x09, 0x06, 0x9f, 0xaf, 0xb0, 0x76, 0x7e, 0x85, 0xb5, 0x8b, + 0x2b, 0xac, 0xbd, 0xcb, 0x45, 0xc2, 0x0f, 0x59, 0xd0, 0x6a, 0x65, 0xe5, 0xa7, 0xeb, 0xc9, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xdd, 0x36, 0x74, 0x32, 0x05, 0x00, 0x00, } func (x Chunk_Encoding) String() string { @@ -571,6 +609,35 @@ func (this *StreamSeriesChunks) Equal(that interface{}) bool { } return true } +func (this *StreamSeriesChunksBatch) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StreamSeriesChunksBatch) + if !ok { + that2, ok := that.(StreamSeriesChunksBatch) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Chunks) != len(that1.Chunks) { + return false + } + for i := range this.Chunks { + if !this.Chunks[i].Equal(that1.Chunks[i]) { + return false + } + } + return true +} func (this *AggrChunk) Equal(that interface{}) bool { if that == nil { return this == nil @@ -699,6 +766,18 @@ func (this *StreamSeriesChunks) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *StreamSeriesChunksBatch) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&storepb.StreamSeriesChunksBatch{") + if this.Chunks != nil { + s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *AggrChunk) GoString() string { if this == nil { return "nil" @@ -948,6 +1027,43 @@ func (m *StreamSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *StreamSeriesChunksBatch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StreamSeriesChunksBatch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StreamSeriesChunksBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Chunks) > 0 { + for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + func (m *AggrChunk) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -1132,6 +1248,21 @@ func (m *StreamSeriesChunks) Size() (n int) { return n } +func (m *StreamSeriesChunksBatch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Chunks) > 0 { + for _, e := range m.Chunks { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + return n +} + func (m *AggrChunk) Size() (n int) { if m == nil { return 0 @@ -1246,6 +1377,21 @@ func (this *StreamSeriesChunks) String() string { }, "") return s } +func (this *StreamSeriesChunksBatch) String() string { + if this == nil { + return "nil" + } + repeatedStringForChunks := "[]*StreamSeriesChunks{" + for _, f := range this.Chunks { + repeatedStringForChunks += strings.Replace(f.String(), "StreamSeriesChunks", "StreamSeriesChunks", 1) + "," + } + repeatedStringForChunks += "}" + s := strings.Join([]string{`&StreamSeriesChunksBatch{`, + `Chunks:` + repeatedStringForChunks + `,`, + `}`, + }, "") + return s +} func (this *AggrChunk) String() string { if this == nil { return "nil" @@ -1804,6 +1950,93 @@ func (m *StreamSeriesChunks) Unmarshal(dAtA []byte) error { } return nil } +func (m *StreamSeriesChunksBatch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StreamSeriesChunksBatch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StreamSeriesChunksBatch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunks = append(m.Chunks, &StreamSeriesChunks{}) + if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *AggrChunk) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index 3bd1d76ec6a..7f8c1ba3d69 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -51,6 +51,11 @@ message StreamSeriesChunks { repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; } +message StreamSeriesChunksBatch { + repeated StreamSeriesChunks chunks = 1; +} + + message AggrChunk { int64 min_time = 1; int64 max_time = 2; From 725f759632faf649c23dd312be21a025881b14a7 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 13 Jun 2023 18:07:36 +0530 Subject: [PATCH 23/75] lint Signed-off-by: Ganesh Vernekar --- pkg/querier/block_streaming_test.go | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/pkg/querier/block_streaming_test.go b/pkg/querier/block_streaming_test.go index 97ff63586bc..53848679912 100644 --- a/pkg/querier/block_streaming_test.go +++ b/pkg/querier/block_streaming_test.go @@ -4,13 +4,15 @@ package querier import ( "fmt" - "github.com/grafana/mimir/pkg/mimirpb" - "github.com/grafana/mimir/pkg/storegateway/storepb" + "math" + "testing" + "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" - "math" - "testing" + + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/storegateway/storepb" ) func TestBlockStreamingQuerierSeriesSet(t *testing.T) { From 391a8a63647e83c8854e4674a4805fab4484abf7 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 13 Jun 2023 18:25:54 +0530 Subject: [PATCH 24/75] goroutine leak Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_querable_streaming.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/blocks_store_querable_streaming.go b/pkg/querier/blocks_store_querable_streaming.go index 11871c66cca..a86d798342a 100644 --- a/pkg/querier/blocks_store_querable_streaming.go +++ b/pkg/querier/blocks_store_querable_streaming.go @@ -70,7 +70,7 @@ func (s *SeriesChunksStreamReader) Close() { // If an error occurs while streaming, a subsequent call to GetChunks will return an error. // To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. func (s *SeriesChunksStreamReader) StartBuffering() { - s.seriesCunksChan = make(chan *storepb.StreamSeriesChunksBatch, 1) + s.seriesCunksChan = make(chan *storepb.StreamSeriesChunksBatch, 10) // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. s.errorChan = make(chan error, 1) From 3d11540ef6a7d31408a4636590bded9ac5747baa Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 14 Jun 2023 14:28:22 +0530 Subject: [PATCH 25/75] Fix most of Charles' comments Signed-off-by: Ganesh Vernekar --- pkg/querier/block.go | 33 +++-- pkg/querier/block_streaming.go | 28 ++--- pkg/querier/block_streaming_test.go | 55 +++++++-- pkg/querier/block_test.go | 2 +- pkg/querier/blocks_store_queryable.go | 12 +- pkg/querier/blocks_store_queryable_test.go | 6 +- pkg/querier/querier_test.go | 2 +- pkg/storegateway/bucket.go | 68 +++++------ pkg/storegateway/bucket_store_server_test.go | 2 +- .../bucket_streaming_readers.go} | 41 +++---- pkg/storegateway/limiter.go | 6 +- pkg/storegateway/series_refs_test.go | 8 +- pkg/storegateway/storepb/types.pb.go | 114 +++++++++--------- pkg/storegateway/storepb/types.proto | 2 +- 14 files changed, 207 insertions(+), 172 deletions(-) rename pkg/{querier/blocks_store_querable_streaming.go => storegateway/bucket_streaming_readers.go} (84%) diff --git a/pkg/querier/block.go b/pkg/querier/block.go index 01d28ac232f..d7259f9793e 100644 --- a/pkg/querier/block.go +++ b/pkg/querier/block.go @@ -108,13 +108,13 @@ func (bqs *blockQuerierSeries) Labels() labels.Labels { return bqs.labels } -func (bqs *blockQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { +func (bqs *blockQuerierSeries) Iterator(reuse chunkenc.Iterator) chunkenc.Iterator { if len(bqs.chunks) == 0 { // should not happen in practice, but we have a unit test for it return series.NewErrIterator(errors.New("no chunks")) } - it, err := newBlockQuerierSeriesIterator(bqs.Labels(), bqs.chunks) + it, err := newBlockQuerierSeriesIterator(reuse, bqs.Labels(), bqs.chunks) if err != nil { return series.NewErrIterator(err) } @@ -122,10 +122,23 @@ func (bqs *blockQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { return it } -func newBlockQuerierSeriesIterator(labels labels.Labels, chunks []storepb.AggrChunk) (*blockQuerierSeriesIterator, error) { - its := make([]iteratorWithMaxTime, 0, len(chunks)) +func newBlockQuerierSeriesIterator(reuse chunkenc.Iterator, lbls labels.Labels, chunks []storepb.AggrChunk) (*blockQuerierSeriesIterator, error) { + var it *blockQuerierSeriesIterator + r, ok := reuse.(*blockQuerierSeriesIterator) + if ok { + it = r + it.i = 0 + } else { + it = &blockQuerierSeriesIterator{} + } + if cap(it.iterators) < len(chunks) { + it.iterators = make([]iteratorWithMaxTime, len(chunks)) + } + it.iterators = it.iterators[:len(chunks)] + it.labels = lbls + it.lastT = math.MinInt64 - for _, c := range chunks { + for i, c := range chunks { var ( ch chunkenc.Chunk err error @@ -138,18 +151,18 @@ func newBlockQuerierSeriesIterator(labels labels.Labels, chunks []storepb.AggrCh case storepb.Chunk_FloatHistogram: ch, err = chunkenc.FromData(chunkenc.EncFloatHistogram, c.Raw.Data) default: - return nil, errors.Wrapf(err, "failed to initialize chunk from unknown type (%v) encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, labels, c.MinTime, c.MaxTime) + return nil, errors.Wrapf(err, "failed to initialize chunk from unknown type (%v) encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, lbls, c.MinTime, c.MaxTime) } if err != nil { - return nil, errors.Wrapf(err, "failed to initialize chunk from %v type encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, labels, c.MinTime, c.MaxTime) + return nil, errors.Wrapf(err, "failed to initialize chunk from %v type encoded raw data (series: %v min time: %d max time: %d)", c.Raw.Type, lbls, c.MinTime, c.MaxTime) } - it := ch.Iterator(nil) - its = append(its, iteratorWithMaxTime{it, c.MaxTime}) + it.iterators[i].Iterator = ch.Iterator(it.iterators[i].Iterator) + it.iterators[i].maxT = c.MaxTime } - return &blockQuerierSeriesIterator{labels: labels, iterators: its, lastT: math.MinInt64}, nil + return it, nil } // iteratorWithMaxTime is an iterator which is aware of the maxT of its embedded iterator. diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index 7af36ca22b6..ed53e9e3970 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -18,37 +18,37 @@ import ( // Implementation of storage.SeriesSet, based on individual responses from store client. type blockStreamingQuerierSeriesSet struct { series []*storepb.StreamingSeries - streamReader chunkStreamer + streamReader chunkStreamReader // next response to process - next int + nextSeriesIndex int currSeries storage.Series } -type chunkStreamer interface { +type chunkStreamReader interface { GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) } func (bqss *blockStreamingQuerierSeriesSet) Next() bool { bqss.currSeries = nil - if bqss.next >= len(bqss.series) { + if bqss.nextSeriesIndex >= len(bqss.series) { return false } - currLabels := mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels) - seriesIdxStart := bqss.next // First series in this group. We might merge with more below. - bqss.next++ + currLabels := mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.nextSeriesIndex].Labels) + seriesIdxStart := bqss.nextSeriesIndex // First series in this group. We might merge with more below. + bqss.nextSeriesIndex++ // Chunks may come in multiple responses, but as soon as the response has chunks for a new series, // we can stop searching. Series are sorted. See documentation for StoreClient.Series call for details. // The actually merging of chunks happens in the Iterator() call where chunks are fetched. - for bqss.next < len(bqss.series) && labels.Compare(currLabels, mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.next].Labels)) == 0 { - bqss.next++ + for bqss.nextSeriesIndex < len(bqss.series) && labels.Compare(currLabels, mimirpb.FromLabelAdaptersToLabels(bqss.series[bqss.nextSeriesIndex].Labels)) == 0 { + bqss.nextSeriesIndex++ } - bqss.currSeries = newBlockStreamingQuerierSeries(currLabels, seriesIdxStart, bqss.next-1, bqss.streamReader) + bqss.currSeries = newBlockStreamingQuerierSeries(currLabels, seriesIdxStart, bqss.nextSeriesIndex-1, bqss.streamReader) return true } @@ -65,7 +65,7 @@ func (bqss *blockStreamingQuerierSeriesSet) Warnings() storage.Warnings { } // newBlockStreamingQuerierSeries makes a new blockQuerierSeries. Input labels must be already sorted by name. -func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesIdxEnd int, streamReader chunkStreamer) *blockStreamingQuerierSeries { +func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesIdxEnd int, streamReader chunkStreamReader) *blockStreamingQuerierSeries { return &blockStreamingQuerierSeries{ labels: lbls, seriesIdxStart: seriesIdxStart, @@ -77,14 +77,14 @@ func newBlockStreamingQuerierSeries(lbls labels.Labels, seriesIdxStart, seriesId type blockStreamingQuerierSeries struct { labels labels.Labels seriesIdxStart, seriesIdxEnd int - streamReader chunkStreamer + streamReader chunkStreamReader } func (bqs *blockStreamingQuerierSeries) Labels() labels.Labels { return bqs.labels } -func (bqs *blockStreamingQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.Iterator { +func (bqs *blockStreamingQuerierSeries) Iterator(reuse chunkenc.Iterator) chunkenc.Iterator { // Fetch the chunks from the stream. var allChunks []storepb.AggrChunk for i := bqs.seriesIdxStart; i <= bqs.seriesIdxEnd; i++ { @@ -103,7 +103,7 @@ func (bqs *blockStreamingQuerierSeries) Iterator(_ chunkenc.Iterator) chunkenc.I return allChunks[i].MinTime < allChunks[j].MinTime }) - it, err := newBlockQuerierSeriesIterator(bqs.Labels(), allChunks) + it, err := newBlockQuerierSeriesIterator(reuse, bqs.Labels(), allChunks) if err != nil { return series.NewErrIterator(err) } diff --git a/pkg/querier/block_streaming_test.go b/pkg/querier/block_streaming_test.go index 53848679912..a65015ffc95 100644 --- a/pkg/querier/block_streaming_test.go +++ b/pkg/querier/block_streaming_test.go @@ -16,10 +16,10 @@ import ( ) func TestBlockStreamingQuerierSeriesSet(t *testing.T) { - cases := map[string]struct { - input []testSeries - expResult []testSeries + input []testSeries + expResult []testSeries + errorChunkStreamer bool }{ "simple case of one series": { input: []testSeries{ @@ -121,21 +121,58 @@ func TestBlockStreamingQuerierSeriesSet(t *testing.T) { }, }, }, + "multiple unique series but with erroring chunk streamer": { + errorChunkStreamer: true, + input: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + values: []testSample{{1, 1}, {2, 1}, {5, 10}}, + }, + { + lbls: labels.FromStrings("foo", "bar2"), + values: []testSample{{2, 2}, {9, 2}}, + }, + { + lbls: labels.FromStrings("foo", "bar3"), + values: []testSample{{3, 3}}, + }, + }, + expResult: []testSeries{ + { + lbls: labels.FromStrings("foo", "bar1"), + }, + { + lbls: labels.FromStrings("foo", "bar2"), + }, + { + lbls: labels.FromStrings("foo", "bar3"), + }, + }, + }, } for name, c := range cases { t.Run(name, func(t *testing.T) { - ss := &blockStreamingQuerierSeriesSet{streamReader: &mockChunkStreamer{series: c.input}} + ss := &blockStreamingQuerierSeriesSet{streamReader: &mockChunkStreamer{series: c.input, causeError: c.errorChunkStreamer}} for _, s := range c.input { ss.series = append(ss.series, &storepb.StreamingSeries{ Labels: mimirpb.FromLabelsToLabelAdapters(s.lbls), }) } idx := 0 + var it chunkenc.Iterator for ss.Next() { s := ss.At() require.Equal(t, c.expResult[idx].lbls, s.Labels()) - it := s.Iterator(nil) + it = s.Iterator(it) + if c.errorChunkStreamer { + require.Error(t, it.Err()) + idx++ + // If chunk streamer errors out, we still go through every + // series but we don't get any samples. So we continue here + // and check all the series. + continue + } var actSamples []testSample for it.Next() != chunkenc.ValNone { ts, val := it.At() @@ -162,11 +199,15 @@ type testSample struct { } type mockChunkStreamer struct { - series []testSeries - next int + series []testSeries + next int + causeError bool } func (m *mockChunkStreamer) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { + if m.causeError { + return nil, fmt.Errorf("mocked error") + } if m.next >= len(m.series) { return nil, fmt.Errorf("out of chunks") } diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go index 3b640c669c1..97d66ce8bb6 100644 --- a/pkg/querier/block_test.go +++ b/pkg/querier/block_test.go @@ -231,7 +231,7 @@ func TestBlockQuerierSeriesSet(t *testing.T) { { Labels: mkZLabels("__name__", "overlapping2"), Chunks: []storepb.AggrChunk{ - // entire range overlaps with the next chunk, so this chunks contributes 0 samples (it will be sorted as second) + // entire range overlaps with the nextSeriesIndex chunk, so this chunks contributes 0 samples (it will be sorted as second) createAggrChunkWithSineSamples(now.Add(3*time.Second), now.Add(7*time.Second-5*time.Millisecond), 5*time.Millisecond), }, }, diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 5d1d0996ea3..d1c432f06f8 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -583,7 +583,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg level.Debug(logger).Log("msg", "consistency check failed", "attempt", attempt, "missing blocks", strings.Join(convertULIDsToString(missingBlocks), " ")) - // The next attempt should just query the missing blocks. + // The nextSeriesIndex attempt should just query the missing blocks. remainingBlocks = missingBlocks } @@ -697,7 +697,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor spanLog = spanlogger.FromContext(ctx, q.logger) queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) reqStats = stats.FromContext(ctx) - streamReaders []*SeriesChunksStreamReader + streamReaders []*storegateway.SeriesChunksStreamReader ) // Concurrently fetch series from all clients. @@ -813,7 +813,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } reqStats.AddFetchedIndexBytes(indexBytesFetched) - var streamReader *SeriesChunksStreamReader + var streamReader *storegateway.SeriesChunksStreamReader if len(mySeries) > 0 { chunksFetched, chunkBytes := countChunksAndBytes(mySeries...) @@ -830,11 +830,11 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) } else if len(myStreamingSeries) > 0 { - reqStats.AddFetchedSeries(uint64(len(mySeries))) - streamReader = NewSeriesChunksStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) + reqStats.AddFetchedSeries(uint64(len(myStreamingSeries))) + streamReader = storegateway.NewSeriesChunksStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) level.Debug(spanLog).Log("msg", "received streaming series from store-gateway", "instance", c.RemoteAddress(), - "fetched series", len(mySeries), + "fetched series", len(myStreamingSeries), "fetched index bytes", indexBytesFetched, "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 1a7da2ce69e..063bd15443a 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -821,9 +821,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { err := it.Err() if err != nil { assert.ErrorContains(t, err, testData.expectedErr.Error()) - // TODO: it is non-trivial to match the type here. The error - // gets wrapping multiple times. Is it necessary to return the exact type? - //assert.IsType(t, testData.expectedErr, err) + assert.ErrorIs(t, err, testData.expectedErr) foundErr = true break } @@ -2133,7 +2131,7 @@ func mockStreamingSeriesChunksResponse(index uint64, chks []storepb.AggrChunk) * return &storepb.SeriesResponse{ Result: &storepb.SeriesResponse_StreamingSeriesChunks{ StreamingSeriesChunks: &storepb.StreamSeriesChunksBatch{ - Chunks: []*storepb.StreamSeriesChunks{ + Series: []*storepb.StreamSeriesChunks{ { SeriesIndex: index, Chunks: chks, diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 97b0dda7887..6c15677c01e 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -438,7 +438,7 @@ func mockTSDB(t *testing.T, mint model.Time, samples int, step, chunkOffset time ts = ts.Add(step) if cnt%samplesPerChunk == 0 { - // Simulate next chunk, restart timestamp. + // Simulate nextSeriesIndex chunk, restart timestamp. chunkStartTs = chunkStartTs.Add(chunkOffset) ts = chunkStartTs } diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 590f98957b1..f0793802b88 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -119,9 +119,9 @@ type BucketStore struct { // Query gate which limits the maximum amount of concurrent queries. queryGate gate.Gate - // chunksLimiterFactory creates a new limiter used to limit the number of chunks fetched by each Series() call. + // chunksLimiterFactory creates a new mockLimiter used to limit the number of chunks fetched by each Series() call. chunksLimiterFactory ChunksLimiterFactory - // seriesLimiterFactory creates a new limiter used to limit the number of touched series by each Series() call, + // seriesLimiterFactory creates a new mockLimiter used to limit the number of touched series by each Series() call, // or LabelName and LabelValues calls when used with matchers. seriesLimiterFactory SeriesLimiterFactory partitioners blockPartitioners @@ -788,7 +788,7 @@ func (s *BucketStore) sendSeriesChunks( } } chunksBatch := &storepb.StreamSeriesChunksBatch{ - Chunks: chunksBuffer[:0], + Series: chunksBuffer[:0], } for seriesSet.Next() { // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle @@ -799,13 +799,13 @@ func (s *BucketStore) sendSeriesChunks( if streamingChunks { // We only need to stream chunks here because the series labels have already // been sent above. - chunksBatch.Chunks = chunksBatch.Chunks[:len(chunksBatch.Chunks)+1] - last := chunksBatch.Chunks[len(chunksBatch.Chunks)-1] + chunksBatch.Series = chunksBatch.Series[:len(chunksBatch.Series)+1] + last := chunksBatch.Series[len(chunksBatch.Series)-1] last.Chunks = chks last.SeriesIndex = uint64(seriesCount - 1) batchSizeBytes += last.Size() - if (batchSizeBytes > 0 && batchSizeBytes > queryStreamBatchMessageSize) || len(chunksBatch.Chunks) >= int(req.StreamingChunksBatchSize) { + if (batchSizeBytes > 0 && batchSizeBytes > queryStreamBatchMessageSize) || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { response = storepb.NewStreamSeriesChunksResponse(chunksBatch) } } else { @@ -824,24 +824,13 @@ func (s *BucketStore) sendSeriesChunks( } if response != nil { - // Encode the message. We encode it ourselves into a PreparedMsg in order to measure - // the time it takes. - encodeBegin := time.Now() - msg := &grpc.PreparedMsg{} - if err := msg.Encode(srv, response); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) - } - encodeDuration += time.Since(encodeBegin) - - // Send the message. - sendBegin := time.Now() - if err := srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + err := s.sendChunks(srv, response, &encodeDuration, &sendDuration) + if err != nil { + return err } - sendDuration += time.Since(sendBegin) if streamingChunks { - chunksBatch.Chunks = chunksBatch.Chunks[:0] + chunksBatch.Series = chunksBatch.Series[:0] batchSizeBytes = 0 } } @@ -850,22 +839,29 @@ func (s *BucketStore) sendSeriesChunks( return errors.Wrap(seriesSet.Err(), "expand series set") } - if streamingChunks && len(chunksBatch.Chunks) > 0 { + if streamingChunks && len(chunksBatch.Series) > 0 { // Still some chunks left to send. - encodeBegin := time.Now() - msg := &grpc.PreparedMsg{} - if err := msg.Encode(srv, storepb.NewStreamSeriesChunksResponse(chunksBatch)); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) - } - encodeDuration += time.Since(encodeBegin) + return s.sendChunks(srv, storepb.NewStreamSeriesChunksResponse(chunksBatch), &encodeDuration, &sendDuration) + } - // Send the message. - sendBegin := time.Now() - if err := srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) - } - sendDuration += time.Since(sendBegin) + return nil +} + +func (s *BucketStore) sendChunks(srv storepb.Store_SeriesServer, chunks interface{}, encodeDuration, sendDuration *time.Duration) error { + // We encode it ourselves into a PreparedMsg in order to measure the time it takes. + encodeBegin := time.Now() + msg := &grpc.PreparedMsg{} + if err := msg.Encode(srv, chunks); err != nil { + return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) + } + *encodeDuration += time.Since(encodeBegin) + + // Send the message. + sendBegin := time.Now() + if err := srv.SendMsg(msg); err != nil { + return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) } + *sendDuration += time.Since(sendBegin) return nil } @@ -904,8 +900,8 @@ func (s *BucketStore) streamingSeriesSetForBlocks( chunkReaders *bucketChunkReaders, shardSelector *sharding.ShardSelector, matchers []*labels.Matcher, - chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. - seriesLimiter SeriesLimiter, // Rate limiter for loading series. + chunksLimiter ChunksLimiter, // Rate mockLimiter for loading chunks. + seriesLimiter SeriesLimiter, // Rate mockLimiter for loading series. stats *safeQueryStats, reusePostings [][]storage.SeriesRef, // Used if not empty. reusePendingMatchers [][]*labels.Matcher, // Used if not empty. diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index 80b4424d338..6f9236cf86c 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -210,7 +210,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest } chksBatch := res.GetStreamingSeriesChunks() - for _, chks := range chksBatch.Chunks { + for _, chks := range chksBatch.Series { idx++ if chksBatch == nil { err = errors.Errorf("expected streaming chunks, got something else") diff --git a/pkg/querier/blocks_store_querable_streaming.go b/pkg/storegateway/bucket_streaming_readers.go similarity index 84% rename from pkg/querier/blocks_store_querable_streaming.go rename to pkg/storegateway/bucket_streaming_readers.go index a86d798342a..2073c1d8275 100644 --- a/pkg/querier/blocks_store_querable_streaming.go +++ b/pkg/storegateway/bucket_streaming_readers.go @@ -1,6 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-only -package querier +package storegateway import ( "fmt" @@ -10,7 +10,6 @@ import ( "github.com/go-kit/log/level" "github.com/pkg/errors" - "github.com/grafana/mimir/pkg/mimirpb" "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storegateway/storegatewaypb" "github.com/grafana/mimir/pkg/storegateway/storepb" @@ -18,19 +17,7 @@ import ( "github.com/grafana/mimir/pkg/util/validation" ) -// StreamingSeries holds the labels of the streaming series and the source to get the chunks -// for the series. -type StreamingSeries struct { - Labels []mimirpb.LabelAdapter - Source StreamingSeriesSource -} - -// StreamingSeriesSource holds the relationship between a stream of chunks from a SeriesChunksStreamReader -// and the expected position of a series' chunks in that stream. -type StreamingSeriesSource struct { - StreamReader *SeriesChunksStreamReader - SeriesIndex uint64 -} +// The code in this file is used by the queriers to read the streaming chunks from the storegateway. // SeriesChunksStreamReader is responsible for managing the streaming of chunks from a storegateway and buffering // chunks in memory until they are consumed by the PromQL engine. @@ -41,9 +28,9 @@ type SeriesChunksStreamReader struct { stats *stats.Stats log log.Logger - seriesCunksChan chan *storepb.StreamSeriesChunksBatch - chunksBatch []*storepb.StreamSeriesChunks - errorChan chan error + seriesChunksChan chan *storepb.StreamSeriesChunksBatch + chunksBatch []*storepb.StreamSeriesChunks + errorChan chan error } func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *SeriesChunksStreamReader { @@ -70,7 +57,7 @@ func (s *SeriesChunksStreamReader) Close() { // If an error occurs while streaming, a subsequent call to GetChunks will return an error. // To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. func (s *SeriesChunksStreamReader) StartBuffering() { - s.seriesCunksChan = make(chan *storepb.StreamSeriesChunksBatch, 10) + s.seriesChunksChan = make(chan *storepb.StreamSeriesChunksBatch, 2) // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. s.errorChan = make(chan error, 1) @@ -82,7 +69,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) } - close(s.seriesCunksChan) + close(s.seriesChunksChan) close(s.errorChan) }() @@ -114,13 +101,13 @@ func (s *SeriesChunksStreamReader) StartBuffering() { return } - if err := s.queryLimiter.AddChunks(len(c.Chunks)); err != nil { + if err := s.queryLimiter.AddChunks(len(c.Series)); err != nil { s.errorChan <- validation.LimitError(err.Error()) return } chunkBytes := 0 - for _, ch := range c.Chunks { + for _, ch := range c.Series { chunkBytes += ch.Size() } if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { @@ -128,7 +115,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { return } - s.stats.AddFetchedChunks(uint64(len(c.Chunks))) + s.stats.AddFetchedChunks(uint64(len(c.Series))) s.stats.AddFetchedChunkBytes(uint64(chunkBytes)) select { @@ -142,7 +129,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { // which is true at the time of writing. s.errorChan <- s.client.Context().Err() return - case s.seriesCunksChan <- c: + case s.seriesChunksChan <- c: // Batch enqueued successfully, nothing else to do for this batch. } } @@ -153,9 +140,9 @@ func (s *SeriesChunksStreamReader) StartBuffering() { // This method must be called with monotonically increasing values of seriesIndex. func (s *SeriesChunksStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { if len(s.chunksBatch) == 0 { - chks, haveChunks := <-s.seriesCunksChan + chks, channelOpen := <-s.seriesChunksChan - if !haveChunks { + if !channelOpen { // If there's an error, report it. select { case err, haveError := <-s.errorChan: @@ -171,7 +158,7 @@ func (s *SeriesChunksStreamReader) GetChunks(seriesIndex uint64) ([]storepb.Aggr return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has already been exhausted", seriesIndex) } - s.chunksBatch = chks.Chunks + s.chunksBatch = chks.Series } chks := s.chunksBatch[0] diff --git a/pkg/storegateway/limiter.go b/pkg/storegateway/limiter.go index ad01ac19759..86388ecc389 100644 --- a/pkg/storegateway/limiter.go +++ b/pkg/storegateway/limiter.go @@ -15,14 +15,14 @@ import ( ) type ChunksLimiter interface { - // Reserve num chunks out of the total number of chunks enforced by the limiter. + // Reserve num chunks out of the total number of chunks enforced by the mockLimiter. // Returns an error if the limit has been exceeded. This function must be // goroutine safe. Reserve(num uint64) error } type SeriesLimiter interface { - // Reserve num series out of the total number of series enforced by the limiter. + // Reserve num series out of the total number of series enforced by the mockLimiter. // Returns an error if the limit has been exceeded. This function must be // goroutine safe. Reserve(num uint64) error @@ -45,7 +45,7 @@ type Limiter struct { failedOnce sync.Once } -// NewLimiter returns a new limiter with a specified limit. 0 disables the limit. +// NewLimiter returns a new mockLimiter with a specified limit. 0 disables the limit. func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter { return &Limiter{limit: limit, failedCounter: ctr} } diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 48726540a0d..e5962711e46 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1083,8 +1083,8 @@ func TestLimitingSeriesChunkRefsSetIterator(t *testing.T) { t.Run(testName, func(t *testing.T) { iterator := newLimitingSeriesChunkRefsSetIterator( newSliceSeriesChunkRefsSetIterator(testCase.upstreamErr, testCase.sets...), - &limiter{limit: testCase.chunksLimit}, - &limiter{limit: testCase.seriesLimit}, + &mockLimiter{limit: testCase.chunksLimit}, + &mockLimiter{limit: testCase.seriesLimit}, ) sets := readAllSeriesChunkRefsSet(iterator) @@ -2604,12 +2604,12 @@ func (s *sliceSeriesChunkRefsSetIterator) Err() error { return nil } -type limiter struct { +type mockLimiter struct { limit int current atomic.Uint64 } -func (l *limiter) Reserve(num uint64) error { +func (l *mockLimiter) Reserve(num uint64) error { if l.current.Add(num) > uint64(l.limit) { return errors.New("test limit exceeded") } diff --git a/pkg/storegateway/storepb/types.pb.go b/pkg/storegateway/storepb/types.pb.go index a7f65a4df9e..42c838093c9 100644 --- a/pkg/storegateway/storepb/types.pb.go +++ b/pkg/storegateway/storepb/types.pb.go @@ -264,7 +264,7 @@ func (m *StreamSeriesChunks) XXX_DiscardUnknown() { var xxx_messageInfo_StreamSeriesChunks proto.InternalMessageInfo type StreamSeriesChunksBatch struct { - Chunks []*StreamSeriesChunks `protobuf:"bytes,1,rep,name=chunks,proto3" json:"chunks,omitempty"` + Series []*StreamSeriesChunks `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` } func (m *StreamSeriesChunksBatch) Reset() { *m = StreamSeriesChunksBatch{} } @@ -392,50 +392,50 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 685 bytes of a gzipped FileDescriptorProto + // 684 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4f, 0x6f, 0x12, 0x41, 0x14, 0xdf, 0x81, 0x05, 0x96, 0xa1, 0xb5, 0xdb, 0x69, 0x63, 0x69, 0x0f, 0x53, 0xdc, 0x13, 0x31, 0xe9, 0xa2, 0xd8, 0x8b, 0x89, 0x97, 0x62, 0x30, 0x95, 0x58, 0xdb, 0x4e, 0x6b, 0x62, 0x8c, 0x09, 0x19, 0x60, 0x58, 0x26, 0x65, 0xff, 0x64, 0x77, 0x50, 0xf0, 0xd4, 0x8f, 0xe0, 0x57, 0xf0, 0xe6, - 0x17, 0x31, 0xe9, 0xcd, 0x1e, 0x1b, 0x0f, 0x8d, 0xd0, 0x8b, 0xc7, 0x7e, 0x04, 0xb3, 0x33, 0x4b, - 0x05, 0x7b, 0xa9, 0x17, 0x4f, 0xcc, 0x7b, 0xef, 0xf7, 0x7e, 0xbf, 0xdf, 0x1b, 0xe6, 0x2d, 0x2c, + 0x17, 0x31, 0xe9, 0xcd, 0x1e, 0x1b, 0x0f, 0x8d, 0xd0, 0x8b, 0xc7, 0x7e, 0x04, 0xb3, 0x33, 0x8b, + 0x42, 0x7b, 0xa9, 0x17, 0x4f, 0xcc, 0x7b, 0xbf, 0xdf, 0x7b, 0xbf, 0xdf, 0x1b, 0xe6, 0x2d, 0x2c, 0x88, 0x51, 0xc0, 0x22, 0x3b, 0x08, 0x7d, 0xe1, 0xa3, 0xac, 0xe8, 0x51, 0xcf, 0x8f, 0x36, 0xb6, - 0x1c, 0x2e, 0x7a, 0x83, 0x96, 0xdd, 0xf6, 0xdd, 0x8a, 0xe3, 0x3b, 0x7e, 0x45, 0x96, 0x5b, 0x83, - 0xae, 0x8c, 0x64, 0x20, 0x4f, 0xaa, 0x6d, 0xe3, 0xd1, 0x2c, 0x3c, 0xa4, 0x5d, 0xea, 0xd1, 0x8a, - 0xcb, 0x5d, 0x1e, 0x56, 0x82, 0x13, 0x47, 0x9d, 0x82, 0x96, 0xfa, 0x55, 0x1d, 0xd6, 0x77, 0x00, - 0x33, 0xcf, 0x7b, 0x03, 0xef, 0x04, 0x3d, 0x84, 0x7a, 0xec, 0xa0, 0x08, 0x4a, 0xa0, 0x7c, 0xaf, - 0x7a, 0xdf, 0x56, 0x0e, 0x6c, 0x59, 0xb4, 0xeb, 0x5e, 0xdb, 0xef, 0x70, 0xcf, 0x21, 0x12, 0x83, - 0x0e, 0xa0, 0xde, 0xa1, 0x82, 0x16, 0x53, 0x25, 0x50, 0x5e, 0xa8, 0x3d, 0x3b, 0xbb, 0xdc, 0xd4, - 0x7e, 0x5c, 0x6e, 0x6e, 0xdf, 0x45, 0xdd, 0x7e, 0xe3, 0x45, 0xb4, 0xcb, 0x6a, 0x23, 0xc1, 0x8e, - 0xfa, 0xbc, 0xcd, 0x88, 0x64, 0xb2, 0x76, 0xa1, 0x31, 0xd5, 0x40, 0x8b, 0x30, 0x2f, 0x55, 0x9b, - 0x6f, 0xf7, 0x89, 0xa9, 0xa1, 0x15, 0xb8, 0xa4, 0xc2, 0x5d, 0x1e, 0x09, 0xdf, 0x09, 0xa9, 0x6b, - 0x02, 0x54, 0x84, 0xab, 0x2a, 0xf9, 0xa2, 0xef, 0x53, 0xf1, 0xa7, 0x92, 0xb2, 0xbe, 0x00, 0x98, - 0x3d, 0x62, 0x21, 0x67, 0x11, 0xea, 0xc2, 0x6c, 0x9f, 0xb6, 0x58, 0x3f, 0x2a, 0x82, 0x52, 0xba, - 0x5c, 0xa8, 0xae, 0xd8, 0x6d, 0x3f, 0x14, 0x6c, 0x18, 0xb4, 0xec, 0x57, 0x71, 0xfe, 0x80, 0xf2, - 0xb0, 0xf6, 0x34, 0x71, 0xff, 0xf8, 0x4e, 0xee, 0x65, 0xdf, 0x4e, 0x87, 0x06, 0x82, 0x85, 0x24, - 0x61, 0x47, 0x15, 0x98, 0x6d, 0xc7, 0x66, 0xa2, 0x62, 0x4a, 0xea, 0x2c, 0x4f, 0x2f, 0x6f, 0xc7, - 0x71, 0x42, 0x69, 0xb3, 0xa6, 0xc7, 0x2a, 0x24, 0x81, 0x59, 0x23, 0xb8, 0x74, 0x24, 0x42, 0x46, - 0x5d, 0xee, 0x39, 0xff, 0xd7, 0xab, 0xf5, 0x09, 0x2e, 0x2b, 0x69, 0xa5, 0x5b, 0xa3, 0xa2, 0xdd, - 0x8b, 0x07, 0x88, 0x64, 0x98, 0x88, 0xaf, 0x4d, 0x07, 0xf8, 0xcb, 0x25, 0x49, 0x60, 0x68, 0x1b, - 0xae, 0xf1, 0xa8, 0xc9, 0xbc, 0x4e, 0xd3, 0xef, 0x36, 0x55, 0xae, 0x19, 0x49, 0xac, 0x7c, 0x13, - 0x06, 0x59, 0xe1, 0x51, 0xdd, 0xeb, 0xec, 0x77, 0x55, 0x9f, 0xa2, 0xb1, 0x7a, 0x10, 0xcd, 0x6a, - 0xcb, 0x9b, 0x89, 0xd0, 0x03, 0xb8, 0x90, 0x30, 0x70, 0xaf, 0xc3, 0x86, 0xf2, 0x01, 0xea, 0xa4, + 0x1c, 0x2e, 0x7a, 0x83, 0x96, 0xdd, 0xf6, 0xdd, 0x8a, 0xe3, 0x3b, 0x7e, 0x45, 0xc2, 0xad, 0x41, + 0x57, 0x46, 0x32, 0x90, 0x27, 0x55, 0xb6, 0xf1, 0x68, 0x96, 0x1e, 0xd2, 0x2e, 0xf5, 0x68, 0xc5, + 0xe5, 0x2e, 0x0f, 0x2b, 0xc1, 0x89, 0xa3, 0x4e, 0x41, 0x4b, 0xfd, 0xaa, 0x0a, 0xeb, 0x3b, 0x80, + 0x99, 0xe7, 0xbd, 0x81, 0x77, 0x82, 0x1e, 0x42, 0x3d, 0x76, 0x50, 0x04, 0x25, 0x50, 0xbe, 0x57, + 0xbd, 0x6f, 0x2b, 0x07, 0xb6, 0x04, 0xed, 0xba, 0xd7, 0xf6, 0x3b, 0xdc, 0x73, 0x88, 0xe4, 0xa0, + 0x03, 0xa8, 0x77, 0xa8, 0xa0, 0xc5, 0x54, 0x09, 0x94, 0x17, 0x6a, 0xcf, 0xce, 0x2e, 0x37, 0xb5, + 0x1f, 0x97, 0x9b, 0xdb, 0x77, 0x51, 0xb7, 0xdf, 0x78, 0x11, 0xed, 0xb2, 0xda, 0x48, 0xb0, 0xa3, + 0x3e, 0x6f, 0x33, 0x22, 0x3b, 0x59, 0xbb, 0xd0, 0x98, 0x6a, 0xa0, 0x45, 0x98, 0x97, 0xaa, 0xcd, + 0xb7, 0xfb, 0xc4, 0xd4, 0xd0, 0x0a, 0x5c, 0x52, 0xe1, 0x2e, 0x8f, 0x84, 0xef, 0x84, 0xd4, 0x35, + 0x01, 0x2a, 0xc2, 0x55, 0x95, 0x7c, 0xd1, 0xf7, 0xa9, 0xf8, 0x8b, 0xa4, 0xac, 0x2f, 0x00, 0x66, + 0x8f, 0x58, 0xc8, 0x59, 0x84, 0xba, 0x30, 0xdb, 0xa7, 0x2d, 0xd6, 0x8f, 0x8a, 0xa0, 0x94, 0x2e, + 0x17, 0xaa, 0x2b, 0x76, 0xdb, 0x0f, 0x05, 0x1b, 0x06, 0x2d, 0xfb, 0x55, 0x9c, 0x3f, 0xa0, 0x3c, + 0xac, 0x3d, 0x4d, 0xdc, 0x3f, 0xbe, 0x93, 0x7b, 0x59, 0xb7, 0xd3, 0xa1, 0x81, 0x60, 0x21, 0x49, + 0xba, 0xa3, 0x0a, 0xcc, 0xb6, 0x63, 0x33, 0x51, 0x31, 0x25, 0x75, 0x96, 0xa7, 0x97, 0xb7, 0xe3, + 0x38, 0xa1, 0xb4, 0x59, 0xd3, 0x63, 0x15, 0x92, 0xd0, 0xac, 0x11, 0x5c, 0x3a, 0x12, 0x21, 0xa3, + 0x2e, 0xf7, 0x9c, 0xff, 0xeb, 0xd5, 0xfa, 0x04, 0x97, 0x95, 0xb4, 0xd2, 0xad, 0x51, 0xd1, 0xee, + 0xc5, 0x03, 0x44, 0x32, 0x4c, 0xc4, 0xd7, 0xa6, 0x03, 0xdc, 0x70, 0x49, 0x12, 0x1a, 0xda, 0x86, + 0x6b, 0x3c, 0x6a, 0x32, 0xaf, 0xd3, 0xf4, 0xbb, 0x4d, 0x95, 0x6b, 0x46, 0x92, 0x2b, 0xdf, 0x84, + 0x41, 0x56, 0x78, 0x54, 0xf7, 0x3a, 0xfb, 0x5d, 0x55, 0xa7, 0xda, 0x58, 0x3d, 0x88, 0x66, 0xb5, + 0xe5, 0xcd, 0x44, 0xe8, 0x01, 0x5c, 0x48, 0x3a, 0x70, 0xaf, 0xc3, 0x86, 0xf2, 0x01, 0xea, 0xa4, 0xa0, 0x72, 0x2f, 0xe3, 0xd4, 0xbf, 0x5f, 0xf0, 0x1e, 0x5c, 0xbb, 0xad, 0xa4, 0x66, 0xad, 0xde, - 0x70, 0xa9, 0x59, 0x37, 0xe6, 0x67, 0x9d, 0x6d, 0xb8, 0xa1, 0x3b, 0x05, 0x30, 0x7f, 0x23, 0x85, - 0xd6, 0xa1, 0xe1, 0x72, 0xaf, 0x29, 0xb8, 0xab, 0xb6, 0x25, 0x4d, 0x72, 0x2e, 0xf7, 0x8e, 0xb9, - 0xcb, 0x64, 0x89, 0x0e, 0x55, 0x29, 0x95, 0x94, 0xe8, 0x50, 0x96, 0x36, 0x61, 0x3a, 0xa4, 0x1f, - 0x8b, 0xe9, 0x12, 0x28, 0x17, 0xaa, 0x8b, 0x73, 0xeb, 0x45, 0xe2, 0x4a, 0x43, 0x37, 0x74, 0x33, - 0xd3, 0xd0, 0x8d, 0x8c, 0x99, 0x6d, 0xe8, 0x46, 0xd6, 0xcc, 0x35, 0x74, 0x23, 0x67, 0x1a, 0x0d, - 0xdd, 0x30, 0xcc, 0xbc, 0xf5, 0x0d, 0xc0, 0x05, 0xf9, 0x87, 0xee, 0xc5, 0x53, 0xb0, 0x10, 0x6d, - 0xcd, 0xed, 0xeb, 0xfa, 0x94, 0x70, 0x16, 0x63, 0x1f, 0x8f, 0x02, 0x96, 0xac, 0x2c, 0x82, 0xba, - 0x47, 0x13, 0x57, 0x79, 0x22, 0xcf, 0x68, 0x15, 0x66, 0x3e, 0xd0, 0xfe, 0x80, 0x49, 0x53, 0x79, - 0xa2, 0x02, 0xeb, 0x3d, 0xd4, 0xe3, 0xbe, 0x78, 0xef, 0x66, 0xc9, 0x9a, 0xf5, 0x43, 0x53, 0x43, - 0xab, 0xd0, 0x9c, 0x4b, 0xbe, 0xae, 0x1f, 0x9a, 0xe0, 0x16, 0x94, 0xd4, 0xcd, 0xd4, 0x6d, 0x28, - 0xa9, 0x9b, 0xe9, 0xda, 0xce, 0xd9, 0x18, 0x6b, 0xe7, 0x63, 0xac, 0x5d, 0x8c, 0xb1, 0x76, 0x3d, - 0xc6, 0xe0, 0x74, 0x82, 0xc1, 0xd7, 0x09, 0x06, 0x67, 0x13, 0x0c, 0xce, 0x27, 0x18, 0xfc, 0x9c, - 0x60, 0xf0, 0x6b, 0x82, 0xb5, 0xeb, 0x09, 0x06, 0x9f, 0xaf, 0xb0, 0x76, 0x7e, 0x85, 0xb5, 0x8b, - 0x2b, 0xac, 0xbd, 0xcb, 0x45, 0xc2, 0x0f, 0x59, 0xd0, 0x6a, 0x65, 0xe5, 0xa7, 0xeb, 0xc9, 0xef, - 0x00, 0x00, 0x00, 0xff, 0xff, 0xc0, 0xdd, 0x36, 0x74, 0x32, 0x05, 0x00, 0x00, + 0x98, 0x75, 0x63, 0x7e, 0xd6, 0xd9, 0x82, 0xe9, 0xb8, 0xd6, 0x29, 0x80, 0xf9, 0x3f, 0x52, 0x68, + 0x1d, 0x1a, 0x2e, 0xf7, 0x9a, 0x82, 0xbb, 0x6a, 0x5b, 0xd2, 0x24, 0xe7, 0x72, 0xef, 0x98, 0xbb, + 0x4c, 0x42, 0x74, 0xa8, 0xa0, 0x54, 0x02, 0xd1, 0xa1, 0x84, 0x36, 0x61, 0x3a, 0xa4, 0x1f, 0x8b, + 0xe9, 0x12, 0x28, 0x17, 0xaa, 0x8b, 0x73, 0xeb, 0x45, 0x62, 0xa4, 0xa1, 0x1b, 0xba, 0x99, 0x69, + 0xe8, 0x46, 0xc6, 0xcc, 0x36, 0x74, 0x23, 0x6b, 0xe6, 0x1a, 0xba, 0x91, 0x33, 0x8d, 0x86, 0x6e, + 0x18, 0x66, 0xde, 0xfa, 0x06, 0xe0, 0x82, 0xfc, 0x43, 0xf7, 0xe2, 0x29, 0x58, 0x88, 0xb6, 0xe6, + 0xf6, 0x75, 0x7d, 0xda, 0x70, 0x96, 0x63, 0x1f, 0x8f, 0x02, 0x96, 0xac, 0x2c, 0x82, 0xba, 0x47, + 0x13, 0x57, 0x79, 0x22, 0xcf, 0x68, 0x15, 0x66, 0x3e, 0xd0, 0xfe, 0x80, 0x49, 0x53, 0x79, 0xa2, + 0x02, 0xeb, 0x3d, 0xd4, 0xe3, 0xba, 0x78, 0xef, 0x66, 0x9b, 0x35, 0xeb, 0x87, 0xa6, 0x86, 0x56, + 0xa1, 0x39, 0x97, 0x7c, 0x5d, 0x3f, 0x34, 0xc1, 0x2d, 0x2a, 0xa9, 0x9b, 0xa9, 0xdb, 0x54, 0x52, + 0x37, 0xd3, 0xb5, 0x9d, 0xb3, 0x31, 0xd6, 0xce, 0xc7, 0x58, 0xbb, 0x18, 0x63, 0xed, 0x7a, 0x8c, + 0xc1, 0xe9, 0x04, 0x83, 0xaf, 0x13, 0x0c, 0xce, 0x26, 0x18, 0x9c, 0x4f, 0x30, 0xf8, 0x39, 0xc1, + 0xe0, 0xd7, 0x04, 0x6b, 0xd7, 0x13, 0x0c, 0x3e, 0x5f, 0x61, 0xed, 0xfc, 0x0a, 0x6b, 0x17, 0x57, + 0x58, 0x7b, 0x97, 0x8b, 0x84, 0x1f, 0xb2, 0xa0, 0xd5, 0xca, 0xca, 0x4f, 0xd7, 0x93, 0xdf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xf2, 0x10, 0x58, 0x47, 0x32, 0x05, 0x00, 0x00, } func (x Chunk_Encoding) String() string { @@ -628,11 +628,11 @@ func (this *StreamSeriesChunksBatch) Equal(that interface{}) bool { } else if this == nil { return false } - if len(this.Chunks) != len(that1.Chunks) { + if len(this.Series) != len(that1.Series) { return false } - for i := range this.Chunks { - if !this.Chunks[i].Equal(that1.Chunks[i]) { + for i := range this.Series { + if !this.Series[i].Equal(that1.Series[i]) { return false } } @@ -772,8 +772,8 @@ func (this *StreamSeriesChunksBatch) GoString() string { } s := make([]string, 0, 5) s = append(s, "&storepb.StreamSeriesChunksBatch{") - if this.Chunks != nil { - s = append(s, "Chunks: "+fmt.Sprintf("%#v", this.Chunks)+",\n") + if this.Series != nil { + s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") } s = append(s, "}") return strings.Join(s, "") @@ -1047,10 +1047,10 @@ func (m *StreamSeriesChunksBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) _ = i var l int _ = l - if len(m.Chunks) > 0 { - for iNdEx := len(m.Chunks) - 1; iNdEx >= 0; iNdEx-- { + if len(m.Series) > 0 { + for iNdEx := len(m.Series) - 1; iNdEx >= 0; iNdEx-- { { - size, err := m.Chunks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + size, err := m.Series[iNdEx].MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1254,8 +1254,8 @@ func (m *StreamSeriesChunksBatch) Size() (n int) { } var l int _ = l - if len(m.Chunks) > 0 { - for _, e := range m.Chunks { + if len(m.Series) > 0 { + for _, e := range m.Series { l = e.Size() n += 1 + l + sovTypes(uint64(l)) } @@ -1381,13 +1381,13 @@ func (this *StreamSeriesChunksBatch) String() string { if this == nil { return "nil" } - repeatedStringForChunks := "[]*StreamSeriesChunks{" - for _, f := range this.Chunks { - repeatedStringForChunks += strings.Replace(f.String(), "StreamSeriesChunks", "StreamSeriesChunks", 1) + "," + repeatedStringForSeries := "[]*StreamSeriesChunks{" + for _, f := range this.Series { + repeatedStringForSeries += strings.Replace(f.String(), "StreamSeriesChunks", "StreamSeriesChunks", 1) + "," } - repeatedStringForChunks += "}" + repeatedStringForSeries += "}" s := strings.Join([]string{`&StreamSeriesChunksBatch{`, - `Chunks:` + repeatedStringForChunks + `,`, + `Series:` + repeatedStringForSeries + `,`, `}`, }, "") return s @@ -1981,7 +1981,7 @@ func (m *StreamSeriesChunksBatch) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2008,8 +2008,8 @@ func (m *StreamSeriesChunksBatch) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Chunks = append(m.Chunks, &StreamSeriesChunks{}) - if err := m.Chunks[len(m.Chunks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Series = append(m.Series, &StreamSeriesChunks{}) + if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index 7f8c1ba3d69..cd789bebbfe 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -52,7 +52,7 @@ message StreamSeriesChunks { } message StreamSeriesChunksBatch { - repeated StreamSeriesChunks chunks = 1; + repeated StreamSeriesChunks series = 1; } From 21de9389cfbc45b2de1463ad9c15fd18c9d3e861 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 14 Jun 2023 16:51:55 +0530 Subject: [PATCH 26/75] Fix comments, fix stats, extend tests Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 3 +- pkg/querier/blocks_store_queryable_test.go | 57 +++++++++++++++++--- pkg/storegateway/bucket_streaming_readers.go | 10 ++-- 3 files changed, 58 insertions(+), 12 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index d1c432f06f8..8db1370d23d 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -718,7 +718,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return errors.Wrapf(err, "failed to create series request") } - stream, err := c.Series(gCtx, req) + stream, err := c.Series(q.ctx, req) if err != nil { if shouldStopQueryFunc(err) { return err @@ -830,6 +830,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor "requested blocks", strings.Join(convertULIDsToString(blockIDs), " "), "queried blocks", strings.Join(convertULIDsToString(myQueriedBlocks), " ")) } else if len(myStreamingSeries) > 0 { + // FetchedChunks and FetchedChunkBytes are added by the SeriesChunksStreamReader. reqStats.AddFetchedSeries(uint64(len(myStreamingSeries))) streamReader = storegateway.NewSeriesChunksStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) level.Debug(spanLog).Log("msg", "received streaming series from store-gateway", diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 063bd15443a..f3d96da0a22 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc/metadata" "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storage/sharding" "github.com/grafana/mimir/pkg/storage/tsdb/bucketindex" "github.com/grafana/mimir/pkg/storegateway/hintspb" @@ -135,6 +136,35 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, }, }, + "a single store-gateway instance holds the required blocks (single returned series) - multiple chunks per series for stats": { + finderResult: bucketindex.Blocks{ + {ID: block1}, + {ID: block2}, + }, + storeSetResponses: []interface{}{ + map[BlocksStoreClient][]ulid.ULID{ + &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ + mockSeriesResponseWithChunks(metricNameLabel, + createAggrChunkWithSamples(promql.FPoint{minT, 1}), + createAggrChunkWithSamples(promql.FPoint{minT + 1, 2}), + ), + mockHintsResponse(block1, block2), + mockStatsResponse(50), + }}: {block1, block2}, + }, + }, + limits: &blocksStoreLimitsMock{}, + queryLimiter: noOpQueryLimiter, + expectedSeries: []seriesResult{ + { + lbls: metricNameLabel, + values: []valueResult{ + {t: minT, v: 1}, + {t: minT + 1, v: 2}, + }, + }, + }, + }, "a single store-gateway instance holds the required blocks (multiple returned series)": { finderResult: bucketindex.Blocks{ {ID: block1}, @@ -768,16 +798,23 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { // the below code changes the testData in-place. for _, streaming := range []bool{false, true} { t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { - ctx := limiter.AddQueryLimiterToContext(context.Background(), testData.queryLimiter) reg := prometheus.NewPedanticRegistry() - if streaming { - // Convert the storegateway response to streaming response. - for _, res := range testData.storeSetResponses { - m, ok := res.(map[BlocksStoreClient][]ulid.ULID) - if ok { - for k := range m { - mockClient := k.(*storeGatewayClientMock) + // Count the number of series to check the stats later. + seriesCount, chunksCount := 0, 0 + for _, res := range testData.storeSetResponses { + m, ok := res.(map[BlocksStoreClient][]ulid.ULID) + if ok { + for k := range m { + mockClient := k.(*storeGatewayClientMock) + for _, sr := range mockClient.mockedSeriesResponses { + if s := sr.GetSeries(); s != nil { + seriesCount++ + chunksCount += len(s.Chunks) + } + } + if streaming { + // Convert the storegateway response to streaming response. mockClient.mockedSeriesResponses = generateStreamingResponses(mockClient.mockedSeriesResponses) } } @@ -788,6 +825,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { finder := &blocksFinderMock{} finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) + ctx := limiter.AddQueryLimiterToContext(context.Background(), testData.queryLimiter) + st, ctx := stats.ContextWithEmptyStats(ctx) q := &blocksStoreQuerier{ ctx: ctx, minT: minT, @@ -864,6 +903,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { } require.NoError(t, set.Err()) assert.Equal(t, testData.expectedSeries, actualSeries) + assert.Equal(t, seriesCount, int(st.FetchedSeriesCount)) + assert.Equal(t, chunksCount, int(st.FetchedChunksCount)) // Assert on metrics (optional, only for test cases defining it). if testData.expectedMetrics != "" { diff --git a/pkg/storegateway/bucket_streaming_readers.go b/pkg/storegateway/bucket_streaming_readers.go index 2073c1d8275..25686b6f799 100644 --- a/pkg/storegateway/bucket_streaming_readers.go +++ b/pkg/storegateway/bucket_streaming_readers.go @@ -107,15 +107,19 @@ func (s *SeriesChunksStreamReader) StartBuffering() { } chunkBytes := 0 - for _, ch := range c.Series { - chunkBytes += ch.Size() + numChunks := 0 + for _, s := range c.Series { + numChunks += len(s.Chunks) + for _, ch := range s.Chunks { + chunkBytes += ch.Size() + } } if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { s.errorChan <- validation.LimitError(err.Error()) return } - s.stats.AddFetchedChunks(uint64(len(c.Series))) + s.stats.AddFetchedChunks(uint64(numChunks)) s.stats.AddFetchedChunkBytes(uint64(chunkBytes)) select { From e22c08b40b2660eb7994781f6388b8ba24d2ec95 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 14 Jun 2023 17:08:48 +0530 Subject: [PATCH 27/75] Improve tests to run streaming and non-streaming in any order Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable_test.go | 56 ++++++++++++---------- 1 file changed, 31 insertions(+), 25 deletions(-) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index f3d96da0a22..467d0b798ba 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -794,34 +794,42 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - // Non-streaming should be tested first because in the streaming case, - // the below code changes the testData in-place. - for _, streaming := range []bool{false, true} { + for _, streaming := range []bool{true, false} { t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { reg := prometheus.NewPedanticRegistry() // Count the number of series to check the stats later. + // We also make a copy of the testData.storeSetResponses where relevant so that + // we can run the streaming and non-streaming case in any order. + var storeSetResponses []interface{} seriesCount, chunksCount := 0, 0 for _, res := range testData.storeSetResponses { m, ok := res.(map[BlocksStoreClient][]ulid.ULID) - if ok { - for k := range m { - mockClient := k.(*storeGatewayClientMock) - for _, sr := range mockClient.mockedSeriesResponses { - if s := sr.GetSeries(); s != nil { - seriesCount++ - chunksCount += len(s.Chunks) - } - } - if streaming { - // Convert the storegateway response to streaming response. - mockClient.mockedSeriesResponses = generateStreamingResponses(mockClient.mockedSeriesResponses) + if !ok { + storeSetResponses = append(storeSetResponses, res) + continue + } + newMap := make(map[BlocksStoreClient][]ulid.ULID, len(m)) + for k, v := range m { + mockClient := k.(*storeGatewayClientMock) + for _, sr := range mockClient.mockedSeriesResponses { + if s := sr.GetSeries(); s != nil { + seriesCount++ + chunksCount += len(s.Chunks) } } + + shallowCopy := *mockClient + if streaming { + // Convert the storegateway response to streaming response. + shallowCopy.mockedSeriesResponses = generateStreamingResponses(shallowCopy.mockedSeriesResponses) + } + newMap[&shallowCopy] = v } + storeSetResponses = append(storeSetResponses, newMap) } - stores := &blocksStoreSetMock{mockedResponses: testData.storeSetResponses} + stores := &blocksStoreSetMock{mockedResponses: storeSetResponses} finder := &blocksFinderMock{} finder.On("GetBlocks", mock.Anything, "user-1", minT, maxT).Return(testData.finderResult, map[ulid.ULID]*bucketindex.BlockDeletionMark(nil), testData.finderErr) @@ -919,7 +927,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { } func generateStreamingResponses(seriesResponses []*storepb.SeriesResponse) []*storepb.SeriesResponse { - var series, chunks, others []*storepb.SeriesResponse + var series, chunks, others, final []*storepb.SeriesResponse for i, mr := range seriesResponses { s := mr.GetSeries() if s != nil { @@ -931,12 +939,12 @@ func generateStreamingResponses(seriesResponses []*storepb.SeriesResponse) []*st break } - seriesResponses = append(seriesResponses[:0], series...) - seriesResponses = append(seriesResponses, others...) + final = append(final, series...) + final = append(final, others...) // End of stream response goes after the hints and stats. - seriesResponses = append(seriesResponses, mockStreamingSeriesBatchResponse(true)) - seriesResponses = append(seriesResponses, chunks...) - return seriesResponses + final = append(final, mockStreamingSeriesBatchResponse(true)) + final = append(final, chunks...) + return final } func TestBlocksStoreQuerier_Select_cancelledContext(t *testing.T) { @@ -1804,9 +1812,7 @@ func TestBlocksStoreQuerier_PromQLExecution(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - // Non-streaming should be tested first because in the streaming case, - // the below code changes the testData in-place. - for _, streaming := range []bool{false, true} { + for _, streaming := range []bool{true, false} { t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { block1 := ulid.MustNew(1, nil) block2 := ulid.MustNew(2, nil) From 50979909bd3d4e7eea2108b1150a21e940d70a36 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 14 Jun 2023 17:25:47 +0530 Subject: [PATCH 28/75] Integration test Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 15 ++++++++++++--- pkg/querier/blocks_store_queryable.go | 2 +- pkg/querier/blocks_store_queryable_test.go | 4 ++-- 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index e447b45a657..4ad27869722 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -28,14 +28,22 @@ import ( ) func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { - testQuerierWithBlocksStorageRunningInMicroservicesMode(t, generateFloatSeries) + for _, streamingEnabled := range []bool{true, false} { + t.Run(fmt.Sprintf("streaming=%t", streamingEnabled), func(t *testing.T) { + testQuerierWithBlocksStorageRunningInMicroservicesMode(t, streamingEnabled, generateFloatSeries) + }) + } } func TestQuerierWithBlocksStorageRunningInMicroservicesModeWithHistograms(t *testing.T) { - testQuerierWithBlocksStorageRunningInMicroservicesMode(t, generateHistogramSeries) + for _, streamingEnabled := range []bool{true, false} { + t.Run(fmt.Sprintf("streaming=%t", streamingEnabled), func(t *testing.T) { + testQuerierWithBlocksStorageRunningInMicroservicesMode(t, streamingEnabled, generateHistogramSeries) + }) + } } -func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, seriesGenerator func(name string, ts time.Time, additionalLabels ...prompb.Label) (series []prompb.TimeSeries, vector model.Vector, matrix model.Matrix)) { +func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, streamingEnabled bool, seriesGenerator func(name string, ts time.Time, additionalLabels ...prompb.Label) (series []prompb.TimeSeries, vector model.Vector, matrix model.Matrix)) { tests := map[string]struct { tenantShardSize int indexCacheBackend string @@ -162,6 +170,7 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, series "-store-gateway.tenant-shard-size": fmt.Sprintf("%d", testCfg.tenantShardSize), "-query-frontend.query-stats-enabled": "true", "-query-frontend.parallelize-shardable-queries": strconv.FormatBool(testCfg.queryShardingEnabled), + "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), }) // Start store-gateways. diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 8db1370d23d..22442f597dc 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -718,7 +718,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return errors.Wrapf(err, "failed to create series request") } - stream, err := c.Series(q.ctx, req) + stream, err := c.Series(gCtx, req) if err != nil { if shouldStopQueryFunc(err) { return err diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 467d0b798ba..ac25b305660 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -145,8 +145,8 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { map[BlocksStoreClient][]ulid.ULID{ &storeGatewayClientMock{remoteAddr: "1.1.1.1", mockedSeriesResponses: []*storepb.SeriesResponse{ mockSeriesResponseWithChunks(metricNameLabel, - createAggrChunkWithSamples(promql.FPoint{minT, 1}), - createAggrChunkWithSamples(promql.FPoint{minT + 1, 2}), + createAggrChunkWithSamples(promql.FPoint{T: minT, F: 1}), + createAggrChunkWithSamples(promql.FPoint{T: minT + 1, F: 2}), ), mockHintsResponse(block1, block2), mockStatsResponse(50), From d5d13beb76551505a70728d03b4413c3977715ef Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 13:53:57 +0530 Subject: [PATCH 29/75] Fix race Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index f0793802b88..e942545f2b7 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -801,8 +801,20 @@ func (s *BucketStore) sendSeriesChunks( // been sent above. chunksBatch.Series = chunksBatch.Series[:len(chunksBatch.Series)+1] last := chunksBatch.Series[len(chunksBatch.Series)-1] - last.Chunks = chks + last.Chunks = append(last.Chunks[:0], chks...) last.SeriesIndex = uint64(seriesCount - 1) + // Copy the chunk bytes to avoid race. + for i := range last.Chunks { + raw := last.Chunks[i].Raw + if raw == nil { + continue + } + newChk := storepb.Chunk{ + Type: raw.Type, + Data: append(make([]byte, 0, len(raw.Data)), raw.Data...), + } + last.Chunks[i].Raw = &newChk + } batchSizeBytes += last.Size() if (batchSizeBytes > 0 && batchSizeBytes > queryStreamBatchMessageSize) || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { From 2b2ce57fe8f491c591713e7b6dd7355fd7922646 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 14:57:54 +0530 Subject: [PATCH 30/75] Integration test attempt 0 Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 22442f597dc..8db1370d23d 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -718,7 +718,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return errors.Wrapf(err, "failed to create series request") } - stream, err := c.Series(gCtx, req) + stream, err := c.Series(q.ctx, req) if err != nil { if shouldStopQueryFunc(err) { return err From ab258b49da4748651fcfe4d5c52d1a9fe38e34df Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 15:16:52 +0530 Subject: [PATCH 31/75] Attempt Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 8db1370d23d..ce5d6826a77 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -718,7 +718,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return errors.Wrapf(err, "failed to create series request") } - stream, err := c.Series(q.ctx, req) + stream, err := c.Series(context.Background(), req) if err != nil { if shouldStopQueryFunc(err) { return err From 656d2a4ef8af5435a14814558c4b111900892573 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 15:26:44 +0530 Subject: [PATCH 32/75] Another attempt Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index ce5d6826a77..0591372de1b 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -688,7 +688,7 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar // considered serious errors. All other errors are not returned, but they give rise to fetch retrials. func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, error) { var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID) + reqCtx = grpc_metadata.AppendToOutgoingContext(q.ctx, storegateway.GrpcContextMetadataTenantID, q.userID) g, gCtx = errgroup.WithContext(reqCtx) mtx = sync.Mutex{} seriesSets = []storage.SeriesSet(nil) @@ -718,7 +718,10 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return errors.Wrapf(err, "failed to create series request") } - stream, err := c.Series(context.Background(), req) + stream, err := c.Series(reqCtx, req) + if err == nil { + err = gCtx.Err() + } if err != nil { if shouldStopQueryFunc(err) { return err From 2ae1e9beb4471d62b6f29e396407b339e43e13b7 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 15:47:18 +0530 Subject: [PATCH 33/75] context.Background() Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 0591372de1b..780df005a3f 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -688,7 +688,7 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar // considered serious errors. All other errors are not returned, but they give rise to fetch retrials. func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, error) { var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(q.ctx, storegateway.GrpcContextMetadataTenantID, q.userID) + reqCtx = grpc_metadata.AppendToOutgoingContext(context.Background(), storegateway.GrpcContextMetadataTenantID, q.userID) g, gCtx = errgroup.WithContext(reqCtx) mtx = sync.Mutex{} seriesSets = []storage.SeriesSet(nil) From 351e514b40a9e976b76853690ed67662a5182e67 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 16:16:26 +0530 Subject: [PATCH 34/75] q.ctx in queryWithConsistencyCheck Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 780df005a3f..45ccc33ca53 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -435,7 +435,7 @@ func (q *blocksStoreQuerier) Close() error { } func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - spanLog, spanCtx := spanlogger.NewWithLogger(q.ctx, q.logger, "blocksStoreQuerier.selectSorted") + spanLog, _ := spanlogger.NewWithLogger(q.ctx, q.logger, "blocksStoreQuerier.selectSorted") defer spanLog.Span.Finish() minT, maxT := sp.Start, sp.End @@ -452,7 +452,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - seriesSets, queriedBlocks, warnings, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, convertedMatchers) + seriesSets, queriedBlocks, warnings, err := q.fetchSeriesFromStores(q.ctx, sp, clients, minT, maxT, convertedMatchers) if err != nil { return nil, err } @@ -463,7 +463,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* return queriedBlocks, nil } - err = q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, shard, queryFunc) + err = q.queryWithConsistencyCheck(q.ctx, spanLog, minT, maxT, shard, queryFunc) if err != nil { return storage.ErrSeriesSet(err) } @@ -688,7 +688,7 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar // considered serious errors. All other errors are not returned, but they give rise to fetch retrials. func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, error) { var ( - reqCtx = grpc_metadata.AppendToOutgoingContext(context.Background(), storegateway.GrpcContextMetadataTenantID, q.userID) + reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID) g, gCtx = errgroup.WithContext(reqCtx) mtx = sync.Mutex{} seriesSets = []storage.SeriesSet(nil) From de46d40ded2dc3067314956ddd7cc4ce60858188 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 15 Jun 2023 16:53:53 +0530 Subject: [PATCH 35/75] rollback some things Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 45ccc33ca53..4ce378b69e9 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -435,7 +435,7 @@ func (q *blocksStoreQuerier) Close() error { } func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { - spanLog, _ := spanlogger.NewWithLogger(q.ctx, q.logger, "blocksStoreQuerier.selectSorted") + spanLog, spanCtx := spanlogger.NewWithLogger(q.ctx, q.logger, "blocksStoreQuerier.selectSorted") defer spanLog.Span.Finish() minT, maxT := sp.Start, sp.End @@ -452,7 +452,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - seriesSets, queriedBlocks, warnings, err := q.fetchSeriesFromStores(q.ctx, sp, clients, minT, maxT, convertedMatchers) + seriesSets, queriedBlocks, warnings, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, convertedMatchers) if err != nil { return nil, err } @@ -463,7 +463,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* return queriedBlocks, nil } - err = q.queryWithConsistencyCheck(q.ctx, spanLog, minT, maxT, shard, queryFunc) + err = q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, shard, queryFunc) if err != nil { return storage.ErrSeriesSet(err) } From 969714b86a7ddbe7a2d18e666afe4a20a51ddaa4 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Fri, 16 Jun 2023 14:41:51 +0530 Subject: [PATCH 36/75] Update integration test metrics with TODO Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 38 ++++++++++++++++++++++--------------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index 4ad27869722..de8980996df 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -249,14 +249,20 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream instantQueriesCount++ // Check the in-memory index cache metrics (in the store-gateway). - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty + // TODO: metrics when streaming enabled. + if !streamingEnabled { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty + } - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2), "thanos_memcached_operations_total")) // one set for each get + // TODO: metrics when streaming enabled. + if !streamingEnabled { + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2), "thanos_memcached_operations_total")) // one set for each get + } } // Query back again the 1st series from storage. This time it should use the index cache. @@ -266,14 +272,16 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream assert.Equal(t, expectedVector1, result.(model.Vector)) expectedFetchedSeries++ // Storage only. - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+2), "thanos_store_index_cache_requests_total")) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache - - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+2), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) + // TODO: metrics when streaming enabled. + if !streamingEnabled { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+2), "thanos_store_index_cache_requests_total")) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+2), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) + } } // Query range. We expect 1 data point with a value of 3 (number of series). From 07d63fe15d547126f5e4a1f7e041092fce867e88 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Sun, 18 Jun 2023 19:27:01 +0530 Subject: [PATCH 37/75] Fix a bunch of comments Signed-off-by: Ganesh Vernekar --- .../configuration-parameters/index.md | 11 + .../helm/charts/mimir-distributed/README.md | 2 +- pkg/old.txt | 0 pkg/querier/blocks_store_queryable.go | 16 +- pkg/querier/blocks_store_queryable_test.go | 8 +- pkg/querier/querier.go | 12 +- pkg/querier/querier_test.go | 2 +- pkg/storegateway/bucket.go | 91 ++++-- pkg/storegateway/bucket_store_server_test.go | 6 +- pkg/storegateway/bucket_streaming_readers.go | 8 +- pkg/storegateway/limiter.go | 6 +- pkg/storegateway/series_refs_test.go | 9 +- pkg/storegateway/storepb/custom.go | 8 +- pkg/storegateway/storepb/rpc.pb.go | 176 ++++++------ pkg/storegateway/storepb/rpc.proto | 9 +- pkg/storegateway/storepb/types.pb.go | 264 +++++++++--------- pkg/storegateway/storepb/types.proto | 10 +- 17 files changed, 349 insertions(+), 289 deletions(-) create mode 100644 pkg/old.txt diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md index 233c146ae4d..7141f5618b2 100644 --- a/docs/sources/mimir/references/configuration-parameters/index.md +++ b/docs/sources/mimir/references/configuration-parameters/index.md @@ -1070,11 +1070,22 @@ store_gateway_client: # CLI flag: -querier.prefer-streaming-chunks [prefer_streaming_chunks: | default = false] +# (experimental) Request storegateway stream chunks. Storegateway will only +# respond with a stream of chunks if the target storegateway supports this, and +# this preference will be ignored by storegateway that do not support this. +# CLI flag: -querier.prefer-streaming-chunks-storegateway +[prefer_streaming_chunks_storegateway: | default = false] + # (experimental) Number of series to buffer per ingester when streaming chunks # from ingesters. # CLI flag: -querier.streaming-chunks-per-ingester-buffer-size [streaming_chunks_per_ingester_series_buffer_size: | default = 256] +# (experimental) Number of series to buffer per storegateway when streaming +# chunks from storegateway. +# CLI flag: -querier.streaming-chunks-per-ingester-buffer-size-storegateway +[streaming_chunks_per_ingester_series_buffer_size_storegateway: | default = 256] + # (experimental) If true, when querying ingesters, only the minimum required # ingesters required to reach quorum will be queried initially, with other # ingesters queried only if needed due to failures from the initial set of diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index 686e4ab6e2a..c2156cbe4ee 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -4,7 +4,7 @@ Helm chart for deploying [Grafana Mimir](https://grafana.com/docs/mimir/latest/) For the full documentation, visit [Grafana mimir-distributed Helm chart documentation](https://grafana.com/docs/helm-charts/mimir-distributed/latest/). -> **Note:** The documentation version is derived from the Helm chart version which is 4.5.0-weekly.240. +> **Note:** The documentation version is derived from the Helm chart version which is 4.5.0-weekly.241. When upgrading from Helm chart version 3.x, please see [Migrate from single zone to zone-aware replication with Helm](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-from-single-zone-with-helm/). When upgrading from Helm chart version 2.1, please see [Upgrade the Grafana Mimir Helm chart from version 2.1 to 3.0](https://grafana.com/docs/helm-charts/mimir-distributed/latest/migration-guides/migrate-helm-chart-2.1-to-3.0/) as well. diff --git a/pkg/old.txt b/pkg/old.txt new file mode 100644 index 00000000000..e69de29bb2d diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 4ce378b69e9..cfdc219c18b 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -264,8 +264,8 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa reg, ) - streamingBufferSize := querierCfg.StreamingChunksPerIngesterSeriesBufferSize - if !querierCfg.PreferStreamingChunks { + streamingBufferSize := querierCfg.StreamingChunksPerIngesterSeriesBufferSizeStoregateway + if !querierCfg.PreferStreamingChunksStoregateway { streamingBufferSize = 0 } @@ -698,6 +698,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) reqStats = stats.FromContext(ctx) streamReaders []*storegateway.SeriesChunksStreamReader + streams []storegatewaypb.StoreGateway_SeriesClient ) // Concurrently fetch series from all clients. @@ -720,6 +721,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor stream, err := c.Series(reqCtx, req) if err == nil { + streams = append(streams, stream) err = gCtx.Err() } if err != nil { @@ -862,8 +864,10 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor // Wait until all client requests complete. if err := g.Wait(); err != nil { - for _, sr := range streamReaders { - sr.Close() + for _, stream := range streams { + if err := stream.CloseSend(); err != nil { + level.Warn(q.logger).Log("msg", "closing storegateway client stream failed", "err", err) + } } return nil, nil, nil, err } @@ -1071,6 +1075,10 @@ func createSeriesRequest(minT, maxT int64, matchers []storepb.LabelMatcher, skip return nil, errors.Wrapf(err, "failed to marshal series request hints") } + if skipChunks { + // We don't do the streaming call if we are not requesting the chunks. + streamingBatchSize = 0 + } return &storepb.SeriesRequest{ MinTime: minT, MaxTime: maxT, diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index ac25b305660..ca6019b8012 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -2162,7 +2162,7 @@ func mockSeriesResponseWithChunks(lbls labels.Labels, chunks ...storepb.AggrChun } func mockStreamingSeriesBatchResponse(endOfStream bool, lbls ...[]mimirpb.LabelAdapter) *storepb.SeriesResponse { - res := &storepb.StreamSeriesBatch{} + res := &storepb.StreamingSeriesBatch{} for _, l := range lbls { res.Series = append(res.Series, &storepb.StreamingSeries{Labels: l}) } @@ -2176,9 +2176,9 @@ func mockStreamingSeriesBatchResponse(endOfStream bool, lbls ...[]mimirpb.LabelA func mockStreamingSeriesChunksResponse(index uint64, chks []storepb.AggrChunk) *storepb.SeriesResponse { return &storepb.SeriesResponse{ - Result: &storepb.SeriesResponse_StreamingSeriesChunks{ - StreamingSeriesChunks: &storepb.StreamSeriesChunksBatch{ - Series: []*storepb.StreamSeriesChunks{ + Result: &storepb.SeriesResponse_StreamingChunks{ + StreamingChunks: &storepb.StreamingChunksBatch{ + Series: []*storepb.StreamingChunks{ { SeriesIndex: index, Chunks: chks, diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index b218d553fac..007a748e81c 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -51,9 +51,11 @@ type Config struct { ShuffleShardingIngestersEnabled bool `yaml:"shuffle_sharding_ingesters_enabled" category:"advanced"` - PreferStreamingChunks bool `yaml:"prefer_streaming_chunks" category:"experimental"` - StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size" category:"experimental"` - MinimizeIngesterRequests bool `yaml:"minimize_ingester_requests" category:"experimental"` + PreferStreamingChunks bool `yaml:"prefer_streaming_chunks" category:"experimental"` + PreferStreamingChunksStoregateway bool `yaml:"prefer_streaming_chunks_storegateway" category:"experimental"` + StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size" category:"experimental"` + StreamingChunksPerIngesterSeriesBufferSizeStoregateway uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size_storegateway" category:"experimental"` + MinimizeIngesterRequests bool `yaml:"minimize_ingester_requests" category:"experimental"` // PromQL engine config. EngineConfig engine.Config `yaml:",inline"` @@ -83,11 +85,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.QueryStoreAfter, queryStoreAfterFlag, 12*time.Hour, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.BoolVar(&cfg.ShuffleShardingIngestersEnabled, "querier.shuffle-sharding-ingesters-enabled", true, fmt.Sprintf("Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -%s. If this setting is false or -%s is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", validation.QueryIngestersWithinFlag, validation.QueryIngestersWithinFlag)) f.BoolVar(&cfg.PreferStreamingChunks, "querier.prefer-streaming-chunks", false, "Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this.") + f.BoolVar(&cfg.PreferStreamingChunksStoregateway, "querier.prefer-streaming-chunks-storegateway", false, "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.") f.BoolVar(&cfg.MinimizeIngesterRequests, "querier.minimize-ingester-requests", false, "If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path.") - // Why 256 series / ingester? + // Why 256 series / ingester/storegateway? // Based on our testing, 256 series / ingester was a good balance between memory consumption and the CPU overhead of managing a batch of series. f.Uint64Var(&cfg.StreamingChunksPerIngesterSeriesBufferSize, "querier.streaming-chunks-per-ingester-buffer-size", 256, "Number of series to buffer per ingester when streaming chunks from ingesters.") + f.Uint64Var(&cfg.StreamingChunksPerIngesterSeriesBufferSizeStoregateway, "querier.streaming-chunks-per-ingester-buffer-size-storegateway", 256, "Number of series to buffer per storegateway when streaming chunks from storegateway.") // The querier.query-ingesters-within flag has been moved to the limits.go file // We still need to set a default value for cfg.QueryIngestersWithin since we need to keep supporting the querier yaml field until Mimir 2.11.0 diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 6c15677c01e..97b0dda7887 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -438,7 +438,7 @@ func mockTSDB(t *testing.T, mint model.Time, samples int, step, chunkOffset time ts = ts.Add(step) if cnt%samplesPerChunk == 0 { - // Simulate nextSeriesIndex chunk, restart timestamp. + // Simulate next chunk, restart timestamp. chunkStartTs = chunkStartTs.Add(chunkOffset) ts = chunkStartTs } diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index e942545f2b7..85b796f3602 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -69,7 +69,7 @@ const ( labelEncode = "encode" labelDecode = "decode" - queryStreamBatchMessageSize = 1 * 1024 * 1024 + targetQueryStreamBatchMessageSize = 1 * 1024 * 1024 ) type BucketStoreStats struct { @@ -119,9 +119,9 @@ type BucketStore struct { // Query gate which limits the maximum amount of concurrent queries. queryGate gate.Gate - // chunksLimiterFactory creates a new mockLimiter used to limit the number of chunks fetched by each Series() call. + // chunksLimiterFactory creates a new limiter used to limit the number of chunks fetched by each Series() call. chunksLimiterFactory ChunksLimiterFactory - // seriesLimiterFactory creates a new mockLimiter used to limit the number of touched series by each Series() call, + // seriesLimiterFactory creates a new limiter used to limit the number of touched series by each Series() call, // or LabelName and LabelValues calls when used with matchers. seriesLimiterFactory SeriesLimiterFactory partitioners blockPartitioners @@ -535,6 +535,7 @@ type seriesChunks struct { // Series implements the storepb.StoreServer interface. func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_SeriesServer) (err error) { if req.SkipChunks { + // We don't do the streaming call if we are not requesting the chunks. req.StreamingChunksBatchSize = 0 } defer func() { @@ -621,11 +622,14 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } // If we are streaming the series labels and chunks separately, we don't need to fetch the postings - // twice. So we use these slices to re-use it. + // twice. So we use these slices to re-use them. // Each reusePostings[i] and reusePendingMatchers[i] corresponds to a single block. - var reusePostings [][]storage.SeriesRef - var reusePendingMatchers [][]*labels.Matcher - if req.StreamingChunksBatchSize > 0 && !req.SkipChunks { + var ( + reusePostings [][]storage.SeriesRef + reusePendingMatchers [][]*labels.Matcher + iterationBegin time.Time + ) + if req.StreamingChunksBatchSize > 0 { // The streaming feature is enabled where we stream the series labels first, followed // by the chunks later. Send only the labels here. req.SkipChunks = true @@ -638,7 +642,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - // This also sends the hints and the stats. + iterationBegin = time.Now() err = s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) if err != nil { return err @@ -657,7 +661,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie // Merge the sub-results from each selected block. tracing.DoWithSpan(ctx, "bucket_store_merge_all", func(ctx context.Context, _ tracing.Span) { - err = s.sendSeriesChunks(req, srv, seriesSet, stats) + err = s.sendSeriesChunks(req, srv, seriesSet, stats, iterationBegin) if err != nil { return } @@ -685,12 +689,25 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( seriesSet storepb.SeriesSet, resHints *hintspb.SeriesResponseHints, ) error { + var ( + encodeDuration = time.Duration(0) + sendDuration = time.Duration(0) + ) + // Once the iteration is done we will update the stats. + defer stats.update(func(stats *queryStats) { + // The time spent iterating over the series set is the + // actual time spent fetching series and chunks, encoding and sending them to the client. + // We split the timings to have a better view over how time is spent. + stats.streamingSeriesEncodeResponseDuration += encodeDuration + stats.streamingSeriesSendResponseDuration += sendDuration + }) + // TODO: should we pool the seriesBuffer/seriesBatch? seriesBuffer := make([]*storepb.StreamingSeries, req.StreamingChunksBatchSize) for i := range seriesBuffer { seriesBuffer[i] = &storepb.StreamingSeries{} } - seriesBatch := &storepb.StreamSeriesBatch{ + seriesBatch := &storepb.StreamingSeriesBatch{ Series: seriesBuffer[:0], } // TODO: can we send this in parallel while we start fetching the chunks below? @@ -709,14 +726,18 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { msg := &grpc.PreparedMsg{} - if err := msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { + + encodeBegin := time.Now() + if err := msg.Encode(srv, storepb.NewStreamingSeriesResponse(seriesBatch)); err != nil { return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) } + encodeDuration += time.Since(encodeBegin) - // Send the message. + sendBegin := time.Now() if err := srv.SendMsg(msg); err != nil { return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) } + sendDuration += time.Since(sendBegin) seriesBatch.Series = seriesBatch.Series[:0] } @@ -734,13 +755,18 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( // Send any remaining series and signal that there are no more series. msg := &grpc.PreparedMsg{} seriesBatch.IsEndOfSeriesStream = true - if err := msg.Encode(srv, storepb.NewStreamSeriesResponse(seriesBatch)); err != nil { + + encodeBegin := time.Now() + if err := msg.Encode(srv, storepb.NewStreamingSeriesResponse(seriesBatch)); err != nil { return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) } - // Send the message. + encodeDuration += time.Since(encodeBegin) + + sendBegin := time.Now() if err := srv.SendMsg(msg); err != nil { return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) } + sendDuration += time.Since(sendBegin) if seriesSet.Err() != nil { return errors.Wrap(seriesSet.Err(), "expand series set") @@ -754,16 +780,20 @@ func (s *BucketStore) sendSeriesChunks( srv storepb.Store_SeriesServer, seriesSet storepb.SeriesSet, stats *safeQueryStats, + iterationBegin time.Time, ) (err error) { var ( - iterationBegin = time.Now() encodeDuration = time.Duration(0) sendDuration = time.Duration(0) seriesCount int chunksCount int - streamingChunks = req.StreamingChunksBatchSize > 0 && !req.SkipChunks + streamingChunks = req.StreamingChunksBatchSize > 0 ) + if iterationBegin.Equal(time.Time{}) { + iterationBegin = time.Now() + } + // Once the iteration is done we will update the stats. defer stats.update(func(stats *queryStats) { stats.mergedSeriesCount += seriesCount @@ -779,15 +809,17 @@ func (s *BucketStore) sendSeriesChunks( stats.streamingSeriesFetchSeriesAndChunksDuration-encodeDuration-sendDuration))) }) - var batchSizeBytes int - var chunksBuffer []*storepb.StreamSeriesChunks + var ( + batchSizeBytes int + chunksBuffer []*storepb.StreamingChunks + ) if streamingChunks { - chunksBuffer = make([]*storepb.StreamSeriesChunks, req.StreamingChunksBatchSize) + chunksBuffer = make([]*storepb.StreamingChunks, req.StreamingChunksBatchSize) for i := range chunksBuffer { - chunksBuffer[i] = &storepb.StreamSeriesChunks{} + chunksBuffer[i] = &storepb.StreamingChunks{} } } - chunksBatch := &storepb.StreamSeriesChunksBatch{ + chunksBatch := &storepb.StreamingChunksBatch{ Series: chunksBuffer[:0], } for seriesSet.Next() { @@ -795,7 +827,7 @@ func (s *BucketStore) sendSeriesChunks( // because the subsequent call to seriesSet.Next() may release it. lset, chks := seriesSet.At() seriesCount++ - var response interface{} + var response *storepb.SeriesResponse if streamingChunks { // We only need to stream chunks here because the series labels have already // been sent above. @@ -817,8 +849,10 @@ func (s *BucketStore) sendSeriesChunks( } batchSizeBytes += last.Size() - if (batchSizeBytes > 0 && batchSizeBytes > queryStreamBatchMessageSize) || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { - response = storepb.NewStreamSeriesChunksResponse(chunksBatch) + // We are not strictly required to be under targetQueryStreamBatchMessageSize. + // The aim is to not hit gRPC and TCP limits, hence some overage is ok. + if (batchSizeBytes > 0 && batchSizeBytes > targetQueryStreamBatchMessageSize) || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { + response = storepb.NewStreamingChunksResponse(chunksBatch) } } else { var series storepb.Series @@ -853,13 +887,13 @@ func (s *BucketStore) sendSeriesChunks( if streamingChunks && len(chunksBatch.Series) > 0 { // Still some chunks left to send. - return s.sendChunks(srv, storepb.NewStreamSeriesChunksResponse(chunksBatch), &encodeDuration, &sendDuration) + return s.sendChunks(srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) } return nil } -func (s *BucketStore) sendChunks(srv storepb.Store_SeriesServer, chunks interface{}, encodeDuration, sendDuration *time.Duration) error { +func (s *BucketStore) sendChunks(srv storepb.Store_SeriesServer, chunks *storepb.SeriesResponse, encodeDuration, sendDuration *time.Duration) error { // We encode it ourselves into a PreparedMsg in order to measure the time it takes. encodeBegin := time.Now() msg := &grpc.PreparedMsg{} @@ -868,7 +902,6 @@ func (s *BucketStore) sendChunks(srv storepb.Store_SeriesServer, chunks interfac } *encodeDuration += time.Since(encodeBegin) - // Send the message. sendBegin := time.Now() if err := srv.SendMsg(msg); err != nil { return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) @@ -912,8 +945,8 @@ func (s *BucketStore) streamingSeriesSetForBlocks( chunkReaders *bucketChunkReaders, shardSelector *sharding.ShardSelector, matchers []*labels.Matcher, - chunksLimiter ChunksLimiter, // Rate mockLimiter for loading chunks. - seriesLimiter SeriesLimiter, // Rate mockLimiter for loading series. + chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. + seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, reusePostings [][]storage.SeriesRef, // Used if not empty. reusePendingMatchers [][]*labels.Matcher, // Used if not empty. diff --git a/pkg/storegateway/bucket_store_server_test.go b/pkg/storegateway/bucket_store_server_test.go index 6f9236cf86c..84bf25a9119 100644 --- a/pkg/storegateway/bucket_store_server_test.go +++ b/pkg/storegateway/bucket_store_server_test.go @@ -185,7 +185,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest return } - copiedSeries := &storepb.StreamSeriesBatch{} + copiedSeries := &storepb.StreamingSeriesBatch{} if err = copiedSeries.Unmarshal(recvSeriesData); err != nil { err = errors.Wrap(err, "unmarshal received series") return @@ -209,7 +209,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest return } - chksBatch := res.GetStreamingSeriesChunks() + chksBatch := res.GetStreamingChunks() for _, chks := range chksBatch.Series { idx++ if chksBatch == nil { @@ -230,7 +230,7 @@ func (s *storeTestServer) Series(ctx context.Context, req *storepb.SeriesRequest return } - copiedChunks := &storepb.StreamSeriesChunks{} + copiedChunks := &storepb.StreamingChunks{} if err = copiedChunks.Unmarshal(data); err != nil { err = errors.Wrap(err, "unmarshal received series") return diff --git a/pkg/storegateway/bucket_streaming_readers.go b/pkg/storegateway/bucket_streaming_readers.go index 25686b6f799..ee1643988db 100644 --- a/pkg/storegateway/bucket_streaming_readers.go +++ b/pkg/storegateway/bucket_streaming_readers.go @@ -28,8 +28,8 @@ type SeriesChunksStreamReader struct { stats *stats.Stats log log.Logger - seriesChunksChan chan *storepb.StreamSeriesChunksBatch - chunksBatch []*storepb.StreamSeriesChunks + seriesChunksChan chan *storepb.StreamingChunksBatch + chunksBatch []*storepb.StreamingChunks errorChan chan error } @@ -57,7 +57,7 @@ func (s *SeriesChunksStreamReader) Close() { // If an error occurs while streaming, a subsequent call to GetChunks will return an error. // To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. func (s *SeriesChunksStreamReader) StartBuffering() { - s.seriesChunksChan = make(chan *storepb.StreamSeriesChunksBatch, 2) + s.seriesChunksChan = make(chan *storepb.StreamingChunksBatch, 1) // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. s.errorChan = make(chan error, 1) @@ -89,7 +89,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { return } - c := msg.GetStreamingSeriesChunks() + c := msg.GetStreamingChunks() if c == nil { s.errorChan <- fmt.Errorf("expected to receive StreamingSeriesChunks, but got something else") return diff --git a/pkg/storegateway/limiter.go b/pkg/storegateway/limiter.go index 86388ecc389..ad01ac19759 100644 --- a/pkg/storegateway/limiter.go +++ b/pkg/storegateway/limiter.go @@ -15,14 +15,14 @@ import ( ) type ChunksLimiter interface { - // Reserve num chunks out of the total number of chunks enforced by the mockLimiter. + // Reserve num chunks out of the total number of chunks enforced by the limiter. // Returns an error if the limit has been exceeded. This function must be // goroutine safe. Reserve(num uint64) error } type SeriesLimiter interface { - // Reserve num series out of the total number of series enforced by the mockLimiter. + // Reserve num series out of the total number of series enforced by the limiter. // Returns an error if the limit has been exceeded. This function must be // goroutine safe. Reserve(num uint64) error @@ -45,7 +45,7 @@ type Limiter struct { failedOnce sync.Once } -// NewLimiter returns a new mockLimiter with a specified limit. 0 disables the limit. +// NewLimiter returns a new limiter with a specified limit. 0 disables the limit. func NewLimiter(limit uint64, ctr prometheus.Counter) *Limiter { return &Limiter{limit: limit, failedCounter: ctr} } diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index e5962711e46..d77047b224d 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1083,8 +1083,8 @@ func TestLimitingSeriesChunkRefsSetIterator(t *testing.T) { t.Run(testName, func(t *testing.T) { iterator := newLimitingSeriesChunkRefsSetIterator( newSliceSeriesChunkRefsSetIterator(testCase.upstreamErr, testCase.sets...), - &mockLimiter{limit: testCase.chunksLimit}, - &mockLimiter{limit: testCase.seriesLimit}, + &staticLimiter{limit: testCase.chunksLimit}, + &staticLimiter{limit: testCase.seriesLimit}, ) sets := readAllSeriesChunkRefsSet(iterator) @@ -1874,6 +1874,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { hashCache := hashcache.NewSeriesHashCache(1024 * 1024).GetBlockCache(block.meta.ULID.String()) b.ResetTimer() + b.ReportAllocs() for i := 0; i < b.N; i++ { iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( @@ -2604,12 +2605,12 @@ func (s *sliceSeriesChunkRefsSetIterator) Err() error { return nil } -type mockLimiter struct { +type staticLimiter struct { limit int current atomic.Uint64 } -func (l *mockLimiter) Reserve(num uint64) error { +func (l *staticLimiter) Reserve(num uint64) error { if l.current.Add(num) > uint64(l.limit) { return errors.New("test limit exceeded") } diff --git a/pkg/storegateway/storepb/custom.go b/pkg/storegateway/storepb/custom.go index 5ed4208fbcd..3c8653e6bd3 100644 --- a/pkg/storegateway/storepb/custom.go +++ b/pkg/storegateway/storepb/custom.go @@ -39,7 +39,7 @@ func NewStatsResponse(indexBytesFetched int) *SeriesResponse { } } -func NewStreamSeriesResponse(series *StreamSeriesBatch) *SeriesResponse { +func NewStreamingSeriesResponse(series *StreamingSeriesBatch) *SeriesResponse { return &SeriesResponse{ Result: &SeriesResponse_StreamingSeries{ StreamingSeries: series, @@ -47,10 +47,10 @@ func NewStreamSeriesResponse(series *StreamSeriesBatch) *SeriesResponse { } } -func NewStreamSeriesChunksResponse(series *StreamSeriesChunksBatch) *SeriesResponse { +func NewStreamingChunksResponse(series *StreamingChunksBatch) *SeriesResponse { return &SeriesResponse{ - Result: &SeriesResponse_StreamingSeriesChunks{ - StreamingSeriesChunks: series, + Result: &SeriesResponse_StreamingChunks{ + StreamingChunks: series, }, } } diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index f52a5a64d3e..ed55eba7f98 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -40,7 +40,7 @@ type SeriesRequest struct { // The content of this field and whether it's supported depends on the // implementation of a specific store. Hints *types.Any `protobuf:"bytes,9,opt,name=hints,proto3" json:"hints,omitempty"` - // Why 100? TBD, it is just copied from ingesters and need to see if we still need 100 here. + // It is 100 so that we have an option to bring back compatibility with Thanos' storage API. StreamingChunksBatchSize uint64 `protobuf:"varint,100,opt,name=streaming_chunks_batch_size,json=streamingChunksBatchSize,proto3" json:"streaming_chunks_batch_size,omitempty"` } @@ -120,7 +120,7 @@ type SeriesResponse struct { // *SeriesResponse_Hints // *SeriesResponse_Stats // *SeriesResponse_StreamingSeries - // *SeriesResponse_StreamingSeriesChunks + // *SeriesResponse_StreamingChunks Result isSeriesResponse_Result `protobuf_oneof:"result"` } @@ -176,18 +176,18 @@ type SeriesResponse_Stats struct { Stats *Stats `protobuf:"bytes,4,opt,name=stats,proto3,oneof"` } type SeriesResponse_StreamingSeries struct { - StreamingSeries *StreamSeriesBatch `protobuf:"bytes,5,opt,name=streaming_series,json=streamingSeries,proto3,oneof"` + StreamingSeries *StreamingSeriesBatch `protobuf:"bytes,5,opt,name=streaming_series,json=streamingSeries,proto3,oneof"` } -type SeriesResponse_StreamingSeriesChunks struct { - StreamingSeriesChunks *StreamSeriesChunksBatch `protobuf:"bytes,6,opt,name=streaming_series_chunks,json=streamingSeriesChunks,proto3,oneof"` +type SeriesResponse_StreamingChunks struct { + StreamingChunks *StreamingChunksBatch `protobuf:"bytes,6,opt,name=streaming_chunks,json=streamingChunks,proto3,oneof"` } -func (*SeriesResponse_Series) isSeriesResponse_Result() {} -func (*SeriesResponse_Warning) isSeriesResponse_Result() {} -func (*SeriesResponse_Hints) isSeriesResponse_Result() {} -func (*SeriesResponse_Stats) isSeriesResponse_Result() {} -func (*SeriesResponse_StreamingSeries) isSeriesResponse_Result() {} -func (*SeriesResponse_StreamingSeriesChunks) isSeriesResponse_Result() {} +func (*SeriesResponse_Series) isSeriesResponse_Result() {} +func (*SeriesResponse_Warning) isSeriesResponse_Result() {} +func (*SeriesResponse_Hints) isSeriesResponse_Result() {} +func (*SeriesResponse_Stats) isSeriesResponse_Result() {} +func (*SeriesResponse_StreamingSeries) isSeriesResponse_Result() {} +func (*SeriesResponse_StreamingChunks) isSeriesResponse_Result() {} func (m *SeriesResponse) GetResult() isSeriesResponse_Result { if m != nil { @@ -224,16 +224,16 @@ func (m *SeriesResponse) GetStats() *Stats { return nil } -func (m *SeriesResponse) GetStreamingSeries() *StreamSeriesBatch { +func (m *SeriesResponse) GetStreamingSeries() *StreamingSeriesBatch { if x, ok := m.GetResult().(*SeriesResponse_StreamingSeries); ok { return x.StreamingSeries } return nil } -func (m *SeriesResponse) GetStreamingSeriesChunks() *StreamSeriesChunksBatch { - if x, ok := m.GetResult().(*SeriesResponse_StreamingSeriesChunks); ok { - return x.StreamingSeriesChunks +func (m *SeriesResponse) GetStreamingChunks() *StreamingChunksBatch { + if x, ok := m.GetResult().(*SeriesResponse_StreamingChunks); ok { + return x.StreamingChunks } return nil } @@ -246,7 +246,7 @@ func (*SeriesResponse) XXX_OneofWrappers() []interface{} { (*SeriesResponse_Hints)(nil), (*SeriesResponse_Stats)(nil), (*SeriesResponse_StreamingSeries)(nil), - (*SeriesResponse_StreamingSeriesChunks)(nil), + (*SeriesResponse_StreamingChunks)(nil), } } @@ -430,56 +430,56 @@ func init() { func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) } var fileDescriptor_77a6da22d6a3feb1 = []byte{ - // 783 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0xc7, 0xb9, 0xe2, 0x92, 0x5a, 0xad, 0x2c, 0x97, 0x5e, 0x7f, 0x94, 0xa2, 0x01, 0x5a, 0x10, - 0x50, 0x40, 0x28, 0x5a, 0xb9, 0x70, 0x81, 0x16, 0x3d, 0xf4, 0x60, 0x19, 0x30, 0x54, 0xa2, 0xed, - 0x81, 0x2e, 0x0a, 0xb4, 0x17, 0x81, 0x92, 0xd6, 0x12, 0x61, 0x89, 0x54, 0xb9, 0x54, 0x63, 0xf9, - 0x94, 0x47, 0xc8, 0x63, 0x04, 0xc8, 0x13, 0xe4, 0x9a, 0x93, 0x6f, 0xf1, 0xd1, 0xa7, 0x20, 0x92, - 0x2f, 0x39, 0xfa, 0x11, 0x82, 0xfd, 0x90, 0x28, 0x19, 0x0a, 0x1c, 0x03, 0xb9, 0x71, 0xfe, 0xff, - 0xd9, 0xd9, 0x99, 0xdf, 0x0e, 0x71, 0x21, 0x19, 0x75, 0xea, 0xa3, 0x24, 0x4e, 0x63, 0x62, 0xa6, - 0xfd, 0x20, 0x8a, 0x99, 0x53, 0x4c, 0x27, 0x23, 0xca, 0xa4, 0xe8, 0x7c, 0xdf, 0x0b, 0xd3, 0xfe, - 0xb8, 0x5d, 0xef, 0xc4, 0xc3, 0xc3, 0x5e, 0xdc, 0x8b, 0x0f, 0x85, 0xdc, 0x1e, 0x9f, 0x8b, 0x48, - 0x04, 0xe2, 0x4b, 0xa5, 0x97, 0x7b, 0x71, 0xdc, 0x1b, 0xd0, 0x2c, 0x2b, 0x88, 0x26, 0xd2, 0xaa, - 0xbe, 0xce, 0xe1, 0xd2, 0x19, 0x4d, 0x42, 0xca, 0x7c, 0xfa, 0xdf, 0x98, 0xb2, 0x94, 0x94, 0x31, - 0x1a, 0x86, 0x51, 0x2b, 0x0d, 0x87, 0xd4, 0x06, 0x15, 0x50, 0xd3, 0xfd, 0xfc, 0x30, 0x8c, 0xfe, - 0x0a, 0x87, 0x54, 0x58, 0xc1, 0xa5, 0xb4, 0x72, 0xca, 0x0a, 0x2e, 0x85, 0xf5, 0x13, 0xb7, 0xd2, - 0x4e, 0x9f, 0x26, 0xcc, 0xd6, 0x2b, 0x7a, 0xad, 0x78, 0xb4, 0x53, 0x97, 0x9d, 0xd7, 0x7f, 0x0f, - 0xda, 0x74, 0xf0, 0x87, 0x34, 0x1b, 0xf0, 0xfa, 0xdd, 0x81, 0xe6, 0x2f, 0x72, 0xc9, 0x01, 0x2e, - 0xb2, 0x8b, 0x70, 0xd4, 0xea, 0xf4, 0xc7, 0xd1, 0x05, 0xb3, 0x51, 0x05, 0xd4, 0x90, 0x8f, 0xb9, - 0x74, 0x22, 0x14, 0xf2, 0x2d, 0x36, 0xfa, 0x61, 0x94, 0x32, 0xbb, 0x50, 0x01, 0xa2, 0xaa, 0x9c, - 0xa5, 0x3e, 0x9f, 0xa5, 0x7e, 0x1c, 0x4d, 0x7c, 0x99, 0x42, 0x7e, 0xc5, 0xfb, 0x2c, 0x4d, 0x68, - 0x30, 0x0c, 0xa3, 0x9e, 0xaa, 0xd8, 0x6a, 0xf3, 0x9b, 0x5a, 0x2c, 0xbc, 0xa2, 0x76, 0xb7, 0x02, - 0x6a, 0xd0, 0xb7, 0x17, 0x29, 0xf2, 0x86, 0x06, 0x4f, 0x38, 0x0b, 0xaf, 0xa8, 0x07, 0x11, 0xb4, - 0x0c, 0x0f, 0x22, 0xc3, 0x32, 0x3d, 0x88, 0x4c, 0x2b, 0xef, 0x41, 0x94, 0xb7, 0x90, 0x07, 0x11, - 0xb6, 0x8a, 0x1e, 0x44, 0x45, 0x6b, 0xc3, 0x83, 0x68, 0xc3, 0x2a, 0x79, 0x10, 0x95, 0xac, 0xcd, - 0xea, 0xcf, 0xd8, 0x38, 0x4b, 0x83, 0x94, 0x91, 0x3a, 0xde, 0x3e, 0xa7, 0x7c, 0xa0, 0x6e, 0x2b, - 0x8c, 0xba, 0xf4, 0xb2, 0xd5, 0x9e, 0xa4, 0x94, 0x09, 0x7a, 0xd0, 0xdf, 0x52, 0xd6, 0x6f, 0xdc, - 0x69, 0x70, 0xa3, 0x7a, 0x9b, 0xc3, 0x9b, 0x73, 0xe8, 0x6c, 0x14, 0x47, 0x8c, 0x92, 0x1a, 0x36, - 0x99, 0x50, 0xc4, 0xa9, 0xe2, 0xd1, 0xe6, 0x9c, 0x9e, 0xcc, 0x6b, 0x6a, 0xbe, 0xf2, 0x89, 0x83, - 0xf3, 0xcf, 0x82, 0x24, 0x0a, 0xa3, 0x9e, 0x78, 0x83, 0x42, 0x53, 0xf3, 0xe7, 0x02, 0xf9, 0x6e, - 0x0e, 0x4b, 0xff, 0x34, 0xac, 0xa6, 0x36, 0xc7, 0xf5, 0x0d, 0x36, 0x18, 0xef, 0xdf, 0x86, 0x22, - 0xbb, 0xb4, 0xb8, 0x92, 0x8b, 0x3c, 0x4d, 0xb8, 0xe4, 0x14, 0x5b, 0x19, 0x55, 0xd5, 0xa4, 0x21, - 0x4e, 0x94, 0xb3, 0x13, 0xdc, 0x97, 0xad, 0x0a, 0x9e, 0x4d, 0xcd, 0xff, 0x6a, 0x71, 0x48, 0xea, - 0xe4, 0x1f, 0xfc, 0xf5, 0xc3, 0x3a, 0xf3, 0x67, 0x37, 0x45, 0xb9, 0x83, 0x75, 0xe5, 0x96, 0x1e, - 0xa9, 0xa9, 0xf9, 0xbb, 0x0f, 0x8a, 0x2a, 0x17, 0x61, 0x33, 0xa1, 0x6c, 0x3c, 0x48, 0xab, 0xaf, - 0x00, 0xde, 0x12, 0x0b, 0xf7, 0x67, 0x30, 0xcc, 0x76, 0x7a, 0x47, 0x4c, 0x9a, 0xa4, 0x82, 0x8b, - 0xee, 0xcb, 0x80, 0x58, 0x58, 0xa7, 0x51, 0x57, 0x4c, 0xaf, 0xfb, 0xfc, 0x33, 0x5b, 0x36, 0xe3, - 0xf1, 0x65, 0x5b, 0xde, 0x78, 0xf3, 0xf3, 0x37, 0xde, 0x83, 0x08, 0x58, 0x39, 0x0f, 0xa2, 0x9c, - 0xa5, 0x57, 0x13, 0x4c, 0x96, 0x9b, 0x55, 0xbb, 0xb0, 0x83, 0x8d, 0x88, 0x0b, 0x36, 0xa8, 0xe8, - 0xb5, 0x82, 0x2f, 0x03, 0xe2, 0x60, 0xa4, 0x9e, 0x99, 0xd9, 0x39, 0x61, 0x2c, 0xe2, 0xac, 0x6f, - 0xfd, 0xd1, 0xbe, 0xab, 0x6f, 0x80, 0xba, 0xf4, 0xef, 0x60, 0x30, 0x5e, 0x41, 0x34, 0xe0, 0xaa, - 0xd8, 0xbf, 0x82, 0x2f, 0x83, 0x0c, 0x1c, 0x5c, 0x03, 0xce, 0x58, 0x03, 0xce, 0x7c, 0x1a, 0xb8, - 0xfc, 0x93, 0xc0, 0xe5, 0x2c, 0xdd, 0x83, 0x48, 0xb7, 0x60, 0x75, 0x8c, 0xb7, 0x57, 0x66, 0x50, - 0xe4, 0xf6, 0xb0, 0xf9, 0xbf, 0x50, 0x14, 0x3a, 0x15, 0x7d, 0x29, 0x76, 0x47, 0x6f, 0x01, 0xff, - 0xe5, 0xe3, 0x84, 0x92, 0x5f, 0xb0, 0xa9, 0xd6, 0x7a, 0x77, 0xf5, 0x4f, 0x55, 0x3c, 0x9d, 0xbd, - 0x87, 0xb2, 0x6c, 0xf1, 0x07, 0x40, 0x4e, 0x30, 0xce, 0x1e, 0x9d, 0x94, 0x57, 0x66, 0x5f, 0xde, - 0x5a, 0xc7, 0x59, 0x67, 0xa9, 0x49, 0x4f, 0x71, 0x71, 0x09, 0x00, 0x59, 0x4d, 0x5d, 0x79, 0x59, - 0x67, 0x7f, 0xad, 0x27, 0xeb, 0x34, 0x8e, 0xaf, 0xa7, 0xae, 0x76, 0x33, 0x75, 0xb5, 0xdb, 0xa9, - 0xab, 0xdd, 0x4f, 0x5d, 0xf0, 0x7c, 0xe6, 0x82, 0x97, 0x33, 0x17, 0x5c, 0xcf, 0x5c, 0x70, 0x33, - 0x73, 0xc1, 0xfb, 0x99, 0x0b, 0x3e, 0xcc, 0x5c, 0xed, 0x7e, 0xe6, 0x82, 0x17, 0x77, 0xae, 0x76, - 0x73, 0xe7, 0x6a, 0xb7, 0x77, 0xae, 0xf6, 0x6f, 0x9e, 0x71, 0x10, 0xa3, 0x76, 0xdb, 0x14, 0xa4, - 0x7e, 0xfc, 0x18, 0x00, 0x00, 0xff, 0xff, 0xb4, 0xb0, 0xa8, 0x21, 0xb5, 0x06, 0x00, 0x00, + // 772 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x93, 0x41, 0x6f, 0xd3, 0x48, + 0x14, 0xc7, 0x3d, 0xf1, 0xd8, 0x99, 0x4c, 0x9a, 0xae, 0x3b, 0xed, 0x56, 0xae, 0xbb, 0x72, 0xa3, + 0x48, 0x2b, 0x45, 0xab, 0xdd, 0x74, 0x55, 0x24, 0x10, 0x07, 0x0e, 0x4d, 0x25, 0x94, 0x5a, 0xc0, + 0xc1, 0x45, 0x1c, 0xb8, 0x44, 0x4e, 0x32, 0x4d, 0xac, 0x26, 0x76, 0xf0, 0x38, 0xd0, 0xf4, 0xc4, + 0x47, 0xe0, 0x63, 0x20, 0xf1, 0x09, 0xb8, 0x72, 0xaa, 0xc4, 0x81, 0x1e, 0x7b, 0x42, 0x24, 0xbd, + 0x70, 0xec, 0x47, 0x40, 0x9e, 0x99, 0xc4, 0x31, 0x4a, 0x55, 0x2a, 0x71, 0xf3, 0xfb, 0xff, 0xdf, + 0xcc, 0xbc, 0xf7, 0x7b, 0xcf, 0xb8, 0x10, 0x0d, 0xdb, 0xb5, 0x61, 0x14, 0xc6, 0x21, 0xd1, 0xe3, + 0x9e, 0x17, 0x84, 0xcc, 0x2a, 0xc6, 0xe3, 0x21, 0x65, 0x42, 0xb4, 0xfe, 0xeb, 0xfa, 0x71, 0x6f, + 0xd4, 0xaa, 0xb5, 0xc3, 0xc1, 0x6e, 0x37, 0xec, 0x86, 0xbb, 0x5c, 0x6e, 0x8d, 0x8e, 0x79, 0xc4, + 0x03, 0xfe, 0x25, 0xd3, 0xb7, 0xba, 0x61, 0xd8, 0xed, 0xd3, 0x34, 0xcb, 0x0b, 0xc6, 0xc2, 0xaa, + 0x7c, 0xcc, 0xe1, 0xd2, 0x11, 0x8d, 0x7c, 0xca, 0x5c, 0xfa, 0x6a, 0x44, 0x59, 0x4c, 0xb6, 0x30, + 0x1a, 0xf8, 0x41, 0x33, 0xf6, 0x07, 0xd4, 0x04, 0x65, 0x50, 0x55, 0xdd, 0xfc, 0xc0, 0x0f, 0x9e, + 0xfb, 0x03, 0xca, 0x2d, 0xef, 0x54, 0x58, 0x39, 0x69, 0x79, 0xa7, 0xdc, 0xba, 0x9f, 0x58, 0x71, + 0xbb, 0x47, 0x23, 0x66, 0xaa, 0x65, 0xb5, 0x5a, 0xdc, 0xdb, 0xa8, 0x89, 0xca, 0x6b, 0x4f, 0xbc, + 0x16, 0xed, 0x3f, 0x15, 0x66, 0x1d, 0x9e, 0x7f, 0xdd, 0x51, 0xdc, 0x79, 0x2e, 0xd9, 0xc1, 0x45, + 0x76, 0xe2, 0x0f, 0x9b, 0xed, 0xde, 0x28, 0x38, 0x61, 0x26, 0x2a, 0x83, 0x2a, 0x72, 0x71, 0x22, + 0x1d, 0x70, 0x85, 0xfc, 0x83, 0xb5, 0x9e, 0x1f, 0xc4, 0xcc, 0x2c, 0x94, 0x01, 0xbf, 0x55, 0xf4, + 0x52, 0x9b, 0xf5, 0x52, 0xdb, 0x0f, 0xc6, 0xae, 0x48, 0x21, 0x8f, 0xf0, 0x36, 0x8b, 0x23, 0xea, + 0x0d, 0xfc, 0xa0, 0x2b, 0x6f, 0x6c, 0xb6, 0x92, 0x97, 0x9a, 0xcc, 0x3f, 0xa3, 0x66, 0xa7, 0x0c, + 0xaa, 0xd0, 0x35, 0xe7, 0x29, 0xe2, 0x85, 0x7a, 0x92, 0x70, 0xe4, 0x9f, 0x51, 0x07, 0x22, 0x68, + 0x68, 0x0e, 0x44, 0x9a, 0xa1, 0x3b, 0x10, 0xe9, 0x46, 0xde, 0x81, 0x28, 0x6f, 0x20, 0x07, 0x22, + 0x6c, 0x14, 0x1d, 0x88, 0x8a, 0xc6, 0x8a, 0x03, 0xd1, 0x8a, 0x51, 0x72, 0x20, 0x2a, 0x19, 0xab, + 0x95, 0x07, 0x58, 0x3b, 0x8a, 0xbd, 0x98, 0x91, 0x1a, 0x5e, 0x3f, 0xa6, 0x49, 0x43, 0x9d, 0xa6, + 0x1f, 0x74, 0xe8, 0x69, 0xb3, 0x35, 0x8e, 0x29, 0xe3, 0xf4, 0xa0, 0xbb, 0x26, 0xad, 0xc3, 0xc4, + 0xa9, 0x27, 0x46, 0xe5, 0x73, 0x0e, 0xaf, 0xce, 0xa0, 0xb3, 0x61, 0x18, 0x30, 0x4a, 0xaa, 0x58, + 0x67, 0x5c, 0xe1, 0xa7, 0x8a, 0x7b, 0xab, 0x33, 0x7a, 0x22, 0xaf, 0xa1, 0xb8, 0xd2, 0x27, 0x16, + 0xce, 0xbf, 0xf1, 0xa2, 0xc0, 0x0f, 0xba, 0x7c, 0x06, 0x85, 0x86, 0xe2, 0xce, 0x04, 0xf2, 0xef, + 0x0c, 0x96, 0x7a, 0x33, 0xac, 0x86, 0x32, 0xc3, 0xf5, 0x37, 0xd6, 0x58, 0x52, 0xbf, 0x09, 0x79, + 0x76, 0x69, 0xfe, 0x64, 0x22, 0x26, 0x69, 0xdc, 0x25, 0x87, 0xd8, 0x48, 0xa9, 0xca, 0x22, 0x35, + 0x7e, 0xe2, 0xaf, 0xf4, 0x84, 0xf4, 0x45, 0xb5, 0x1c, 0x69, 0x43, 0x71, 0xff, 0x60, 0x59, 0x3d, + 0x7b, 0x95, 0x1c, 0xb9, 0x7e, 0xc3, 0x55, 0x0b, 0xd3, 0xc9, 0x5c, 0x25, 0x75, 0x84, 0xf5, 0x88, + 0xb2, 0x51, 0x3f, 0xae, 0x7c, 0x00, 0x78, 0x8d, 0xef, 0xd8, 0x33, 0x6f, 0x90, 0xae, 0xf1, 0x06, + 0x6f, 0x2e, 0x8a, 0x39, 0x0a, 0xd5, 0x15, 0x01, 0x31, 0xb0, 0x4a, 0x83, 0x0e, 0x6f, 0x58, 0x75, + 0x93, 0xcf, 0x74, 0xbf, 0xb4, 0xdb, 0xf7, 0x6b, 0x71, 0xc9, 0xf5, 0x5f, 0x5f, 0x72, 0x07, 0x22, + 0x60, 0xe4, 0x1c, 0x88, 0x72, 0x86, 0x5a, 0x89, 0x30, 0x59, 0x2c, 0x56, 0x8e, 0x7f, 0x03, 0x6b, + 0x41, 0x22, 0x98, 0xa0, 0xac, 0x56, 0x0b, 0xae, 0x08, 0x88, 0x85, 0x91, 0x9c, 0x2c, 0x33, 0x73, + 0xdc, 0x98, 0xc7, 0x69, 0xdd, 0xea, 0xad, 0x75, 0x57, 0x3e, 0x01, 0xf9, 0xe8, 0x0b, 0xaf, 0x3f, + 0xca, 0x20, 0xea, 0x27, 0x2a, 0x5f, 0xb9, 0x82, 0x2b, 0x82, 0x14, 0x1c, 0x5c, 0x02, 0x4e, 0x5b, + 0x02, 0x4e, 0xbf, 0x1b, 0xb8, 0xfc, 0x9d, 0xc0, 0xe5, 0x0c, 0xd5, 0x81, 0x48, 0x35, 0x60, 0x65, + 0x84, 0xd7, 0x33, 0x3d, 0x48, 0x72, 0x9b, 0x58, 0x7f, 0xcd, 0x15, 0x89, 0x4e, 0x46, 0xbf, 0x8b, + 0xdd, 0xde, 0x17, 0x90, 0xfc, 0xe5, 0x61, 0x44, 0xc9, 0x43, 0xac, 0xcb, 0x35, 0xfe, 0x33, 0xfb, + 0x73, 0x4a, 0x9e, 0xd6, 0xe6, 0xcf, 0xb2, 0x28, 0xf1, 0x7f, 0x40, 0x0e, 0x30, 0x4e, 0x87, 0x4e, + 0xb6, 0x32, 0xbd, 0x2f, 0x6e, 0xad, 0x65, 0x2d, 0xb3, 0x64, 0xa7, 0x8f, 0x71, 0x71, 0x01, 0x00, + 0xc9, 0xa6, 0x66, 0x26, 0x6b, 0x6d, 0x2f, 0xf5, 0xc4, 0x3d, 0xf5, 0xfd, 0xf3, 0x89, 0xad, 0x5c, + 0x4c, 0x6c, 0xe5, 0x72, 0x62, 0x2b, 0xd7, 0x13, 0x1b, 0xbc, 0x9d, 0xda, 0xe0, 0xfd, 0xd4, 0x06, + 0xe7, 0x53, 0x1b, 0x5c, 0x4c, 0x6d, 0xf0, 0x6d, 0x6a, 0x83, 0xef, 0x53, 0x5b, 0xb9, 0x9e, 0xda, + 0xe0, 0xdd, 0x95, 0xad, 0x5c, 0x5c, 0xd9, 0xca, 0xe5, 0x95, 0xad, 0xbc, 0xcc, 0xb3, 0x04, 0xc4, + 0xb0, 0xd5, 0xd2, 0x39, 0xa9, 0x7b, 0x3f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x71, 0x84, 0x02, + 0xa8, 0x06, 0x00, 0x00, } func (this *SeriesRequest) Equal(that interface{}) bool { @@ -700,14 +700,14 @@ func (this *SeriesResponse_StreamingSeries) Equal(that interface{}) bool { } return true } -func (this *SeriesResponse_StreamingSeriesChunks) Equal(that interface{}) bool { +func (this *SeriesResponse_StreamingChunks) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*SeriesResponse_StreamingSeriesChunks) + that1, ok := that.(*SeriesResponse_StreamingChunks) if !ok { - that2, ok := that.(SeriesResponse_StreamingSeriesChunks) + that2, ok := that.(SeriesResponse_StreamingChunks) if ok { that1 = &that2 } else { @@ -719,7 +719,7 @@ func (this *SeriesResponse_StreamingSeriesChunks) Equal(that interface{}) bool { } else if this == nil { return false } - if !this.StreamingSeriesChunks.Equal(that1.StreamingSeriesChunks) { + if !this.StreamingChunks.Equal(that1.StreamingChunks) { return false } return true @@ -968,12 +968,12 @@ func (this *SeriesResponse_StreamingSeries) GoString() string { `StreamingSeries:` + fmt.Sprintf("%#v", this.StreamingSeries) + `}`}, ", ") return s } -func (this *SeriesResponse_StreamingSeriesChunks) GoString() string { +func (this *SeriesResponse_StreamingChunks) GoString() string { if this == nil { return "nil" } - s := strings.Join([]string{`&storepb.SeriesResponse_StreamingSeriesChunks{` + - `StreamingSeriesChunks:` + fmt.Sprintf("%#v", this.StreamingSeriesChunks) + `}`}, ", ") + s := strings.Join([]string{`&storepb.SeriesResponse_StreamingChunks{` + + `StreamingChunks:` + fmt.Sprintf("%#v", this.StreamingChunks) + `}`}, ", ") return s } func (this *LabelNamesRequest) GoString() string { @@ -1487,15 +1487,15 @@ func (m *SeriesResponse_StreamingSeries) MarshalToSizedBuffer(dAtA []byte) (int, } return len(dAtA) - i, nil } -func (m *SeriesResponse_StreamingSeriesChunks) MarshalTo(dAtA []byte) (int, error) { +func (m *SeriesResponse_StreamingChunks) MarshalTo(dAtA []byte) (int, error) { return m.MarshalToSizedBuffer(dAtA[:m.Size()]) } -func (m *SeriesResponse_StreamingSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *SeriesResponse_StreamingChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) - if m.StreamingSeriesChunks != nil { + if m.StreamingChunks != nil { { - size, err := m.StreamingSeriesChunks.MarshalToSizedBuffer(dAtA[:i]) + size, err := m.StreamingChunks.MarshalToSizedBuffer(dAtA[:i]) if err != nil { return 0, err } @@ -1862,14 +1862,14 @@ func (m *SeriesResponse_StreamingSeries) Size() (n int) { } return n } -func (m *SeriesResponse_StreamingSeriesChunks) Size() (n int) { +func (m *SeriesResponse_StreamingChunks) Size() (n int) { if m == nil { return 0 } var l int _ = l - if m.StreamingSeriesChunks != nil { - l = m.StreamingSeriesChunks.Size() + if m.StreamingChunks != nil { + l = m.StreamingChunks.Size() n += 1 + l + sovRpc(uint64(l)) } return n @@ -2069,17 +2069,17 @@ func (this *SeriesResponse_StreamingSeries) String() string { return "nil" } s := strings.Join([]string{`&SeriesResponse_StreamingSeries{`, - `StreamingSeries:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeries), "StreamSeriesBatch", "StreamSeriesBatch", 1) + `,`, + `StreamingSeries:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeries), "StreamingSeriesBatch", "StreamingSeriesBatch", 1) + `,`, `}`, }, "") return s } -func (this *SeriesResponse_StreamingSeriesChunks) String() string { +func (this *SeriesResponse_StreamingChunks) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&SeriesResponse_StreamingSeriesChunks{`, - `StreamingSeriesChunks:` + strings.Replace(fmt.Sprintf("%v", this.StreamingSeriesChunks), "StreamSeriesChunksBatch", "StreamSeriesChunksBatch", 1) + `,`, + s := strings.Join([]string{`&SeriesResponse_StreamingChunks{`, + `StreamingChunks:` + strings.Replace(fmt.Sprintf("%v", this.StreamingChunks), "StreamingChunksBatch", "StreamingChunksBatch", 1) + `,`, `}`, }, "") return s @@ -2620,7 +2620,7 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &StreamSeriesBatch{} + v := &StreamingSeriesBatch{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -2628,7 +2628,7 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StreamingSeriesChunks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StreamingChunks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2655,11 +2655,11 @@ func (m *SeriesResponse) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - v := &StreamSeriesChunksBatch{} + v := &StreamingChunksBatch{} if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } - m.Result = &SeriesResponse_StreamingSeriesChunks{v} + m.Result = &SeriesResponse_StreamingChunks{v} iNdEx = postIndex default: iNdEx = preIndex diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index f2239a8a11f..d3d0e61a057 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -80,7 +80,7 @@ message SeriesRequest { // Thanos shard_info. reserved 13; - // Why 100? TBD, it is just copied from ingesters and need to see if we still need 100 here. + // It is 100 so that we have an option to bring back compatibility with Thanos' storage API. uint64 streaming_chunks_batch_size = 100; } @@ -109,9 +109,12 @@ message SeriesResponse { /// related to the processing the series response on store-gateways did available to the querier and query-frontends. Stats stats = 4; - StreamSeriesBatch streaming_series = 5; + /// streaming_series is a list of series labels sent as part of a streaming Series call. + StreamingSeriesBatch streaming_series = 5; - StreamSeriesChunksBatch streaming_series_chunks = 6; + /// streaming_chunks is a batch of list of chunks sent as part of a streaming Series request. + /// They are associated with series labels sent as streaming_series earlier in the same Series request. + StreamingChunksBatch streaming_chunks = 6; } } diff --git a/pkg/storegateway/storepb/types.pb.go b/pkg/storegateway/storepb/types.pb.go index 42c838093c9..542c4cb611d 100644 --- a/pkg/storegateway/storepb/types.pb.go +++ b/pkg/storegateway/storepb/types.pb.go @@ -189,22 +189,22 @@ func (m *StreamingSeries) XXX_DiscardUnknown() { var xxx_messageInfo_StreamingSeries proto.InternalMessageInfo -type StreamSeriesBatch struct { +type StreamingSeriesBatch struct { Series []*StreamingSeries `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` IsEndOfSeriesStream bool `protobuf:"varint,2,opt,name=is_end_of_series_stream,json=isEndOfSeriesStream,proto3" json:"is_end_of_series_stream,omitempty"` } -func (m *StreamSeriesBatch) Reset() { *m = StreamSeriesBatch{} } -func (*StreamSeriesBatch) ProtoMessage() {} -func (*StreamSeriesBatch) Descriptor() ([]byte, []int) { +func (m *StreamingSeriesBatch) Reset() { *m = StreamingSeriesBatch{} } +func (*StreamingSeriesBatch) ProtoMessage() {} +func (*StreamingSeriesBatch) Descriptor() ([]byte, []int) { return fileDescriptor_d938547f84707355, []int{3} } -func (m *StreamSeriesBatch) XXX_Unmarshal(b []byte) error { +func (m *StreamingSeriesBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *StreamSeriesBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *StreamingSeriesBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_StreamSeriesBatch.Marshal(b, m, deterministic) + return xxx_messageInfo_StreamingSeriesBatch.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -214,34 +214,34 @@ func (m *StreamSeriesBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, e return b[:n], nil } } -func (m *StreamSeriesBatch) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamSeriesBatch.Merge(m, src) +func (m *StreamingSeriesBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamingSeriesBatch.Merge(m, src) } -func (m *StreamSeriesBatch) XXX_Size() int { +func (m *StreamingSeriesBatch) XXX_Size() int { return m.Size() } -func (m *StreamSeriesBatch) XXX_DiscardUnknown() { - xxx_messageInfo_StreamSeriesBatch.DiscardUnknown(m) +func (m *StreamingSeriesBatch) XXX_DiscardUnknown() { + xxx_messageInfo_StreamingSeriesBatch.DiscardUnknown(m) } -var xxx_messageInfo_StreamSeriesBatch proto.InternalMessageInfo +var xxx_messageInfo_StreamingSeriesBatch proto.InternalMessageInfo -type StreamSeriesChunks struct { +type StreamingChunks struct { SeriesIndex uint64 `protobuf:"varint,1,opt,name=series_index,json=seriesIndex,proto3" json:"series_index,omitempty"` Chunks []AggrChunk `protobuf:"bytes,2,rep,name=chunks,proto3" json:"chunks"` } -func (m *StreamSeriesChunks) Reset() { *m = StreamSeriesChunks{} } -func (*StreamSeriesChunks) ProtoMessage() {} -func (*StreamSeriesChunks) Descriptor() ([]byte, []int) { +func (m *StreamingChunks) Reset() { *m = StreamingChunks{} } +func (*StreamingChunks) ProtoMessage() {} +func (*StreamingChunks) Descriptor() ([]byte, []int) { return fileDescriptor_d938547f84707355, []int{4} } -func (m *StreamSeriesChunks) XXX_Unmarshal(b []byte) error { +func (m *StreamingChunks) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *StreamSeriesChunks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *StreamingChunks) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_StreamSeriesChunks.Marshal(b, m, deterministic) + return xxx_messageInfo_StreamingChunks.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -251,33 +251,33 @@ func (m *StreamSeriesChunks) XXX_Marshal(b []byte, deterministic bool) ([]byte, return b[:n], nil } } -func (m *StreamSeriesChunks) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamSeriesChunks.Merge(m, src) +func (m *StreamingChunks) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamingChunks.Merge(m, src) } -func (m *StreamSeriesChunks) XXX_Size() int { +func (m *StreamingChunks) XXX_Size() int { return m.Size() } -func (m *StreamSeriesChunks) XXX_DiscardUnknown() { - xxx_messageInfo_StreamSeriesChunks.DiscardUnknown(m) +func (m *StreamingChunks) XXX_DiscardUnknown() { + xxx_messageInfo_StreamingChunks.DiscardUnknown(m) } -var xxx_messageInfo_StreamSeriesChunks proto.InternalMessageInfo +var xxx_messageInfo_StreamingChunks proto.InternalMessageInfo -type StreamSeriesChunksBatch struct { - Series []*StreamSeriesChunks `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` +type StreamingChunksBatch struct { + Series []*StreamingChunks `protobuf:"bytes,1,rep,name=series,proto3" json:"series,omitempty"` } -func (m *StreamSeriesChunksBatch) Reset() { *m = StreamSeriesChunksBatch{} } -func (*StreamSeriesChunksBatch) ProtoMessage() {} -func (*StreamSeriesChunksBatch) Descriptor() ([]byte, []int) { +func (m *StreamingChunksBatch) Reset() { *m = StreamingChunksBatch{} } +func (*StreamingChunksBatch) ProtoMessage() {} +func (*StreamingChunksBatch) Descriptor() ([]byte, []int) { return fileDescriptor_d938547f84707355, []int{5} } -func (m *StreamSeriesChunksBatch) XXX_Unmarshal(b []byte) error { +func (m *StreamingChunksBatch) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *StreamSeriesChunksBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *StreamingChunksBatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_StreamSeriesChunksBatch.Marshal(b, m, deterministic) + return xxx_messageInfo_StreamingChunksBatch.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -287,17 +287,17 @@ func (m *StreamSeriesChunksBatch) XXX_Marshal(b []byte, deterministic bool) ([]b return b[:n], nil } } -func (m *StreamSeriesChunksBatch) XXX_Merge(src proto.Message) { - xxx_messageInfo_StreamSeriesChunksBatch.Merge(m, src) +func (m *StreamingChunksBatch) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamingChunksBatch.Merge(m, src) } -func (m *StreamSeriesChunksBatch) XXX_Size() int { +func (m *StreamingChunksBatch) XXX_Size() int { return m.Size() } -func (m *StreamSeriesChunksBatch) XXX_DiscardUnknown() { - xxx_messageInfo_StreamSeriesChunksBatch.DiscardUnknown(m) +func (m *StreamingChunksBatch) XXX_DiscardUnknown() { + xxx_messageInfo_StreamingChunksBatch.DiscardUnknown(m) } -var xxx_messageInfo_StreamSeriesChunksBatch proto.InternalMessageInfo +var xxx_messageInfo_StreamingChunksBatch proto.InternalMessageInfo type AggrChunk struct { MinTime int64 `protobuf:"varint,1,opt,name=min_time,json=minTime,proto3" json:"min_time,omitempty"` @@ -382,9 +382,9 @@ func init() { proto.RegisterType((*Chunk)(nil), "thanos.Chunk") proto.RegisterType((*Series)(nil), "thanos.Series") proto.RegisterType((*StreamingSeries)(nil), "thanos.StreamingSeries") - proto.RegisterType((*StreamSeriesBatch)(nil), "thanos.StreamSeriesBatch") - proto.RegisterType((*StreamSeriesChunks)(nil), "thanos.StreamSeriesChunks") - proto.RegisterType((*StreamSeriesChunksBatch)(nil), "thanos.StreamSeriesChunksBatch") + proto.RegisterType((*StreamingSeriesBatch)(nil), "thanos.StreamingSeriesBatch") + proto.RegisterType((*StreamingChunks)(nil), "thanos.StreamingChunks") + proto.RegisterType((*StreamingChunksBatch)(nil), "thanos.StreamingChunksBatch") proto.RegisterType((*AggrChunk)(nil), "thanos.AggrChunk") proto.RegisterType((*LabelMatcher)(nil), "thanos.LabelMatcher") } @@ -392,50 +392,50 @@ func init() { func init() { proto.RegisterFile("types.proto", fileDescriptor_d938547f84707355) } var fileDescriptor_d938547f84707355 = []byte{ - // 684 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x4f, 0x6f, 0x12, 0x41, - 0x14, 0xdf, 0x81, 0x05, 0x96, 0xa1, 0xb5, 0xdb, 0x69, 0x63, 0x69, 0x0f, 0x53, 0xdc, 0x13, 0x31, - 0xe9, 0xa2, 0xd8, 0x8b, 0x89, 0x97, 0x62, 0x30, 0x95, 0x58, 0xdb, 0x4e, 0x6b, 0x62, 0x8c, 0x09, - 0x19, 0x60, 0x58, 0x26, 0x65, 0xff, 0x64, 0x77, 0x50, 0xf0, 0xd4, 0x8f, 0xe0, 0x57, 0xf0, 0xe6, - 0x17, 0x31, 0xe9, 0xcd, 0x1e, 0x1b, 0x0f, 0x8d, 0xd0, 0x8b, 0xc7, 0x7e, 0x04, 0xb3, 0x33, 0x8b, - 0x42, 0x7b, 0xa9, 0x17, 0x4f, 0xcc, 0x7b, 0xbf, 0xdf, 0x7b, 0xbf, 0xdf, 0x1b, 0xe6, 0x2d, 0x2c, - 0x88, 0x51, 0xc0, 0x22, 0x3b, 0x08, 0x7d, 0xe1, 0xa3, 0xac, 0xe8, 0x51, 0xcf, 0x8f, 0x36, 0xb6, - 0x1c, 0x2e, 0x7a, 0x83, 0x96, 0xdd, 0xf6, 0xdd, 0x8a, 0xe3, 0x3b, 0x7e, 0x45, 0xc2, 0xad, 0x41, - 0x57, 0x46, 0x32, 0x90, 0x27, 0x55, 0xb6, 0xf1, 0x68, 0x96, 0x1e, 0xd2, 0x2e, 0xf5, 0x68, 0xc5, - 0xe5, 0x2e, 0x0f, 0x2b, 0xc1, 0x89, 0xa3, 0x4e, 0x41, 0x4b, 0xfd, 0xaa, 0x0a, 0xeb, 0x3b, 0x80, - 0x99, 0xe7, 0xbd, 0x81, 0x77, 0x82, 0x1e, 0x42, 0x3d, 0x76, 0x50, 0x04, 0x25, 0x50, 0xbe, 0x57, - 0xbd, 0x6f, 0x2b, 0x07, 0xb6, 0x04, 0xed, 0xba, 0xd7, 0xf6, 0x3b, 0xdc, 0x73, 0x88, 0xe4, 0xa0, - 0x03, 0xa8, 0x77, 0xa8, 0xa0, 0xc5, 0x54, 0x09, 0x94, 0x17, 0x6a, 0xcf, 0xce, 0x2e, 0x37, 0xb5, - 0x1f, 0x97, 0x9b, 0xdb, 0x77, 0x51, 0xb7, 0xdf, 0x78, 0x11, 0xed, 0xb2, 0xda, 0x48, 0xb0, 0xa3, - 0x3e, 0x6f, 0x33, 0x22, 0x3b, 0x59, 0xbb, 0xd0, 0x98, 0x6a, 0xa0, 0x45, 0x98, 0x97, 0xaa, 0xcd, - 0xb7, 0xfb, 0xc4, 0xd4, 0xd0, 0x0a, 0x5c, 0x52, 0xe1, 0x2e, 0x8f, 0x84, 0xef, 0x84, 0xd4, 0x35, - 0x01, 0x2a, 0xc2, 0x55, 0x95, 0x7c, 0xd1, 0xf7, 0xa9, 0xf8, 0x8b, 0xa4, 0xac, 0x2f, 0x00, 0x66, - 0x8f, 0x58, 0xc8, 0x59, 0x84, 0xba, 0x30, 0xdb, 0xa7, 0x2d, 0xd6, 0x8f, 0x8a, 0xa0, 0x94, 0x2e, - 0x17, 0xaa, 0x2b, 0x76, 0xdb, 0x0f, 0x05, 0x1b, 0x06, 0x2d, 0xfb, 0x55, 0x9c, 0x3f, 0xa0, 0x3c, - 0xac, 0x3d, 0x4d, 0xdc, 0x3f, 0xbe, 0x93, 0x7b, 0x59, 0xb7, 0xd3, 0xa1, 0x81, 0x60, 0x21, 0x49, - 0xba, 0xa3, 0x0a, 0xcc, 0xb6, 0x63, 0x33, 0x51, 0x31, 0x25, 0x75, 0x96, 0xa7, 0x97, 0xb7, 0xe3, - 0x38, 0xa1, 0xb4, 0x59, 0xd3, 0x63, 0x15, 0x92, 0xd0, 0xac, 0x11, 0x5c, 0x3a, 0x12, 0x21, 0xa3, - 0x2e, 0xf7, 0x9c, 0xff, 0xeb, 0xd5, 0xfa, 0x04, 0x97, 0x95, 0xb4, 0xd2, 0xad, 0x51, 0xd1, 0xee, - 0xc5, 0x03, 0x44, 0x32, 0x4c, 0xc4, 0xd7, 0xa6, 0x03, 0xdc, 0x70, 0x49, 0x12, 0x1a, 0xda, 0x86, - 0x6b, 0x3c, 0x6a, 0x32, 0xaf, 0xd3, 0xf4, 0xbb, 0x4d, 0x95, 0x6b, 0x46, 0x92, 0x2b, 0xdf, 0x84, - 0x41, 0x56, 0x78, 0x54, 0xf7, 0x3a, 0xfb, 0x5d, 0x55, 0xa7, 0xda, 0x58, 0x3d, 0x88, 0x66, 0xb5, - 0xe5, 0xcd, 0x44, 0xe8, 0x01, 0x5c, 0x48, 0x3a, 0x70, 0xaf, 0xc3, 0x86, 0xf2, 0x01, 0xea, 0xa4, - 0xa0, 0x72, 0x2f, 0xe3, 0xd4, 0xbf, 0x5f, 0xf0, 0x1e, 0x5c, 0xbb, 0xad, 0xa4, 0x66, 0xad, 0xde, - 0x98, 0x75, 0x63, 0x7e, 0xd6, 0xd9, 0x82, 0xe9, 0xb8, 0xd6, 0x29, 0x80, 0xf9, 0x3f, 0x52, 0x68, - 0x1d, 0x1a, 0x2e, 0xf7, 0x9a, 0x82, 0xbb, 0x6a, 0x5b, 0xd2, 0x24, 0xe7, 0x72, 0xef, 0x98, 0xbb, - 0x4c, 0x42, 0x74, 0xa8, 0xa0, 0x54, 0x02, 0xd1, 0xa1, 0x84, 0x36, 0x61, 0x3a, 0xa4, 0x1f, 0x8b, - 0xe9, 0x12, 0x28, 0x17, 0xaa, 0x8b, 0x73, 0xeb, 0x45, 0x62, 0xa4, 0xa1, 0x1b, 0xba, 0x99, 0x69, - 0xe8, 0x46, 0xc6, 0xcc, 0x36, 0x74, 0x23, 0x6b, 0xe6, 0x1a, 0xba, 0x91, 0x33, 0x8d, 0x86, 0x6e, - 0x18, 0x66, 0xde, 0xfa, 0x06, 0xe0, 0x82, 0xfc, 0x43, 0xf7, 0xe2, 0x29, 0x58, 0x88, 0xb6, 0xe6, - 0xf6, 0x75, 0x7d, 0xda, 0x70, 0x96, 0x63, 0x1f, 0x8f, 0x02, 0x96, 0xac, 0x2c, 0x82, 0xba, 0x47, - 0x13, 0x57, 0x79, 0x22, 0xcf, 0x68, 0x15, 0x66, 0x3e, 0xd0, 0xfe, 0x80, 0x49, 0x53, 0x79, 0xa2, - 0x02, 0xeb, 0x3d, 0xd4, 0xe3, 0xba, 0x78, 0xef, 0x66, 0x9b, 0x35, 0xeb, 0x87, 0xa6, 0x86, 0x56, - 0xa1, 0x39, 0x97, 0x7c, 0x5d, 0x3f, 0x34, 0xc1, 0x2d, 0x2a, 0xa9, 0x9b, 0xa9, 0xdb, 0x54, 0x52, - 0x37, 0xd3, 0xb5, 0x9d, 0xb3, 0x31, 0xd6, 0xce, 0xc7, 0x58, 0xbb, 0x18, 0x63, 0xed, 0x7a, 0x8c, - 0xc1, 0xe9, 0x04, 0x83, 0xaf, 0x13, 0x0c, 0xce, 0x26, 0x18, 0x9c, 0x4f, 0x30, 0xf8, 0x39, 0xc1, - 0xe0, 0xd7, 0x04, 0x6b, 0xd7, 0x13, 0x0c, 0x3e, 0x5f, 0x61, 0xed, 0xfc, 0x0a, 0x6b, 0x17, 0x57, - 0x58, 0x7b, 0x97, 0x8b, 0x84, 0x1f, 0xb2, 0xa0, 0xd5, 0xca, 0xca, 0x4f, 0xd7, 0x93, 0xdf, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xf2, 0x10, 0x58, 0x47, 0x32, 0x05, 0x00, 0x00, + // 677 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0xbd, 0x6e, 0x13, 0x4d, + 0x14, 0xdd, 0xb1, 0xd7, 0xf6, 0x7a, 0x9c, 0x7c, 0xd9, 0x6f, 0x62, 0x11, 0x27, 0xc5, 0xc4, 0x6c, + 0x65, 0x21, 0x65, 0x0d, 0x21, 0x0d, 0x12, 0x4d, 0x8c, 0x0c, 0xc1, 0x02, 0x92, 0x4c, 0x82, 0x84, + 0x10, 0x92, 0x35, 0xb6, 0xc7, 0xeb, 0x51, 0xbc, 0x3f, 0xda, 0x1d, 0x83, 0x5d, 0x20, 0xe5, 0x11, + 0x78, 0x05, 0x3a, 0x5e, 0x04, 0x29, 0x1d, 0x29, 0x23, 0x8a, 0x08, 0x6f, 0x1a, 0xca, 0x3c, 0x02, + 0xda, 0x99, 0x35, 0x38, 0x49, 0x13, 0x1a, 0x2a, 0xcf, 0xbd, 0xe7, 0xdc, 0x7b, 0xce, 0x1d, 0xcf, + 0x5d, 0x58, 0x12, 0x93, 0x80, 0x45, 0x76, 0x10, 0xfa, 0xc2, 0x47, 0x79, 0x31, 0xa0, 0x9e, 0x1f, + 0xad, 0x6d, 0x38, 0x5c, 0x0c, 0x46, 0x1d, 0xbb, 0xeb, 0xbb, 0x75, 0xc7, 0x77, 0xfc, 0xba, 0x84, + 0x3b, 0xa3, 0xbe, 0x8c, 0x64, 0x20, 0x4f, 0xaa, 0x6c, 0xed, 0xfe, 0x3c, 0x3d, 0xa4, 0x7d, 0xea, + 0xd1, 0xba, 0xcb, 0x5d, 0x1e, 0xd6, 0x83, 0x23, 0x47, 0x9d, 0x82, 0x8e, 0xfa, 0x55, 0x15, 0xd6, + 0x37, 0x00, 0x73, 0x4f, 0x06, 0x23, 0xef, 0x08, 0xdd, 0x83, 0x7a, 0xe2, 0xa0, 0x02, 0xaa, 0xa0, + 0xf6, 0xdf, 0xe6, 0x1d, 0x5b, 0x39, 0xb0, 0x25, 0x68, 0x37, 0xbd, 0xae, 0xdf, 0xe3, 0x9e, 0x43, + 0x24, 0x07, 0xed, 0x41, 0xbd, 0x47, 0x05, 0xad, 0x64, 0xaa, 0xa0, 0xb6, 0xd0, 0x78, 0x7c, 0x72, + 0xbe, 0xae, 0x7d, 0x3f, 0x5f, 0xdf, 0xba, 0x8d, 0xba, 0xfd, 0xda, 0x8b, 0x68, 0x9f, 0x35, 0x26, + 0x82, 0x1d, 0x0c, 0x79, 0x97, 0x11, 0xd9, 0xc9, 0xda, 0x81, 0xc6, 0x4c, 0x03, 0x2d, 0xc2, 0xa2, + 0x54, 0x6d, 0xbf, 0xd9, 0x25, 0xa6, 0x86, 0x96, 0xe1, 0x92, 0x0a, 0x77, 0x78, 0x24, 0x7c, 0x27, + 0xa4, 0xae, 0x09, 0x50, 0x05, 0x96, 0x55, 0xf2, 0xe9, 0xd0, 0xa7, 0xe2, 0x0f, 0x92, 0xb1, 0x3e, + 0x03, 0x98, 0x3f, 0x60, 0x21, 0x67, 0x11, 0xea, 0xc3, 0xfc, 0x90, 0x76, 0xd8, 0x30, 0xaa, 0x80, + 0x6a, 0xb6, 0x56, 0xda, 0x5c, 0xb6, 0xbb, 0x7e, 0x28, 0xd8, 0x38, 0xe8, 0xd8, 0x2f, 0x92, 0xfc, + 0x1e, 0xe5, 0x61, 0xe3, 0x51, 0xea, 0xfe, 0xc1, 0xad, 0xdc, 0xcb, 0xba, 0xed, 0x1e, 0x0d, 0x04, + 0x0b, 0x49, 0xda, 0x1d, 0xd5, 0x61, 0xbe, 0x9b, 0x98, 0x89, 0x2a, 0x19, 0xa9, 0xf3, 0xff, 0xec, + 0xf2, 0xb6, 0x1d, 0x27, 0x94, 0x36, 0x1b, 0x7a, 0xa2, 0x42, 0x52, 0x9a, 0x35, 0x81, 0x4b, 0x07, + 0x22, 0x64, 0xd4, 0xe5, 0x9e, 0xf3, 0x6f, 0xbd, 0x5a, 0x1f, 0x61, 0xf9, 0x9a, 0x74, 0x83, 0x8a, + 0xee, 0x20, 0x99, 0x21, 0x92, 0x61, 0xaa, 0xbf, 0x32, 0x9b, 0xe1, 0x1a, 0x9b, 0xa4, 0x34, 0xb4, + 0x05, 0x57, 0x78, 0xd4, 0x66, 0x5e, 0xaf, 0xed, 0xf7, 0xdb, 0x2a, 0xd7, 0x8e, 0x24, 0x57, 0x3e, + 0x0b, 0x83, 0x2c, 0xf3, 0xa8, 0xe9, 0xf5, 0x76, 0xfb, 0xaa, 0x4e, 0xb5, 0xb1, 0xd8, 0xdc, 0xe4, + 0xf2, 0x66, 0x22, 0x74, 0x17, 0x2e, 0xa4, 0xe5, 0xdc, 0xeb, 0xb1, 0xb1, 0x7c, 0x80, 0x3a, 0x29, + 0xa9, 0xdc, 0xf3, 0x24, 0xf5, 0xf7, 0x17, 0xfc, 0x6c, 0x6e, 0x4a, 0x25, 0x73, 0xdb, 0x29, 0x15, + 0x7b, 0x36, 0xa5, 0x75, 0x0c, 0x60, 0xf1, 0xb7, 0x08, 0x5a, 0x85, 0x86, 0xcb, 0xbd, 0xb6, 0xe0, + 0xae, 0xda, 0x93, 0x2c, 0x29, 0xb8, 0xdc, 0x3b, 0xe4, 0x2e, 0x93, 0x10, 0x1d, 0x2b, 0x28, 0x93, + 0x42, 0x74, 0x2c, 0xa1, 0x75, 0x98, 0x0d, 0xe9, 0x87, 0x4a, 0xb6, 0x0a, 0x6a, 0xa5, 0xcd, 0xc5, + 0x2b, 0x8b, 0x45, 0x12, 0xa4, 0xa5, 0x1b, 0xba, 0x99, 0x6b, 0xe9, 0x46, 0xce, 0xcc, 0xb7, 0x74, + 0x23, 0x6f, 0x16, 0x5a, 0xba, 0x51, 0x30, 0x8d, 0x96, 0x6e, 0x18, 0x66, 0xd1, 0xfa, 0x0a, 0xe0, + 0x82, 0xfc, 0x2b, 0x5f, 0x26, 0x23, 0xb0, 0x10, 0x6d, 0x5c, 0xd9, 0xd4, 0xd5, 0x59, 0xc3, 0x79, + 0x8e, 0x7d, 0x38, 0x09, 0x58, 0xba, 0xac, 0x08, 0xea, 0x1e, 0x4d, 0x5d, 0x15, 0x89, 0x3c, 0xa3, + 0x32, 0xcc, 0xbd, 0xa7, 0xc3, 0x11, 0x93, 0xa6, 0x8a, 0x44, 0x05, 0xd6, 0x3b, 0xa8, 0x27, 0x75, + 0xc9, 0xc6, 0xcd, 0x37, 0x6b, 0x37, 0xf7, 0x4d, 0x0d, 0x95, 0xa1, 0x79, 0x25, 0xf9, 0xaa, 0xb9, + 0x6f, 0x82, 0x1b, 0x54, 0xd2, 0x34, 0x33, 0x37, 0xa9, 0xa4, 0x69, 0x66, 0x1b, 0xdb, 0x27, 0x53, + 0xac, 0x9d, 0x4e, 0xb1, 0x76, 0x36, 0xc5, 0xda, 0xe5, 0x14, 0x83, 0xe3, 0x18, 0x83, 0x2f, 0x31, + 0x06, 0x27, 0x31, 0x06, 0xa7, 0x31, 0x06, 0x3f, 0x62, 0x0c, 0x7e, 0xc6, 0x58, 0xbb, 0x8c, 0x31, + 0xf8, 0x74, 0x81, 0xb5, 0xd3, 0x0b, 0xac, 0x9d, 0x5d, 0x60, 0xed, 0x6d, 0x21, 0x12, 0x7e, 0xc8, + 0x82, 0x4e, 0x27, 0x2f, 0x3f, 0x5a, 0x0f, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0x81, 0xc9, 0x56, + 0x35, 0x2c, 0x05, 0x00, 0x00, } func (x Chunk_Encoding) String() string { @@ -545,14 +545,14 @@ func (this *StreamingSeries) Equal(that interface{}) bool { } return true } -func (this *StreamSeriesBatch) Equal(that interface{}) bool { +func (this *StreamingSeriesBatch) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*StreamSeriesBatch) + that1, ok := that.(*StreamingSeriesBatch) if !ok { - that2, ok := that.(StreamSeriesBatch) + that2, ok := that.(StreamingSeriesBatch) if ok { that1 = &that2 } else { @@ -577,14 +577,14 @@ func (this *StreamSeriesBatch) Equal(that interface{}) bool { } return true } -func (this *StreamSeriesChunks) Equal(that interface{}) bool { +func (this *StreamingChunks) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*StreamSeriesChunks) + that1, ok := that.(*StreamingChunks) if !ok { - that2, ok := that.(StreamSeriesChunks) + that2, ok := that.(StreamingChunks) if ok { that1 = &that2 } else { @@ -609,14 +609,14 @@ func (this *StreamSeriesChunks) Equal(that interface{}) bool { } return true } -func (this *StreamSeriesChunksBatch) Equal(that interface{}) bool { +func (this *StreamingChunksBatch) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*StreamSeriesChunksBatch) + that1, ok := that.(*StreamingChunksBatch) if !ok { - that2, ok := that.(StreamSeriesChunksBatch) + that2, ok := that.(StreamingChunksBatch) if ok { that1 = &that2 } else { @@ -736,12 +736,12 @@ func (this *StreamingSeries) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *StreamSeriesBatch) GoString() string { +func (this *StreamingSeriesBatch) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 6) - s = append(s, "&storepb.StreamSeriesBatch{") + s = append(s, "&storepb.StreamingSeriesBatch{") if this.Series != nil { s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") } @@ -749,12 +749,12 @@ func (this *StreamSeriesBatch) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *StreamSeriesChunks) GoString() string { +func (this *StreamingChunks) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 6) - s = append(s, "&storepb.StreamSeriesChunks{") + s = append(s, "&storepb.StreamingChunks{") s = append(s, "SeriesIndex: "+fmt.Sprintf("%#v", this.SeriesIndex)+",\n") if this.Chunks != nil { vs := make([]*AggrChunk, len(this.Chunks)) @@ -766,12 +766,12 @@ func (this *StreamSeriesChunks) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *StreamSeriesChunksBatch) GoString() string { +func (this *StreamingChunksBatch) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 5) - s = append(s, "&storepb.StreamSeriesChunksBatch{") + s = append(s, "&storepb.StreamingChunksBatch{") if this.Series != nil { s = append(s, "Series: "+fmt.Sprintf("%#v", this.Series)+",\n") } @@ -938,7 +938,7 @@ func (m *StreamingSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *StreamSeriesBatch) Marshal() (dAtA []byte, err error) { +func (m *StreamingSeriesBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -948,12 +948,12 @@ func (m *StreamSeriesBatch) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StreamSeriesBatch) MarshalTo(dAtA []byte) (int, error) { +func (m *StreamingSeriesBatch) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StreamSeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StreamingSeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -985,7 +985,7 @@ func (m *StreamSeriesBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *StreamSeriesChunks) Marshal() (dAtA []byte, err error) { +func (m *StreamingChunks) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -995,12 +995,12 @@ func (m *StreamSeriesChunks) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StreamSeriesChunks) MarshalTo(dAtA []byte) (int, error) { +func (m *StreamingChunks) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StreamSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StreamingChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1027,7 +1027,7 @@ func (m *StreamSeriesChunks) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *StreamSeriesChunksBatch) Marshal() (dAtA []byte, err error) { +func (m *StreamingChunksBatch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -1037,12 +1037,12 @@ func (m *StreamSeriesChunksBatch) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *StreamSeriesChunksBatch) MarshalTo(dAtA []byte) (int, error) { +func (m *StreamingChunksBatch) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *StreamSeriesChunksBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *StreamingChunksBatch) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -1212,7 +1212,7 @@ func (m *StreamingSeries) Size() (n int) { return n } -func (m *StreamSeriesBatch) Size() (n int) { +func (m *StreamingSeriesBatch) Size() (n int) { if m == nil { return 0 } @@ -1230,7 +1230,7 @@ func (m *StreamSeriesBatch) Size() (n int) { return n } -func (m *StreamSeriesChunks) Size() (n int) { +func (m *StreamingChunks) Size() (n int) { if m == nil { return 0 } @@ -1248,7 +1248,7 @@ func (m *StreamSeriesChunks) Size() (n int) { return n } -func (m *StreamSeriesChunksBatch) Size() (n int) { +func (m *StreamingChunksBatch) Size() (n int) { if m == nil { return 0 } @@ -1345,7 +1345,7 @@ func (this *StreamingSeries) String() string { }, "") return s } -func (this *StreamSeriesBatch) String() string { +func (this *StreamingSeriesBatch) String() string { if this == nil { return "nil" } @@ -1354,14 +1354,14 @@ func (this *StreamSeriesBatch) String() string { repeatedStringForSeries += strings.Replace(f.String(), "StreamingSeries", "StreamingSeries", 1) + "," } repeatedStringForSeries += "}" - s := strings.Join([]string{`&StreamSeriesBatch{`, + s := strings.Join([]string{`&StreamingSeriesBatch{`, `Series:` + repeatedStringForSeries + `,`, `IsEndOfSeriesStream:` + fmt.Sprintf("%v", this.IsEndOfSeriesStream) + `,`, `}`, }, "") return s } -func (this *StreamSeriesChunks) String() string { +func (this *StreamingChunks) String() string { if this == nil { return "nil" } @@ -1370,23 +1370,23 @@ func (this *StreamSeriesChunks) String() string { repeatedStringForChunks += strings.Replace(strings.Replace(f.String(), "AggrChunk", "AggrChunk", 1), `&`, ``, 1) + "," } repeatedStringForChunks += "}" - s := strings.Join([]string{`&StreamSeriesChunks{`, + s := strings.Join([]string{`&StreamingChunks{`, `SeriesIndex:` + fmt.Sprintf("%v", this.SeriesIndex) + `,`, `Chunks:` + repeatedStringForChunks + `,`, `}`, }, "") return s } -func (this *StreamSeriesChunksBatch) String() string { +func (this *StreamingChunksBatch) String() string { if this == nil { return "nil" } - repeatedStringForSeries := "[]*StreamSeriesChunks{" + repeatedStringForSeries := "[]*StreamingChunks{" for _, f := range this.Series { - repeatedStringForSeries += strings.Replace(f.String(), "StreamSeriesChunks", "StreamSeriesChunks", 1) + "," + repeatedStringForSeries += strings.Replace(f.String(), "StreamingChunks", "StreamingChunks", 1) + "," } repeatedStringForSeries += "}" - s := strings.Join([]string{`&StreamSeriesChunksBatch{`, + s := strings.Join([]string{`&StreamingChunksBatch{`, `Series:` + repeatedStringForSeries + `,`, `}`, }, "") @@ -1737,7 +1737,7 @@ func (m *StreamingSeries) Unmarshal(dAtA []byte) error { } return nil } -func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { +func (m *StreamingSeriesBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1760,10 +1760,10 @@ func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StreamSeriesBatch: wiretype end group for non-group") + return fmt.Errorf("proto: StreamingSeriesBatch: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StreamSeriesBatch: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StreamingSeriesBatch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1844,7 +1844,7 @@ func (m *StreamSeriesBatch) Unmarshal(dAtA []byte) error { } return nil } -func (m *StreamSeriesChunks) Unmarshal(dAtA []byte) error { +func (m *StreamingChunks) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1867,10 +1867,10 @@ func (m *StreamSeriesChunks) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StreamSeriesChunks: wiretype end group for non-group") + return fmt.Errorf("proto: StreamingChunks: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StreamSeriesChunks: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StreamingChunks: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -1950,7 +1950,7 @@ func (m *StreamSeriesChunks) Unmarshal(dAtA []byte) error { } return nil } -func (m *StreamSeriesChunksBatch) Unmarshal(dAtA []byte) error { +func (m *StreamingChunksBatch) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1973,10 +1973,10 @@ func (m *StreamSeriesChunksBatch) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: StreamSeriesChunksBatch: wiretype end group for non-group") + return fmt.Errorf("proto: StreamingChunksBatch: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: StreamSeriesChunksBatch: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StreamingChunksBatch: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -2008,7 +2008,7 @@ func (m *StreamSeriesChunksBatch) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Series = append(m.Series, &StreamSeriesChunks{}) + m.Series = append(m.Series, &StreamingChunks{}) if err := m.Series[len(m.Series)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } diff --git a/pkg/storegateway/storepb/types.proto b/pkg/storegateway/storepb/types.proto index cd789bebbfe..8d299b675da 100644 --- a/pkg/storegateway/storepb/types.proto +++ b/pkg/storegateway/storepb/types.proto @@ -38,21 +38,21 @@ message Series { } message StreamingSeries { - repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/mimir/pkg/mimirpb.LabelAdapter"]; + repeated cortexpb.LabelPair labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/grafana/mimir/pkg/mimirpb.LabelAdapter"]; } -message StreamSeriesBatch { +message StreamingSeriesBatch { repeated StreamingSeries series = 1; bool is_end_of_series_stream = 2; } -message StreamSeriesChunks { +message StreamingChunks { uint64 series_index = 1; // Index into list of all series previously sent with SeriesResponse messages by this storegateway during this query response. repeated AggrChunk chunks = 2 [(gogoproto.nullable) = false]; } -message StreamSeriesChunksBatch { - repeated StreamSeriesChunks series = 1; +message StreamingChunksBatch { + repeated StreamingChunks series = 1; } From 0c3f75bd6be28036ce84281cd5ecee795214dcaf Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 19 Jun 2023 00:38:26 +0530 Subject: [PATCH 38/75] Add seriesIteratorStrategy Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 34 ++++---- pkg/storegateway/bucket_chunk_reader_test.go | 2 +- pkg/storegateway/bucket_index_reader.go | 4 +- pkg/storegateway/bucket_test.go | 2 +- pkg/storegateway/series_refs.go | 83 +++++++++++++------- pkg/storegateway/series_refs_test.go | 26 +++--- 6 files changed, 97 insertions(+), 54 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 85b796f3602..e13c5eeb28b 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -958,6 +958,13 @@ func (s *BucketStore) streamingSeriesSetForBlocks( g, _ = errgroup.WithContext(ctx) begin = time.Now() ) + var strategy seriesIteratorStrategy + if req.SkipChunks { + strategy |= noChunks + } + if req.StreamingChunksBatchSize > 0 { + strategy |= overlapMintMaxt + } for i, b := range blocks { b := b i := i @@ -988,8 +995,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( matchers, shardSelector, cachedSeriesHasher{blockSeriesHashCache}, - req.SkipChunks, - req.StreamingChunksBatchSize > 0, + strategy, req.MinTime, req.MaxTime, s.numChunksRangesPerSeries, stats, @@ -1291,7 +1297,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] matchers, nil, cachedSeriesHasher{nil}, - true, false, + noChunks, minTime, maxTime, 1, // we skip chunks, so this doesn't make any difference stats, @@ -1511,7 +1517,7 @@ func labelValuesFromSeries(ctx context.Context, labelName string, seriesPerBatch b.meta, nil, nil, - true, false, + noChunks, b.meta.MinTime, b.meta.MaxTime, b.userID, @@ -1868,7 +1874,7 @@ type symbolizedLabel struct { // decodeSeries decodes a series entry from the given byte slice decoding all chunk metas of the series. // If skipChunks is specified decodeSeries does not return any chunks, but only labels and only if at least single chunk is within time range. // decodeSeries returns false, when there are no series data for given time range. -func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]chunks.Meta, resMint, resMaxt int64, skipChunks, streamingSeries bool) (ok bool, lset []symbolizedLabel, err error) { +func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]chunks.Meta, resMint, resMaxt int64, strategy seriesIteratorStrategy) (ok bool, lset []symbolizedLabel, err error) { *chks = (*chks)[:0] @@ -1894,6 +1900,8 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch // Similar for first ref. ref := int64(d.Uvarint64()) + isNoChunks := strategy.isNoChunks() + isNoChunkOverlapMintMaxt := strategy.isNoChunks() && strategy.isOverlapMintMaxt() for i := 0; i < k; i++ { if i > 0 { mint += int64(d.Uvarint64()) @@ -1902,17 +1910,15 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch } // Found a chunk. - if skipChunks { - if streamingSeries { - // We are not interested in chunks, but we want the series to overlap with the query mint-maxt. - if maxt >= resMint && mint <= resMaxt { - // Chunk overlaps. - return true, lset, nil - } - } else { - // We are not interested in chunks and we know there is at least one, that's enough to return series. + if isNoChunkOverlapMintMaxt { + // We are not interested in chunks, but we want the series to overlap with the query mint-maxt. + if maxt >= resMint && mint <= resMaxt { + // Chunk overlaps. return true, lset, nil } + } else if isNoChunks { + // We are not interested in chunks and we know there is at least one, that's enough to return series. + return true, lset, nil } else { *chks = append(*chks, chunks.Meta{ Ref: chunks.ChunkRef(ref), diff --git a/pkg/storegateway/bucket_chunk_reader_test.go b/pkg/storegateway/bucket_chunk_reader_test.go index 023d1f8a6d0..7b04b7bb339 100644 --- a/pkg/storegateway/bucket_chunk_reader_test.go +++ b/pkg/storegateway/bucket_chunk_reader_test.go @@ -38,7 +38,7 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { []*labels.Matcher{labels.MustNewMatcher(labels.MatchEqual, "j", "foo")}, nil, nil, - false, false, + defaultStrategy, block.meta.MinTime, block.meta.MaxTime, 2, diff --git a/pkg/storegateway/bucket_index_reader.go b/pkg/storegateway/bucket_index_reader.go index 970f6c463d4..b40657e4131 100644 --- a/pkg/storegateway/bucket_index_reader.go +++ b/pkg/storegateway/bucket_index_reader.go @@ -755,12 +755,12 @@ func (l *bucketIndexLoadedSeries) addSeries(ref storage.SeriesRef, data []byte) // Error is returned on decoding error or if the reference does not resolve to a known series. // // It's NOT safe to call this function concurrently with addSeries(). -func (l *bucketIndexLoadedSeries) unsafeLoadSeries(ref storage.SeriesRef, chks *[]chunks.Meta, mint, maxt int64, skipChunks, streamingSeries bool, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) (ok bool, _ []symbolizedLabel, err error) { +func (l *bucketIndexLoadedSeries) unsafeLoadSeries(ref storage.SeriesRef, chks *[]chunks.Meta, mint, maxt int64, strategy seriesIteratorStrategy, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) (ok bool, _ []symbolizedLabel, err error) { b, ok := l.series[ref] if !ok { return false, nil, errors.Errorf("series %d not found", ref) } stats.seriesProcessed++ stats.seriesProcessedSizeSum += len(b) - return decodeSeries(b, lsetPool, chks, mint, maxt, skipChunks, streamingSeries) + return decodeSeries(b, lsetPool, chks, mint, maxt, strategy) } diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index d20100af3ef..1acc2f694c7 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1164,7 +1164,7 @@ func loadSeries(ctx context.Context, tb test.TB, postings []storage.SeriesRef, i indexr.block.meta, nil, nil, - true, false, + noChunks, 0, 0, "", diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index d3cf44cb098..8f8dfe8e9cd 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -712,19 +712,19 @@ func (l *limitingSeriesChunkRefsSetIterator) Err() error { } type loadingSeriesChunkRefsSetIterator struct { - ctx context.Context - postingsSetIterator *postingsSetsIterator - indexr *bucketIndexReader - indexCache indexcache.IndexCache - stats *safeQueryStats - blockID ulid.ULID - shard *sharding.ShardSelector - seriesHasher seriesHasher - skipChunks, streamingSeries bool - minTime, maxTime int64 - tenantID string - chunkRangesPerSeries int - logger log.Logger + ctx context.Context + postingsSetIterator *postingsSetsIterator + indexr *bucketIndexReader + indexCache indexcache.IndexCache + stats *safeQueryStats + blockID ulid.ULID + shard *sharding.ShardSelector + seriesHasher seriesHasher + strategy seriesIteratorStrategy + minTime, maxTime int64 + tenantID string + chunkRangesPerSeries int + logger log.Logger chunkMetasBuffer []chunks.Meta @@ -742,8 +742,7 @@ func openBlockSeriesChunkRefsSetsIterator( matchers []*labels.Matcher, // Series matchers. shard *sharding.ShardSelector, // Shard selector. seriesHasher seriesHasher, - skipChunks bool, // If true chunks are not loaded and minTime/maxTime are ignored. - streamingSeries bool, // If true, along with skipChunks=true, the series returned overlap with query mint and maxt. + strategy seriesIteratorStrategy, minTime, maxTime int64, // Series must have data in this time range to be returned (ignored if skipChunks=true). chunkRangesPerSeries int, stats *safeQueryStats, @@ -774,8 +773,7 @@ func openBlockSeriesChunkRefsSetsIterator( blockMeta, shard, seriesHasher, - skipChunks, - streamingSeries, + strategy, minTime, maxTime, tenantID, @@ -798,6 +796,39 @@ func seriesStreamingFetchRefsDurationIterator(iterator seriesChunkRefsSetIterato }) } +// seriesIteratorStrategy defines the strategy to use when loading the series and their chunk refs. +// See below for available options. +type seriesIteratorStrategy byte + +const ( + // By default, the strategy is to fetch series labels AND chunk refs + // for time ranges overlapping mint and maxt provided. + // To change the default behavior, use the flags below this. + defaultStrategy seriesIteratorStrategy = 0 + + // noChunks when used by itself fetches only series labels for series in the entire block. + noChunks seriesIteratorStrategy = 0b00000001 + // overlapMintMaxt is used to be used together with noChunks. With this, only the series whose + // chunks overlap with mint->maxt are selected. + overlapMintMaxt seriesIteratorStrategy = 0b00000010 +) + +func (s seriesIteratorStrategy) isNoChunks() bool { + return s&noChunks != 0 +} + +func (s seriesIteratorStrategy) isOverlapMintMaxt() bool { + return s&overlapMintMaxt != 0 +} + +func (s seriesIteratorStrategy) isNoChunksEntireBlock() bool { + return s.isNoChunks() && !s.isOverlapMintMaxt() +} + +func (s seriesIteratorStrategy) isNoChunksOverlapMintMaxt() bool { + return s.isNoChunks() && s.isOverlapMintMaxt() +} + func newLoadingSeriesChunkRefsSetIterator( ctx context.Context, postingsSetIterator *postingsSetsIterator, @@ -807,15 +838,14 @@ func newLoadingSeriesChunkRefsSetIterator( blockMeta *block.Meta, shard *sharding.ShardSelector, seriesHasher seriesHasher, - skipChunks bool, - streamingSeries bool, + strategy seriesIteratorStrategy, minTime int64, maxTime int64, tenantID string, chunkRangesPerSeries int, logger log.Logger, ) *loadingSeriesChunkRefsSetIterator { - if skipChunks && !streamingSeries { + if strategy.isNoChunksEntireBlock() { minTime, maxTime = blockMeta.MinTime, blockMeta.MaxTime } @@ -828,8 +858,7 @@ func newLoadingSeriesChunkRefsSetIterator( blockID: blockMeta.ULID, shard: shard, seriesHasher: seriesHasher, - skipChunks: skipChunks, - streamingSeries: streamingSeries, + strategy: strategy, minTime: minTime, maxTime: maxTime, tenantID: tenantID, @@ -851,7 +880,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Next() bool { nextPostings := s.postingsSetIterator.At() var cachedSeriesID cachedSeriesForPostingsID - if s.skipChunks && !s.streamingSeries { + if s.strategy.isNoChunks() { var err error // Calculate the cache ID before we filter out anything from the postings, // so that the key doesn't depend on the series hash cache or any other filtering we do on the postings list. @@ -901,7 +930,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Next() bool { } s.currentSet = nextSet - if s.skipChunks && cachedSeriesID.isSet() { + if s.strategy.isNoChunks() && cachedSeriesID.isSet() { storeCachedSeriesForPostings(ctx, s.indexCache, s.tenantID, s.blockID, s.shard, cachedSeriesID, nextSet, s.logger) } return true @@ -929,7 +958,7 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p if err != nil { return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "read series") } - if !s.skipChunks { + if !s.strategy.isNoChunks() { clampLastChunkLength(symbolizedSet.series, metas) series.chunksRanges = metasToRanges(partitionChunks(metas, s.chunkRangesPerSeries, minChunksPerRange), s.blockID, s.minTime, s.maxTime) } @@ -987,7 +1016,7 @@ func (s *loadingSeriesChunkRefsSetIterator) filterSeries(set seriesChunkRefsSet, for sIdx, series := range set.series { // An empty label set means the series had no chunks in this block, so we skip it. // No chunk ranges means the series doesn't have a single chunk range in the requested range. - if len(series.lset) == 0 || (!s.skipChunks && len(series.chunksRanges) == 0) { + if len(series.lset) == 0 || (!s.strategy.isNoChunks() && len(series.chunksRanges) == 0) { continue } if !shardOwned(s.shard, s.seriesHasher, postings[sIdx], series.lset, stats) { @@ -1122,7 +1151,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Err() error { // loadSeries returns a for chunks. It is not safe to use the returned []chunks.Meta after calling loadSeries again func (s *loadingSeriesChunkRefsSetIterator) loadSeries(ref storage.SeriesRef, loadedSeries *bucketIndexLoadedSeries, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) ([]symbolizedLabel, []chunks.Meta, error) { - ok, lbls, err := loadedSeries.unsafeLoadSeries(ref, &s.chunkMetasBuffer, s.minTime, s.maxTime, s.skipChunks, s.streamingSeries, stats, lsetPool) + ok, lbls, err := loadedSeries.unsafeLoadSeries(ref, &s.chunkMetasBuffer, s.minTime, s.maxTime, s.strategy, stats, lsetPool) if !ok || err != nil { return nil, nil, errors.Wrap(err, "loadSeries") } diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index d77047b224d..3cded6ef2ca 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1357,6 +1357,13 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { if hasher == nil { hasher = cachedSeriesHasher{hashcache.NewSeriesHashCache(100).GetBlockCache("")} } + var strategy seriesIteratorStrategy + if tc.skipChunks { + strategy |= noChunks + } + if tc.streamingSeries { + strategy |= overlapMintMaxt + } loadingIterator := newLoadingSeriesChunkRefsSetIterator( context.Background(), postingsIterator, @@ -1366,8 +1373,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { block.meta, tc.shard, hasher, - tc.skipChunks, - tc.streamingSeries, + strategy, tc.minT, tc.maxT, "t1", @@ -1709,6 +1715,10 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { maxT = testCase.maxT } + var strategy seriesIteratorStrategy + if testCase.skipChunks { + strategy |= noChunks + } iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, testCase.batchSize, @@ -1719,7 +1729,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { []*labels.Matcher{testCase.matcher}, nil, cachedSeriesHasher{hashCache}, - testCase.skipChunks, false, + strategy, minT, maxT, 2, @@ -1821,8 +1831,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { testCase.matchers, nil, cachedSeriesHasher{hashCache}, - true, // skip chunks since we are testing labels filtering - false, + noChunks, // skip chunks since we are testing labels filtering block.meta.MinTime, block.meta.MaxTime, 2, @@ -1887,8 +1896,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { testCase.matchers, nil, cachedSeriesHasher{hashCache}, - false, // we don't skip chunks, so we can measure impact in loading chunk refs too - false, + defaultStrategy, // we don't skip chunks, so we can measure impact in loading chunk refs too block.meta.MinTime, block.meta.MaxTime, 2, @@ -2437,7 +2445,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { testCase.matchers, testCase.shard, seriesHasher, - true, false, + noChunks, b.meta.MinTime, b.meta.MaxTime, 1, @@ -2469,7 +2477,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { testCase.matchers, testCase.shard, seriesHasher, - true, false, + noChunks, b.meta.MinTime, b.meta.MaxTime, 1, From 9f7945b93126e03ab187b192c0aa342efd11a957 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 19 Jun 2023 15:38:22 +0530 Subject: [PATCH 39/75] Lint and stuff Signed-off-by: Ganesh Vernekar --- cmd/mimir/config-descriptor.json | 22 ++++++++++++++++++++++ cmd/mimir/help-all.txt.tmpl | 4 ++++ integration/querier_test.go | 14 ++++++++------ pkg/storegateway/bucket.go | 7 +++---- pkg/storegateway/series_refs.go | 10 +++++----- 5 files changed, 42 insertions(+), 15 deletions(-) diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 9781c5cb67f..1b1819e66de 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1662,6 +1662,17 @@ "fieldType": "boolean", "fieldCategory": "experimental" }, + { + "kind": "field", + "name": "prefer_streaming_chunks_storegateway", + "required": false, + "desc": "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.", + "fieldValue": null, + "fieldDefaultValue": false, + "fieldFlag": "querier.prefer-streaming-chunks-storegateway", + "fieldType": "boolean", + "fieldCategory": "experimental" + }, { "kind": "field", "name": "streaming_chunks_per_ingester_series_buffer_size", @@ -1673,6 +1684,17 @@ "fieldType": "int", "fieldCategory": "experimental" }, + { + "kind": "field", + "name": "streaming_chunks_per_ingester_series_buffer_size_storegateway", + "required": false, + "desc": "Number of series to buffer per storegateway when streaming chunks from storegateway.", + "fieldValue": null, + "fieldDefaultValue": 256, + "fieldFlag": "querier.streaming-chunks-per-ingester-buffer-size-storegateway", + "fieldType": "int", + "fieldCategory": "experimental" + }, { "kind": "field", "name": "minimize_ingester_requests", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index a66aa354a2e..90dcbf7fc9f 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1595,6 +1595,8 @@ Usage of ./cmd/mimir/mimir: [experimental] If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path. -querier.prefer-streaming-chunks [experimental] Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this. + -querier.prefer-streaming-chunks-storegateway + [experimental] Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this. -querier.query-ingesters-within duration Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. (default 13h) -querier.query-store-after duration @@ -1621,6 +1623,8 @@ Usage of ./cmd/mimir/mimir: Override the expected name on the server certificate. -querier.streaming-chunks-per-ingester-buffer-size uint [experimental] Number of series to buffer per ingester when streaming chunks from ingesters. (default 256) + -querier.streaming-chunks-per-ingester-buffer-size-storegateway uint + [experimental] Number of series to buffer per storegateway when streaming chunks from storegateway. (default 256) -querier.timeout duration The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. This also applies to queries evaluated by the ruler (internally or remotely). (default 2m0s) -query-frontend.align-queries-with-step diff --git a/integration/querier_test.go b/integration/querier_test.go index de8980996df..d69736767c3 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -171,6 +171,7 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream "-query-frontend.query-stats-enabled": "true", "-query-frontend.parallelize-shardable-queries": strconv.FormatBool(testCfg.queryShardingEnabled), "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-storegateway": strconv.FormatBool(streamingEnabled), }) // Start store-gateways. @@ -894,12 +895,13 @@ func TestQueryLimitsWithBlocksStorageRunningInMicroServices(t *testing.T) { // Configure the blocks storage to frequently compact TSDB head // and ship blocks to the storage. flags := mergeFlags(BlocksStorageFlags(), BlocksStorageS3Flags(), map[string]string{ - "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), - "-blocks-storage.tsdb.ship-interval": "1s", - "-blocks-storage.bucket-store.sync-interval": "1s", - "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), - "-querier.max-fetched-series-per-query": "3", - "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-querier.max-fetched-series-per-query": "3", + "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-storegateway": strconv.FormatBool(streamingEnabled), }) // Start dependencies. diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index e13c5eeb28b..25f94567e3c 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -713,9 +713,8 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( // TODO: can we send this in parallel while we start fetching the chunks below? for seriesSet.Next() { var lset labels.Labels - // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle - // because the subsequent call to seriesSet.Next() may release it. - // TODO: check if it is safe to hold the lset. + // Although subsequent call to seriesSet.Next() may release the memory of this series object, + // it is safe to hold onto the labels because it is not released. lset, _ = seriesSet.At() // We are re-using the slice for every batch this way. @@ -1901,7 +1900,7 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch ref := int64(d.Uvarint64()) isNoChunks := strategy.isNoChunks() - isNoChunkOverlapMintMaxt := strategy.isNoChunks() && strategy.isOverlapMintMaxt() + isNoChunkOverlapMintMaxt := strategy.isNoChunksAndOverlapMintMaxt() for i := 0; i < k; i++ { if i > 0 { mint += int64(d.Uvarint64()) diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 7598bee3477..89a9081c3b4 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -806,9 +806,9 @@ const ( // To change the default behavior, use the flags below this. defaultStrategy seriesIteratorStrategy = 0 - // noChunks when used by itself fetches only series labels for series in the entire block. + // noChunks flag when used by itself fetches only series labels for series in the entire block. noChunks seriesIteratorStrategy = 0b00000001 - // overlapMintMaxt is used to be used together with noChunks. With this, only the series whose + // overlapMintMaxt flag is used to be used together with noChunks. With this, only the series whose // chunks overlap with mint->maxt are selected. overlapMintMaxt seriesIteratorStrategy = 0b00000010 ) @@ -821,11 +821,11 @@ func (s seriesIteratorStrategy) isOverlapMintMaxt() bool { return s&overlapMintMaxt != 0 } -func (s seriesIteratorStrategy) isNoChunksEntireBlock() bool { +func (s seriesIteratorStrategy) isNoChunksOnEntireBlock() bool { return s.isNoChunks() && !s.isOverlapMintMaxt() } -func (s seriesIteratorStrategy) isNoChunksOverlapMintMaxt() bool { +func (s seriesIteratorStrategy) isNoChunksAndOverlapMintMaxt() bool { return s.isNoChunks() && s.isOverlapMintMaxt() } @@ -845,7 +845,7 @@ func newLoadingSeriesChunkRefsSetIterator( chunkRangesPerSeries int, logger log.Logger, ) *loadingSeriesChunkRefsSetIterator { - if strategy.isNoChunksEntireBlock() { + if strategy.isNoChunksOnEntireBlock() { minTime, maxTime = blockMeta.MinTime, blockMeta.MaxTime } From 9f5188ec98354a21247b0e58077ce88062e9abb5 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 19 Jun 2023 16:26:28 +0530 Subject: [PATCH 40/75] Fix goroutine leak Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 29 +++++++++++++++++--- pkg/storegateway/bucket_streaming_readers.go | 15 +++++++--- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index cfdc219c18b..16f6c7d2b6d 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -444,6 +444,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* convertedMatchers = convertMatchersToLabelMatcher(matchers) resSeriesSets = []storage.SeriesSet(nil) resWarnings = storage.Warnings(nil) + streamClosers []func() ) shard, _, err := sharding.ShardFromMatchers(matchers) @@ -452,19 +453,27 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - seriesSets, queriedBlocks, warnings, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, convertedMatchers) + seriesSets, queriedBlocks, warnings, streamCloser, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, convertedMatchers) if err != nil { return nil, err } resSeriesSets = append(resSeriesSets, seriesSets...) resWarnings = append(resWarnings, warnings...) + if streamCloser != nil { + streamClosers = append(streamClosers, streamCloser) + } return queriedBlocks, nil } err = q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, shard, queryFunc) if err != nil { + // If this was a streaming call, we should close the stream readers so that goroutines are not + // stuck waiting for chunks. + for _, sc := range streamClosers { + sc() + } return storage.ErrSeriesSet(err) } @@ -686,7 +695,11 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar // Errors while creating storepb.SeriesRequest, context cancellation, and unprocessable // requests to the store-gateways (e.g., if a chunk or series limit is hit) are // considered serious errors. All other errors are not returned, but they give rise to fetch retrials. -func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) ([]storage.SeriesSet, []ulid.ULID, storage.Warnings, error) { +// +// In case of a successful run, fetchSeriesFromStores returns a streamCloser function if it was a streaming +// call for series+chunks. If you are ending the execution of the query later without iterating through all the series +// and consuming the chunks, the streamCloser MUST be called to avoid leaking goroutines and gRPC connections. +func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) (_ []storage.SeriesSet, _ []ulid.ULID, _ storage.Warnings, streamCloser func(), _ error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID) g, gCtx = errgroup.WithContext(reqCtx) @@ -869,14 +882,22 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor level.Warn(q.logger).Log("msg", "closing storegateway client stream failed", "err", err) } } - return nil, nil, nil, err + return nil, nil, nil, nil, err } for _, sr := range streamReaders { sr.StartBuffering() } - return seriesSets, queriedBlocks, warnings, nil + if len(streams) > 0 { + streamCloser = func() { + for _, sr := range streamReaders { + sr.Close() + } + } + } + + return seriesSets, queriedBlocks, warnings, streamCloser, nil } func shouldStopQueryFunc(err error) bool { diff --git a/pkg/storegateway/bucket_streaming_readers.go b/pkg/storegateway/bucket_streaming_readers.go index ee1643988db..97b93823b9c 100644 --- a/pkg/storegateway/bucket_streaming_readers.go +++ b/pkg/storegateway/bucket_streaming_readers.go @@ -31,6 +31,7 @@ type SeriesChunksStreamReader struct { seriesChunksChan chan *storepb.StreamingChunksBatch chunksBatch []*storepb.StreamingChunks errorChan chan error + closeCalled chan struct{} } func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *SeriesChunksStreamReader { @@ -40,12 +41,19 @@ func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient queryLimiter: queryLimiter, stats: stats, log: log, + closeCalled: make(chan struct{}), } } // Close cleans up all resources associated with this SeriesChunksStreamReader. // This method should only be called if StartBuffering is not called. func (s *SeriesChunksStreamReader) Close() { + select { + case <-s.closeCalled: + return + default: + } + close(s.closeCalled) if err := s.client.CloseSend(); err != nil { level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) } @@ -65,10 +73,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { go func() { defer func() { - if err := s.client.CloseSend(); err != nil { - level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) - } - + s.Close() close(s.seriesChunksChan) close(s.errorChan) }() @@ -133,6 +138,8 @@ func (s *SeriesChunksStreamReader) StartBuffering() { // which is true at the time of writing. s.errorChan <- s.client.Context().Err() return + case <-s.closeCalled: + return case s.seriesChunksChan <- c: // Batch enqueued successfully, nothing else to do for this batch. } From 47a90b600ba4c88ff1e9ae2db3f2246162b2ff1b Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 19 Jun 2023 16:38:41 +0530 Subject: [PATCH 41/75] Fix a race Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 16f6c7d2b6d..2d48b2275d5 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -734,7 +734,9 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor stream, err := c.Series(reqCtx, req) if err == nil { + mtx.Lock() streams = append(streams, stream) + mtx.Unlock() err = gCtx.Err() } if err != nil { From 64ccb448c1ccf8326847277718c9ba55c7448305 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 20 Jun 2023 11:26:41 +0530 Subject: [PATCH 42/75] Fix unit test panic Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable_test.go | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index ca6019b8012..825819857bb 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -2020,9 +2020,9 @@ type storeGatewayClientMock struct { mockedLabelValuesErr error } -func (m *storeGatewayClientMock) Series(context.Context, *storepb.SeriesRequest, ...grpc.CallOption) (storegatewaypb.StoreGateway_SeriesClient, error) { +func (m *storeGatewayClientMock) Series(ctx context.Context, _ *storepb.SeriesRequest, _ ...grpc.CallOption) (storegatewaypb.StoreGateway_SeriesClient, error) { seriesClient := &storeGatewaySeriesClientMock{ - ClientStream: grpcClientStreamMock{}, // Required to not panic. + ClientStream: grpcClientStreamMock{ctx: ctx}, // Required to not panic. mockedResponses: m.mockedSeriesResponses, } @@ -2060,12 +2060,14 @@ func (m *storeGatewaySeriesClientMock) Recv() (*storepb.SeriesResponse, error) { return res, nil } -type grpcClientStreamMock struct{} +type grpcClientStreamMock struct { + ctx context.Context +} func (grpcClientStreamMock) Header() (metadata.MD, error) { return nil, nil } func (grpcClientStreamMock) Trailer() metadata.MD { return nil } func (grpcClientStreamMock) CloseSend() error { return nil } -func (grpcClientStreamMock) Context() context.Context { return context.Background() } +func (m grpcClientStreamMock) Context() context.Context { return m.ctx } func (grpcClientStreamMock) SendMsg(interface{}) error { return nil } func (grpcClientStreamMock) RecvMsg(interface{}) error { return nil } @@ -2091,6 +2093,9 @@ func (m *cancelerStoreGatewayClientMock) Series(ctx context.Context, _ *storepb. series := &cancelerStoreGatewaySeriesClientMock{ ctx: ctx, cancel: m.cancel, + storeGatewaySeriesClientMock: storeGatewaySeriesClientMock{ + ClientStream: grpcClientStreamMock{ctx: ctx}, + }, } return series, nil } From 1720db083725d6128c972587c74340c137c19f1f Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 20 Jun 2023 13:29:31 +0530 Subject: [PATCH 43/75] Fix the use of seriesIteratorStrategy Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 2 +- pkg/storegateway/series_refs.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 25f94567e3c..cad2d3c1bf7 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -914,7 +914,7 @@ func (s *BucketStore) sendHintsAndStats(srv storepb.Store_SeriesServer, resHints var anyHints *types.Any var err error if anyHints, err = types.MarshalAny(resHints); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "marshal series response hints").Error()) + return status.Error(codes.Internal, errors.Wrap(err, "marshal series response hints").Error()) } if err := srv.Send(storepb.NewHintsSeriesResponse(anyHints)); err != nil { diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 89a9081c3b4..6b648e2b68e 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -880,7 +880,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Next() bool { nextPostings := s.postingsSetIterator.At() var cachedSeriesID cachedSeriesForPostingsID - if s.strategy.isNoChunks() { + if s.strategy.isNoChunksOnEntireBlock() { var err error // Calculate the cache ID before we filter out anything from the postings, // so that the key doesn't depend on the series hash cache or any other filtering we do on the postings list. From a51311ff38a96dbf79ee44d890260150e899bb7c Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 20 Jun 2023 15:07:07 +0530 Subject: [PATCH 44/75] Take care of tracing spans Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 20 ++++++++------------ pkg/storegateway/series_refs.go | 1 - 2 files changed, 8 insertions(+), 13 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index cad2d3c1bf7..eae0a5def98 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -643,7 +643,9 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } iterationBegin = time.Now() - err = s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) + tracing.DoWithSpan(ctx, "bucket_store_streaming_series_merge_all", func(ctx context.Context, _ tracing.Span) { + err = s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) + }) if err != nil { return err } @@ -651,22 +653,19 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie req.SkipChunks = false } - // TODO: if streaming is enabled, we don't need to fetch the labels again; we just need to fetch the chunk references. - // But we need labels to merge the series from blocks. Find other way of caching the resultant series refs (maybe final ordered - // list of series IDs and block IDs). seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err } // Merge the sub-results from each selected block. - tracing.DoWithSpan(ctx, "bucket_store_merge_all", func(ctx context.Context, _ tracing.Span) { + spanName := "bucket_store_merge_all" + if req.StreamingChunksBatchSize > 0 { + spanName = "bucket_store_streaming_chunks_merge_all" + } + tracing.DoWithSpan(ctx, spanName, func(ctx context.Context, _ tracing.Span) { err = s.sendSeriesChunks(req, srv, seriesSet, stats, iterationBegin) - if err != nil { - return - } }) - if err != nil { return } @@ -702,7 +701,6 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( stats.streamingSeriesSendResponseDuration += sendDuration }) - // TODO: should we pool the seriesBuffer/seriesBatch? seriesBuffer := make([]*storepb.StreamingSeries, req.StreamingChunksBatchSize) for i := range seriesBuffer { seriesBuffer[i] = &storepb.StreamingSeries{} @@ -721,8 +719,6 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( seriesBatch.Series = seriesBatch.Series[:len(seriesBatch.Series)+1] seriesBatch.Series[len(seriesBatch.Series)-1].Labels = mimirpb.FromLabelsToLabelAdapters(lset) - // TODO: Add relevant trace spans and timers. - if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { msg := &grpc.PreparedMsg{} diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 6b648e2b68e..7fb04a4dce2 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -754,7 +754,6 @@ func openBlockSeriesChunkRefsSetsIterator( return nil, nil, nil, errors.New("set size must be a positive number") } - // TODO: cache the filtered postings instead later. if len(ps) == 0 { var err error ps, pendingMatchers, err = indexr.ExpandedPostings(ctx, matchers, stats) From c76e5602697d8c69bddd9fac074e5a5e5363dd06 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 20 Jun 2023 17:16:55 +0530 Subject: [PATCH 45/75] Fix metrics in integration tests Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index d69736767c3..e0dfe895b38 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -250,14 +250,19 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream instantQueriesCount++ // Check the in-memory index cache metrics (in the store-gateway). - // TODO: metrics when streaming enabled. - if !streamingEnabled { + + if streamingEnabled { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 + //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(5), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+7), "thanos_memcached_operations_total")) + } + } else { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty - } - - // TODO: metrics when streaming enabled. - if !streamingEnabled { if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one @@ -273,8 +278,16 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream assert.Equal(t, expectedVector1, result.(model.Vector)) expectedFetchedSeries++ // Storage only. - // TODO: metrics when streaming enabled. - if !streamingEnabled { + if streamingEnabled { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5+3), "thanos_store_index_cache_requests_total")) + //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(8), "thanos_store_index_cache_hits_total")) + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+7+4), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) + } + } else { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+2), "thanos_store_index_cache_requests_total")) require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { From 5115fc44c677223bf0988addd22154a6581d95e3 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 21 Jun 2023 10:39:29 +0530 Subject: [PATCH 46/75] Fix flags Signed-off-by: Ganesh Vernekar --- cmd/mimir/config-descriptor.json | 8 ++++---- cmd/mimir/help-all.txt.tmpl | 4 ++-- .../references/configuration-parameters/index.md | 8 ++++---- integration/querier_test.go | 16 ++++++++-------- pkg/querier/blocks_store_queryable.go | 2 +- pkg/querier/querier.go | 16 ++++++++-------- 6 files changed, 27 insertions(+), 27 deletions(-) diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 1b1819e66de..5a850558589 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1664,12 +1664,12 @@ }, { "kind": "field", - "name": "prefer_streaming_chunks_storegateway", + "name": "prefer_streaming_chunks_store_gateway", "required": false, "desc": "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.", "fieldValue": null, "fieldDefaultValue": false, - "fieldFlag": "querier.prefer-streaming-chunks-storegateway", + "fieldFlag": "querier.prefer-streaming-chunks-store-gateway", "fieldType": "boolean", "fieldCategory": "experimental" }, @@ -1686,12 +1686,12 @@ }, { "kind": "field", - "name": "streaming_chunks_per_ingester_series_buffer_size_storegateway", + "name": "streaming_chunks_per_store_gateway_series_buffer_size", "required": false, "desc": "Number of series to buffer per storegateway when streaming chunks from storegateway.", "fieldValue": null, "fieldDefaultValue": 256, - "fieldFlag": "querier.streaming-chunks-per-ingester-buffer-size-storegateway", + "fieldFlag": "querier.streaming-chunks-per-store-gateway-buffer-size", "fieldType": "int", "fieldCategory": "experimental" }, diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 90dcbf7fc9f..ba33f8ccc81 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1595,7 +1595,7 @@ Usage of ./cmd/mimir/mimir: [experimental] If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path. -querier.prefer-streaming-chunks [experimental] Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this. - -querier.prefer-streaming-chunks-storegateway + -querier.prefer-streaming-chunks-store-gateway [experimental] Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this. -querier.query-ingesters-within duration Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. (default 13h) @@ -1623,7 +1623,7 @@ Usage of ./cmd/mimir/mimir: Override the expected name on the server certificate. -querier.streaming-chunks-per-ingester-buffer-size uint [experimental] Number of series to buffer per ingester when streaming chunks from ingesters. (default 256) - -querier.streaming-chunks-per-ingester-buffer-size-storegateway uint + -querier.streaming-chunks-per-store-gateway-buffer-size uint [experimental] Number of series to buffer per storegateway when streaming chunks from storegateway. (default 256) -querier.timeout duration The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. This also applies to queries evaluated by the ruler (internally or remotely). (default 2m0s) diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md index dd0c67b7dca..b896cb977d2 100644 --- a/docs/sources/mimir/references/configuration-parameters/index.md +++ b/docs/sources/mimir/references/configuration-parameters/index.md @@ -1083,8 +1083,8 @@ store_gateway_client: # (experimental) Request storegateway stream chunks. Storegateway will only # respond with a stream of chunks if the target storegateway supports this, and # this preference will be ignored by storegateway that do not support this. -# CLI flag: -querier.prefer-streaming-chunks-storegateway -[prefer_streaming_chunks_storegateway: | default = false] +# CLI flag: -querier.prefer-streaming-chunks-store-gateway +[prefer_streaming_chunks_store_gateway: | default = false] # (experimental) Number of series to buffer per ingester when streaming chunks # from ingesters. @@ -1093,8 +1093,8 @@ store_gateway_client: # (experimental) Number of series to buffer per storegateway when streaming # chunks from storegateway. -# CLI flag: -querier.streaming-chunks-per-ingester-buffer-size-storegateway -[streaming_chunks_per_ingester_series_buffer_size_storegateway: | default = 256] +# CLI flag: -querier.streaming-chunks-per-store-gateway-buffer-size +[streaming_chunks_per_store_gateway_series_buffer_size: | default = 256] # (experimental) If true, when querying ingesters, only the minimum required # ingesters required to reach quorum will be queried initially, with other diff --git a/integration/querier_test.go b/integration/querier_test.go index e0dfe895b38..e50bcd17385 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -171,7 +171,7 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream "-query-frontend.query-stats-enabled": "true", "-query-frontend.parallelize-shardable-queries": strconv.FormatBool(testCfg.queryShardingEnabled), "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), - "-querier.prefer-streaming-chunks-storegateway": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-store-gateway": strconv.FormatBool(streamingEnabled), }) // Start store-gateways. @@ -908,13 +908,13 @@ func TestQueryLimitsWithBlocksStorageRunningInMicroServices(t *testing.T) { // Configure the blocks storage to frequently compact TSDB head // and ship blocks to the storage. flags := mergeFlags(BlocksStorageFlags(), BlocksStorageS3Flags(), map[string]string{ - "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), - "-blocks-storage.tsdb.ship-interval": "1s", - "-blocks-storage.bucket-store.sync-interval": "1s", - "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), - "-querier.max-fetched-series-per-query": "3", - "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), - "-querier.prefer-streaming-chunks-storegateway": strconv.FormatBool(streamingEnabled), + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-querier.max-fetched-series-per-query": "3", + "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-store-gateway": strconv.FormatBool(streamingEnabled), }) // Start dependencies. diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index f0f682ccb93..6c2a913398b 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -264,7 +264,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa reg, ) - streamingBufferSize := querierCfg.StreamingChunksPerIngesterSeriesBufferSizeStoregateway + streamingBufferSize := querierCfg.StreamingChunksPerStoregatewaySeriesBufferSize if !querierCfg.PreferStreamingChunksStoregateway { streamingBufferSize = 0 } diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 007a748e81c..38988406862 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -51,11 +51,11 @@ type Config struct { ShuffleShardingIngestersEnabled bool `yaml:"shuffle_sharding_ingesters_enabled" category:"advanced"` - PreferStreamingChunks bool `yaml:"prefer_streaming_chunks" category:"experimental"` - PreferStreamingChunksStoregateway bool `yaml:"prefer_streaming_chunks_storegateway" category:"experimental"` - StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size" category:"experimental"` - StreamingChunksPerIngesterSeriesBufferSizeStoregateway uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size_storegateway" category:"experimental"` - MinimizeIngesterRequests bool `yaml:"minimize_ingester_requests" category:"experimental"` + PreferStreamingChunks bool `yaml:"prefer_streaming_chunks" category:"experimental"` + PreferStreamingChunksStoregateway bool `yaml:"prefer_streaming_chunks_store_gateway" category:"experimental"` + StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size" category:"experimental"` + StreamingChunksPerStoregatewaySeriesBufferSize uint64 `yaml:"streaming_chunks_per_store_gateway_series_buffer_size" category:"experimental"` + MinimizeIngesterRequests bool `yaml:"minimize_ingester_requests" category:"experimental"` // PromQL engine config. EngineConfig engine.Config `yaml:",inline"` @@ -85,13 +85,13 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.QueryStoreAfter, queryStoreAfterFlag, 12*time.Hour, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.BoolVar(&cfg.ShuffleShardingIngestersEnabled, "querier.shuffle-sharding-ingesters-enabled", true, fmt.Sprintf("Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -%s. If this setting is false or -%s is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", validation.QueryIngestersWithinFlag, validation.QueryIngestersWithinFlag)) f.BoolVar(&cfg.PreferStreamingChunks, "querier.prefer-streaming-chunks", false, "Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this.") - f.BoolVar(&cfg.PreferStreamingChunksStoregateway, "querier.prefer-streaming-chunks-storegateway", false, "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.") + f.BoolVar(&cfg.PreferStreamingChunksStoregateway, "querier.prefer-streaming-chunks-store-gateway", false, "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.") f.BoolVar(&cfg.MinimizeIngesterRequests, "querier.minimize-ingester-requests", false, "If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path.") - // Why 256 series / ingester/storegateway? + // Why 256 series / ingester/store-gateway? // Based on our testing, 256 series / ingester was a good balance between memory consumption and the CPU overhead of managing a batch of series. f.Uint64Var(&cfg.StreamingChunksPerIngesterSeriesBufferSize, "querier.streaming-chunks-per-ingester-buffer-size", 256, "Number of series to buffer per ingester when streaming chunks from ingesters.") - f.Uint64Var(&cfg.StreamingChunksPerIngesterSeriesBufferSizeStoregateway, "querier.streaming-chunks-per-ingester-buffer-size-storegateway", 256, "Number of series to buffer per storegateway when streaming chunks from storegateway.") + f.Uint64Var(&cfg.StreamingChunksPerStoregatewaySeriesBufferSize, "querier.streaming-chunks-per-store-gateway-buffer-size", 256, "Number of series to buffer per storegateway when streaming chunks from storegateway.") // The querier.query-ingesters-within flag has been moved to the limits.go file // We still need to set a default value for cfg.QueryIngestersWithin since we need to keep supporting the querier yaml field until Mimir 2.11.0 From afd6e26ab2e69066113d4a28a79b033157a9f6d3 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 21 Jun 2023 14:35:06 +0530 Subject: [PATCH 47/75] Fix review comments Signed-off-by: Ganesh Vernekar --- pkg/old.txt | 0 pkg/storegateway/bucket.go | 149 ++++++++++++++--------------- pkg/storegateway/storepb/rpc.proto | 14 ++- 3 files changed, 83 insertions(+), 80 deletions(-) delete mode 100644 pkg/old.txt diff --git a/pkg/old.txt b/pkg/old.txt deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index eae0a5def98..4822e756f8e 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -580,6 +580,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), "request matchers", storepb.PromMatchersToString(matchers...), "request shard selector", maybeNilShard(shardSelector).LabelValue(), + "streaming chunks batch size", req.StreamingChunksBatchSize, ) var ( @@ -642,13 +643,20 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - iterationBegin = time.Now() - tracing.DoWithSpan(ctx, "bucket_store_streaming_series_merge_all", func(ctx context.Context, _ tracing.Span) { - err = s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) - }) + numSeries, err := s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) if err != nil { return err } + level.Debug(spanLogger).Log( + "msg", "sent streaming series", + "request min time", time.UnixMilli(req.MinTime).UTC().Format(time.RFC3339Nano), + "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), + "request matchers", storepb.PromMatchersToString(matchers...), + "request shard selector", maybeNilShard(shardSelector).LabelValue(), + "streaming chunks batch size", req.StreamingChunksBatchSize, + "num_series", numSeries, + "duration", time.Since(iterationBegin), + ) req.SkipChunks = false } @@ -658,17 +666,26 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - // Merge the sub-results from each selected block. - spanName := "bucket_store_merge_all" - if req.StreamingChunksBatchSize > 0 { - spanName = "bucket_store_streaming_chunks_merge_all" - } - tracing.DoWithSpan(ctx, spanName, func(ctx context.Context, _ tracing.Span) { - err = s.sendSeriesChunks(req, srv, seriesSet, stats, iterationBegin) - }) + start := time.Now() + numSeries, numChunks, err := s.sendSeriesChunks(req, srv, seriesSet, stats) if err != nil { return } + debugMessage := "sent series" + if req.StreamingChunksBatchSize > 0 { + debugMessage = "sent streaming chunks" + } + level.Debug(spanLogger).Log( + "msg", debugMessage, + "request min time", time.UnixMilli(req.MinTime).UTC().Format(time.RFC3339Nano), + "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), + "request matchers", storepb.PromMatchersToString(matchers...), + "request shard selector", maybeNilShard(shardSelector).LabelValue(), + "streaming chunks batch size", req.StreamingChunksBatchSize, + "num_series", numSeries, + "num_chunks", numChunks, + "duration", time.Since(start), + ) if req.StreamingChunksBatchSize == 0 || req.SkipChunks { // Hints and stats were not sent before, so send it now. @@ -687,18 +704,22 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( stats *safeQueryStats, seriesSet storepb.SeriesSet, resHints *hintspb.SeriesResponseHints, -) error { +) (numSeries int, err error) { var ( encodeDuration = time.Duration(0) sendDuration = time.Duration(0) + iterationBegin = time.Now() ) - // Once the iteration is done we will update the stats. defer stats.update(func(stats *queryStats) { // The time spent iterating over the series set is the // actual time spent fetching series and chunks, encoding and sending them to the client. // We split the timings to have a better view over how time is spent. + // We do not update streamingSeriesFetchSeriesAndChunksDuration here because it will be updated when sending + // streaming chunks, that includes the series and chunks fetch duration for sending the streaming series. stats.streamingSeriesEncodeResponseDuration += encodeDuration stats.streamingSeriesSendResponseDuration += sendDuration + stats.streamingSeriesOtherDuration += time.Duration(util_math.Max(0, int64(time.Since(iterationBegin)- + stats.streamingSeriesFetchSeriesAndChunksDuration-encodeDuration-sendDuration))) }) seriesBuffer := make([]*storepb.StreamingSeries, req.StreamingChunksBatchSize) @@ -710,9 +731,10 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( } // TODO: can we send this in parallel while we start fetching the chunks below? for seriesSet.Next() { + numSeries++ var lset labels.Labels // Although subsequent call to seriesSet.Next() may release the memory of this series object, - // it is safe to hold onto the labels because it is not released. + // it is safe to hold onto the labels because they are not released. lset, _ = seriesSet.At() // We are re-using the slice for every batch this way. @@ -720,54 +742,27 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( seriesBatch.Series[len(seriesBatch.Series)-1].Labels = mimirpb.FromLabelsToLabelAdapters(lset) if len(seriesBatch.Series) == int(req.StreamingChunksBatchSize) { - msg := &grpc.PreparedMsg{} - - encodeBegin := time.Now() - if err := msg.Encode(srv, storepb.NewStreamingSeriesResponse(seriesBatch)); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) - } - encodeDuration += time.Since(encodeBegin) - - sendBegin := time.Now() - if err := srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) + err := s.sendMessage("streaming series", srv, storepb.NewStreamingSeriesResponse(seriesBatch), &encodeDuration, &sendDuration) + if err != nil { + return 0, err } - sendDuration += time.Since(sendBegin) - seriesBatch.Series = seriesBatch.Series[:0] } } if seriesSet.Err() != nil { - return errors.Wrap(seriesSet.Err(), "expand series set") + return 0, errors.Wrap(seriesSet.Err(), "expand series set") } // We need to send hints and stats before sending the chunks. // Also, these need to be sent before we send IsEndOfSeriesStream=true. if err := s.sendHintsAndStats(srv, resHints, stats); err != nil { - return err + return 0, err } // Send any remaining series and signal that there are no more series. - msg := &grpc.PreparedMsg{} seriesBatch.IsEndOfSeriesStream = true - - encodeBegin := time.Now() - if err := msg.Encode(srv, storepb.NewStreamingSeriesResponse(seriesBatch)); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode streaming series response").Error()) - } - encodeDuration += time.Since(encodeBegin) - - sendBegin := time.Now() - if err := srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send streaming series response").Error()) - } - sendDuration += time.Since(sendBegin) - - if seriesSet.Err() != nil { - return errors.Wrap(seriesSet.Err(), "expand series set") - } - - return nil + err = s.sendMessage("streaming series", srv, storepb.NewStreamingSeriesResponse(seriesBatch), &encodeDuration, &sendDuration) + return numSeries, err } func (s *BucketStore) sendSeriesChunks( @@ -775,21 +770,14 @@ func (s *BucketStore) sendSeriesChunks( srv storepb.Store_SeriesServer, seriesSet storepb.SeriesSet, stats *safeQueryStats, - iterationBegin time.Time, -) (err error) { +) (seriesCount, chunksCount int, err error) { var ( encodeDuration = time.Duration(0) sendDuration = time.Duration(0) - seriesCount int - chunksCount int + iterationBegin = time.Now() streamingChunks = req.StreamingChunksBatchSize > 0 ) - if iterationBegin.Equal(time.Time{}) { - iterationBegin = time.Now() - } - - // Once the iteration is done we will update the stats. defer stats.update(func(stats *queryStats) { stats.mergedSeriesCount += seriesCount stats.mergedChunksCount += chunksCount @@ -819,20 +807,20 @@ func (s *BucketStore) sendSeriesChunks( } for seriesSet.Next() { // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle - // because the subsequent call to seriesSet.Next() may release it. + // because the subsequent call to seriesSet.Next() may release it. But it is safe to hold + // onto lset because the labels are not released. lset, chks := seriesSet.At() seriesCount++ var response *storepb.SeriesResponse if streamingChunks { - // We only need to stream chunks here because the series labels have already - // been sent above. + // We only need to stream chunks here because the series labels have already been sent. chunksBatch.Series = chunksBatch.Series[:len(chunksBatch.Series)+1] - last := chunksBatch.Series[len(chunksBatch.Series)-1] - last.Chunks = append(last.Chunks[:0], chks...) - last.SeriesIndex = uint64(seriesCount - 1) + lastSeries := chunksBatch.Series[len(chunksBatch.Series)-1] + lastSeries.Chunks = append(lastSeries.Chunks[:0], chks...) + lastSeries.SeriesIndex = uint64(seriesCount - 1) // Copy the chunk bytes to avoid race. - for i := range last.Chunks { - raw := last.Chunks[i].Raw + for i := range lastSeries.Chunks { + raw := lastSeries.Chunks[i].Raw if raw == nil { continue } @@ -840,10 +828,10 @@ func (s *BucketStore) sendSeriesChunks( Type: raw.Type, Data: append(make([]byte, 0, len(raw.Data)), raw.Data...), } - last.Chunks[i].Raw = &newChk + lastSeries.Chunks[i].Raw = &newChk } - batchSizeBytes += last.Size() + batchSizeBytes += lastSeries.Size() // We are not strictly required to be under targetQueryStreamBatchMessageSize. // The aim is to not hit gRPC and TCP limits, hence some overage is ok. if (batchSizeBytes > 0 && batchSizeBytes > targetQueryStreamBatchMessageSize) || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { @@ -865,9 +853,9 @@ func (s *BucketStore) sendSeriesChunks( } if response != nil { - err := s.sendChunks(srv, response, &encodeDuration, &sendDuration) + err := s.sendMessage("series", srv, response, &encodeDuration, &sendDuration) if err != nil { - return err + return 0, 0, err } if streamingChunks { @@ -877,29 +865,32 @@ func (s *BucketStore) sendSeriesChunks( } } if seriesSet.Err() != nil { - return errors.Wrap(seriesSet.Err(), "expand series set") + return 0, 0, errors.Wrap(seriesSet.Err(), "expand series set") } if streamingChunks && len(chunksBatch.Series) > 0 { // Still some chunks left to send. - return s.sendChunks(srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) + err := s.sendMessage("series", srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) + if err != nil { + return 0, 0, err + } } - return nil + return seriesCount, chunksCount, nil } -func (s *BucketStore) sendChunks(srv storepb.Store_SeriesServer, chunks *storepb.SeriesResponse, encodeDuration, sendDuration *time.Duration) error { +func (s *BucketStore) sendMessage(typ string, srv storepb.Store_SeriesServer, msg interface{}, encodeDuration, sendDuration *time.Duration) error { // We encode it ourselves into a PreparedMsg in order to measure the time it takes. encodeBegin := time.Now() - msg := &grpc.PreparedMsg{} - if err := msg.Encode(srv, chunks); err != nil { - return status.Error(codes.Internal, errors.Wrap(err, "encode series response").Error()) + pmsg := &grpc.PreparedMsg{} + if err := pmsg.Encode(srv, msg); err != nil { + return status.Error(codes.Internal, errors.Wrapf(err, "encode %s response", typ).Error()) } *encodeDuration += time.Since(encodeBegin) sendBegin := time.Now() - if err := srv.SendMsg(msg); err != nil { - return status.Error(codes.Unknown, errors.Wrap(err, "send series response").Error()) + if err := srv.SendMsg(pmsg); err != nil { + return status.Error(codes.Unknown, errors.Wrapf(err, "send %s response", typ).Error()) } *sendDuration += time.Since(sendBegin) diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index d3d0e61a057..9ae61689574 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -80,6 +80,16 @@ message SeriesRequest { // Thanos shard_info. reserved 13; + // If streaming_chunks_batch_size=0, the response must only contain one 'series' at a time + // with the series labels and chunks data sent together. + // If streaming_chunks_batch_size > 0 + // - The store may choose to send the streaming_series/streaming_chunks OR behave as + // if streaming_chunks_batch_size=0 if it does not support streaming series. + // - The store must not send a mix of 'series' and streaming_series/streaming_chunks for a single request. + // - If the store chooses to send streaming series, all the streaming_series must be sent before + // sending any streaming_chunks, with the last streaming_series response containing is_end_of_series_stream=true. + // The order of series in both streaming_series/streaming_chunks must match and the size of the batch must not + // cross streaming_chunks_batch_size, although it can be lower than that. // It is 100 so that we have an option to bring back compatibility with Thanos' storage API. uint64 streaming_chunks_batch_size = 100; } @@ -110,10 +120,12 @@ message SeriesResponse { Stats stats = 4; /// streaming_series is a list of series labels sent as part of a streaming Series call. + /// These are populated only when streaming_chunks_batch_size > 0 in the series request. StreamingSeriesBatch streaming_series = 5; - /// streaming_chunks is a batch of list of chunks sent as part of a streaming Series request. + /// streaming_chunks is a list of chunks sent as part of a streaming Series request. /// They are associated with series labels sent as streaming_series earlier in the same Series request. + /// These are populated only when streaming_chunks_batch_size > 0 in the series request. StreamingChunksBatch streaming_chunks = 6; } } From 09d2fbda36e0121f872c0cb23bfd9f4c07b04b9c Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 21 Jun 2023 15:29:42 +0530 Subject: [PATCH 48/75] Fix more comments Signed-off-by: Ganesh Vernekar --- cmd/mimir/config-descriptor.json | 12 +++---- cmd/mimir/help-all.txt.tmpl | 8 ++--- pkg/mimir/modules.go | 2 +- pkg/querier/block_streaming_test.go | 22 +++++------- pkg/querier/block_test.go | 2 +- pkg/querier/blocks_store_queryable.go | 38 ++++++++------------ pkg/querier/blocks_store_queryable_test.go | 9 ++--- pkg/querier/querier.go | 10 +++--- pkg/storegateway/bucket_streaming_readers.go | 10 ------ pkg/storegateway/storepb/rpc.pb.go | 10 ++++++ 10 files changed, 54 insertions(+), 69 deletions(-) diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 5a850558589..c5a9f3ed5ce 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1653,23 +1653,23 @@ }, { "kind": "field", - "name": "prefer_streaming_chunks", + "name": "prefer_streaming_chunks_from_ingesters", "required": false, "desc": "Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this.", "fieldValue": null, "fieldDefaultValue": false, - "fieldFlag": "querier.prefer-streaming-chunks", + "fieldFlag": "querier.prefer-streaming-chunks-from-ingesters", "fieldType": "boolean", "fieldCategory": "experimental" }, { "kind": "field", - "name": "prefer_streaming_chunks_store_gateway", + "name": "prefer_streaming_chunks_from_store_gateways", "required": false, - "desc": "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.", + "desc": "Request store-gateways stream chunks. Store-gateways will only respond with a stream of chunks if the target store-gateway supports this, and this preference will be ignored by store-gateways that do not support this.", "fieldValue": null, "fieldDefaultValue": false, - "fieldFlag": "querier.prefer-streaming-chunks-store-gateway", + "fieldFlag": "querier.prefer-streaming-chunks-from-store-gateways", "fieldType": "boolean", "fieldCategory": "experimental" }, @@ -1688,7 +1688,7 @@ "kind": "field", "name": "streaming_chunks_per_store_gateway_series_buffer_size", "required": false, - "desc": "Number of series to buffer per storegateway when streaming chunks from storegateway.", + "desc": "Number of series to buffer per store-gateway when streaming chunks from store-gateways.", "fieldValue": null, "fieldDefaultValue": 256, "fieldFlag": "querier.streaming-chunks-per-store-gateway-buffer-size", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index ba33f8ccc81..95d53b1a2bb 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1593,10 +1593,10 @@ Usage of ./cmd/mimir/mimir: Maximum number of samples a single query can load into memory. This config option should be set on query-frontend too when query sharding is enabled. (default 50000000) -querier.minimize-ingester-requests [experimental] If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path. - -querier.prefer-streaming-chunks + -querier.prefer-streaming-chunks-from-ingesters [experimental] Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this. - -querier.prefer-streaming-chunks-store-gateway - [experimental] Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this. + -querier.prefer-streaming-chunks-from-store-gateways + [experimental] Request store-gateways stream chunks. Store-gateways will only respond with a stream of chunks if the target store-gateway supports this, and this preference will be ignored by store-gateways that do not support this. -querier.query-ingesters-within duration Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester. (default 13h) -querier.query-store-after duration @@ -1624,7 +1624,7 @@ Usage of ./cmd/mimir/mimir: -querier.streaming-chunks-per-ingester-buffer-size uint [experimental] Number of series to buffer per ingester when streaming chunks from ingesters. (default 256) -querier.streaming-chunks-per-store-gateway-buffer-size uint - [experimental] Number of series to buffer per storegateway when streaming chunks from storegateway. (default 256) + [experimental] Number of series to buffer per store-gateway when streaming chunks from store-gateways. (default 256) -querier.timeout duration The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. This also applies to queries evaluated by the ruler (internally or remotely). (default 2m0s) -query-frontend.align-queries-with-step diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index aac79c767c0..6cc88b44139 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -353,7 +353,7 @@ func (t *Mimir) initDistributorService() (serv services.Service, err error) { // ruler's dependency) canJoinDistributorsRing := t.Cfg.isAnyModuleEnabled(Distributor, Write, All) - t.Cfg.Distributor.PreferStreamingChunks = t.Cfg.Querier.PreferStreamingChunks + t.Cfg.Distributor.PreferStreamingChunks = t.Cfg.Querier.PreferStreamingChunksFromIngesters t.Cfg.Distributor.StreamingChunksPerIngesterSeriesBufferSize = t.Cfg.Querier.StreamingChunksPerIngesterSeriesBufferSize t.Cfg.Distributor.MinimizeIngesterRequests = t.Cfg.Querier.MinimizeIngesterRequests diff --git a/pkg/querier/block_streaming_test.go b/pkg/querier/block_streaming_test.go index a65015ffc95..2e47823e1f2 100644 --- a/pkg/querier/block_streaming_test.go +++ b/pkg/querier/block_streaming_test.go @@ -166,20 +166,16 @@ func TestBlockStreamingQuerierSeriesSet(t *testing.T) { require.Equal(t, c.expResult[idx].lbls, s.Labels()) it = s.Iterator(it) if c.errorChunkStreamer { - require.Error(t, it.Err()) - idx++ - // If chunk streamer errors out, we still go through every - // series but we don't get any samples. So we continue here - // and check all the series. - continue + require.EqualError(t, it.Err(), "mocked error") + } else { + var actSamples []testSample + for it.Next() != chunkenc.ValNone { + ts, val := it.At() + actSamples = append(actSamples, testSample{t: ts, v: val}) + } + require.Equal(t, c.expResult[idx].values, actSamples) + require.NoError(t, it.Err()) } - var actSamples []testSample - for it.Next() != chunkenc.ValNone { - ts, val := it.At() - actSamples = append(actSamples, testSample{t: ts, v: val}) - } - require.Equal(t, c.expResult[idx].values, actSamples) - require.NoError(t, it.Err()) idx++ } require.NoError(t, ss.Err()) diff --git a/pkg/querier/block_test.go b/pkg/querier/block_test.go index ec8cdc768eb..112bc21c828 100644 --- a/pkg/querier/block_test.go +++ b/pkg/querier/block_test.go @@ -231,7 +231,7 @@ func TestBlockQuerierSeriesSet(t *testing.T) { { Labels: mkZLabels("__name__", "overlapping2"), Chunks: []storepb.AggrChunk{ - // entire range overlaps with the nextSeriesIndex chunk, so this chunks contributes 0 samples (it will be sorted as second) + // entire range overlaps with the next chunk, so this chunks contributes 0 samples (it will be sorted as second) createAggrChunkWithSineSamples(now.Add(3*time.Second), now.Add(7*time.Second-5*time.Millisecond), 5*time.Millisecond), }, }, diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 6c2a913398b..02bb06e726f 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -265,7 +265,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa ) streamingBufferSize := querierCfg.StreamingChunksPerStoregatewaySeriesBufferSize - if !querierCfg.PreferStreamingChunksStoregateway { + if !querierCfg.PreferStreamingChunksFromStoregateways { streamingBufferSize = 0 } @@ -444,7 +444,7 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* convertedMatchers = convertMatchersToLabelMatcher(matchers) resSeriesSets = []storage.SeriesSet(nil) resWarnings = storage.Warnings(nil) - streamClosers []func() + streamStarters []func() ) shard, _, err := sharding.ShardFromMatchers(matchers) @@ -453,30 +453,28 @@ func (q *blocksStoreQuerier) selectSorted(sp *storage.SelectHints, matchers ...* } queryFunc := func(clients map[BlocksStoreClient][]ulid.ULID, minT, maxT int64) ([]ulid.ULID, error) { - seriesSets, queriedBlocks, warnings, streamCloser, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, convertedMatchers) + seriesSets, queriedBlocks, warnings, startStreamingChunks, err := q.fetchSeriesFromStores(spanCtx, sp, clients, minT, maxT, convertedMatchers) if err != nil { return nil, err } resSeriesSets = append(resSeriesSets, seriesSets...) resWarnings = append(resWarnings, warnings...) - if streamCloser != nil { - streamClosers = append(streamClosers, streamCloser) - } + streamStarters = append(streamStarters, startStreamingChunks) return queriedBlocks, nil } err = q.queryWithConsistencyCheck(spanCtx, spanLog, minT, maxT, shard, queryFunc) if err != nil { - // If this was a streaming call, we should close the stream readers so that goroutines are not - // stuck waiting for chunks. - for _, sc := range streamClosers { - sc() - } return storage.ErrSeriesSet(err) } + // If this was a streaming call, start fetching streaming chunks here. + for _, ss := range streamStarters { + ss() + } + if len(resSeriesSets) == 0 { storage.EmptySeriesSet() } @@ -592,7 +590,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg level.Debug(logger).Log("msg", "consistency check failed", "attempt", attempt, "missing blocks", strings.Join(convertULIDsToString(missingBlocks), " ")) - // The nextSeriesIndex attempt should just query the missing blocks. + // The next attempt should just query the missing blocks. remainingBlocks = missingBlocks } @@ -699,7 +697,7 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar // In case of a successful run, fetchSeriesFromStores returns a streamCloser function if it was a streaming // call for series+chunks. If you are ending the execution of the query later without iterating through all the series // and consuming the chunks, the streamCloser MUST be called to avoid leaking goroutines and gRPC connections. -func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) (_ []storage.SeriesSet, _ []ulid.ULID, _ storage.Warnings, streamCloser func(), _ error) { +func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) (_ []storage.SeriesSet, _ []ulid.ULID, _ storage.Warnings, startStreamingChunks func(), _ error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID) g, gCtx = errgroup.WithContext(reqCtx) @@ -887,19 +885,13 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor return nil, nil, nil, nil, err } - for _, sr := range streamReaders { - sr.StartBuffering() - } - - if len(streams) > 0 { - streamCloser = func() { - for _, sr := range streamReaders { - sr.Close() - } + startStreamingChunks = func() { + for _, sr := range streamReaders { + sr.StartBuffering() } } - return seriesSets, queriedBlocks, warnings, streamCloser, nil + return seriesSets, queriedBlocks, warnings, startStreamingChunks, nil } func shouldStopQueryFunc(err error) bool { diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 825819857bb..26a487e589c 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -860,20 +860,17 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { if testData.expectedErr != nil { if streaming && set.Err() == nil { // In case of streaming, the error can happen during iteration. - foundErr := false + var err error for set.Next() { it := set.At().Iterator(nil) for it.Next() != chunkenc.ValNone { // nolint } - err := it.Err() + err = it.Err() if err != nil { - assert.ErrorContains(t, err, testData.expectedErr.Error()) - assert.ErrorIs(t, err, testData.expectedErr) - foundErr = true break } } - assert.True(t, foundErr) + assert.ErrorIs(t, err, testData.expectedErr) } else { assert.ErrorContains(t, set.Err(), testData.expectedErr.Error()) assert.IsType(t, set.Err(), testData.expectedErr) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 38988406862..f60bc6520b1 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -51,8 +51,8 @@ type Config struct { ShuffleShardingIngestersEnabled bool `yaml:"shuffle_sharding_ingesters_enabled" category:"advanced"` - PreferStreamingChunks bool `yaml:"prefer_streaming_chunks" category:"experimental"` - PreferStreamingChunksStoregateway bool `yaml:"prefer_streaming_chunks_store_gateway" category:"experimental"` + PreferStreamingChunksFromIngesters bool `yaml:"prefer_streaming_chunks_from_ingesters" category:"experimental"` + PreferStreamingChunksFromStoregateways bool `yaml:"prefer_streaming_chunks_from_store_gateways" category:"experimental"` StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size" category:"experimental"` StreamingChunksPerStoregatewaySeriesBufferSize uint64 `yaml:"streaming_chunks_per_store_gateway_series_buffer_size" category:"experimental"` MinimizeIngesterRequests bool `yaml:"minimize_ingester_requests" category:"experimental"` @@ -84,14 +84,14 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.QueryStoreAfter, queryStoreAfterFlag, 12*time.Hour, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.BoolVar(&cfg.ShuffleShardingIngestersEnabled, "querier.shuffle-sharding-ingesters-enabled", true, fmt.Sprintf("Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -%s. If this setting is false or -%s is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", validation.QueryIngestersWithinFlag, validation.QueryIngestersWithinFlag)) - f.BoolVar(&cfg.PreferStreamingChunks, "querier.prefer-streaming-chunks", false, "Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this.") - f.BoolVar(&cfg.PreferStreamingChunksStoregateway, "querier.prefer-streaming-chunks-store-gateway", false, "Request storegateway stream chunks. Storegateway will only respond with a stream of chunks if the target storegateway supports this, and this preference will be ignored by storegateway that do not support this.") + f.BoolVar(&cfg.PreferStreamingChunksFromIngesters, "querier.prefer-streaming-chunks-from-ingesters", false, "Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this.") + f.BoolVar(&cfg.PreferStreamingChunksFromStoregateways, "querier.prefer-streaming-chunks-from-store-gateways", false, "Request store-gateways stream chunks. Store-gateways will only respond with a stream of chunks if the target store-gateway supports this, and this preference will be ignored by store-gateways that do not support this.") f.BoolVar(&cfg.MinimizeIngesterRequests, "querier.minimize-ingester-requests", false, "If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path.") // Why 256 series / ingester/store-gateway? // Based on our testing, 256 series / ingester was a good balance between memory consumption and the CPU overhead of managing a batch of series. f.Uint64Var(&cfg.StreamingChunksPerIngesterSeriesBufferSize, "querier.streaming-chunks-per-ingester-buffer-size", 256, "Number of series to buffer per ingester when streaming chunks from ingesters.") - f.Uint64Var(&cfg.StreamingChunksPerStoregatewaySeriesBufferSize, "querier.streaming-chunks-per-store-gateway-buffer-size", 256, "Number of series to buffer per storegateway when streaming chunks from storegateway.") + f.Uint64Var(&cfg.StreamingChunksPerStoregatewaySeriesBufferSize, "querier.streaming-chunks-per-store-gateway-buffer-size", 256, "Number of series to buffer per store-gateway when streaming chunks from store-gateways.") // The querier.query-ingesters-within flag has been moved to the limits.go file // We still need to set a default value for cfg.QueryIngestersWithin since we need to keep supporting the querier yaml field until Mimir 2.11.0 diff --git a/pkg/storegateway/bucket_streaming_readers.go b/pkg/storegateway/bucket_streaming_readers.go index 97b93823b9c..c81597c703e 100644 --- a/pkg/storegateway/bucket_streaming_readers.go +++ b/pkg/storegateway/bucket_streaming_readers.go @@ -31,7 +31,6 @@ type SeriesChunksStreamReader struct { seriesChunksChan chan *storepb.StreamingChunksBatch chunksBatch []*storepb.StreamingChunks errorChan chan error - closeCalled chan struct{} } func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *SeriesChunksStreamReader { @@ -41,19 +40,12 @@ func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient queryLimiter: queryLimiter, stats: stats, log: log, - closeCalled: make(chan struct{}), } } // Close cleans up all resources associated with this SeriesChunksStreamReader. // This method should only be called if StartBuffering is not called. func (s *SeriesChunksStreamReader) Close() { - select { - case <-s.closeCalled: - return - default: - } - close(s.closeCalled) if err := s.client.CloseSend(); err != nil { level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) } @@ -138,8 +130,6 @@ func (s *SeriesChunksStreamReader) StartBuffering() { // which is true at the time of writing. s.errorChan <- s.client.Context().Err() return - case <-s.closeCalled: - return case s.seriesChunksChan <- c: // Batch enqueued successfully, nothing else to do for this batch. } diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index ed55eba7f98..75a54dd2fbb 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -40,6 +40,16 @@ type SeriesRequest struct { // The content of this field and whether it's supported depends on the // implementation of a specific store. Hints *types.Any `protobuf:"bytes,9,opt,name=hints,proto3" json:"hints,omitempty"` + // If streaming_chunks_batch_size=0, the response must only contain one 'series' at a time + // with the series labels and chunks data sent together. + // If streaming_chunks_batch_size > 0 + // - The store may choose to send the streaming_series/streaming_chunks OR behave as + // if streaming_chunks_batch_size=0 if it does not support streaming series. + // - The store must not send a mix of 'series' and streaming_series/streaming_chunks for a single request. + // - If the store chooses to send streaming series, all the streaming_series must be sent before + // sending any streaming_chunks, with the last streaming_series response containing is_end_of_series_stream=true. + // The order of series in both streaming_series/streaming_chunks must match and the size of the batch must not + // cross streaming_chunks_batch_size, although it can be lower than that. // It is 100 so that we have an option to bring back compatibility with Thanos' storage API. StreamingChunksBatchSize uint64 `protobuf:"varint,100,opt,name=streaming_chunks_batch_size,json=streamingChunksBatchSize,proto3" json:"streaming_chunks_batch_size,omitempty"` } From fa463f3d419fa9c1770cb6fb1ad61c2153c64fbc Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 21 Jun 2023 17:54:53 +0530 Subject: [PATCH 49/75] Optimise streaming of chunks Signed-off-by: Ganesh Vernekar --- .../configuration-parameters/index.md | 18 +- pkg/storegateway/bucket.go | 171 ++++++++++-------- pkg/storegateway/series_chunks.go | 4 +- 3 files changed, 111 insertions(+), 82 deletions(-) diff --git a/docs/sources/mimir/references/configuration-parameters/index.md b/docs/sources/mimir/references/configuration-parameters/index.md index b896cb977d2..c51eaa3102c 100644 --- a/docs/sources/mimir/references/configuration-parameters/index.md +++ b/docs/sources/mimir/references/configuration-parameters/index.md @@ -1077,22 +1077,22 @@ store_gateway_client: # (experimental) Request ingesters stream chunks. Ingesters will only respond # with a stream of chunks if the target ingester supports this, and this # preference will be ignored by ingesters that do not support this. -# CLI flag: -querier.prefer-streaming-chunks -[prefer_streaming_chunks: | default = false] +# CLI flag: -querier.prefer-streaming-chunks-from-ingesters +[prefer_streaming_chunks_from_ingesters: | default = false] -# (experimental) Request storegateway stream chunks. Storegateway will only -# respond with a stream of chunks if the target storegateway supports this, and -# this preference will be ignored by storegateway that do not support this. -# CLI flag: -querier.prefer-streaming-chunks-store-gateway -[prefer_streaming_chunks_store_gateway: | default = false] +# (experimental) Request store-gateways stream chunks. Store-gateways will only +# respond with a stream of chunks if the target store-gateway supports this, and +# this preference will be ignored by store-gateways that do not support this. +# CLI flag: -querier.prefer-streaming-chunks-from-store-gateways +[prefer_streaming_chunks_from_store_gateways: | default = false] # (experimental) Number of series to buffer per ingester when streaming chunks # from ingesters. # CLI flag: -querier.streaming-chunks-per-ingester-buffer-size [streaming_chunks_per_ingester_series_buffer_size: | default = 256] -# (experimental) Number of series to buffer per storegateway when streaming -# chunks from storegateway. +# (experimental) Number of series to buffer per store-gateway when streaming +# chunks from store-gateways. # CLI flag: -querier.streaming-chunks-per-store-gateway-buffer-size [streaming_chunks_per_store_gateway_series_buffer_size: | default = 256] diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 4822e756f8e..bb5e141ab6b 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -638,7 +638,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie reusePostings = make([][]storage.SeriesRef, len(blocks)) reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) - seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + seriesSet, _, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err } @@ -661,13 +661,18 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie req.SkipChunks = false } - seriesSet, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + seriesSet, seriesChunkIt, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err } + var numSeries, numChunks int start := time.Now() - numSeries, numChunks, err := s.sendSeriesChunks(req, srv, seriesSet, stats) + if req.StreamingChunksBatchSize > 0 { + numSeries, numChunks, err = s.sendStreamingChunks(req, srv, seriesChunkIt, stats) + } else { + numSeries, numChunks, err = s.sendSeriesChunks(req, srv, seriesSet, stats) + } if err != nil { return } @@ -765,17 +770,16 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( return numSeries, err } -func (s *BucketStore) sendSeriesChunks( +func (s *BucketStore) sendStreamingChunks( req *storepb.SeriesRequest, srv storepb.Store_SeriesServer, - seriesSet storepb.SeriesSet, + it seriesChunksSetIterator, stats *safeQueryStats, ) (seriesCount, chunksCount int, err error) { var ( - encodeDuration = time.Duration(0) - sendDuration = time.Duration(0) - iterationBegin = time.Now() - streamingChunks = req.StreamingChunksBatchSize > 0 + encodeDuration = time.Duration(0) + sendDuration = time.Duration(0) + iterationBegin = time.Now() ) defer stats.update(func(stats *queryStats) { @@ -794,57 +798,95 @@ func (s *BucketStore) sendSeriesChunks( var ( batchSizeBytes int - chunksBuffer []*storepb.StreamingChunks + chunksBuffer = make([]*storepb.StreamingChunks, req.StreamingChunksBatchSize) ) - if streamingChunks { - chunksBuffer = make([]*storepb.StreamingChunks, req.StreamingChunksBatchSize) - for i := range chunksBuffer { - chunksBuffer[i] = &storepb.StreamingChunks{} - } - } - chunksBatch := &storepb.StreamingChunksBatch{ - Series: chunksBuffer[:0], - } - for seriesSet.Next() { - // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle - // because the subsequent call to seriesSet.Next() may release it. But it is safe to hold - // onto lset because the labels are not released. - lset, chks := seriesSet.At() - seriesCount++ - var response *storepb.SeriesResponse - if streamingChunks { - // We only need to stream chunks here because the series labels have already been sent. + for i := range chunksBuffer { + chunksBuffer[i] = &storepb.StreamingChunks{} + } + chunksBatch := &storepb.StreamingChunksBatch{Series: chunksBuffer[:0]} + for it.Next() { + set := it.At() + for _, sc := range set.series { + seriesCount++ chunksBatch.Series = chunksBatch.Series[:len(chunksBatch.Series)+1] lastSeries := chunksBatch.Series[len(chunksBatch.Series)-1] - lastSeries.Chunks = append(lastSeries.Chunks[:0], chks...) + lastSeries.Chunks = sc.chks lastSeries.SeriesIndex = uint64(seriesCount - 1) - // Copy the chunk bytes to avoid race. - for i := range lastSeries.Chunks { - raw := lastSeries.Chunks[i].Raw - if raw == nil { - continue - } - newChk := storepb.Chunk{ - Type: raw.Type, - Data: append(make([]byte, 0, len(raw.Data)), raw.Data...), - } - lastSeries.Chunks[i].Raw = &newChk - } batchSizeBytes += lastSeries.Size() + + chunksCount += len(sc.chks) + s.metrics.chunkSizeBytes.Observe(float64(chunksSize(sc.chks))) + // We are not strictly required to be under targetQueryStreamBatchMessageSize. // The aim is to not hit gRPC and TCP limits, hence some overage is ok. - if (batchSizeBytes > 0 && batchSizeBytes > targetQueryStreamBatchMessageSize) || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { - response = storepb.NewStreamingChunksResponse(chunksBatch) + if batchSizeBytes > targetQueryStreamBatchMessageSize || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { + err := s.sendMessage("streaming chunks", srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) + if err != nil { + return 0, 0, err + } + chunksBatch.Series = chunksBatch.Series[:0] + batchSizeBytes = 0 } - } else { - var series storepb.Series - if !req.SkipChunks { - series.Chunks = chks + } + + if len(chunksBatch.Series) > 0 { + // Still some chunks left to send before we release the batch. + err := s.sendMessage("streaming chunks", srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) + if err != nil { + return 0, 0, err } - series.Labels = mimirpb.FromLabelsToLabelAdapters(lset) + chunksBatch.Series = chunksBatch.Series[:0] + batchSizeBytes = 0 + } + + set.release() + } + + if it.Err() != nil { + return 0, 0, it.Err() + } + + return seriesCount, chunksCount, it.Err() +} + +func (s *BucketStore) sendSeriesChunks( + req *storepb.SeriesRequest, + srv storepb.Store_SeriesServer, + seriesSet storepb.SeriesSet, + stats *safeQueryStats, +) (seriesCount, chunksCount int, err error) { + var ( + encodeDuration = time.Duration(0) + sendDuration = time.Duration(0) + iterationBegin = time.Now() + ) + + defer stats.update(func(stats *queryStats) { + stats.mergedSeriesCount += seriesCount + stats.mergedChunksCount += chunksCount - response = storepb.NewSeriesResponse(&series) + // The time spent iterating over the series set is the + // actual time spent fetching series and chunks, encoding and sending them to the client. + // We split the timings to have a better view over how time is spent. + stats.streamingSeriesFetchSeriesAndChunksDuration += stats.streamingSeriesWaitBatchLoadedDuration + stats.streamingSeriesEncodeResponseDuration += encodeDuration + stats.streamingSeriesSendResponseDuration += sendDuration + stats.streamingSeriesOtherDuration += time.Duration(util_math.Max(0, int64(time.Since(iterationBegin)- + stats.streamingSeriesFetchSeriesAndChunksDuration-encodeDuration-sendDuration))) + }) + + for seriesSet.Next() { + seriesCount++ + // IMPORTANT: do not retain the memory returned by seriesSet.At() beyond this loop cycle + // because the subsequent call to seriesSet.Next() may release it. But it is safe to hold + // onto lset because the labels are not released. + lset, chks := seriesSet.At() + series := storepb.Series{ + Labels: mimirpb.FromLabelsToLabelAdapters(lset), + } + if !req.SkipChunks { + series.Chunks = chks } if !req.SkipChunks { @@ -852,30 +894,15 @@ func (s *BucketStore) sendSeriesChunks( s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) } - if response != nil { - err := s.sendMessage("series", srv, response, &encodeDuration, &sendDuration) - if err != nil { - return 0, 0, err - } - - if streamingChunks { - chunksBatch.Series = chunksBatch.Series[:0] - batchSizeBytes = 0 - } + err := s.sendMessage("series", srv, storepb.NewSeriesResponse(&series), &encodeDuration, &sendDuration) + if err != nil { + return 0, 0, err } } if seriesSet.Err() != nil { return 0, 0, errors.Wrap(seriesSet.Err(), "expand series set") } - if streamingChunks && len(chunksBatch.Series) > 0 { - // Still some chunks left to send. - err := s.sendMessage("series", srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) - if err != nil { - return 0, 0, err - } - } - return seriesCount, chunksCount, nil } @@ -936,7 +963,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( stats *safeQueryStats, reusePostings [][]storage.SeriesRef, // Used if not empty. reusePendingMatchers [][]*labels.Matcher, // Used if not empty. -) (storepb.SeriesSet, *hintspb.SeriesResponseHints, error) { +) (storepb.SeriesSet, seriesChunksSetIterator, *hintspb.SeriesResponseHints, error) { var ( resHints = &hintspb.SeriesResponseHints{} mtx = sync.Mutex{} @@ -1008,7 +1035,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( err := g.Wait() if err != nil { - return nil, nil, err + return nil, nil, nil, err } stats.update(func(stats *queryStats) { @@ -1023,16 +1050,18 @@ func (s *BucketStore) streamingSeriesSetForBlocks( mergedIterator = newLimitingSeriesChunkRefsSetIterator(mergedIterator, chunksLimiter, seriesLimiter) var set storepb.SeriesSet + var scsi seriesChunksSetIterator if !req.SkipChunks { var cache chunkscache.Cache if s.fineGrainedChunksCachingEnabled { cache = s.chunksCache } - set = newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, mergedIterator, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) + scsi = newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, mergedIterator, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) + set = newSeriesChunksSeriesSet(scsi) } else { set = newSeriesSetWithoutChunks(ctx, mergedIterator, stats) } - return set, resHints, nil + return set, scsi, resHints, nil } func (s *BucketStore) recordSeriesCallResult(safeStats *safeQueryStats) { diff --git a/pkg/storegateway/series_chunks.go b/pkg/storegateway/series_chunks.go index f578666a4b2..737db07ebb1 100644 --- a/pkg/storegateway/series_chunks.go +++ b/pkg/storegateway/series_chunks.go @@ -192,11 +192,11 @@ func newSeriesSetWithChunks( refsIteratorBatchSize int, stats *safeQueryStats, minT, maxT int64, -) storepb.SeriesSet { +) seriesChunksSetIterator { var iterator seriesChunksSetIterator iterator = newLoadingSeriesChunksSetIterator(ctx, logger, userID, cache, chunkReaders, refsIterator, refsIteratorBatchSize, stats, minT, maxT) iterator = newPreloadingAndStatsTrackingSetIterator[seriesChunksSet](ctx, 1, iterator, stats) - return newSeriesChunksSeriesSet(iterator) + return iterator } // Next advances to the next item. Once the underlying seriesChunksSet has been fully consumed From 813afcd6de25b91f3de68b9d6dcb471c20de8a37 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 21 Jun 2023 18:23:57 +0530 Subject: [PATCH 50/75] Fix tests Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index e50bcd17385..e5368af6c91 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -170,8 +170,8 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream "-store-gateway.tenant-shard-size": fmt.Sprintf("%d", testCfg.tenantShardSize), "-query-frontend.query-stats-enabled": "true", "-query-frontend.parallelize-shardable-queries": strconv.FormatBool(testCfg.queryShardingEnabled), - "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), - "-querier.prefer-streaming-chunks-store-gateway": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-from-ingesters": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-from-store-gateways": strconv.FormatBool(streamingEnabled), }) // Start store-gateways. @@ -908,13 +908,13 @@ func TestQueryLimitsWithBlocksStorageRunningInMicroServices(t *testing.T) { // Configure the blocks storage to frequently compact TSDB head // and ship blocks to the storage. flags := mergeFlags(BlocksStorageFlags(), BlocksStorageS3Flags(), map[string]string{ - "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), - "-blocks-storage.tsdb.ship-interval": "1s", - "-blocks-storage.bucket-store.sync-interval": "1s", - "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), - "-querier.max-fetched-series-per-query": "3", - "-querier.prefer-streaming-chunks": strconv.FormatBool(streamingEnabled), - "-querier.prefer-streaming-chunks-store-gateway": strconv.FormatBool(streamingEnabled), + "-blocks-storage.tsdb.block-ranges-period": blockRangePeriod.String(), + "-blocks-storage.tsdb.ship-interval": "1s", + "-blocks-storage.bucket-store.sync-interval": "1s", + "-blocks-storage.tsdb.retention-period": ((blockRangePeriod * 2) - 1).String(), + "-querier.max-fetched-series-per-query": "3", + "-querier.prefer-streaming-chunks-from-ingesters": strconv.FormatBool(streamingEnabled), + "-querier.prefer-streaming-chunks-from-store-gateways": strconv.FormatBool(streamingEnabled), }) // Start dependencies. From a71c6a04eda384f6ffdc9e3bbe2c5b0aaeb142ff Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 22 Jun 2023 13:00:26 +0530 Subject: [PATCH 51/75] Fix limits calculation in storegateway Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 8 ++-- pkg/storegateway/bucket_e2e_test.go | 73 ++++++++++++++++------------- 2 files changed, 46 insertions(+), 35 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index bb5e141ab6b..ca354525c56 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -587,8 +587,6 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie ctx = srv.Context() stats = newSafeQueryStats() reqBlockMatchers []*labels.Matcher - chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) - seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) ) defer s.recordSeriesCallResult(stats) @@ -637,6 +635,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie reusePostings = make([][]storage.SeriesRef, len(blocks)) reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) + chunksLimiter := s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) + seriesLimiter := s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) seriesSet, _, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { @@ -661,6 +661,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie req.SkipChunks = false } + chunksLimiter := s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) + seriesLimiter := s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) seriesSet, seriesChunkIt, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) if err != nil { return err @@ -961,7 +963,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, - reusePostings [][]storage.SeriesRef, // Used if not empty. + reusePostings [][]storage.SeriesRef, // Used if not empty. reusePendingMatchers [][]*labels.Matcher, // Used if not empty. ) (storepb.SeriesSet, seriesChunksSetIterator, *hintspb.SeriesResponseHints, error) { var ( diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index c298434214b..662a722276e 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -619,8 +619,8 @@ func TestBucketStore_ManyParts_e2e(t *testing.T) { } func TestBucketStore_Series_ChunksLimiter_e2e(t *testing.T) { - // The query will fetch 2 series from 6 blocks, so we do expect to hit a total of 12 chunks. - expectedChunks := uint64(2 * 6) + // The query will fetch 4 series from 3 blocks each, so we do expect to hit a total of 12 chunks. + expectedChunks := uint64(4 * 3) cases := map[string]struct { maxChunksLimit uint64 @@ -631,6 +631,10 @@ func TestBucketStore_Series_ChunksLimiter_e2e(t *testing.T) { "should succeed if the max chunks limit is not exceeded": { maxChunksLimit: expectedChunks, }, + "should succeed if the max series limit is not exceeded": { + // The streaming case should not count the series twice. + maxSeriesLimit: 4, + }, "should fail if the max chunks limit is exceeded - 422": { maxChunksLimit: expectedChunks - 1, expectedErr: "exceeded chunks limit", @@ -646,36 +650,41 @@ func TestBucketStore_Series_ChunksLimiter_e2e(t *testing.T) { for testName, testData := range cases { t.Run(testName, func(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - bkt := objstore.NewInMemBucket() - - prepConfig := defaultPrepareStoreConfig(t) - prepConfig.chunksLimiterFactory = newStaticChunksLimiterFactory(testData.maxChunksLimit) - prepConfig.seriesLimiterFactory = newStaticSeriesLimiterFactory(testData.maxSeriesLimit) - - s := prepareStoreWithTestBlocks(t, bkt, prepConfig) - assert.NoError(t, s.store.SyncBlocks(ctx)) - - req := &storepb.SeriesRequest{ - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "1"}, - }, - MinTime: timestamp.FromTime(minTime), - MaxTime: timestamp.FromTime(maxTime), - } - - srv := newBucketStoreTestServer(t, s.store) - _, _, _, err := srv.Series(context.Background(), req) - - if testData.expectedErr == "" { - assert.NoError(t, err) - } else { - assert.Error(t, err) - assert.True(t, strings.Contains(err.Error(), testData.expectedErr)) - status, ok := status.FromError(err) - assert.Equal(t, true, ok) - assert.Equal(t, testData.expectedCode, status.Code()) + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + bkt := objstore.NewInMemBucket() + + prepConfig := defaultPrepareStoreConfig(t) + prepConfig.chunksLimiterFactory = newStaticChunksLimiterFactory(testData.maxChunksLimit) + prepConfig.seriesLimiterFactory = newStaticSeriesLimiterFactory(testData.maxSeriesLimit) + + s := prepareStoreWithTestBlocks(t, bkt, prepConfig) + assert.NoError(t, s.store.SyncBlocks(ctx)) + + req := &storepb.SeriesRequest{ + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_EQ, Name: "a", Value: "1"}, + }, + MinTime: timestamp.FromTime(minTime), + MaxTime: timestamp.FromTime(maxTime), + StreamingChunksBatchSize: uint64(streamingBatchSize), + } + + srv := newBucketStoreTestServer(t, s.store) + _, _, _, err := srv.Series(context.Background(), req) + + if testData.expectedErr == "" { + assert.NoError(t, err) + } else { + assert.Error(t, err) + assert.True(t, strings.Contains(err.Error(), testData.expectedErr)) + status, ok := status.FromError(err) + assert.Equal(t, true, ok) + assert.Equal(t, testData.expectedCode, status.Code()) + } + }) } }) } From b78935eb3effac8588d5c389bddede4daf4a6b38 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 22 Jun 2023 14:02:10 +0530 Subject: [PATCH 52/75] Increase test coverage Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_e2e_test.go | 65 ++++---- pkg/storegateway/bucket_test.go | 240 ++++++++++++++++------------ pkg/storegateway/gateway_test.go | 220 +++++++++++++------------ 3 files changed, 288 insertions(+), 237 deletions(-) diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 662a722276e..37aa4985c07 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -906,41 +906,46 @@ func TestBucketStore_LabelValues_e2e(t *testing.T) { } func TestBucketStore_ValueTypes_e2e(t *testing.T) { - foreachStore(t, func(t *testing.T, newSuite suiteFactory) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + foreachStore(t, func(t *testing.T, newSuite suiteFactory) { - s := newSuite() + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() - mint, maxt := s.store.TimeRange() - assert.Equal(t, s.minTime, mint) - assert.Equal(t, s.maxTime, maxt) + s := newSuite() - req := &storepb.SeriesRequest{ - MinTime: mint, - MaxTime: maxt, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_RE, Name: "a", Value: "1|2"}, - }, - SkipChunks: false, - } + mint, maxt := s.store.TimeRange() + assert.Equal(t, s.minTime, mint) + assert.Equal(t, s.maxTime, maxt) + + req := &storepb.SeriesRequest{ + MinTime: mint, + MaxTime: maxt, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: "a", Value: "1|2"}, + }, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } - srv := newBucketStoreTestServer(t, s.store) - seriesSet, _, _, err := srv.Series(ctx, req) - require.NoError(t, err) + srv := newBucketStoreTestServer(t, s.store) + seriesSet, _, _, err := srv.Series(ctx, req) + require.NoError(t, err) - counts := map[storepb.Chunk_Encoding]int{} - for _, series := range seriesSet { - for _, chunk := range series.Chunks { - counts[chunk.Raw.Type]++ - } - } - for _, chunkType := range []storepb.Chunk_Encoding{storepb.Chunk_XOR, storepb.Chunk_Histogram, storepb.Chunk_FloatHistogram} { - count, ok := counts[chunkType] - assert.True(t, ok, fmt.Sprintf("value type %s is not present", storepb.Chunk_Encoding_name[int32(chunkType)])) - assert.NotEmpty(t, count) - } - }) + counts := map[storepb.Chunk_Encoding]int{} + for _, series := range seriesSet { + for _, chunk := range series.Chunks { + counts[chunk.Raw.Type]++ + } + } + for _, chunkType := range []storepb.Chunk_Encoding{storepb.Chunk_XOR, storepb.Chunk_Histogram, storepb.Chunk_FloatHistogram} { + count, ok := counts[chunkType] + assert.True(t, ok, fmt.Sprintf("value type %s is not present", storepb.Chunk_Encoding_name[int32(chunkType)])) + assert.NotEmpty(t, count) + } + }) + }) + } } func emptyToNil(values []string) []string { diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 1acc2f694c7..25fa94f7371 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1565,7 +1565,8 @@ func TestBucketStore_Series_Concurrency(t *testing.T) { Hints: marshalledHints, } - runRequest := func(t *testing.T, store *BucketStore) { + runRequest := func(t *testing.T, store *BucketStore, streamBatchSize int) { + req.StreamingChunksBatchSize = uint64(streamBatchSize) srv := newBucketStoreTestServer(t, store) seriesSet, warnings, _, err := srv.Series(context.Background(), req) require.NoError(t, err) @@ -1582,57 +1583,61 @@ func TestBucketStore_Series_Concurrency(t *testing.T) { // Run the test with different batch sizes. for _, batchSize := range []int{len(expectedSeries) / 100, len(expectedSeries) * 2} { t.Run(fmt.Sprintf("batch size: %d", batchSize), func(t *testing.T) { - // Reset the memory pool tracker. - seriesChunkRefsSetPool.(*pool.TrackedPool).Reset() + for _, streamBatchSize := range []int{0, 10} { + t.Run(fmt.Sprintf("streamBatchSize:%d", streamBatchSize), func(t *testing.T) { + // Reset the memory pool tracker. + seriesChunkRefsSetPool.(*pool.TrackedPool).Reset() - metaFetcher, err := block.NewMetaFetcher(logger, 1, instrumentedBucket, "", nil, nil) - assert.NoError(t, err) + metaFetcher, err := block.NewMetaFetcher(logger, 1, instrumentedBucket, "", nil, nil) + assert.NoError(t, err) - // Create the bucket store. - store, err := NewBucketStore( - "test-user", - instrumentedBucket, - metaFetcher, - tmpDir, - batchSize, - 1, - selectAllStrategy{}, - newStaticChunksLimiterFactory(0), - newStaticSeriesLimiterFactory(0), - newGapBasedPartitioners(mimir_tsdb.DefaultPartitionerMaxGapSize, nil), - 1, - mimir_tsdb.DefaultPostingOffsetInMemorySampling, - indexheader.Config{}, - false, // Lazy index-header loading disabled. - 0, - hashcache.NewSeriesHashCache(1024*1024), - NewBucketStoreMetrics(nil), - WithLogger(logger), - ) - require.NoError(t, err) - require.NoError(t, store.SyncBlocks(ctx)) + // Create the bucket store. + store, err := NewBucketStore( + "test-user", + instrumentedBucket, + metaFetcher, + tmpDir, + batchSize, + 1, + selectAllStrategy{}, + newStaticChunksLimiterFactory(0), + newStaticSeriesLimiterFactory(0), + newGapBasedPartitioners(mimir_tsdb.DefaultPartitionerMaxGapSize, nil), + 1, + mimir_tsdb.DefaultPostingOffsetInMemorySampling, + indexheader.Config{}, + false, // Lazy index-header loading disabled. + 0, + hashcache.NewSeriesHashCache(1024*1024), + NewBucketStoreMetrics(nil), + WithLogger(logger), + ) + require.NoError(t, err) + require.NoError(t, store.SyncBlocks(ctx)) - // Run workers. - wg := sync.WaitGroup{} - wg.Add(numWorkers) + // Run workers. + wg := sync.WaitGroup{} + wg.Add(numWorkers) - for c := 0; c < numWorkers; c++ { - go func() { - defer wg.Done() + for c := 0; c < numWorkers; c++ { + go func() { + defer wg.Done() - for r := 0; r < numRequestsPerWorker; r++ { - runRequest(t, store) + for r := 0; r < numRequestsPerWorker; r++ { + runRequest(t, store, streamBatchSize) + } + }() } - }() - } - // Wait until all workers have done. - wg.Wait() + // Wait until all workers have done. + wg.Wait() - // Ensure the seriesChunkRefsSet memory pool has been used and all slices pulled from - // pool have put back. - assert.Greater(t, seriesChunkRefsSetPool.(*pool.TrackedPool).Gets.Load(), int64(0)) - assert.Equal(t, int64(0), seriesChunkRefsSetPool.(*pool.TrackedPool).Balance.Load()) + // Ensure the seriesChunkRefsSet memory pool has been used and all slices pulled from + // pool have put back. + assert.Greater(t, seriesChunkRefsSetPool.(*pool.TrackedPool).Gets.Load(), int64(0)) + assert.Equal(t, int64(0), seriesChunkRefsSetPool.(*pool.TrackedPool).Balance.Load()) + }) + } }) } } @@ -1998,6 +2003,13 @@ func TestBucketStore_Series_CanceledRequest(t *testing.T) { s, ok := status.FromError(err) assert.True(t, ok) assert.Equal(t, codes.Canceled, s.Code()) + + req.StreamingChunksBatchSize = 10 + _, _, _, err = srv.Series(ctx, req) + assert.Error(t, err) + s, ok = status.FromError(err) + assert.True(t, ok) + assert.Equal(t, codes.Canceled, s.Code()) } func TestBucketStore_Series_InvalidRequest(t *testing.T) { @@ -2189,28 +2201,33 @@ func testBucketStoreSeriesBlockWithMultipleChunks( for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - req := &storepb.SeriesRequest{ - MinTime: testData.reqMinTime, - MaxTime: testData.reqMaxTime, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "test"}, - }, - } + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + req := &storepb.SeriesRequest{ + MinTime: testData.reqMinTime, + MaxTime: testData.reqMaxTime, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_EQ, Name: "__name__", Value: "test"}, + }, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } - seriesSet, _, _, err := srv.Series(context.Background(), req) - assert.NoError(t, err) - assert.True(t, len(seriesSet) == 1) + seriesSet, _, _, err := srv.Series(context.Background(), req) + assert.NoError(t, err) + assert.True(t, len(seriesSet) == 1) - // Count the number of samples in the returned chunks. - numSamples := 0 - for _, rawChunk := range seriesSet[0].Chunks { - decodedChunk, err := chunkenc.FromData(encoding, rawChunk.Raw.Data) - assert.NoError(t, err) + // Count the number of samples in the returned chunks. + numSamples := 0 + for _, rawChunk := range seriesSet[0].Chunks { + decodedChunk, err := chunkenc.FromData(encoding, rawChunk.Raw.Data) + assert.NoError(t, err) - numSamples += decodedChunk.NumSamples() - } + numSamples += decodedChunk.NumSamples() + } - assert.True(t, testData.expectedSamples == numSamples, "expected: %d, actual: %d", testData.expectedSamples, numSamples) + assert.True(t, testData.expectedSamples == numSamples, "expected: %d, actual: %d", testData.expectedSamples, numSamples) + }) + } }) } } @@ -2312,21 +2329,26 @@ func TestBucketStore_Series_Limits(t *testing.T) { assert.NoError(t, err) assert.NoError(t, store.SyncBlocks(ctx)) - req := &storepb.SeriesRequest{ - MinTime: minTime, - MaxTime: maxTime, - Matchers: testData.reqMatchers, - } - srv := newBucketStoreTestServer(t, store) - seriesSet, _, _, err := srv.Series(ctx, req) + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize: %d", streamingBatchSize), func(t *testing.T) { + req := &storepb.SeriesRequest{ + MinTime: minTime, + MaxTime: maxTime, + Matchers: testData.reqMatchers, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } - if testData.expectedErr != "" { - require.Error(t, err) - assert.ErrorContains(t, err, testData.expectedErr) - } else { - require.NoError(t, err) - assert.Len(t, seriesSet, testData.expectedSeries) + seriesSet, _, _, err := srv.Series(ctx, req) + + if testData.expectedErr != "" { + require.Error(t, err) + assert.ErrorContains(t, err, testData.expectedErr) + } else { + require.NoError(t, err) + assert.Len(t, seriesSet, testData.expectedSeries) + } + }) } }) } @@ -2801,41 +2823,51 @@ func runTestServerSeries(t test.TB, store *BucketStore, cases ...*seriesCase) { t.Run(c.Name, func(t test.TB) { srv := newBucketStoreTestServer(t, store) - t.ResetTimer() - for i := 0; i < t.N(); i++ { - seriesSet, warnings, hints, err := srv.Series(context.Background(), c.Req) - require.NoError(t, err) - require.Equal(t, len(c.ExpectedWarnings), len(warnings), "%v", warnings) - require.Equal(t, len(c.ExpectedSeries), len(seriesSet), "Matchers: %v Min time: %d Max time: %d", c.Req.Matchers, c.Req.MinTime, c.Req.MaxTime) - - if !t.IsBenchmark() { - if len(c.ExpectedSeries) == 1 { - // For bucketStoreAPI chunks are not sorted within response. TODO: Investigate: Is this fine? - sort.Slice(seriesSet[0].Chunks, func(i, j int) bool { - return seriesSet[0].Chunks[i].MinTime < seriesSet[0].Chunks[j].MinTime - }) - } - - // Huge responses can produce unreadable diffs - make it more human readable. - if len(c.ExpectedSeries) > 4 { - for j := range c.ExpectedSeries { - assert.Equal(t, c.ExpectedSeries[j].Labels, seriesSet[j].Labels, "%v series chunks mismatch", j) + streamingBatchSizes := []int{0} + if !t.IsBenchmark() { + streamingBatchSizes = []int{0, 1, 5} + } + for _, streamingBatchSize := range streamingBatchSizes { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t test.TB) { + c.Req.StreamingChunksBatchSize = uint64(streamingBatchSize) + t.ResetTimer() + for i := 0; i < t.N(); i++ { + seriesSet, warnings, hints, err := srv.Series(context.Background(), c.Req) + require.NoError(t, err) + require.Equal(t, len(c.ExpectedWarnings), len(warnings), "%v", warnings) + require.Equal(t, len(c.ExpectedSeries), len(seriesSet), "Matchers: %v Min time: %d Max time: %d", c.Req.Matchers, c.Req.MinTime, c.Req.MaxTime) + + if !t.IsBenchmark() { + if len(c.ExpectedSeries) == 1 { + // For bucketStoreAPI chunks are not sorted within response. TODO: Investigate: Is this fine? + sort.Slice(seriesSet[0].Chunks, func(i, j int) bool { + return seriesSet[0].Chunks[i].MinTime < seriesSet[0].Chunks[j].MinTime + }) + } - // Check chunks when it is not a skip chunk query - if !c.Req.SkipChunks { - if len(c.ExpectedSeries[j].Chunks) > 20 { - assert.Equal(t, len(c.ExpectedSeries[j].Chunks), len(seriesSet[j].Chunks), "%v series chunks number mismatch", j) + // Huge responses can produce unreadable diffs - make it more human readable. + if len(c.ExpectedSeries) > 4 { + for j := range c.ExpectedSeries { + assert.Equal(t, c.ExpectedSeries[j].Labels, seriesSet[j].Labels, "%v series chunks mismatch", j) + + // Check chunks when it is not a skip chunk query + if !c.Req.SkipChunks { + if len(c.ExpectedSeries[j].Chunks) > 20 { + assert.Equal(t, len(c.ExpectedSeries[j].Chunks), len(seriesSet[j].Chunks), "%v series chunks number mismatch", j) + } + assert.Equal(t, c.ExpectedSeries[j].Chunks, seriesSet[j].Chunks, "%v series chunks mismatch", j) + } } - assert.Equal(t, c.ExpectedSeries[j].Chunks, seriesSet[j].Chunks, "%v series chunks mismatch", j) + } else { + assert.Equal(t, c.ExpectedSeries, seriesSet) } + + assert.Equal(t, c.ExpectedHints, hints) } - } else { - assert.Equal(t, c.ExpectedSeries, seriesSet) } - - assert.Equal(t, c.ExpectedHints, hints) - } + }) } + }) } } diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 1511af6a57d..1866ba50453 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1050,34 +1050,38 @@ func TestStoreGateway_SeriesQueryingShouldRemoveExternalLabels(t *testing.T) { srv := newStoreGatewayTestServer(t, g) - // Query back all series. - req := &storepb.SeriesRequest{ - MinTime: minT, - MaxTime: maxT, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_RE, Name: "__name__", Value: ".*"}, - }, - } - - seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) - require.NoError(t, err) - assert.Empty(t, warnings) - assert.Len(t, seriesSet, numSeries) - - for seriesID := 0; seriesID < numSeries; seriesID++ { - actual := seriesSet[seriesID] - - // Ensure Mimir external labels have been removed. - assert.Equal(t, []mimirpb.LabelAdapter{{Name: "series_id", Value: strconv.Itoa(seriesID)}}, actual.Labels) - - // Ensure samples have been correctly queried. The store-gateway doesn't deduplicate chunks, - // so the same sample is returned twice because in this test we query two identical blocks. - samples, err := readSamplesFromChunks(actual.Chunks) - require.NoError(t, err) - assert.Equal(t, []sample{ - {t: minT + (step * int64(seriesID)), v: float64(seriesID)}, - {t: minT + (step * int64(seriesID)), v: float64(seriesID)}, - }, samples) + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + // Query back all series. + req := &storepb.SeriesRequest{ + MinTime: minT, + MaxTime: maxT, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: "__name__", Value: ".*"}, + }, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } + seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) + require.NoError(t, err) + assert.Empty(t, warnings) + assert.Len(t, seriesSet, numSeries) + + for seriesID := 0; seriesID < numSeries; seriesID++ { + actual := seriesSet[seriesID] + + // Ensure Mimir external labels have been removed. + assert.Equal(t, []mimirpb.LabelAdapter{{Name: "series_id", Value: strconv.Itoa(seriesID)}}, actual.Labels) + + // Ensure samples have been correctly queried. The store-gateway doesn't deduplicate chunks, + // so the same sample is returned twice because in this test we query two identical blocks. + samples, err := readSamplesFromChunks(actual.Chunks) + require.NoError(t, err) + assert.Equal(t, []sample{ + {t: minT + (step * int64(seriesID)), v: float64(seriesID)}, + {t: minT + (step * int64(seriesID)), v: float64(seriesID)}, + }, samples) + } + }) } }) } @@ -1124,58 +1128,63 @@ func TestStoreGateway_Series_QuerySharding(t *testing.T) { }, } - // Prepare the storage dir. - bucketClient, storageDir := mimir_testutil.PrepareFilesystemBucket(t) - - // Generate a TSDB block in the storage dir, containing the fixture series. - mockTSDBWithGenerator(t, path.Join(storageDir, userID), func() func() (bool, labels.Labels, int64, float64) { - nextID := 0 - return func() (bool, labels.Labels, int64, float64) { - if nextID >= len(series) { - return false, labels.Labels{}, 0, 0 - } + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + // Prepare the storage dir. + bucketClient, storageDir := mimir_testutil.PrepareFilesystemBucket(t) + + // Generate a TSDB block in the storage dir, containing the fixture series. + mockTSDBWithGenerator(t, path.Join(storageDir, userID), func() func() (bool, labels.Labels, int64, float64) { + nextID := 0 + return func() (bool, labels.Labels, int64, float64) { + if nextID >= len(series) { + return false, labels.Labels{}, 0, 0 + } - nextSeries := series[nextID] - nextID++ + nextSeries := series[nextID] + nextID++ - return true, nextSeries, util.TimeToMillis(time.Now().Add(-time.Duration(nextID) * time.Second)), float64(nextID) - } - }()) + return true, nextSeries, util.TimeToMillis(time.Now().Add(-time.Duration(nextID) * time.Second)), float64(nextID) + } + }()) - createBucketIndex(t, bucketClient, userID) + createBucketIndex(t, bucketClient, userID) - // Create a store-gateway. - gatewayCfg := mockGatewayConfig() - storageCfg := mockStorageConfig(t) - storageCfg.BucketStore.BucketIndex.DeprecatedEnabled = true + // Create a store-gateway. + gatewayCfg := mockGatewayConfig() + storageCfg := mockStorageConfig(t) + storageCfg.BucketStore.BucketIndex.DeprecatedEnabled = true - ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) - t.Cleanup(func() { assert.NoError(t, closer.Close()) }) + ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) + t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), log.NewNopLogger(), nil, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(ctx, g)) - t.Cleanup(func() { assert.NoError(t, services.StopAndAwaitTerminated(ctx, g)) }) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, defaultLimitsOverrides(t), log.NewNopLogger(), nil, nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(ctx, g)) + t.Cleanup(func() { assert.NoError(t, services.StopAndAwaitTerminated(ctx, g)) }) - srv := newStoreGatewayTestServer(t, g) + srv := newStoreGatewayTestServer(t, g) - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - req := &storepb.SeriesRequest{ - MinTime: math.MinInt64, - MaxTime: math.MaxInt64, - Matchers: testData.matchers, - } + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + req := &storepb.SeriesRequest{ + MinTime: math.MinInt64, + MaxTime: math.MaxInt64, + Matchers: testData.matchers, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } - seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) - require.NoError(t, err) - assert.Empty(t, warnings) + seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) + require.NoError(t, err) + assert.Empty(t, warnings) - actualMetrics := make([]string, 0, len(seriesSet)) - for _, s := range seriesSet { - actualMetrics = append(actualMetrics, s.PromLabels().Get(labels.MetricName)) + actualMetrics := make([]string, 0, len(seriesSet)) + for _, s := range seriesSet { + actualMetrics = append(actualMetrics, s.PromLabels().Get(labels.MetricName)) + } + assert.ElementsMatch(t, testData.expectedMetrics, actualMetrics) + }) } - assert.ElementsMatch(t, testData.expectedMetrics, actualMetrics) }) } } @@ -1419,44 +1428,49 @@ func TestStoreGateway_SeriesQueryingShouldEnforceMaxChunksPerQueryLimit(t *testi }, } - for testName, testData := range tests { - t.Run(testName, func(t *testing.T) { - // Customise the limits. - limits := defaultLimitsConfig() - limits.MaxChunksPerQuery = testData.limit - overrides, err := validation.NewOverrides(limits, nil) - require.NoError(t, err) - - // Create a store-gateway used to query back the series from the blocks. - gatewayCfg := mockGatewayConfig() - storageCfg := mockStorageConfig(t) - - ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) - t.Cleanup(func() { assert.NoError(t, closer.Close()) }) + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + // Customise the limits. + limits := defaultLimitsConfig() + limits.MaxChunksPerQuery = testData.limit + overrides, err := validation.NewOverrides(limits, nil) + require.NoError(t, err) - g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, overrides, logger, nil, nil) - require.NoError(t, err) - require.NoError(t, services.StartAndAwaitRunning(ctx, g)) - t.Cleanup(func() { assert.NoError(t, services.StopAndAwaitTerminated(ctx, g)) }) + // Create a store-gateway used to query back the series from the blocks. + gatewayCfg := mockGatewayConfig() + storageCfg := mockStorageConfig(t) - srv := newStoreGatewayTestServer(t, g) + ringStore, closer := consul.NewInMemoryClient(ring.GetCodec(), log.NewNopLogger(), nil) + t.Cleanup(func() { assert.NoError(t, closer.Close()) }) - // Query back all the series (1 chunk per series in this test). - seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) + g, err := newStoreGateway(gatewayCfg, storageCfg, bucketClient, ringStore, overrides, logger, nil, nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(ctx, g)) + t.Cleanup(func() { assert.NoError(t, services.StopAndAwaitTerminated(ctx, g)) }) - if testData.expectedErr != nil { - require.Error(t, err) - assert.IsType(t, testData.expectedErr, err) - s1, ok := status.FromError(errors.Cause(err)) - assert.True(t, ok) - s2, ok := status.FromError(errors.Cause(testData.expectedErr)) - assert.True(t, ok) - assert.True(t, strings.Contains(s1.Message(), s2.Message())) - assert.Equal(t, s1.Code(), s2.Code()) - } else { - require.NoError(t, err) - assert.Empty(t, warnings) - assert.Len(t, seriesSet, chunksQueried) + srv := newStoreGatewayTestServer(t, g) + + // Query back all the series (1 chunk per series in this test). + req.StreamingChunksBatchSize = uint64(streamingBatchSize) + seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) + + if testData.expectedErr != nil { + require.Error(t, err) + assert.IsType(t, testData.expectedErr, err) + s1, ok := status.FromError(errors.Cause(err)) + assert.True(t, ok) + s2, ok := status.FromError(errors.Cause(testData.expectedErr)) + assert.True(t, ok) + assert.True(t, strings.Contains(s1.Message(), s2.Message())) + assert.Equal(t, s1.Code(), s2.Code()) + } else { + require.NoError(t, err) + assert.Empty(t, warnings) + assert.Len(t, seriesSet, chunksQueried) + } + }) } }) } From bbbcd4eaafc5633840facda519b70344bce44b9c Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 16:40:45 +0530 Subject: [PATCH 53/75] Fix tests Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_test.go | 157 ++++++++++++++++---------------- 1 file changed, 80 insertions(+), 77 deletions(-) diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 25fa94f7371..e43c077f9dd 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1409,40 +1409,49 @@ func benchBucketSeries(t test.TB, skipChunk bool, samplesPerSeries, totalSeries ExpectedSeries: series[:seriesCut], }) } - runTestServerSeries(t, st, bCases...) + streamingBatchSizes := []int{0} if !t.IsBenchmark() { - if !skipChunk { - assert.Zero(t, seriesChunksSlicePool.(*pool.TrackedPool).Balance.Load()) - assert.Zero(t, chunksSlicePool.(*pool.TrackedPool).Balance.Load()) + streamingBatchSizes = []int{0, 1, 5} + } + for _, streamingBatchSize := range streamingBatchSizes { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t test.TB) { + runTestServerSeries(t, st, streamingBatchSize, bCases...) - assert.Greater(t, int(seriesChunksSlicePool.(*pool.TrackedPool).Gets.Load()), 0) - assert.Greater(t, int(chunksSlicePool.(*pool.TrackedPool).Gets.Load()), 0) - } + if !t.IsBenchmark() { + if !skipChunk { + assert.Zero(t, seriesChunksSlicePool.(*pool.TrackedPool).Balance.Load()) + assert.Zero(t, chunksSlicePool.(*pool.TrackedPool).Balance.Load()) - for _, b := range st.blocks { - // NOTE(bwplotka): It is 4 x 1.0 for 100mln samples. Kind of make sense: long series. - assert.Equal(t, 0.0, promtest.ToFloat64(b.metrics.seriesRefetches)) - } + assert.Greater(t, int(seriesChunksSlicePool.(*pool.TrackedPool).Gets.Load()), 0) + assert.Greater(t, int(chunksSlicePool.(*pool.TrackedPool).Gets.Load()), 0) + } - // Check exposed metrics. - assertHistograms := map[string]bool{ - "cortex_bucket_store_series_request_stage_duration_seconds": true, - "cortex_bucket_store_series_batch_preloading_load_duration_seconds": st.maxSeriesPerBatch < totalSeries, // Tracked only when a request is split in multiple batches. - "cortex_bucket_store_series_batch_preloading_wait_duration_seconds": st.maxSeriesPerBatch < totalSeries, // Tracked only when a request is split in multiple batches. - "cortex_bucket_store_series_refs_fetch_duration_seconds": true, - } + for _, b := range st.blocks { + // NOTE(bwplotka): It is 4 x 1.0 for 100mln samples. Kind of make sense: long series. + assert.Equal(t, 0.0, promtest.ToFloat64(b.metrics.seriesRefetches)) + } + + // Check exposed metrics. + assertHistograms := map[string]bool{ + "cortex_bucket_store_series_request_stage_duration_seconds": true, + "cortex_bucket_store_series_batch_preloading_load_duration_seconds": st.maxSeriesPerBatch < totalSeries || streamingBatchSize > 0, // Tracked only when a request is split in multiple batches. + "cortex_bucket_store_series_batch_preloading_wait_duration_seconds": st.maxSeriesPerBatch < totalSeries || streamingBatchSize > 0, // Tracked only when a request is split in multiple batches. + "cortex_bucket_store_series_refs_fetch_duration_seconds": true, + } - metrics, err := dskit_metrics.NewMetricFamilyMapFromGatherer(reg) - require.NoError(t, err) + metrics, err := dskit_metrics.NewMetricFamilyMapFromGatherer(reg) + require.NoError(t, err) - for metricName, expected := range assertHistograms { - if count := metrics.SumHistograms(metricName).Count(); expected { - assert.Greater(t, count, uint64(0), "metric name: %s", metricName) - } else { - assert.Equal(t, uint64(0), count, "metric name: %s", metricName) + for metricName, expected := range assertHistograms { + if count := metrics.SumHistograms(metricName).Count(); expected { + assert.Greater(t, count, uint64(0), "metric name: %s", metricName) + } else { + assert.Equal(t, uint64(0), count, "metric name: %s", metricName) + } + } } - } + }) } } @@ -1556,17 +1565,16 @@ func TestBucketStore_Series_Concurrency(t *testing.T) { marshalledHints, err := types.MarshalAny(hints) require.NoError(t, err) - req := &storepb.SeriesRequest{ - MinTime: math.MinInt64, - MaxTime: math.MaxInt64, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_EQ, Name: labels.MetricName, Value: "test_metric"}, - }, - Hints: marshalledHints, - } - runRequest := func(t *testing.T, store *BucketStore, streamBatchSize int) { - req.StreamingChunksBatchSize = uint64(streamBatchSize) + req := &storepb.SeriesRequest{ + MinTime: math.MinInt64, + MaxTime: math.MaxInt64, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_EQ, Name: labels.MetricName, Value: "test_metric"}, + }, + Hints: marshalledHints, + StreamingChunksBatchSize: uint64(streamBatchSize), + } srv := newBucketStoreTestServer(t, store) seriesSet, warnings, _, err := srv.Series(context.Background(), req) require.NoError(t, err) @@ -1884,7 +1892,11 @@ func TestBucketStore_Series_RequestAndResponseHints(t *testing.T) { tb, store, seriesSet1, seriesSet2, block1, block2, cleanup := setupStoreForHintsTest(t, 5000) tb.Cleanup(cleanup) - runTestServerSeries(tb, store, newTestCases(seriesSet1, seriesSet2, block1, block2)...) + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + runTestServerSeries(tb, store, streamingBatchSize, newTestCases(seriesSet1, seriesSet2, block1, block2)...) + }) + } } func TestBucketStore_Series_ErrorUnmarshallingRequestHints(t *testing.T) { @@ -2818,56 +2830,47 @@ type seriesCase struct { } // runTestServerSeries runs tests against given cases. -func runTestServerSeries(t test.TB, store *BucketStore, cases ...*seriesCase) { +func runTestServerSeries(t test.TB, store *BucketStore, streamingBatchSize int, cases ...*seriesCase) { for _, c := range cases { t.Run(c.Name, func(t test.TB) { srv := newBucketStoreTestServer(t, store) - streamingBatchSizes := []int{0} - if !t.IsBenchmark() { - streamingBatchSizes = []int{0, 1, 5} - } - for _, streamingBatchSize := range streamingBatchSizes { - t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t test.TB) { - c.Req.StreamingChunksBatchSize = uint64(streamingBatchSize) - t.ResetTimer() - for i := 0; i < t.N(); i++ { - seriesSet, warnings, hints, err := srv.Series(context.Background(), c.Req) - require.NoError(t, err) - require.Equal(t, len(c.ExpectedWarnings), len(warnings), "%v", warnings) - require.Equal(t, len(c.ExpectedSeries), len(seriesSet), "Matchers: %v Min time: %d Max time: %d", c.Req.Matchers, c.Req.MinTime, c.Req.MaxTime) - - if !t.IsBenchmark() { - if len(c.ExpectedSeries) == 1 { - // For bucketStoreAPI chunks are not sorted within response. TODO: Investigate: Is this fine? - sort.Slice(seriesSet[0].Chunks, func(i, j int) bool { - return seriesSet[0].Chunks[i].MinTime < seriesSet[0].Chunks[j].MinTime - }) - } + c.Req.StreamingChunksBatchSize = uint64(streamingBatchSize) + t.ResetTimer() + for i := 0; i < t.N(); i++ { + seriesSet, warnings, hints, err := srv.Series(context.Background(), c.Req) + require.NoError(t, err) + require.Equal(t, len(c.ExpectedWarnings), len(warnings), "%v", warnings) + require.Equal(t, len(c.ExpectedSeries), len(seriesSet), "Matchers: %v Min time: %d Max time: %d", c.Req.Matchers, c.Req.MinTime, c.Req.MaxTime) + + if !t.IsBenchmark() { + if len(c.ExpectedSeries) == 1 { + // For bucketStoreAPI chunks are not sorted within response. TODO: Investigate: Is this fine? + sort.Slice(seriesSet[0].Chunks, func(i, j int) bool { + return seriesSet[0].Chunks[i].MinTime < seriesSet[0].Chunks[j].MinTime + }) + } + + // Huge responses can produce unreadable diffs - make it more human readable. + if len(c.ExpectedSeries) > 4 { + for j := range c.ExpectedSeries { + assert.Equal(t, c.ExpectedSeries[j].Labels, seriesSet[j].Labels, "%v series chunks mismatch", j) - // Huge responses can produce unreadable diffs - make it more human readable. - if len(c.ExpectedSeries) > 4 { - for j := range c.ExpectedSeries { - assert.Equal(t, c.ExpectedSeries[j].Labels, seriesSet[j].Labels, "%v series chunks mismatch", j) - - // Check chunks when it is not a skip chunk query - if !c.Req.SkipChunks { - if len(c.ExpectedSeries[j].Chunks) > 20 { - assert.Equal(t, len(c.ExpectedSeries[j].Chunks), len(seriesSet[j].Chunks), "%v series chunks number mismatch", j) - } - assert.Equal(t, c.ExpectedSeries[j].Chunks, seriesSet[j].Chunks, "%v series chunks mismatch", j) - } + // Check chunks when it is not a skip chunk query + if !c.Req.SkipChunks { + if len(c.ExpectedSeries[j].Chunks) > 20 { + assert.Equal(t, len(c.ExpectedSeries[j].Chunks), len(seriesSet[j].Chunks), "%v series chunks number mismatch", j) } - } else { - assert.Equal(t, c.ExpectedSeries, seriesSet) + assert.Equal(t, c.ExpectedSeries[j].Chunks, seriesSet[j].Chunks, "%v series chunks mismatch", j) } - - assert.Equal(t, c.ExpectedHints, hints) } + } else { + assert.Equal(t, c.ExpectedSeries, seriesSet) } - }) - } + assert.Equal(t, c.ExpectedHints, hints) + } + } }) } } From a30ebf876acdc3c56eb4b9f7b3b6f2271e3b2a10 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 17:22:31 +0530 Subject: [PATCH 54/75] Fox integration test metrics Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 40 +++++++++++++++++-------------------- 1 file changed, 18 insertions(+), 22 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index e5368af6c91..ca020f1fb1d 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -28,7 +28,7 @@ import ( ) func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { - for _, streamingEnabled := range []bool{true, false} { + for _, streamingEnabled := range []bool{true} { t.Run(fmt.Sprintf("streaming=%t", streamingEnabled), func(t *testing.T) { testQuerierWithBlocksStorageRunningInMicroservicesMode(t, streamingEnabled, generateFloatSeries) }) @@ -252,24 +252,22 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream // Check the in-memory index cache metrics (in the store-gateway). if streamingEnabled { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 - //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(5), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+7), "thanos_memcached_operations_total")) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(3), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+6), "thanos_memcached_operations_total")) } } else { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2), "thanos_memcached_operations_total")) // one set for each get } } + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + } // Query back again the 1st series from storage. This time it should use the index cache. result, err = c.Query(series1Name, series1Timestamp) @@ -280,23 +278,21 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream if streamingEnabled { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5+3), "thanos_store_index_cache_requests_total")) - //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(8), "thanos_store_index_cache_hits_total")) - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - //require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+7+4), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(6), "thanos_store_index_cache_hits_total")) + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+2+6), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) } } else { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+2), "thanos_store_index_cache_requests_total")) require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before - } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+2), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) } } + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before + } // Query range. We expect 1 data point with a value of 3 (number of series). // Run this query multiple times to ensure each time we get the same result. From 0b542d6b4c5cf032f3fd31ff5134d38b37a06335 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 17:24:55 +0530 Subject: [PATCH 55/75] Fix tests Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index e43c077f9dd..f5ecd989ede 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1435,8 +1435,8 @@ func benchBucketSeries(t test.TB, skipChunk bool, samplesPerSeries, totalSeries // Check exposed metrics. assertHistograms := map[string]bool{ "cortex_bucket_store_series_request_stage_duration_seconds": true, - "cortex_bucket_store_series_batch_preloading_load_duration_seconds": st.maxSeriesPerBatch < totalSeries || streamingBatchSize > 0, // Tracked only when a request is split in multiple batches. - "cortex_bucket_store_series_batch_preloading_wait_duration_seconds": st.maxSeriesPerBatch < totalSeries || streamingBatchSize > 0, // Tracked only when a request is split in multiple batches. + "cortex_bucket_store_series_batch_preloading_load_duration_seconds": st.maxSeriesPerBatch < totalSeries || (!skipChunk && streamingBatchSize > 0), // Tracked only when a request is split in multiple batches. + "cortex_bucket_store_series_batch_preloading_wait_duration_seconds": st.maxSeriesPerBatch < totalSeries || (!skipChunk && streamingBatchSize > 0), // Tracked only when a request is split in multiple batches. "cortex_bucket_store_series_refs_fetch_duration_seconds": true, } From 68e74509e20e9eac176ef6ca6f28d1d62a13a2c2 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 17:57:46 +0530 Subject: [PATCH 56/75] Fix review comments Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 58 ++++++++++---------- pkg/storegateway/bucket_e2e_test.go | 37 ++++++++++++- pkg/storegateway/bucket_streaming_readers.go | 7 +-- pkg/storegateway/bucket_test.go | 2 +- pkg/storegateway/chunkscache/cache.go | 45 +++++++++++++++ pkg/storegateway/chunkscache/cache_test.go | 51 +---------------- pkg/storegateway/series_refs.go | 30 +++++----- pkg/storegateway/series_refs_test.go | 10 ++-- pkg/storegateway/stats.go | 8 +++ pkg/storegateway/storepb/rpc.proto | 2 +- 10 files changed, 143 insertions(+), 107 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index b444a56e3f4..b75a0229131 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -624,13 +624,12 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie var ( reusePostings [][]storage.SeriesRef reusePendingMatchers [][]*labels.Matcher - iterationBegin time.Time ) if req.StreamingChunksBatchSize > 0 { // The streaming feature is enabled where we stream the series labels first, followed // by the chunks later. Send only the labels here. req.SkipChunks = true - + seriesLoadStart := time.Now() reusePostings = make([][]storage.SeriesRef, len(blocks)) reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) chunksLimiter := s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) @@ -653,12 +652,14 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie "request shard selector", maybeNilShard(shardSelector).LabelValue(), "streaming chunks batch size", req.StreamingChunksBatchSize, "num_series", numSeries, - "duration", time.Since(iterationBegin), + "duration", time.Since(seriesLoadStart), ) req.SkipChunks = false } + // We create the limiter twice in the case of streaming so that we don't double count the series + // and hit the limit prematurely. chunksLimiter := s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) seriesLimiter := s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) seriesSet, seriesChunkIt, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) @@ -666,16 +667,16 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - var numSeries, numChunks int start := time.Now() if req.StreamingChunksBatchSize > 0 { - numSeries, numChunks, err = s.sendStreamingChunks(req, srv, seriesChunkIt, stats) + err = s.sendStreamingChunks(req, srv, seriesChunkIt, stats) } else { - numSeries, numChunks, err = s.sendSeriesChunks(req, srv, seriesSet, stats) + err = s.sendSeriesChunks(req, srv, seriesSet, stats) } if err != nil { return } + numSeries, numChunks := stats.seriesAndChunksCount() debugMessage := "sent series" if req.StreamingChunksBatchSize > 0 { debugMessage = "sent streaming chunks" @@ -775,11 +776,12 @@ func (s *BucketStore) sendStreamingChunks( srv storepb.Store_SeriesServer, it seriesChunksSetIterator, stats *safeQueryStats, -) (seriesCount, chunksCount int, err error) { +) error { var ( - encodeDuration = time.Duration(0) - sendDuration = time.Duration(0) - iterationBegin = time.Now() + encodeDuration time.Duration + sendDuration time.Duration + seriesCount, chunksCount int + iterationBegin = time.Now() ) defer stats.update(func(stats *queryStats) { @@ -823,7 +825,7 @@ func (s *BucketStore) sendStreamingChunks( if batchSizeBytes > targetQueryStreamBatchMessageSize || len(chunksBatch.Series) >= int(req.StreamingChunksBatchSize) { err := s.sendMessage("streaming chunks", srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) if err != nil { - return 0, 0, err + return err } chunksBatch.Series = chunksBatch.Series[:0] batchSizeBytes = 0 @@ -834,7 +836,7 @@ func (s *BucketStore) sendStreamingChunks( // Still some chunks left to send before we release the batch. err := s.sendMessage("streaming chunks", srv, storepb.NewStreamingChunksResponse(chunksBatch), &encodeDuration, &sendDuration) if err != nil { - return 0, 0, err + return err } chunksBatch.Series = chunksBatch.Series[:0] batchSizeBytes = 0 @@ -844,10 +846,10 @@ func (s *BucketStore) sendStreamingChunks( } if it.Err() != nil { - return 0, 0, it.Err() + return it.Err() } - return seriesCount, chunksCount, it.Err() + return it.Err() } func (s *BucketStore) sendSeriesChunks( @@ -855,11 +857,12 @@ func (s *BucketStore) sendSeriesChunks( srv storepb.Store_SeriesServer, seriesSet storepb.SeriesSet, stats *safeQueryStats, -) (seriesCount, chunksCount int, err error) { +) error { var ( - encodeDuration = time.Duration(0) - sendDuration = time.Duration(0) - iterationBegin = time.Now() + encodeDuration time.Duration + sendDuration time.Duration + seriesCount, chunksCount int + iterationBegin = time.Now() ) defer stats.update(func(stats *queryStats) { @@ -887,23 +890,20 @@ func (s *BucketStore) sendSeriesChunks( } if !req.SkipChunks { series.Chunks = chks - } - - if !req.SkipChunks { chunksCount += len(chks) s.metrics.chunkSizeBytes.Observe(float64(chunksSize(chks))) } err := s.sendMessage("series", srv, storepb.NewSeriesResponse(&series), &encodeDuration, &sendDuration) if err != nil { - return 0, 0, err + return err } } if seriesSet.Err() != nil { - return 0, 0, errors.Wrap(seriesSet.Err(), "expand series set") + return errors.Wrap(seriesSet.Err(), "expand series set") } - return seriesCount, chunksCount, nil + return nil } func (s *BucketStore) sendMessage(typ string, srv storepb.Store_SeriesServer, msg interface{}, encodeDuration, sendDuration *time.Duration) error { @@ -985,7 +985,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( ) var strategy seriesIteratorStrategy if req.SkipChunks { - strategy |= noChunks + strategy |= noChunkRefs } if req.StreamingChunksBatchSize > 0 { strategy |= overlapMintMaxt @@ -1328,7 +1328,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] matchers, nil, cachedSeriesHasher{nil}, - noChunks, + noChunkRefs, minTime, maxTime, 1, // we skip chunks, so this doesn't make any difference stats, @@ -1548,7 +1548,7 @@ func labelValuesFromSeries(ctx context.Context, labelName string, seriesPerBatch b.meta, nil, nil, - noChunks, + noChunkRefs, b.meta.MinTime, b.meta.MaxTime, b.userID, @@ -1931,8 +1931,8 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch // Similar for first ref. ref := int64(d.Uvarint64()) - isNoChunks := strategy.isNoChunks() - isNoChunkOverlapMintMaxt := strategy.isNoChunksAndOverlapMintMaxt() + isNoChunks := strategy.isNoChunkRefs() + isNoChunkOverlapMintMaxt := strategy.isNoChunkRefsAndOverlapMintMaxt() for i := 0; i < k; i++ { if i > 0 { mint += int64(d.Uvarint64()) diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 37aa4985c07..185ec7dd732 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -509,7 +509,7 @@ func TestBucketStore_e2e(t *testing.T) { return } - t.Run("with small index cache", func(t *testing.T) { + if ok := t.Run("with small index cache", func(t *testing.T) { indexCache2, err := indexcache.NewInMemoryIndexCacheWithConfig(s.logger, nil, indexcache.InMemoryIndexCacheConfig{ MaxItemSize: 50, MaxSize: 100, @@ -517,6 +517,21 @@ func TestBucketStore_e2e(t *testing.T) { assert.NoError(t, err) s.cache.SwapIndexCacheWith(indexCache2) testBucketStore_e2e(t, ctx, s) + }); !ok { + return + } + + t.Run("with large, sufficient index cache, and chunks cache", func(t *testing.T) { + indexCache, err := indexcache.NewInMemoryIndexCacheWithConfig(s.logger, nil, indexcache.InMemoryIndexCacheConfig{ + MaxItemSize: 1e5, + MaxSize: 2e5, + }) + assert.NoError(t, err) + chunksCache, err := chunkscache.NewChunksCache(s.logger, chunkscache.NewMockedCacheClient(nil), nil) + assert.NoError(t, err) + s.cache.SwapIndexCacheWith(indexCache) + s.cache.SwapChunksCacheWith(chunksCache) + testBucketStore_e2e(t, ctx, s) }) }) } @@ -539,6 +554,7 @@ func TestBucketStore_e2e_StreamingEdgeCases(t *testing.T) { Matchers: []storepb.LabelMatcher{ {Type: storepb.LabelMatcher_RE, Name: "a", Value: "1|2"}, }, + // A block spans 120 mins. So 121 grabs the second to last block. MinTime: maxt - 121*int64(time.Minute/time.Millisecond), MaxTime: maxt, }, @@ -572,14 +588,29 @@ func TestBucketStore_e2e_StreamingEdgeCases(t *testing.T) { return } - t.Run("with small index cache", func(t *testing.T) { + if ok := t.Run("with small index cache", func(t *testing.T) { indexCache2, err := indexcache.NewInMemoryIndexCacheWithConfig(s.logger, nil, indexcache.InMemoryIndexCacheConfig{ MaxItemSize: 50, MaxSize: 100, }) assert.NoError(t, err) s.cache.SwapIndexCacheWith(indexCache2) - testBucketStore_e2e(t, ctx, s, additionalCases...) + testBucketStore_e2e(t, ctx, s) + }); !ok { + return + } + + t.Run("with large, sufficient index cache, and chunks cache", func(t *testing.T) { + indexCache, err := indexcache.NewInMemoryIndexCacheWithConfig(s.logger, nil, indexcache.InMemoryIndexCacheConfig{ + MaxItemSize: 1e5, + MaxSize: 2e5, + }) + assert.NoError(t, err) + chunksCache, err := chunkscache.NewChunksCache(s.logger, chunkscache.NewMockedCacheClient(nil), nil) + assert.NoError(t, err) + s.cache.SwapIndexCacheWith(indexCache) + s.cache.SwapChunksCacheWith(chunksCache) + testBucketStore_e2e(t, ctx, s) }) }) } diff --git a/pkg/storegateway/bucket_streaming_readers.go b/pkg/storegateway/bucket_streaming_readers.go index c81597c703e..594c2789e36 100644 --- a/pkg/storegateway/bucket_streaming_readers.go +++ b/pkg/storegateway/bucket_streaming_readers.go @@ -40,6 +40,9 @@ func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient queryLimiter: queryLimiter, stats: stats, log: log, + seriesChunksChan: make(chan *storepb.StreamingChunksBatch, 1), + // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. + errorChan: make(chan error, 1), } } @@ -57,10 +60,6 @@ func (s *SeriesChunksStreamReader) Close() { // If an error occurs while streaming, a subsequent call to GetChunks will return an error. // To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. func (s *SeriesChunksStreamReader) StartBuffering() { - s.seriesChunksChan = make(chan *storepb.StreamingChunksBatch, 1) - - // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. - s.errorChan = make(chan error, 1) ctxDone := s.client.Context().Done() go func() { diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index f5ecd989ede..222d376ac66 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1164,7 +1164,7 @@ func loadSeries(ctx context.Context, tb test.TB, postings []storage.SeriesRef, i indexr.block.meta, nil, nil, - noChunks, + noChunkRefs, 0, 0, "", diff --git a/pkg/storegateway/chunkscache/cache.go b/pkg/storegateway/chunkscache/cache.go index 055d0b83c03..f539142cd40 100644 --- a/pkg/storegateway/chunkscache/cache.go +++ b/pkg/storegateway/chunkscache/cache.go @@ -145,3 +145,48 @@ func (c *ChunksCache) StoreChunks(userID string, ranges map[Range][]byte) { } c.cache.StoreAsync(rangesWithTenant, defaultTTL) } + +// NewMockedCacheClient must be used only for testing. +func NewMockedCacheClient(mockedGetMultiErr error) cache.Cache { + return &mockedCacheClient{ + cache: map[string][]byte{}, + mockedGetMultiErr: mockedGetMultiErr, + } +} + +type mockedCacheClient struct { + cache map[string][]byte + mockedGetMultiErr error +} + +func (c *mockedCacheClient) Fetch(_ context.Context, keys []string, _ ...cache.Option) map[string][]byte { + if c.mockedGetMultiErr != nil { + return nil + } + + hits := map[string][]byte{} + + for _, key := range keys { + if value, ok := c.cache[key]; ok { + hits[key] = value + } + } + + return hits +} + +func (c *mockedCacheClient) StoreAsync(data map[string][]byte, _ time.Duration) { + for key, value := range data { + c.cache[key] = value + } +} + +func (c *mockedCacheClient) Delete(_ context.Context, key string) error { + delete(c.cache, key) + + return nil +} + +func (c *mockedCacheClient) Name() string { + return "mockedCacheClient" +} diff --git a/pkg/storegateway/chunkscache/cache_test.go b/pkg/storegateway/chunkscache/cache_test.go index 0c92313b58e..0610a47c258 100644 --- a/pkg/storegateway/chunkscache/cache_test.go +++ b/pkg/storegateway/chunkscache/cache_test.go @@ -4,16 +4,13 @@ package chunkscache import ( "context" - "testing" - "time" - "github.com/go-kit/log" - "github.com/grafana/dskit/cache" "github.com/oklog/ulid" "github.com/pkg/errors" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/stretchr/testify/assert" + "testing" ) func TestDskitChunksCache_FetchMultiChunks(t *testing.T) { @@ -81,7 +78,7 @@ func TestDskitChunksCache_FetchMultiChunks(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - cacheClient := newMockedCacheClient(testData.mockedErr) + cacheClient := NewMockedCacheClient(testData.mockedErr) c, err := NewChunksCache(log.NewNopLogger(), cacheClient, nil) assert.NoError(t, err) @@ -127,47 +124,3 @@ type mockedChunks struct { r Range value []byte } - -type mockedCacheClient struct { - cache map[string][]byte - mockedGetMultiErr error -} - -func newMockedCacheClient(mockedGetMultiErr error) *mockedCacheClient { - return &mockedCacheClient{ - cache: map[string][]byte{}, - mockedGetMultiErr: mockedGetMultiErr, - } -} - -func (c *mockedCacheClient) Fetch(_ context.Context, keys []string, _ ...cache.Option) map[string][]byte { - if c.mockedGetMultiErr != nil { - return nil - } - - hits := map[string][]byte{} - - for _, key := range keys { - if value, ok := c.cache[key]; ok { - hits[key] = value - } - } - - return hits -} - -func (c *mockedCacheClient) StoreAsync(data map[string][]byte, _ time.Duration) { - for key, value := range data { - c.cache[key] = value - } -} - -func (c *mockedCacheClient) Delete(_ context.Context, key string) error { - delete(c.cache, key) - - return nil -} - -func (c *mockedCacheClient) Name() string { - return "mockedCacheClient" -} diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 21d58cb3972..b1fda57ed23 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -804,27 +804,27 @@ const ( // To change the default behavior, use the flags below this. defaultStrategy seriesIteratorStrategy = 0 - // noChunks flag when used by itself fetches only series labels for series in the entire block. - noChunks seriesIteratorStrategy = 0b00000001 - // overlapMintMaxt flag is used to be used together with noChunks. With this, only the series whose - // chunks overlap with mint->maxt are selected. + // noChunkRefs flag when used by itself fetches only series labels for series in the entire block. + noChunkRefs seriesIteratorStrategy = 0b00000001 + // overlapMintMaxt flag is used together with noChunkRefs. With this, only the series whose + // chunks overlap with [mint, maxt] are selected. overlapMintMaxt seriesIteratorStrategy = 0b00000010 ) -func (s seriesIteratorStrategy) isNoChunks() bool { - return s&noChunks != 0 +func (s seriesIteratorStrategy) isNoChunkRefs() bool { + return s&noChunkRefs != 0 } func (s seriesIteratorStrategy) isOverlapMintMaxt() bool { return s&overlapMintMaxt != 0 } -func (s seriesIteratorStrategy) isNoChunksOnEntireBlock() bool { - return s.isNoChunks() && !s.isOverlapMintMaxt() +func (s seriesIteratorStrategy) isNoChunkRefsOnEntireBlock() bool { + return s.isNoChunkRefs() && !s.isOverlapMintMaxt() } -func (s seriesIteratorStrategy) isNoChunksAndOverlapMintMaxt() bool { - return s.isNoChunks() && s.isOverlapMintMaxt() +func (s seriesIteratorStrategy) isNoChunkRefsAndOverlapMintMaxt() bool { + return s.isNoChunkRefs() && s.isOverlapMintMaxt() } func newLoadingSeriesChunkRefsSetIterator( @@ -843,7 +843,7 @@ func newLoadingSeriesChunkRefsSetIterator( chunkRangesPerSeries int, logger log.Logger, ) *loadingSeriesChunkRefsSetIterator { - if strategy.isNoChunksOnEntireBlock() { + if strategy.isNoChunkRefsOnEntireBlock() { minTime, maxTime = blockMeta.MinTime, blockMeta.MaxTime } @@ -887,7 +887,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Next() bool { nextPostings := s.postingsSetIterator.At() var cachedSeriesID cachedSeriesForPostingsID - if s.strategy.isNoChunksOnEntireBlock() { + if s.strategy.isNoChunkRefsOnEntireBlock() { var err error // Calculate the cache ID before we filter out anything from the postings, // so that the key doesn't depend on the series hash cache or any other filtering we do on the postings list. @@ -937,7 +937,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Next() bool { } s.currentSet = nextSet - if s.strategy.isNoChunks() && cachedSeriesID.isSet() { + if cachedSeriesID.isSet() { storeCachedSeriesForPostings(s.ctx, s.indexCache, s.tenantID, s.blockID, s.shard, cachedSeriesID, nextSet, s.logger) } return true @@ -965,7 +965,7 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p if err != nil { return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "read series") } - if !s.strategy.isNoChunks() { + if !s.strategy.isNoChunkRefs() { clampLastChunkLength(symbolizedSet.series, metas) series.chunksRanges = metasToRanges(partitionChunks(metas, s.chunkRangesPerSeries, minChunksPerRange), s.blockID, s.minTime, s.maxTime) } @@ -1023,7 +1023,7 @@ func (s *loadingSeriesChunkRefsSetIterator) filterSeries(set seriesChunkRefsSet, for sIdx, series := range set.series { // An empty label set means the series had no chunks in this block, so we skip it. // No chunk ranges means the series doesn't have a single chunk range in the requested range. - if series.lset.IsEmpty() || (!s.strategy.isNoChunks() && len(series.chunksRanges) == 0) { + if series.lset.IsEmpty() || (!s.strategy.isNoChunkRefs() && len(series.chunksRanges) == 0) { continue } if !shardOwned(s.shard, s.seriesHasher, postings[sIdx], series.lset, stats) { diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 8a06e0c0023..ab32a94ea33 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1359,7 +1359,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { } var strategy seriesIteratorStrategy if tc.skipChunks { - strategy |= noChunks + strategy |= noChunkRefs } if tc.streamingSeries { strategy |= overlapMintMaxt @@ -1717,7 +1717,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { var strategy seriesIteratorStrategy if testCase.skipChunks { - strategy |= noChunks + strategy |= noChunkRefs } iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( ctx, @@ -1831,7 +1831,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { testCase.matchers, nil, cachedSeriesHasher{hashCache}, - noChunks, // skip chunks since we are testing labels filtering + noChunkRefs, // skip chunks since we are testing labels filtering block.meta.MinTime, block.meta.MaxTime, 2, @@ -2445,7 +2445,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { testCase.matchers, testCase.shard, seriesHasher, - noChunks, + noChunkRefs, b.meta.MinTime, b.meta.MaxTime, 1, @@ -2477,7 +2477,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { testCase.matchers, testCase.shard, seriesHasher, - noChunks, + noChunkRefs, b.meta.MinTime, b.meta.MaxTime, 1, diff --git a/pkg/storegateway/stats.go b/pkg/storegateway/stats.go index 9cae3e545cf..dad370d8acc 100644 --- a/pkg/storegateway/stats.go +++ b/pkg/storegateway/stats.go @@ -174,3 +174,11 @@ func (s *safeQueryStats) export() *queryStats { copied := *s.unsafeStats return &copied } + +// seriesAndChunksCount the value of mergedSeriesCount and mergedChunksCount fields. +func (s *safeQueryStats) seriesAndChunksCount() (seriesCount, chunksCount int) { + s.unsafeStatsMx.Lock() + defer s.unsafeStatsMx.Unlock() + + return s.unsafeStats.mergedSeriesCount, s.unsafeStats.mergedChunksCount +} diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index 9ae61689574..3f20cb2adb1 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -90,7 +90,7 @@ message SeriesRequest { // sending any streaming_chunks, with the last streaming_series response containing is_end_of_series_stream=true. // The order of series in both streaming_series/streaming_chunks must match and the size of the batch must not // cross streaming_chunks_batch_size, although it can be lower than that. - // It is 100 so that we have an option to bring back compatibility with Thanos' storage API. + // The proto field ID is 100 so that we have an option to bring back compatibility with Thanos' storage API. uint64 streaming_chunks_batch_size = 100; } From c53309bcefe0bb515316e3b3cd7c05938b4f298b Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 18:00:34 +0530 Subject: [PATCH 57/75] Fix integration tests Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index ca020f1fb1d..46b152af725 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -28,7 +28,7 @@ import ( ) func TestQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T) { - for _, streamingEnabled := range []bool{true} { + for _, streamingEnabled := range []bool{true, false} { t.Run(fmt.Sprintf("streaming=%t", streamingEnabled), func(t *testing.T) { testQuerierWithBlocksStorageRunningInMicroservicesMode(t, streamingEnabled, generateFloatSeries) }) @@ -253,9 +253,9 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream if streamingEnabled { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(3), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(2), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+6), "thanos_memcached_operations_total")) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+5), "thanos_memcached_operations_total")) } } else { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 @@ -280,7 +280,7 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5+3), "thanos_store_index_cache_requests_total")) require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(6), "thanos_store_index_cache_hits_total")) if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+2+6), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+2+5), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) } } else { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+2), "thanos_store_index_cache_requests_total")) From 3c44a6d7ea07dafb6c49b35d872b622cf0f6fec5 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 18:46:52 +0530 Subject: [PATCH 58/75] Split streamingSeriesSetForBlocks Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 164 +++++++++++++++++++++++++++---------- 1 file changed, 122 insertions(+), 42 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index b75a0229131..fa0cf1e94cd 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -624,18 +624,20 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie var ( reusePostings [][]storage.SeriesRef reusePendingMatchers [][]*labels.Matcher + resHints = &hintspb.SeriesResponseHints{} ) + for _, b := range blocks { + resHints.AddQueriedBlock(b.meta.ULID) + } if req.StreamingChunksBatchSize > 0 { - // The streaming feature is enabled where we stream the series labels first, followed - // by the chunks later. Send only the labels here. - req.SkipChunks = true - seriesLoadStart := time.Now() - reusePostings = make([][]storage.SeriesRef, len(blocks)) - reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) - chunksLimiter := s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) - seriesLimiter := s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) + var ( + seriesSet storepb.SeriesSet + seriesLoadStart = time.Now() + chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) + seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) + ) - seriesSet, _, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, nil, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + seriesSet, reusePostings, reusePendingMatchers, err = s.streamingSeriesForBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) if err != nil { return err } @@ -654,28 +656,33 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie "num_series", numSeries, "duration", time.Since(seriesLoadStart), ) - - req.SkipChunks = false } // We create the limiter twice in the case of streaming so that we don't double count the series // and hit the limit prematurely. chunksLimiter := s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) seriesLimiter := s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) - seriesSet, seriesChunkIt, resHints, err := s.streamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) - if err != nil { - return err - } start := time.Now() if req.StreamingChunksBatchSize > 0 { + var seriesChunkIt seriesChunksSetIterator + seriesChunkIt, err = s.streamingChunksSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + if err != nil { + return err + } err = s.sendStreamingChunks(req, srv, seriesChunkIt, stats) } else { + var seriesSet storepb.SeriesSet + seriesSet, err = s.nonStreamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + if err != nil { + return err + } err = s.sendSeriesChunks(req, srv, seriesSet, stats) } if err != nil { return } + numSeries, numChunks := stats.seriesAndChunksCount() debugMessage := "sent series" if req.StreamingChunksBatchSize > 0 { @@ -962,7 +969,8 @@ func chunksSize(chks []storepb.AggrChunk) (size int) { return size } -func (s *BucketStore) streamingSeriesSetForBlocks( +// nonStreamingSeriesSetForBlocks is used when the streaming feature is not enabled. +func (s *BucketStore) nonStreamingSeriesSetForBlocks( ctx context.Context, req *storepb.SeriesRequest, blocks []*bucketBlock, @@ -975,27 +983,111 @@ func (s *BucketStore) streamingSeriesSetForBlocks( stats *safeQueryStats, reusePostings [][]storage.SeriesRef, // Used if not empty. reusePendingMatchers [][]*labels.Matcher, // Used if not empty. -) (storepb.SeriesSet, seriesChunksSetIterator, *hintspb.SeriesResponseHints, error) { - var ( - resHints = &hintspb.SeriesResponseHints{} - mtx = sync.Mutex{} - batches = make([]seriesChunkRefsSetIterator, 0, len(blocks)) - g, _ = errgroup.WithContext(ctx) - begin = time.Now() - ) +) (storepb.SeriesSet, error) { var strategy seriesIteratorStrategy if req.SkipChunks { - strategy |= noChunkRefs + strategy = noChunkRefs } - if req.StreamingChunksBatchSize > 0 { - strategy |= overlapMintMaxt + it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers, strategy) + if err != nil { + return nil, err + } + + var set storepb.SeriesSet + if !req.SkipChunks { + var cache chunkscache.Cache + if s.fineGrainedChunksCachingEnabled { + cache = s.chunksCache + } + ss := newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, it, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) + set = newSeriesChunksSeriesSet(ss) + } else { + set = newSeriesSetWithoutChunks(ctx, it, stats) + } + return set, nil +} + +// streamingSeriesForBlocks is used when streaming feature is enabled. +// It returns a series set that only contains the series labels without any chunks information. +// The returned postings (series ref) and matches should be re-used when getting chunks to save on computation. +func (s *BucketStore) streamingSeriesForBlocks( + ctx context.Context, + req *storepb.SeriesRequest, + blocks []*bucketBlock, + indexReaders map[ulid.ULID]*bucketIndexReader, + shardSelector *sharding.ShardSelector, + matchers []*labels.Matcher, + chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. + seriesLimiter SeriesLimiter, // Rate limiter for loading series. + stats *safeQueryStats, +) (storepb.SeriesSet, [][]storage.SeriesRef, [][]*labels.Matcher, error) { + var ( + reusePostings = make([][]storage.SeriesRef, len(blocks)) + reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) + strategy = noChunkRefs | overlapMintMaxt + ) + it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers, strategy) + if err != nil { + return nil, nil, nil, err } + return newSeriesSetWithoutChunks(ctx, it, stats), reusePostings, reusePendingMatchers, nil +} + +// streamingChunksSetForBlocks is used when streaming feature is enabled. +// It returns an iterator to go over the chunks for the series returned in the streamingSeriesForBlocks call. +// It is recommended to pass the reusePostings and reusePendingMatches returned by the streamingSeriesForBlocks call. +func (s *BucketStore) streamingChunksSetForBlocks( + ctx context.Context, + req *storepb.SeriesRequest, + blocks []*bucketBlock, + indexReaders map[ulid.ULID]*bucketIndexReader, + chunkReaders *bucketChunkReaders, + shardSelector *sharding.ShardSelector, + matchers []*labels.Matcher, + chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. + seriesLimiter SeriesLimiter, // Rate limiter for loading series. + stats *safeQueryStats, + reusePostings [][]storage.SeriesRef, // Should come from streamingSeriesForBlocks. + reusePendingMatchers [][]*labels.Matcher, // Should come from streamingSeriesForBlocks. +) (seriesChunksSetIterator, error) { + it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers, defaultStrategy) + if err != nil { + return nil, err + } + + var cache chunkscache.Cache + if s.fineGrainedChunksCachingEnabled { + cache = s.chunksCache + } + scsi := newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, it, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) + return scsi, nil +} + +func (s *BucketStore) getSeriesIteratorFromBlocks( + ctx context.Context, + req *storepb.SeriesRequest, + blocks []*bucketBlock, + indexReaders map[ulid.ULID]*bucketIndexReader, + shardSelector *sharding.ShardSelector, + matchers []*labels.Matcher, + chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. + seriesLimiter SeriesLimiter, // Rate limiter for loading series. + stats *safeQueryStats, + reusePostings [][]storage.SeriesRef, // Used if not empty. + reusePendingMatchers [][]*labels.Matcher, // Used if not empty. + strategy seriesIteratorStrategy, +) (seriesChunkRefsSetIterator, error) { + var ( + mtx = sync.Mutex{} + batches = make([]seriesChunkRefsSetIterator, 0, len(blocks)) + g, _ = errgroup.WithContext(ctx) + begin = time.Now() + ) for i, b := range blocks { b := b i := i // Keep track of queried blocks. - resHints.AddQueriedBlock(b.meta.ULID) indexr := indexReaders[b.meta.ULID] // If query sharding is enabled we have to get the block-specific series hash cache @@ -1047,7 +1139,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( err := g.Wait() if err != nil { - return nil, nil, nil, err + return nil, err } stats.update(func(stats *queryStats) { @@ -1061,19 +1153,7 @@ func (s *BucketStore) streamingSeriesSetForBlocks( // counted once towards the limit. mergedIterator = newLimitingSeriesChunkRefsSetIterator(mergedIterator, chunksLimiter, seriesLimiter) - var set storepb.SeriesSet - var scsi seriesChunksSetIterator - if !req.SkipChunks { - var cache chunkscache.Cache - if s.fineGrainedChunksCachingEnabled { - cache = s.chunksCache - } - scsi = newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, mergedIterator, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) - set = newSeriesChunksSeriesSet(scsi) - } else { - set = newSeriesSetWithoutChunks(ctx, mergedIterator, stats) - } - return set, scsi, resHints, nil + return mergedIterator, nil } func (s *BucketStore) recordSeriesCallResult(safeStats *safeQueryStats) { From 43622d4e62480e07921a03ca415920b86dc77b30 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 18:49:45 +0530 Subject: [PATCH 59/75] lint Signed-off-by: Ganesh Vernekar --- pkg/storegateway/chunkscache/cache_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/storegateway/chunkscache/cache_test.go b/pkg/storegateway/chunkscache/cache_test.go index 0610a47c258..e1d0051f96e 100644 --- a/pkg/storegateway/chunkscache/cache_test.go +++ b/pkg/storegateway/chunkscache/cache_test.go @@ -4,13 +4,14 @@ package chunkscache import ( "context" + "testing" + "github.com/go-kit/log" "github.com/oklog/ulid" "github.com/pkg/errors" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" "github.com/prometheus/prometheus/tsdb/chunks" "github.com/stretchr/testify/assert" - "testing" ) func TestDskitChunksCache_FetchMultiChunks(t *testing.T) { From 227b3e86193d08f982a4df45fccc4327329c5ca7 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 27 Jun 2023 19:26:02 +0530 Subject: [PATCH 60/75] lint and test Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 6 +++--- pkg/storegateway/storepb/rpc.pb.go | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index 46b152af725..753775d2873 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -252,8 +252,8 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream // Check the in-memory index cache metrics (in the store-gateway). if streamingEnabled { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(2), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 + require.NoError(t, storeGateways.WaitSumMetrics(e2e.Greater(0), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+5), "thanos_memcached_operations_total")) } @@ -278,7 +278,7 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream if streamingEnabled { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5+3), "thanos_store_index_cache_requests_total")) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(6), "thanos_store_index_cache_hits_total")) + require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(4), "thanos_store_index_cache_hits_total")) if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+2+5), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) } diff --git a/pkg/storegateway/storepb/rpc.pb.go b/pkg/storegateway/storepb/rpc.pb.go index 75a54dd2fbb..fcfdd75884e 100644 --- a/pkg/storegateway/storepb/rpc.pb.go +++ b/pkg/storegateway/storepb/rpc.pb.go @@ -50,7 +50,7 @@ type SeriesRequest struct { // sending any streaming_chunks, with the last streaming_series response containing is_end_of_series_stream=true. // The order of series in both streaming_series/streaming_chunks must match and the size of the batch must not // cross streaming_chunks_batch_size, although it can be lower than that. - // It is 100 so that we have an option to bring back compatibility with Thanos' storage API. + // The proto field ID is 100 so that we have an option to bring back compatibility with Thanos' storage API. StreamingChunksBatchSize uint64 `protobuf:"varint,100,opt,name=streaming_chunks_batch_size,json=streamingChunksBatchSize,proto3" json:"streaming_chunks_batch_size,omitempty"` } From 4333c6f2e738f21d360dc25c97bd7c17e3587f2a Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Fri, 30 Jun 2023 18:29:15 +0530 Subject: [PATCH 61/75] Fix 'cannot reverse reader' bug with more test coverage Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 60 ++++---- pkg/storegateway/bucket_chunk_reader_test.go | 4 +- pkg/storegateway/bucket_index_postings.go | 1 - pkg/storegateway/gateway_test.go | 147 ++++++++++--------- pkg/storegateway/series_chunks.go | 1 - pkg/storegateway/series_refs.go | 43 +++++- pkg/storegateway/series_refs_test.go | 20 +-- 7 files changed, 153 insertions(+), 123 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index fa0cf1e94cd..8bfad13c8f2 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -622,9 +622,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie // twice. So we use these slices to re-use them. // Each reusePostings[i] and reusePendingMatchers[i] corresponds to a single block. var ( - reusePostings [][]storage.SeriesRef - reusePendingMatchers [][]*labels.Matcher - resHints = &hintspb.SeriesResponseHints{} + reuse []*reusedPostingsAndMatchers + resHints = &hintspb.SeriesResponseHints{} ) for _, b := range blocks { resHints.AddQueriedBlock(b.meta.ULID) @@ -637,7 +636,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) ) - seriesSet, reusePostings, reusePendingMatchers, err = s.streamingSeriesForBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) + seriesSet, reuse, err = s.streamingSeriesForBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) if err != nil { return err } @@ -666,14 +665,14 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie start := time.Now() if req.StreamingChunksBatchSize > 0 { var seriesChunkIt seriesChunksSetIterator - seriesChunkIt, err = s.streamingChunksSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + seriesChunkIt, err = s.streamingChunksSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reuse) if err != nil { return err } err = s.sendStreamingChunks(req, srv, seriesChunkIt, stats) } else { var seriesSet storepb.SeriesSet - seriesSet, err = s.nonStreamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers) + seriesSet, err = s.nonStreamingSeriesSetForBlocks(ctx, req, blocks, indexReaders, readers, shardSelector, matchers, chunksLimiter, seriesLimiter, stats) if err != nil { return err } @@ -981,14 +980,12 @@ func (s *BucketStore) nonStreamingSeriesSetForBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, - reusePostings [][]storage.SeriesRef, // Used if not empty. - reusePendingMatchers [][]*labels.Matcher, // Used if not empty. ) (storepb.SeriesSet, error) { var strategy seriesIteratorStrategy if req.SkipChunks { strategy = noChunkRefs } - it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers, strategy) + it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, nil, strategy) if err != nil { return nil, err } @@ -1020,17 +1017,19 @@ func (s *BucketStore) streamingSeriesForBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, -) (storepb.SeriesSet, [][]storage.SeriesRef, [][]*labels.Matcher, error) { +) (storepb.SeriesSet, []*reusedPostingsAndMatchers, error) { var ( - reusePostings = make([][]storage.SeriesRef, len(blocks)) - reusePendingMatchers = make([][]*labels.Matcher, len(blocks)) - strategy = noChunkRefs | overlapMintMaxt + reuse = make([]*reusedPostingsAndMatchers, len(blocks)) + strategy = noChunkRefs | overlapMintMaxt ) - it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers, strategy) + for i := range reuse { + reuse[i] = &reusedPostingsAndMatchers{} + } + it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reuse, strategy) if err != nil { - return nil, nil, nil, err + return nil, nil, err } - return newSeriesSetWithoutChunks(ctx, it, stats), reusePostings, reusePendingMatchers, nil + return newSeriesSetWithoutChunks(ctx, it, stats), reuse, nil } // streamingChunksSetForBlocks is used when streaming feature is enabled. @@ -1047,10 +1046,9 @@ func (s *BucketStore) streamingChunksSetForBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, - reusePostings [][]storage.SeriesRef, // Should come from streamingSeriesForBlocks. - reusePendingMatchers [][]*labels.Matcher, // Should come from streamingSeriesForBlocks. + reuse []*reusedPostingsAndMatchers, // Should come from streamingSeriesForBlocks. ) (seriesChunksSetIterator, error) { - it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reusePostings, reusePendingMatchers, defaultStrategy) + it, err := s.getSeriesIteratorFromBlocks(ctx, req, blocks, indexReaders, shardSelector, matchers, chunksLimiter, seriesLimiter, stats, reuse, defaultStrategy) if err != nil { return nil, err } @@ -1073,8 +1071,7 @@ func (s *BucketStore) getSeriesIteratorFromBlocks( chunksLimiter ChunksLimiter, // Rate limiter for loading chunks. seriesLimiter SeriesLimiter, // Rate limiter for loading series. stats *safeQueryStats, - reusePostings [][]storage.SeriesRef, // Used if not empty. - reusePendingMatchers [][]*labels.Matcher, // Used if not empty. + reuse []*reusedPostingsAndMatchers, // Used if not empty. If not empty, len(reuse) must be len(blocks). strategy seriesIteratorStrategy, ) (seriesChunkRefsSetIterator, error) { var ( @@ -1096,13 +1093,12 @@ func (s *BucketStore) getSeriesIteratorFromBlocks( if shardSelector != nil { blockSeriesHashCache = s.seriesHashCache.GetBlockCache(b.meta.ULID.String()) } - var ps []storage.SeriesRef - var pendingMatchers []*labels.Matcher - if len(reusePostings) > 0 { - ps, pendingMatchers = reusePostings[i], reusePendingMatchers[i] + var r *reusedPostingsAndMatchers + if len(reuse) > 0 { + r = reuse[i] } g.Go(func() error { - part, newPs, newPendingMatchers, err := openBlockSeriesChunkRefsSetsIterator( + part, err := openBlockSeriesChunkRefsSetsIterator( ctx, s.maxSeriesPerBatch, s.userID, @@ -1116,19 +1112,13 @@ func (s *BucketStore) getSeriesIteratorFromBlocks( req.MinTime, req.MaxTime, s.numChunksRangesPerSeries, stats, - ps, - pendingMatchers, + r, s.logger, ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) } - if len(reusePostings) > 0 { - reusePostings[i] = newPs - reusePendingMatchers[i] = newPendingMatchers - } - mtx.Lock() batches = append(batches, part) mtx.Unlock() @@ -1398,7 +1388,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] // We ignore request's min/max time and query the entire block to make the result cacheable. minTime, maxTime := indexr.block.meta.MinTime, indexr.block.meta.MaxTime - seriesSetsIterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( + seriesSetsIterator, err := openBlockSeriesChunkRefsSetsIterator( ctx, seriesPerBatch, indexr.block.userID, @@ -1412,7 +1402,7 @@ func blockLabelNames(ctx context.Context, indexr *bucketIndexReader, matchers [] minTime, maxTime, 1, // we skip chunks, so this doesn't make any difference stats, - nil, nil, + nil, logger, ) if err != nil { diff --git a/pkg/storegateway/bucket_chunk_reader_test.go b/pkg/storegateway/bucket_chunk_reader_test.go index 7b04b7bb339..186a136d197 100644 --- a/pkg/storegateway/bucket_chunk_reader_test.go +++ b/pkg/storegateway/bucket_chunk_reader_test.go @@ -28,7 +28,7 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { newTestBucketBlock := prepareTestBlock(test.NewTB(t), appendTestSeries(1000)) block := newTestBucketBlock() - seriesRefsIterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( + seriesRefsIterator, err := openBlockSeriesChunkRefsSetsIterator( ctx, 5000, "tenant-1", @@ -43,7 +43,7 @@ func TestBucketChunkReader_refetchChunks(t *testing.T) { block.meta.MaxTime, 2, newSafeQueryStats(), - nil, nil, + nil, log.NewNopLogger(), ) require.NoError(t, err) diff --git a/pkg/storegateway/bucket_index_postings.go b/pkg/storegateway/bucket_index_postings.go index 20cc41792f4..f3bd0708ecc 100644 --- a/pkg/storegateway/bucket_index_postings.go +++ b/pkg/storegateway/bucket_index_postings.go @@ -280,7 +280,6 @@ func filterPostingsByCachedShardHash(ps []storage.SeriesRef, shard *sharding.Sha // Shrink the size. ps = ps[:writeIdx] - return ps } diff --git a/pkg/storegateway/gateway_test.go b/pkg/storegateway/gateway_test.go index 1866ba50453..fcb81838c82 100644 --- a/pkg/storegateway/gateway_test.go +++ b/pkg/storegateway/gateway_test.go @@ -1246,30 +1246,35 @@ func TestStoreGateway_Series_QueryShardingShouldGuaranteeSeriesShardingConsisten // Query all series, 1 shard at a time. for shardID := 0; shardID < numShards; shardID++ { - shardLabel := sharding.FormatShardIDLabelValue(uint64(shardID), numShards) - expectedSeriesIDs := expectedSeriesIDByShard[shardLabel] - - req := &storepb.SeriesRequest{ - MinTime: math.MinInt64, - MaxTime: math.MaxInt64, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_RE, Name: "series_id", Value: ".+"}, - {Type: storepb.LabelMatcher_EQ, Name: sharding.ShardLabel, Value: shardLabel}, - }, - } + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + shardLabel := sharding.FormatShardIDLabelValue(uint64(shardID), numShards) + expectedSeriesIDs := expectedSeriesIDByShard[shardLabel] + + req := &storepb.SeriesRequest{ + MinTime: math.MinInt64, + MaxTime: math.MaxInt64, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: "series_id", Value: ".+"}, + {Type: storepb.LabelMatcher_EQ, Name: sharding.ShardLabel, Value: shardLabel}, + }, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } - seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) - require.NoError(t, err) - assert.Empty(t, warnings) - require.Greater(t, len(seriesSet), 0) + seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) + require.NoError(t, err) + assert.Empty(t, warnings) + require.Greater(t, len(seriesSet), 0) - for _, series := range seriesSet { - // Ensure the series below to the right shard. - seriesLabels := mimirpb.FromLabelAdaptersToLabels(series.Labels) - seriesID, err := strconv.Atoi(seriesLabels.Get("series_id")) - require.NoError(t, err) + for _, series := range seriesSet { + // Ensure the series below to the right shard. + seriesLabels := mimirpb.FromLabelAdaptersToLabels(series.Labels) + seriesID, err := strconv.Atoi(seriesLabels.Get("series_id")) + require.NoError(t, err) - assert.Contains(t, expectedSeriesIDs, seriesID, "series:", seriesLabels.String()) + assert.Contains(t, expectedSeriesIDs, seriesID, "series:", seriesLabels.String()) + } + }) } } } @@ -1321,63 +1326,69 @@ func TestStoreGateway_Series_QueryShardingConcurrency(t *testing.T) { srv := newStoreGatewayTestServer(t, g) - // Keep track of all responses received (by shard). - responsesMx := sync.Mutex{} - responses := make(map[int][][]*storepb.Series) - - wg := sync.WaitGroup{} - wg.Add(numQueries) - - for i := 0; i < numQueries; i++ { - go func(shardIndex int) { - defer wg.Done() - - req := &storepb.SeriesRequest{ - MinTime: math.MinInt64, - MaxTime: math.MaxInt64, - Matchers: []storepb.LabelMatcher{ - {Type: storepb.LabelMatcher_RE, Name: labels.MetricName, Value: ".*"}, - {Type: storepb.LabelMatcher_EQ, Name: sharding.ShardLabel, Value: sharding.ShardSelector{ - ShardIndex: uint64(shardIndex), - ShardCount: uint64(shardCount), - }.LabelValue()}, - }, - } + for _, streamingBatchSize := range []int{0, 1, 5} { + t.Run(fmt.Sprintf("streamingBatchSize=%d", streamingBatchSize), func(t *testing.T) { + // Keep track of all responses received (by shard). + responsesMx := sync.Mutex{} + responses := make(map[int][][]*storepb.Series) - seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) - require.NoError(t, err) - assert.Empty(t, warnings) + wg := sync.WaitGroup{} + wg.Add(numQueries) - responsesMx.Lock() - responses[shardIndex] = append(responses[shardIndex], seriesSet) - responsesMx.Unlock() - }(i % shardCount) - } + for i := 0; i < numQueries; i++ { + go func(shardIndex int) { + defer wg.Done() - // Wait until all requests completed. - wg.Wait() + req := &storepb.SeriesRequest{ + MinTime: math.MinInt64, + MaxTime: math.MaxInt64, + Matchers: []storepb.LabelMatcher{ + {Type: storepb.LabelMatcher_RE, Name: labels.MetricName, Value: ".*"}, + {Type: storepb.LabelMatcher_EQ, Name: sharding.ShardLabel, Value: sharding.ShardSelector{ + ShardIndex: uint64(shardIndex), + ShardCount: uint64(shardCount), + }.LabelValue()}, + }, + StreamingChunksBatchSize: uint64(streamingBatchSize), + } + + seriesSet, warnings, _, err := srv.Series(setUserIDToGRPCContext(ctx, userID), req) + require.NoError(t, err) + assert.Empty(t, warnings) - // We expect all responses for a given shard contain the same series - // and all shards merged together contain all the series in the TSDB block. - totalSeries := 0 + responsesMx.Lock() + responses[shardIndex] = append(responses[shardIndex], seriesSet) + responsesMx.Unlock() + }(i % shardCount) + } - for shardIndex := 0; shardIndex < shardCount; shardIndex++ { - var expected []*storepb.Series + // Wait until all requests completed. + wg.Wait() - for resIdx, res := range responses[shardIndex] { - // We consider the 1st response for a shard as the expected one - // (all in all we expect all responses to be the same). - if resIdx == 0 { - expected = res - totalSeries += len(res) - continue + // We expect all responses for a given shard contain the same series + // and all shards merged together contain all the series in the TSDB block. + totalSeries := 0 + + for shardIndex := 0; shardIndex < shardCount; shardIndex++ { + var expected []*storepb.Series + + for resIdx, res := range responses[shardIndex] { + // We consider the 1st response for a shard as the expected one + // (all in all we expect all responses to be the same). + if resIdx == 0 { + expected = res + totalSeries += len(res) + continue + } + + assert.Equalf(t, expected, res, "shard: %d", shardIndex) + } } - assert.Equalf(t, expected, res, "shard: %d", shardIndex) - } + assert.Equal(t, numSeries, totalSeries) + }) } - assert.Equal(t, numSeries, totalSeries) } func TestStoreGateway_SeriesQueryingShouldEnforceMaxChunksPerQueryLimit(t *testing.T) { diff --git a/pkg/storegateway/series_chunks.go b/pkg/storegateway/series_chunks.go index 1186c7bc34b..28b59d12ed9 100644 --- a/pkg/storegateway/series_chunks.go +++ b/pkg/storegateway/series_chunks.go @@ -419,7 +419,6 @@ func (c *loadingSeriesChunksSetIterator) Next() (retHasNext bool) { c.recordCachedChunks(cachedRanges) } c.chunkReaders.reset() - for sIdx, s := range nextUnloaded.series { nextSet.series[sIdx].lset = s.lset nextSet.series[sIdx].chks = nextSet.newSeriesAggrChunkSlice(s.numChunks()) diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index b1fda57ed23..818a3ea2795 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -745,22 +745,34 @@ func openBlockSeriesChunkRefsSetsIterator( minTime, maxTime int64, // Series must have data in this time range to be returned (ignored if skipChunks=true). chunkRangesPerSeries int, stats *safeQueryStats, - ps []storage.SeriesRef, // If this is not empty, these posting are used as it is without fetching new ones. - pendingMatchers []*labels.Matcher, // This is used in conjunction with 'ps'. + reuse *reusedPostingsAndMatchers, // If this is not nil, these posting and matchers are used as it is without fetching new ones. + logger log.Logger, -) (seriesChunkRefsSetIterator, []storage.SeriesRef, []*labels.Matcher, error) { +) (seriesChunkRefsSetIterator, error) { if batchSize <= 0 { - return nil, nil, nil, errors.New("set size must be a positive number") + return nil, errors.New("set size must be a positive number") } + var ps []storage.SeriesRef + var pendingMatchers []*labels.Matcher + if reuse != nil { + ps = reuse.ps + pendingMatchers = reuse.matchers + } if len(ps) == 0 { var err error ps, pendingMatchers, err = indexr.ExpandedPostings(ctx, matchers, stats) if err != nil { - return nil, nil, nil, errors.Wrap(err, "expanded matching postings") + return nil, errors.Wrap(err, "expanded matching postings") + } + if reuse != nil { + reuse.put(ps, pendingMatchers) } } + returnPs := make([]storage.SeriesRef, len(ps)) + copy(returnPs, ps) + var iterator seriesChunkRefsSetIterator iterator = newLoadingSeriesChunkRefsSetIterator( ctx, @@ -782,7 +794,26 @@ func openBlockSeriesChunkRefsSetsIterator( iterator = newFilteringSeriesChunkRefsSetIterator(pendingMatchers, iterator, stats) } - return seriesStreamingFetchRefsDurationIterator(iterator, stats), ps, pendingMatchers, nil + return seriesStreamingFetchRefsDurationIterator(iterator, stats), nil +} + +// reusedPostings is used to share the postings and matches across function calls for re-use +// in case of streaming series. We have it as a separate struct so that we can give a safe way +// to use it by making a copy where required. You can use it to put items only once. +type reusedPostingsAndMatchers struct { + ps []storage.SeriesRef + matchers []*labels.Matcher +} + +func (p *reusedPostingsAndMatchers) put(ps []storage.SeriesRef, matchers []*labels.Matcher) { + if len(p.ps) > 0 { + // We already have something here. + return + } + // Postings list can be modified later, so we make a copy here. + p.ps = make([]storage.SeriesRef, len(ps)) + copy(p.ps, ps) + p.matchers = matchers } // seriesStreamingFetchRefsDurationIterator tracks the time spent loading series and chunk refs. diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index ab32a94ea33..8227d3a538e 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1719,7 +1719,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { if testCase.skipChunks { strategy |= noChunkRefs } - iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( + iterator, err := openBlockSeriesChunkRefsSetsIterator( ctx, testCase.batchSize, "", @@ -1734,7 +1734,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator(t *testing.T) { maxT, 2, newSafeQueryStats(), - nil, nil, + nil, log.NewNopLogger(), ) require.NoError(t, err) @@ -1821,7 +1821,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { block.pendingReaders.Add(2) // this is hacky, but can be replaced only block.indexReade() accepts a strategy querySeries := func(indexReader *bucketIndexReader) []seriesChunkRefsSet { hashCache := hashcache.NewSeriesHashCache(1024 * 1024).GetBlockCache(block.meta.ULID.String()) - iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( + iterator, err := openBlockSeriesChunkRefsSetsIterator( ctx, testCase.batchSize, "", @@ -1836,7 +1836,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_pendingMatchers(t *testing.T) { block.meta.MaxTime, 2, newSafeQueryStats(), - nil, nil, + nil, log.NewNopLogger(), ) require.NoError(t, err) @@ -1886,7 +1886,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { b.ReportAllocs() for i := 0; i < b.N; i++ { - iterator, _, _, err := openBlockSeriesChunkRefsSetsIterator( + iterator, err := openBlockSeriesChunkRefsSetsIterator( ctx, 5000, "", @@ -1901,7 +1901,7 @@ func BenchmarkOpenBlockSeriesChunkRefsSetsIterator(b *testing.B) { block.meta.MaxTime, 2, newSafeQueryStats(), - nil, nil, + nil, log.NewNopLogger(), ) require.NoError(b, err) @@ -2435,7 +2435,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { // All test cases have a single matcher, so the strategy wouldn't really make a difference. // Pending matchers are tested in other tests. indexReader := b.indexReader(selectAllStrategy{}) - ss, _, _, err := openBlockSeriesChunkRefsSetsIterator( + ss, err := openBlockSeriesChunkRefsSetsIterator( context.Background(), batchSize, "", @@ -2450,7 +2450,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { b.meta.MaxTime, 1, statsColdCache, - nil, nil, + nil, log.NewNopLogger(), ) @@ -2467,7 +2467,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { } statsWarnCache := newSafeQueryStats() - ss, _, _, err = openBlockSeriesChunkRefsSetsIterator( + ss, err = openBlockSeriesChunkRefsSetsIterator( context.Background(), batchSize, "", @@ -2482,7 +2482,7 @@ func TestOpenBlockSeriesChunkRefsSetsIterator_SeriesCaching(t *testing.T) { b.meta.MaxTime, 1, statsWarnCache, - nil, nil, + nil, log.NewNopLogger(), ) require.NoError(t, err) From 9343e3133d0dbe2e0f51fa8fd697f5f3aaf51c5c Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 3 Jul 2023 14:55:31 +0530 Subject: [PATCH 62/75] Fix review comments Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 3c7235ebcca..e63bbe783a1 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -584,17 +584,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return status.Error(codes.InvalidArgument, errors.Wrap(err, "parse query sharding label").Error()) } - spanLogger := spanlogger.FromContext(srv.Context(), s.logger) - level.Debug(spanLogger).Log( - "msg", "BucketStore.Series", - "request min time", time.UnixMilli(req.MinTime).UTC().Format(time.RFC3339Nano), - "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), - "request matchers", storepb.PromMatchersToString(matchers...), - "request shard selector", maybeNilShard(shardSelector).LabelValue(), - "streaming chunks batch size", req.StreamingChunksBatchSize, - ) - var ( + spanLogger = spanlogger.FromContext(srv.Context(), s.logger) ctx = srv.Context() stats = newSafeQueryStats() reqBlockMatchers []*labels.Matcher @@ -613,7 +604,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } } - logSeriesRequestToSpan(srv.Context(), s.logger, req.MinTime, req.MaxTime, matchers, reqBlockMatchers, shardSelector) + logSeriesRequestToSpan(srv.Context(), s.logger, req.MinTime, req.MaxTime, matchers, reqBlockMatchers, shardSelector, req.StreamingChunksBatchSize) blocks, indexReaders, chunkReaders := s.openBlocksForReading(ctx, req.SkipChunks, req.MinTime, req.MaxTime, reqBlockMatchers, stats) // We must keep the readers open until all their data has been sent. @@ -666,6 +657,11 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie "num_series", numSeries, "duration", time.Since(seriesLoadStart), ) + + if numSeries == 0 { + // There is no series to send chunks for. + return nil + } } // We create the limiter twice in the case of streaming so that we don't double count the series @@ -960,7 +956,7 @@ func (s *BucketStore) sendHintsAndStats(srv storepb.Store_SeriesServer, resHints return nil } -func logSeriesRequestToSpan(ctx context.Context, l log.Logger, minT, maxT int64, matchers, blockMatchers []*labels.Matcher, shardSelector *sharding.ShardSelector) { +func logSeriesRequestToSpan(ctx context.Context, l log.Logger, minT, maxT int64, matchers, blockMatchers []*labels.Matcher, shardSelector *sharding.ShardSelector, streamingChunksBatchSize uint64) { spanLogger := spanlogger.FromContext(ctx, l) level.Debug(spanLogger).Log( "msg", "BucketStore.Series", @@ -969,6 +965,7 @@ func logSeriesRequestToSpan(ctx context.Context, l log.Logger, minT, maxT int64, "request matchers", storepb.PromMatchersToString(matchers...), "request block matchers", storepb.PromMatchersToString(blockMatchers...), "request shard selector", maybeNilShard(shardSelector).LabelValue(), + "streaming chunks batch size", streamingChunksBatchSize, ) } From 92d30a463809ef89d27c8a1e180084674d864fa9 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 4 Jul 2023 12:58:06 +0530 Subject: [PATCH 63/75] Fix integration tests Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 41 ++++++++++++++----------------------- 1 file changed, 15 insertions(+), 26 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index 753775d2873..6139a42d03b 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -249,24 +249,21 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream // thanos_store_index_cache_requests_total: ExpandedPostings: 5, Postings: 2, Series: 2 instantQueriesCount++ - // Check the in-memory index cache metrics (in the store-gateway). - + comparingFunction := e2e.Equals if streamingEnabled { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + 5 - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Greater(0), "thanos_store_index_cache_hits_total")) // Streaming uses the index cache - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+5), "thanos_memcached_operations_total")) - } - } else { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2), "thanos_memcached_operations_total")) // one set for each get - } + // Some metrics should be higher when streaming is enabled. The exact number is not deterministic in every case. + comparingFunction = e2e.Greater } + + // Check the in-memory index cache metrics (in the store-gateway). + require.NoError(t, storeGateways.WaitSumMetrics(comparingFunction(9), "thanos_store_index_cache_requests_total")) // 5 + 2 + 2 + require.NoError(t, storeGateways.WaitSumMetrics(comparingFunction(0), "thanos_store_index_cache_hits_total")) // no cache hit cause the cache was empty + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // 2 series both for postings and series cache, 2 expanded postings on one block, 3 on another one + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(comparingFunction(9*2), "thanos_memcached_operations_total")) // one set for each get } // Query back again the 1st series from storage. This time it should use the index cache. @@ -276,22 +273,14 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream assert.Equal(t, expectedVector1, result.(model.Vector)) expectedFetchedSeries++ // Storage only. - if streamingEnabled { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+5+3), "thanos_store_index_cache_requests_total")) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(4), "thanos_store_index_cache_hits_total")) - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.GreaterOrEqual(9*2+2+5), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) - } - } else { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9+2), "thanos_store_index_cache_requests_total")) - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache - if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { - require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(9*2+2), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) - } - } + require.NoError(t, storeGateways.WaitSumMetrics(comparingFunction(9+2), "thanos_store_index_cache_requests_total")) + require.NoError(t, storeGateways.WaitSumMetrics(comparingFunction(2), "thanos_store_index_cache_hits_total")) // this time has used the index cache + if testCfg.indexCacheBackend == tsdb.IndexCacheBackendInMemory { require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items")) // as before require.NoError(t, storeGateways.WaitSumMetrics(e2e.Equals(2*2+2+3), "thanos_store_index_cache_items_added_total")) // as before + } else if testCfg.indexCacheBackend == tsdb.IndexCacheBackendMemcached { + require.NoError(t, storeGateways.WaitSumMetrics(comparingFunction(9*2+2), "thanos_memcached_operations_total")) // as before + 2 gets (expanded postings and series) } // Query range. We expect 1 data point with a value of 3 (number of series). From 63c3b59a789c3f25cc1cff0a441d0106ebc488ce Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 4 Jul 2023 13:02:12 +0530 Subject: [PATCH 64/75] Add changelog entry Signed-off-by: Ganesh Vernekar --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c0ca681e91f..9c813750628 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,7 @@ * [CHANGE] Store-gateway: skip verifying index header integrity upon loading. To enable verification set `blocks_storage.bucket_store.index_header.verify_on_load: true`. * [CHANGE] Querier: change the default value of the experimental `-querier.streaming-chunks-per-ingester-buffer-size` flag to 256. #5203 * [CHANGE] Querier: only initiate query requests to ingesters in the `ACTIVE` state in the ring. #5342 +* [CHANGE] Querier: `-querier.prefer-streaming-chunks` was renamed to `-querier.prefer-streaming-chunks-for-ingesters` to enable streaming chunks from ingesters to queriers. * [FEATURE] Cardinality API: Add a new `count_method` parameter which enables counting active series #5136 * [FEATURE] Query-frontend: added experimental support to cache cardinality query responses. The cache will be used when `-query-frontend.cache-results` is enabled and `-query-frontend.results-cache-ttl-for-cardinality-query` set to a value greater than 0. The following metrics have been added to track the query results cache hit ratio per `request_type`: #5212 #5235 * `cortex_frontend_query_result_cache_requests_total{request_type="query_range|cardinality"}` @@ -39,6 +40,7 @@ * [ENHANCEMENT] Querier: add `cortex_queries_rejected_total` metric that counts the number of queries rejected due to hitting a limit (eg. max series per query or max chunks per query). #5316 * [ENHANCEMENT] Querier: add experimental `-querier.minimize-ingester-requests-hedging-delay` option to initiate requests to further ingesters when request minimisation is enabled and not all initial requests have completed. #5368 * [ENHANCEMENT] Clarify docs for `-ingester.client.*` flags to make it clear that these are used by both queriers and distributors. #5375 +* [ENHANCEMENT] Querier and Store-gateway: add experimental support for streaming chunks from store-gateways to queriers while evaluating queries. This can be enabled with `-querier.prefer-streaming-chunks-from-store-gateways=true`. #5182 * [BUGFIX] Ingester: Handle when previous ring state is leaving and the number of tokens has changed. #5204 * [BUGFIX] Querier: fix issue where queries that use the `timestamp()` function fail with `execution: attempted to read series at index 0 from stream, but the stream has already been exhausted` if streaming chunks from ingesters to queriers is enabled. #5370 * [BUGFIX] memberlist: bring back `memberlist_client_kv_store_count` metric that used to exist in Cortex, but got lost during dskit updates before Mimir 2.0. #5377 From a4044464d98832a11800d1ecce9b8981f62f832d Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 4 Jul 2023 13:28:16 +0530 Subject: [PATCH 65/75] Integration tests Signed-off-by: Ganesh Vernekar --- integration/querier_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/integration/querier_test.go b/integration/querier_test.go index 6139a42d03b..463e4071836 100644 --- a/integration/querier_test.go +++ b/integration/querier_test.go @@ -251,8 +251,8 @@ func testQuerierWithBlocksStorageRunningInMicroservicesMode(t *testing.T, stream comparingFunction := e2e.Equals if streamingEnabled { - // Some metrics should be higher when streaming is enabled. The exact number is not deterministic in every case. - comparingFunction = e2e.Greater + // Some metrics can be higher when streaming is enabled. The exact number is not deterministic in every case. + comparingFunction = e2e.GreaterOrEqual } // Check the in-memory index cache metrics (in the store-gateway). From 8903d2a143c02e3546d7ecacf51e982dba1c27f8 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 5 Jul 2023 15:39:43 +0530 Subject: [PATCH 66/75] Move and rename stream reader for store gateway Signed-off-by: Ganesh Vernekar --- pkg/querier/blocks_store_queryable.go | 6 ++--- .../streaming_readers.go} | 24 +++++++++---------- 2 files changed, 15 insertions(+), 15 deletions(-) rename pkg/{storegateway/bucket_streaming_readers.go => querier/streaming_readers.go} (87%) diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 02bb06e726f..609ba0145e8 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -708,7 +708,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor spanLog = spanlogger.FromContext(ctx, q.logger) queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) reqStats = stats.FromContext(ctx) - streamReaders []*storegateway.SeriesChunksStreamReader + streamReaders []*StoreGatewayStreamReader streams []storegatewaypb.StoreGateway_SeriesClient ) @@ -831,7 +831,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } reqStats.AddFetchedIndexBytes(indexBytesFetched) - var streamReader *storegateway.SeriesChunksStreamReader + var streamReader *StoreGatewayStreamReader if len(mySeries) > 0 { chunksFetched, chunkBytes := countChunksAndBytes(mySeries...) @@ -850,7 +850,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } else if len(myStreamingSeries) > 0 { // FetchedChunks and FetchedChunkBytes are added by the SeriesChunksStreamReader. reqStats.AddFetchedSeries(uint64(len(myStreamingSeries))) - streamReader = storegateway.NewSeriesChunksStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) + streamReader = NewStoreGatewayStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) level.Debug(spanLog).Log("msg", "received streaming series from store-gateway", "instance", c.RemoteAddress(), "fetched series", len(myStreamingSeries), diff --git a/pkg/storegateway/bucket_streaming_readers.go b/pkg/querier/streaming_readers.go similarity index 87% rename from pkg/storegateway/bucket_streaming_readers.go rename to pkg/querier/streaming_readers.go index 594c2789e36..ee1f97ce141 100644 --- a/pkg/storegateway/bucket_streaming_readers.go +++ b/pkg/querier/streaming_readers.go @@ -1,6 +1,6 @@ // SPDX-License-Identifier: AGPL-3.0-only -package storegateway +package querier import ( "fmt" @@ -19,9 +19,9 @@ import ( // The code in this file is used by the queriers to read the streaming chunks from the storegateway. -// SeriesChunksStreamReader is responsible for managing the streaming of chunks from a storegateway and buffering +// StoreGatewayStreamReader is responsible for managing the streaming of chunks from a storegateway and buffering // chunks in memory until they are consumed by the PromQL engine. -type SeriesChunksStreamReader struct { +type StoreGatewayStreamReader struct { client storegatewaypb.StoreGateway_SeriesClient expectedSeriesCount int queryLimiter *limiter.QueryLimiter @@ -33,8 +33,8 @@ type SeriesChunksStreamReader struct { errorChan chan error } -func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *SeriesChunksStreamReader { - return &SeriesChunksStreamReader{ +func NewStoreGatewayStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *StoreGatewayStreamReader { + return &StoreGatewayStreamReader{ client: client, expectedSeriesCount: expectedSeriesCount, queryLimiter: queryLimiter, @@ -46,20 +46,20 @@ func NewSeriesChunksStreamReader(client storegatewaypb.StoreGateway_SeriesClient } } -// Close cleans up all resources associated with this SeriesChunksStreamReader. +// Close cleans up all resources associated with this StoreGatewayStreamReader. // This method should only be called if StartBuffering is not called. -func (s *SeriesChunksStreamReader) Close() { +func (s *StoreGatewayStreamReader) Close() { if err := s.client.CloseSend(); err != nil { level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) } } // StartBuffering begins streaming series' chunks from the storegateway associated with -// this SeriesChunksStreamReader. Once all series have been consumed with GetChunks, all resources -// associated with this SeriesChunksStreamReader are cleaned up. +// this StoreGatewayStreamReader. Once all series have been consumed with GetChunks, all resources +// associated with this StoreGatewayStreamReader are cleaned up. // If an error occurs while streaming, a subsequent call to GetChunks will return an error. -// To cancel buffering, cancel the context associated with this SeriesChunksStreamReader's storegatewaypb.StoreGateway_SeriesClient. -func (s *SeriesChunksStreamReader) StartBuffering() { +// To cancel buffering, cancel the context associated with this StoreGatewayStreamReader's storegatewaypb.StoreGateway_SeriesClient. +func (s *StoreGatewayStreamReader) StartBuffering() { ctxDone := s.client.Context().Done() go func() { @@ -138,7 +138,7 @@ func (s *SeriesChunksStreamReader) StartBuffering() { // GetChunks returns the chunks for the series with index seriesIndex. // This method must be called with monotonically increasing values of seriesIndex. -func (s *SeriesChunksStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { +func (s *StoreGatewayStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { if len(s.chunksBatch) == 0 { chks, channelOpen := <-s.seriesChunksChan From a545503cdc17fd8c3f650c67122e127bba7d588c Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 5 Jul 2023 17:01:14 +0530 Subject: [PATCH 67/75] Fix StoreGatewayStreamReader and add tests for it Signed-off-by: Ganesh Vernekar --- pkg/querier/streaming_readers.go | 16 +- pkg/querier/streaming_readers_test.go | 319 ++++++++++++++++++++++++++ 2 files changed, 329 insertions(+), 6 deletions(-) create mode 100644 pkg/querier/streaming_readers_test.go diff --git a/pkg/querier/streaming_readers.go b/pkg/querier/streaming_readers.go index ee1f97ce141..11e08596f5d 100644 --- a/pkg/querier/streaming_readers.go +++ b/pkg/querier/streaming_readers.go @@ -91,14 +91,13 @@ func (s *StoreGatewayStreamReader) StartBuffering() { return } - totalSeries++ - if totalSeries > s.expectedSeriesCount { - s.errorChan <- fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries) - return + if len(c.Series) == 0 { + continue } - if err := s.queryLimiter.AddChunks(len(c.Series)); err != nil { - s.errorChan <- validation.LimitError(err.Error()) + totalSeries += len(c.Series) + if totalSeries > s.expectedSeriesCount { + s.errorChan <- fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries) return } @@ -110,6 +109,11 @@ func (s *StoreGatewayStreamReader) StartBuffering() { chunkBytes += ch.Size() } } + + if err := s.queryLimiter.AddChunks(numChunks); err != nil { + s.errorChan <- validation.LimitError(err.Error()) + return + } if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { s.errorChan <- validation.LimitError(err.Error()) return diff --git a/pkg/querier/streaming_readers_test.go b/pkg/querier/streaming_readers_test.go new file mode 100644 index 00000000000..8fd7aa67598 --- /dev/null +++ b/pkg/querier/streaming_readers_test.go @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package querier + +import ( + "context" + "errors" + "io" + "testing" + "time" + + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "google.golang.org/grpc/metadata" + + "github.com/grafana/mimir/pkg/querier/stats" + "github.com/grafana/mimir/pkg/storegateway/storepb" + "github.com/grafana/mimir/pkg/util/limiter" + "github.com/grafana/mimir/pkg/util/test" +) + +func TestStoreGatewayStreamReader_HappyPaths(t *testing.T) { + series0 := []storepb.AggrChunk{createChunk(t, 1000, 1)} + series1 := []storepb.AggrChunk{createChunk(t, 1000, 2)} + series2 := []storepb.AggrChunk{createChunk(t, 1000, 3)} + series3 := []storepb.AggrChunk{createChunk(t, 1000, 4)} + series4 := []storepb.AggrChunk{createChunk(t, 1000, 5)} + + testCases := map[string]struct { + batches []storepb.StreamingChunksBatch + }{ + "single series per batch": { + batches: []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: series0}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 1, Chunks: series1}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 2, Chunks: series2}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 3, Chunks: series3}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 4, Chunks: series4}}}, + }, + }, + "multiple series per batch": { + batches: []storepb.StreamingChunksBatch{ + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 0, Chunks: series0}, + {SeriesIndex: 1, Chunks: series1}, + {SeriesIndex: 2, Chunks: series2}, + }, + }, + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 3, Chunks: series3}, + {SeriesIndex: 4, Chunks: series4}, + }, + }, + }, + }, + "empty batches": { + batches: []storepb.StreamingChunksBatch{ + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 0, Chunks: series0}, + {SeriesIndex: 1, Chunks: series1}, + {SeriesIndex: 2, Chunks: series2}, + }, + }, + {}, + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 3, Chunks: series3}, + {SeriesIndex: 4, Chunks: series4}, + }, + }, + {}, + }, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: testCase.batches} + reader := NewStoreGatewayStreamReader(mockClient, 5, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + for i, expected := range [][]storepb.AggrChunk{series0, series1, series2, series3, series4} { + actual, err := reader.GetChunks(uint64(i)) + require.NoError(t, err) + require.Equalf(t, expected, actual, "received unexpected chunk for series index %v", i) + } + + require.Eventually(t, func() bool { + return mockClient.closed.Load() + }, time.Second, 10*time.Millisecond) + }) + } +} + +func TestStoreGatewayStreamReader_AbortsWhenContextCancelled(t *testing.T) { + // Ensure that the buffering goroutine is not leaked after context cancellation. + test.VerifyNoLeak(t) + + // Create multiple batches to ensure that the buffering goroutine becomes blocked waiting to send further chunks to GetChunks(). + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 1, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 4.56)}}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 2, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 7.89)}}}}, + } + + ctx, cancel := context.WithCancel(context.Background()) + mockClient := &mockStoreGatewayQueryStreamClient{ctx: ctx, batches: batches} + + reader := NewStoreGatewayStreamReader(mockClient, 3, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + cancel() + reader.StartBuffering() + + for i := 0; i < 3; i++ { + _, err := reader.GetChunks(uint64(i)) + + if errors.Is(err, context.Canceled) { + break + } + + require.NoError(t, err) + + if i == 2 { + require.Fail(t, "expected GetChunks to report context cancellation error before reaching end of stream") + } + } + + require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after context cancelled") +} + +func TestStoreGatewayStreamReader_ReadingSeriesOutOfOrder(t *testing.T) { + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(1) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has series with index 0") +} + +func TestStoreGatewayStreamReader_ReadingMoreSeriesThanAvailable(t *testing.T) { + firstSeries := []storepb.AggrChunk{createChunk(t, 1000, 1.23)} + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: firstSeries}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(0) + require.NoError(t, err) + require.Equal(t, s, firstSeries) + + s, err = reader.GetChunks(1) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has already been exhausted") +} + +func TestStoreGatewayStreamReader_ReceivedFewerSeriesThanExpected(t *testing.T) { + firstSeries := []storepb.AggrChunk{createChunk(t, 1000, 1.23)} + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: firstSeries}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := NewStoreGatewayStreamReader(mockClient, 3, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(0) + require.NoError(t, err) + require.Equal(t, s, firstSeries) + + s, err = reader.GetChunks(1) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has failed: expected to receive 3 series, but got EOF after receiving 1 series") + + require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after failure") +} + +func TestStoreGatewayStreamReader_ReceivedMoreSeriesThanExpected(t *testing.T) { + batches := []storepb.StreamingChunksBatch{ + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, + {SeriesIndex: 1, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, + {SeriesIndex: 2, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, + }, + }, + } + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(0) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 0 from stream, but the stream has failed: expected to receive only 1 series, but received at least 3 series") + + require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after receiving more series than expected") +} + +func TestStoreGatewayStreamReader_ChunksLimits(t *testing.T) { + testCases := map[string]struct { + maxChunks int + maxChunkBytes int + expectedError string + }{ + "query under both limits": { + maxChunks: 4, + maxChunkBytes: 200, + expectedError: "", + }, + "query selects too many chunks": { + maxChunks: 2, + maxChunkBytes: 200, + expectedError: "the query exceeded the maximum number of chunks (limit: 2 chunks) (err-mimir-max-chunks-per-query). Consider reducing the time range and/or number of series selected by the query. One way to reduce the number of selected series is to add more label matchers to the query. Otherwise, to adjust the related per-tenant limit, configure -querier.max-fetched-chunks-per-query, or contact your service administrator.", + }, + "query selects too many chunk bytes": { + maxChunks: 4, + maxChunkBytes: 50, + expectedError: "the query exceeded the aggregated chunks size limit (limit: 50 bytes) (err-mimir-max-chunks-bytes-per-query). Consider reducing the time range and/or number of series selected by the query. One way to reduce the number of selected series is to add more label matchers to the query. Otherwise, to adjust the related per-tenant limit, configure -querier.max-fetched-chunk-bytes-per-query, or contact your service administrator.", + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{ + createChunk(t, 1000, 1.23), + createChunk(t, 1100, 1.23), + createChunk(t, 1200, 1.23), + }}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + queryMetrics := stats.NewQueryMetrics(prometheus.NewPedanticRegistry()) + reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, testCase.maxChunkBytes, testCase.maxChunks, queryMetrics), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + _, err := reader.GetChunks(0) + + if testCase.expectedError == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, testCase.expectedError) + } + + require.Eventually(t, mockClient.closed.Load, time.Second, 10*time.Millisecond, "expected gRPC client to be closed") + }) + } +} + +func createChunk(t *testing.T, time int64, value float64) storepb.AggrChunk { + promChunk := chunkenc.NewXORChunk() + app, err := promChunk.Appender() + require.NoError(t, err) + + app.Append(time, value) + + return storepb.AggrChunk{ + MinTime: time, + MaxTime: time, + Raw: &storepb.Chunk{ + Type: storepb.Chunk_XOR, + Data: promChunk.Bytes(), + }, + } +} + +type mockStoreGatewayQueryStreamClient struct { + ctx context.Context + batches []storepb.StreamingChunksBatch + closed atomic.Bool +} + +func (m *mockStoreGatewayQueryStreamClient) Recv() (*storepb.SeriesResponse, error) { + if len(m.batches) == 0 { + return nil, io.EOF + } + + batch := m.batches[0] + m.batches = m.batches[1:] + + return storepb.NewStreamingChunksResponse(&batch), nil +} + +func (m *mockStoreGatewayQueryStreamClient) Header() (metadata.MD, error) { + panic("not supported on mock") +} + +func (m *mockStoreGatewayQueryStreamClient) Trailer() metadata.MD { + panic("not supported on mock") +} + +func (m *mockStoreGatewayQueryStreamClient) CloseSend() error { + m.closed.Store(true) + return nil +} + +func (m *mockStoreGatewayQueryStreamClient) Context() context.Context { + return m.ctx +} + +func (m *mockStoreGatewayQueryStreamClient) SendMsg(interface{}) error { + panic("not supported on mock") +} + +func (m *mockStoreGatewayQueryStreamClient) RecvMsg(interface{}) error { + panic("not supported on mock") +} From e78fc3d742443b5f05d600cb18b49d728681dc08 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 5 Jul 2023 17:42:45 +0530 Subject: [PATCH 68/75] Fix comments Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 25 +++++++++---------------- pkg/storegateway/bucket_index_reader.go | 4 ++-- pkg/storegateway/series_refs.go | 15 ++++++++++++++- pkg/storegateway/storepb/rpc.proto | 3 +++ 4 files changed, 28 insertions(+), 19 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index e63bbe783a1..27c6c9ee4cf 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -1983,7 +1983,7 @@ type symbolizedLabel struct { // decodeSeries decodes a series entry from the given byte slice decoding all chunk metas of the series. // If skipChunks is specified decodeSeries does not return any chunks, but only labels and only if at least single chunk is within time range. // decodeSeries returns false, when there are no series data for given time range. -func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]chunks.Meta, resMint, resMaxt int64, strategy seriesIteratorStrategy) (ok bool, lset []symbolizedLabel, err error) { +func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]chunks.Meta, strategy seriesIteratorStrategy) (ok bool, lset []symbolizedLabel, err error) { *chks = (*chks)[:0] @@ -2009,8 +2009,7 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch // Similar for first ref. ref := int64(d.Uvarint64()) - isNoChunks := strategy.isNoChunkRefs() - isNoChunkOverlapMintMaxt := strategy.isNoChunkRefsAndOverlapMintMaxt() + isNoChunks := strategy.isNoChunkRefsOnEntireBlock() for i := 0; i < k; i++ { if i > 0 { mint += int64(d.Uvarint64()) @@ -2019,23 +2018,17 @@ func decodeSeries(b []byte, lsetPool *pool.SlabPool[symbolizedLabel], chks *[]ch } // Found a chunk. - if isNoChunkOverlapMintMaxt { - // We are not interested in chunks, but we want the series to overlap with the query mint-maxt. - if maxt >= resMint && mint <= resMaxt { - // Chunk overlaps. - return true, lset, nil - } - } else if isNoChunks { + if isNoChunks { // We are not interested in chunks and we know there is at least one, that's enough to return series. return true, lset, nil - } else { - *chks = append(*chks, chunks.Meta{ - Ref: chunks.ChunkRef(ref), - MinTime: mint, - MaxTime: maxt, - }) } + *chks = append(*chks, chunks.Meta{ + Ref: chunks.ChunkRef(ref), + MinTime: mint, + MaxTime: maxt, + }) + mint = maxt } return len(*chks) > 0, lset, d.Err() diff --git a/pkg/storegateway/bucket_index_reader.go b/pkg/storegateway/bucket_index_reader.go index e0d6adb7d07..a8cc3daccae 100644 --- a/pkg/storegateway/bucket_index_reader.go +++ b/pkg/storegateway/bucket_index_reader.go @@ -763,12 +763,12 @@ func (l *bucketIndexLoadedSeries) addSeries(ref storage.SeriesRef, data []byte) // Error is returned on decoding error or if the reference does not resolve to a known series. // // It's NOT safe to call this function concurrently with addSeries(). -func (l *bucketIndexLoadedSeries) unsafeLoadSeries(ref storage.SeriesRef, chks *[]chunks.Meta, mint, maxt int64, strategy seriesIteratorStrategy, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) (ok bool, _ []symbolizedLabel, err error) { +func (l *bucketIndexLoadedSeries) unsafeLoadSeries(ref storage.SeriesRef, chks *[]chunks.Meta, strategy seriesIteratorStrategy, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) (ok bool, _ []symbolizedLabel, err error) { b, ok := l.series[ref] if !ok { return false, nil, errors.Errorf("series %d not found", ref) } stats.seriesProcessed++ stats.seriesProcessedSizeSum += len(b) - return decodeSeries(b, lsetPool, chks, mint, maxt, strategy) + return decodeSeries(b, lsetPool, chks, strategy) } diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index 818a3ea2795..f5d7e2c0f4e 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -987,12 +987,25 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "preload series") } + isNoChunkRefsAndOverlapMintMaxt := s.strategy.isNoChunkRefsAndOverlapMintMaxt() for _, id := range postings { var ( metas []chunks.Meta series symbolizedSeriesChunkRefs ) series.lset, metas, err = s.loadSeries(id, loadedSeries, stats, symbolizedSet.labelsPool) + if isNoChunkRefsAndOverlapMintMaxt { + overlaps := false + for _, m := range metas { + if m.MaxTime >= s.minTime && m.MinTime <= s.maxTime { + overlaps = true + } + } + if !overlaps { + continue + } + } + if err != nil { return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "read series") } @@ -1189,7 +1202,7 @@ func (s *loadingSeriesChunkRefsSetIterator) Err() error { // loadSeries returns a for chunks. It is not safe to use the returned []chunks.Meta after calling loadSeries again func (s *loadingSeriesChunkRefsSetIterator) loadSeries(ref storage.SeriesRef, loadedSeries *bucketIndexLoadedSeries, stats *queryStats, lsetPool *pool.SlabPool[symbolizedLabel]) ([]symbolizedLabel, []chunks.Meta, error) { - ok, lbls, err := loadedSeries.unsafeLoadSeries(ref, &s.chunkMetasBuffer, s.minTime, s.maxTime, s.strategy, stats, lsetPool) + ok, lbls, err := loadedSeries.unsafeLoadSeries(ref, &s.chunkMetasBuffer, s.strategy, stats, lsetPool) if !ok || err != nil { return nil, nil, errors.Wrap(err, "loadSeries") } diff --git a/pkg/storegateway/storepb/rpc.proto b/pkg/storegateway/storepb/rpc.proto index 3f20cb2adb1..dc6c9cd09b1 100644 --- a/pkg/storegateway/storepb/rpc.proto +++ b/pkg/storegateway/storepb/rpc.proto @@ -121,11 +121,14 @@ message SeriesResponse { /// streaming_series is a list of series labels sent as part of a streaming Series call. /// These are populated only when streaming_chunks_batch_size > 0 in the series request. + /// Series are sent in batches because sending one at a time has additional CPU overhead for not much memory gains. StreamingSeriesBatch streaming_series = 5; /// streaming_chunks is a list of chunks sent as part of a streaming Series request. /// They are associated with series labels sent as streaming_series earlier in the same Series request. /// These are populated only when streaming_chunks_batch_size > 0 in the series request. + /// Chunks are sent in batches because sending one series' chunks at a time has additional + // CPU overhead for not much memory gains. StreamingChunksBatch streaming_chunks = 6; } } From 2c5ee20e00318eb0efd6fbf2befaf88423d5d285 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 5 Jul 2023 20:11:30 +0530 Subject: [PATCH 69/75] Fix review comments part 1 Signed-off-by: Ganesh Vernekar --- CHANGELOG.md | 2 +- pkg/storegateway/bucket.go | 34 ++++++++++++----------- pkg/storegateway/series_refs.go | 26 +++++++++++------- pkg/storegateway/series_refs_test.go | 40 ++++++++++++++++++++++++++++ 4 files changed, 75 insertions(+), 27 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a7147c68496..bbbca22cc77 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ * [CHANGE] Store-gateway: skip verifying index header integrity upon loading. To enable verification set `blocks_storage.bucket_store.index_header.verify_on_load: true`. * [CHANGE] Querier: change the default value of the experimental `-querier.streaming-chunks-per-ingester-buffer-size` flag to 256. #5203 * [CHANGE] Querier: only initiate query requests to ingesters in the `ACTIVE` state in the ring. #5342 -* [CHANGE] Querier: `-querier.prefer-streaming-chunks` was renamed to `-querier.prefer-streaming-chunks-for-ingesters` to enable streaming chunks from ingesters to queriers. +* [CHANGE] Querier: Renamed `-querier.prefer-streaming-chunks` to `-querier.prefer-streaming-chunks-for-ingesters` to enable streaming chunks from ingesters to queriers. #5182 * [CHANGE] Querier: `-query-frontend.cache-unaligned-requests` has been moved from a global flag to a per-tenant override. #5312 * [CHANGE] Ingester: removed `cortex_ingester_shipper_dir_syncs_total` and `cortex_ingester_shipper_dir_sync_failures_total` metrics. The former metric was not much useful, and the latter was never incremented. #5396 * [FEATURE] Cardinality API: Add a new `count_method` parameter which enables counting active series #5136 diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 27c6c9ee4cf..cb4e3152ba6 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -620,16 +620,18 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie readers = newChunkReaders(chunkReaders) } - // If we are streaming the series labels and chunks separately, we don't need to fetch the postings - // twice. So we use these slices to re-use them. - // Each reusePostings[i] and reusePendingMatchers[i] corresponds to a single block. var ( + // If we are streaming the series labels and chunks separately, we don't need to fetch the postings + // twice. So we use these slices to re-use them. Each reuse[i] corresponds to a single block. reuse []*reusedPostingsAndMatchers resHints = &hintspb.SeriesResponseHints{} ) for _, b := range blocks { resHints.AddQueriedBlock(b.meta.ULID) } + if err := s.sendHints(srv, resHints); err != nil { + return err + } if req.StreamingChunksBatchSize > 0 { var ( seriesSet storepb.SeriesSet @@ -643,7 +645,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - numSeries, err := s.sendStreamingSeriesLabelsHintsStats(req, srv, stats, seriesSet, resHints) + numSeries, err := s.sendStreamingSeriesLabelsAndStats(req, srv, stats, seriesSet, resHints) if err != nil { return err } @@ -653,7 +655,6 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), "request matchers", storepb.PromMatchersToString(matchers...), "request shard selector", maybeNilShard(shardSelector).LabelValue(), - "streaming chunks batch size", req.StreamingChunksBatchSize, "num_series", numSeries, "duration", time.Since(seriesLoadStart), ) @@ -700,24 +701,23 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), "request matchers", storepb.PromMatchersToString(matchers...), "request shard selector", maybeNilShard(shardSelector).LabelValue(), - "streaming chunks batch size", req.StreamingChunksBatchSize, "num_series", numSeries, "num_chunks", numChunks, "duration", time.Since(start), ) - if req.StreamingChunksBatchSize == 0 || req.SkipChunks { - // Hints and stats were not sent before, so send it now. - return s.sendHintsAndStats(srv, resHints, stats) + if req.StreamingChunksBatchSize == 0 { + // Stats were not sent before, so send it now. + return s.sendStats(srv, stats) } return nil } -// sendStreamingSeriesLabelsHintsStats sends the labels of the streaming series. +// sendStreamingSeriesLabelsAndStats sends the labels of the streaming series. // Since hints and stats need to be sent before the "end of stream" streaming series message, // this function also sends the hints and the stats. -func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( +func (s *BucketStore) sendStreamingSeriesLabelsAndStats( req *storepb.SeriesRequest, srv storepb.Store_SeriesServer, stats *safeQueryStats, @@ -772,9 +772,8 @@ func (s *BucketStore) sendStreamingSeriesLabelsHintsStats( return 0, errors.Wrap(seriesSet.Err(), "expand series set") } - // We need to send hints and stats before sending the chunks. - // Also, these need to be sent before we send IsEndOfSeriesStream=true. - if err := s.sendHintsAndStats(srv, resHints, stats); err != nil { + // We need to send stats before sending IsEndOfSeriesStream=true. + if err := s.sendStats(srv, stats); err != nil { return 0, err } @@ -937,7 +936,7 @@ func (s *BucketStore) sendMessage(typ string, srv storepb.Store_SeriesServer, ms return nil } -func (s *BucketStore) sendHintsAndStats(srv storepb.Store_SeriesServer, resHints *hintspb.SeriesResponseHints, stats *safeQueryStats) error { +func (s *BucketStore) sendHints(srv storepb.Store_SeriesServer, resHints *hintspb.SeriesResponseHints) error { var anyHints *types.Any var err error if anyHints, err = types.MarshalAny(resHints); err != nil { @@ -948,11 +947,14 @@ func (s *BucketStore) sendHintsAndStats(srv storepb.Store_SeriesServer, resHints return status.Error(codes.Unknown, errors.Wrap(err, "send series response hints").Error()) } + return nil +} + +func (s *BucketStore) sendStats(srv storepb.Store_SeriesServer, stats *safeQueryStats) error { unsafeStats := stats.export() if err := srv.Send(storepb.NewStatsResponse(unsafeStats.postingsTouchedSizeSum + unsafeStats.seriesProcessedSizeSum)); err != nil { return status.Error(codes.Unknown, errors.Wrap(err, "sends series response stats").Error()) } - return nil } diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index f5d7e2c0f4e..f4d49504594 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -746,33 +746,33 @@ func openBlockSeriesChunkRefsSetsIterator( chunkRangesPerSeries int, stats *safeQueryStats, reuse *reusedPostingsAndMatchers, // If this is not nil, these posting and matchers are used as it is without fetching new ones. - logger log.Logger, ) (seriesChunkRefsSetIterator, error) { if batchSize <= 0 { return nil, errors.New("set size must be a positive number") } - var ps []storage.SeriesRef - var pendingMatchers []*labels.Matcher + var ( + ps []storage.SeriesRef + pendingMatchers []*labels.Matcher + fetchPostings = true + ) if reuse != nil { + fetchPostings = !reuse.isSet() ps = reuse.ps pendingMatchers = reuse.matchers } - if len(ps) == 0 { + if fetchPostings { var err error ps, pendingMatchers, err = indexr.ExpandedPostings(ctx, matchers, stats) if err != nil { return nil, errors.Wrap(err, "expanded matching postings") } if reuse != nil { - reuse.put(ps, pendingMatchers) + reuse.set(ps, pendingMatchers) } } - returnPs := make([]storage.SeriesRef, len(ps)) - copy(returnPs, ps) - var iterator seriesChunkRefsSetIterator iterator = newLoadingSeriesChunkRefsSetIterator( ctx, @@ -803,10 +803,11 @@ func openBlockSeriesChunkRefsSetsIterator( type reusedPostingsAndMatchers struct { ps []storage.SeriesRef matchers []*labels.Matcher + filled bool } -func (p *reusedPostingsAndMatchers) put(ps []storage.SeriesRef, matchers []*labels.Matcher) { - if len(p.ps) > 0 { +func (p *reusedPostingsAndMatchers) set(ps []storage.SeriesRef, matchers []*labels.Matcher) { + if p.filled { // We already have something here. return } @@ -814,6 +815,11 @@ func (p *reusedPostingsAndMatchers) put(ps []storage.SeriesRef, matchers []*labe p.ps = make([]storage.SeriesRef, len(ps)) copy(p.ps, ps) p.matchers = matchers + p.filled = true +} + +func (p *reusedPostingsAndMatchers) isSet() bool { + return p.filled } // seriesStreamingFetchRefsDurationIterator tracks the time spent loading series and chunk refs. diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 8227d3a538e..9aaa7405b7f 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -2564,6 +2564,46 @@ func TestPostingsSetsIterator(t *testing.T) { } } +func TestReusedPostingsAndMatchers(t *testing.T) { + postingsList := [][]storage.SeriesRef{ + nil, + {}, + {1, 2, 3}, + } + matchersList := [][]*labels.Matcher{ + nil, + {}, + {labels.MustNewMatcher(labels.MatchEqual, "a", "b")}, + } + + for _, firstPostings := range postingsList { + for _, firstMatchers := range matchersList { + for _, secondPostings := range postingsList { + for _, secondMatchers := range matchersList { + r := reusedPostingsAndMatchers{} + require.False(t, r.isSet()) + + verify := func() { + r.set(firstPostings, firstMatchers) + require.True(t, r.isSet()) + if firstPostings == nil { + require.Equal(t, []storage.SeriesRef{}, r.ps) + } else { + require.Equal(t, firstPostings, r.ps) + } + require.Equal(t, firstMatchers, r.matchers) + } + verify() + + // This should not overwrite the first set. + r.set(secondPostings, secondMatchers) + verify() + } + } + } + } +} + type mockSeriesHasher struct { cached map[storage.SeriesRef]uint64 hashes map[string]uint64 From 3002e8381ca6c7a4682f19ceba99b144321f4d81 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Wed, 5 Jul 2023 20:57:36 +0530 Subject: [PATCH 70/75] Fix review comments part 2 Signed-off-by: Ganesh Vernekar --- pkg/querier/block_streaming.go | 168 +++++++++++ pkg/querier/block_streaming_test.go | 307 ++++++++++++++++++++ pkg/querier/blocks_store_queryable.go | 6 +- pkg/querier/streaming_readers.go | 180 ------------ pkg/querier/streaming_readers_test.go | 319 --------------------- pkg/storegateway/bucket.go | 7 +- pkg/storegateway/bucket_e2e_test.go | 25 +- pkg/storegateway/chunkscache/cache.go | 45 --- pkg/storegateway/chunkscache/cache_test.go | 48 +++- pkg/storegateway/series_chunks.go | 2 +- pkg/storegateway/series_refs.go | 8 +- pkg/storegateway/series_refs_test.go | 88 +++--- pkg/storegateway/stats.go | 2 +- 13 files changed, 581 insertions(+), 624 deletions(-) delete mode 100644 pkg/querier/streaming_readers.go delete mode 100644 pkg/querier/streaming_readers_test.go diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index ed53e9e3970..ab8a8d339ce 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -3,16 +3,24 @@ package querier import ( + "fmt" + "io" "sort" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "github.com/pkg/errors" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storage/series" + "github.com/grafana/mimir/pkg/storegateway/storegatewaypb" "github.com/grafana/mimir/pkg/storegateway/storepb" + "github.com/grafana/mimir/pkg/util/limiter" + "github.com/grafana/mimir/pkg/util/validation" ) // Implementation of storage.SeriesSet, based on individual responses from store client. @@ -110,3 +118,163 @@ func (bqs *blockStreamingQuerierSeries) Iterator(reuse chunkenc.Iterator) chunke return it } + +// storeGatewayStreamReader is responsible for managing the streaming of chunks from a storegateway and buffering +// chunks in memory until they are consumed by the PromQL engine. +type storeGatewayStreamReader struct { + client storegatewaypb.StoreGateway_SeriesClient + expectedSeriesCount int + queryLimiter *limiter.QueryLimiter + stats *stats.Stats + log log.Logger + + seriesChunksChan chan *storepb.StreamingChunksBatch + chunksBatch []*storepb.StreamingChunks + errorChan chan error +} + +func newStoreGatewayStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *storeGatewayStreamReader { + return &storeGatewayStreamReader{ + client: client, + expectedSeriesCount: expectedSeriesCount, + queryLimiter: queryLimiter, + stats: stats, + log: log, + seriesChunksChan: make(chan *storepb.StreamingChunksBatch, 1), + // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. + errorChan: make(chan error, 1), + } +} + +// Close cleans up all resources associated with this storeGatewayStreamReader. +// This method should only be called if StartBuffering is not called. +func (s *storeGatewayStreamReader) Close() { + if err := s.client.CloseSend(); err != nil { + level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) + } +} + +// StartBuffering begins streaming series' chunks from the storegateway associated with +// this storeGatewayStreamReader. Once all series have been consumed with GetChunks, all resources +// associated with this storeGatewayStreamReader are cleaned up. +// If an error occurs while streaming, a subsequent call to GetChunks will return an error. +// To cancel buffering, cancel the context associated with this storeGatewayStreamReader's storegatewaypb.StoreGateway_SeriesClient. +func (s *storeGatewayStreamReader) StartBuffering() { + ctxDone := s.client.Context().Done() + + go func() { + defer func() { + s.Close() + close(s.seriesChunksChan) + close(s.errorChan) + }() + + totalSeries := 0 + + for { + msg, err := s.client.Recv() + if err != nil { + if errors.Is(err, io.EOF) { + if totalSeries < s.expectedSeriesCount { + s.errorChan <- fmt.Errorf("expected to receive %v series, but got EOF after receiving %v series", s.expectedSeriesCount, totalSeries) + } + } else { + s.errorChan <- err + } + + return + } + + c := msg.GetStreamingChunks() + if c == nil { + s.errorChan <- fmt.Errorf("expected to receive StreamingSeriesChunks, but got something else") + return + } + + if len(c.Series) == 0 { + continue + } + + totalSeries += len(c.Series) + if totalSeries > s.expectedSeriesCount { + s.errorChan <- fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries) + return + } + + chunkBytes := 0 + numChunks := 0 + for _, s := range c.Series { + numChunks += len(s.Chunks) + for _, ch := range s.Chunks { + chunkBytes += ch.Size() + } + } + + if err := s.queryLimiter.AddChunks(numChunks); err != nil { + s.errorChan <- validation.LimitError(err.Error()) + return + } + if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { + s.errorChan <- validation.LimitError(err.Error()) + return + } + + s.stats.AddFetchedChunks(uint64(numChunks)) + s.stats.AddFetchedChunkBytes(uint64(chunkBytes)) + + select { + case <-ctxDone: + // Why do we abort if the context is done? + // We want to make sure that this goroutine is never leaked. + // This goroutine could be leaked if nothing is reading from the buffer, but this method is still trying to send + // more series to a full buffer: it would block forever. + // So, here, we try to send the series to the buffer if we can, but if the context is cancelled, then we give up. + // This only works correctly if the context is cancelled when the query request is complete or cancelled, + // which is true at the time of writing. + s.errorChan <- s.client.Context().Err() + return + case s.seriesChunksChan <- c: + // Batch enqueued successfully, nothing else to do for this batch. + } + } + }() +} + +// GetChunks returns the chunks for the series with index seriesIndex. +// This method must be called with monotonically increasing values of seriesIndex. +func (s *storeGatewayStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { + if len(s.chunksBatch) == 0 { + chks, channelOpen := <-s.seriesChunksChan + + if !channelOpen { + // If there's an error, report it. + select { + case err, haveError := <-s.errorChan: + if haveError { + if _, ok := err.(validation.LimitError); ok { + return nil, err + } + return nil, errors.Wrapf(err, "attempted to read series at index %v from stream, but the stream has failed", seriesIndex) + } + default: + } + + return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has already been exhausted", seriesIndex) + } + + s.chunksBatch = chks.Series + } + + chks := s.chunksBatch[0] + if len(s.chunksBatch) > 1 { + s.chunksBatch = s.chunksBatch[1:] + } else { + s.chunksBatch = nil + } + + if chks.SeriesIndex != seriesIndex { + return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has series with index %v", seriesIndex, chks.SeriesIndex) + } + + return chks.Chunks, nil +} diff --git a/pkg/querier/block_streaming_test.go b/pkg/querier/block_streaming_test.go index 2e47823e1f2..1a87f8a220b 100644 --- a/pkg/querier/block_streaming_test.go +++ b/pkg/querier/block_streaming_test.go @@ -3,16 +3,27 @@ package querier import ( + "context" + "errors" "fmt" + "io" "math" "testing" + "time" + "github.com/go-kit/log" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/model/labels" "github.com/prometheus/prometheus/tsdb/chunkenc" "github.com/stretchr/testify/require" + "go.uber.org/atomic" + "google.golang.org/grpc/metadata" "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/querier/stats" "github.com/grafana/mimir/pkg/storegateway/storepb" + "github.com/grafana/mimir/pkg/util/limiter" + "github.com/grafana/mimir/pkg/util/test" ) func TestBlockStreamingQuerierSeriesSet(t *testing.T) { @@ -238,3 +249,299 @@ func (m *mockChunkStreamer) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, Raw: &storepb.Chunk{Data: chk.Bytes()}, }}, nil } + +func TestStoreGatewayStreamReader_HappyPaths(t *testing.T) { + series0 := []storepb.AggrChunk{createChunk(t, 1000, 1)} + series1 := []storepb.AggrChunk{createChunk(t, 1000, 2)} + series2 := []storepb.AggrChunk{createChunk(t, 1000, 3)} + series3 := []storepb.AggrChunk{createChunk(t, 1000, 4)} + series4 := []storepb.AggrChunk{createChunk(t, 1000, 5)} + + testCases := map[string]struct { + batches []storepb.StreamingChunksBatch + }{ + "single series per batch": { + batches: []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: series0}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 1, Chunks: series1}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 2, Chunks: series2}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 3, Chunks: series3}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 4, Chunks: series4}}}, + }, + }, + "multiple series per batch": { + batches: []storepb.StreamingChunksBatch{ + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 0, Chunks: series0}, + {SeriesIndex: 1, Chunks: series1}, + {SeriesIndex: 2, Chunks: series2}, + }, + }, + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 3, Chunks: series3}, + {SeriesIndex: 4, Chunks: series4}, + }, + }, + }, + }, + "empty batches": { + batches: []storepb.StreamingChunksBatch{ + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 0, Chunks: series0}, + {SeriesIndex: 1, Chunks: series1}, + {SeriesIndex: 2, Chunks: series2}, + }, + }, + {}, + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 3, Chunks: series3}, + {SeriesIndex: 4, Chunks: series4}, + }, + }, + {}, + }, + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: testCase.batches} + reader := newStoreGatewayStreamReader(mockClient, 5, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + for i, expected := range [][]storepb.AggrChunk{series0, series1, series2, series3, series4} { + actual, err := reader.GetChunks(uint64(i)) + require.NoError(t, err) + require.Equalf(t, expected, actual, "received unexpected chunk for series index %v", i) + } + + require.Eventually(t, func() bool { + return mockClient.closed.Load() + }, time.Second, 10*time.Millisecond) + }) + } +} + +func TestStoreGatewayStreamReader_AbortsWhenContextCancelled(t *testing.T) { + // Ensure that the buffering goroutine is not leaked after context cancellation. + test.VerifyNoLeak(t) + + // Create multiple batches to ensure that the buffering goroutine becomes blocked waiting to send further chunks to GetChunks(). + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 1, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 4.56)}}}}, + {Series: []*storepb.StreamingChunks{{SeriesIndex: 2, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 7.89)}}}}, + } + + ctx, cancel := context.WithCancel(context.Background()) + mockClient := &mockStoreGatewayQueryStreamClient{ctx: ctx, batches: batches} + + reader := newStoreGatewayStreamReader(mockClient, 3, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + cancel() + reader.StartBuffering() + + for i := 0; i < 3; i++ { + _, err := reader.GetChunks(uint64(i)) + + if errors.Is(err, context.Canceled) { + break + } + + require.NoError(t, err) + + if i == 2 { + require.Fail(t, "expected GetChunks to report context cancellation error before reaching end of stream") + } + } + + require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after context cancelled") +} + +func TestStoreGatewayStreamReader_ReadingSeriesOutOfOrder(t *testing.T) { + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := newStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(1) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has series with index 0") +} + +func TestStoreGatewayStreamReader_ReadingMoreSeriesThanAvailable(t *testing.T) { + firstSeries := []storepb.AggrChunk{createChunk(t, 1000, 1.23)} + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: firstSeries}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := newStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(0) + require.NoError(t, err) + require.Equal(t, s, firstSeries) + + s, err = reader.GetChunks(1) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has already been exhausted") +} + +func TestStoreGatewayStreamReader_ReceivedFewerSeriesThanExpected(t *testing.T) { + firstSeries := []storepb.AggrChunk{createChunk(t, 1000, 1.23)} + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: firstSeries}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := newStoreGatewayStreamReader(mockClient, 3, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(0) + require.NoError(t, err) + require.Equal(t, s, firstSeries) + + s, err = reader.GetChunks(1) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has failed: expected to receive 3 series, but got EOF after receiving 1 series") + + require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after failure") +} + +func TestStoreGatewayStreamReader_ReceivedMoreSeriesThanExpected(t *testing.T) { + batches := []storepb.StreamingChunksBatch{ + { + Series: []*storepb.StreamingChunks{ + {SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, + {SeriesIndex: 1, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, + {SeriesIndex: 2, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, + }, + }, + } + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + reader := newStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + s, err := reader.GetChunks(0) + require.Nil(t, s) + require.EqualError(t, err, "attempted to read series at index 0 from stream, but the stream has failed: expected to receive only 1 series, but received at least 3 series") + + require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after receiving more series than expected") +} + +func TestStoreGatewayStreamReader_ChunksLimits(t *testing.T) { + testCases := map[string]struct { + maxChunks int + maxChunkBytes int + expectedError string + }{ + "query under both limits": { + maxChunks: 4, + maxChunkBytes: 200, + expectedError: "", + }, + "query selects too many chunks": { + maxChunks: 2, + maxChunkBytes: 200, + expectedError: "the query exceeded the maximum number of chunks (limit: 2 chunks) (err-mimir-max-chunks-per-query). Consider reducing the time range and/or number of series selected by the query. One way to reduce the number of selected series is to add more label matchers to the query. Otherwise, to adjust the related per-tenant limit, configure -querier.max-fetched-chunks-per-query, or contact your service administrator.", + }, + "query selects too many chunk bytes": { + maxChunks: 4, + maxChunkBytes: 50, + expectedError: "the query exceeded the aggregated chunks size limit (limit: 50 bytes) (err-mimir-max-chunks-bytes-per-query). Consider reducing the time range and/or number of series selected by the query. One way to reduce the number of selected series is to add more label matchers to the query. Otherwise, to adjust the related per-tenant limit, configure -querier.max-fetched-chunk-bytes-per-query, or contact your service administrator.", + }, + } + + for name, testCase := range testCases { + t.Run(name, func(t *testing.T) { + batches := []storepb.StreamingChunksBatch{ + {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{ + createChunk(t, 1000, 1.23), + createChunk(t, 1100, 1.23), + createChunk(t, 1200, 1.23), + }}}}, + } + + mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} + queryMetrics := stats.NewQueryMetrics(prometheus.NewPedanticRegistry()) + reader := newStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, testCase.maxChunkBytes, testCase.maxChunks, queryMetrics), &stats.Stats{}, log.NewNopLogger()) + reader.StartBuffering() + + _, err := reader.GetChunks(0) + + if testCase.expectedError == "" { + require.NoError(t, err) + } else { + require.EqualError(t, err, testCase.expectedError) + } + + require.Eventually(t, mockClient.closed.Load, time.Second, 10*time.Millisecond, "expected gRPC client to be closed") + }) + } +} + +func createChunk(t *testing.T, time int64, value float64) storepb.AggrChunk { + promChunk := chunkenc.NewXORChunk() + app, err := promChunk.Appender() + require.NoError(t, err) + + app.Append(time, value) + + return storepb.AggrChunk{ + MinTime: time, + MaxTime: time, + Raw: &storepb.Chunk{ + Type: storepb.Chunk_XOR, + Data: promChunk.Bytes(), + }, + } +} + +type mockStoreGatewayQueryStreamClient struct { + ctx context.Context + batches []storepb.StreamingChunksBatch + closed atomic.Bool +} + +func (m *mockStoreGatewayQueryStreamClient) Recv() (*storepb.SeriesResponse, error) { + if len(m.batches) == 0 { + return nil, io.EOF + } + + batch := m.batches[0] + m.batches = m.batches[1:] + + return storepb.NewStreamingChunksResponse(&batch), nil +} + +func (m *mockStoreGatewayQueryStreamClient) Header() (metadata.MD, error) { + panic("not supported on mock") +} + +func (m *mockStoreGatewayQueryStreamClient) Trailer() metadata.MD { + panic("not supported on mock") +} + +func (m *mockStoreGatewayQueryStreamClient) CloseSend() error { + m.closed.Store(true) + return nil +} + +func (m *mockStoreGatewayQueryStreamClient) Context() context.Context { + return m.ctx +} + +func (m *mockStoreGatewayQueryStreamClient) SendMsg(interface{}) error { + panic("not supported on mock") +} + +func (m *mockStoreGatewayQueryStreamClient) RecvMsg(interface{}) error { + panic("not supported on mock") +} diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 609ba0145e8..99dee4b2891 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -708,7 +708,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor spanLog = spanlogger.FromContext(ctx, q.logger) queryLimiter = limiter.QueryLimiterFromContextWithFallback(ctx) reqStats = stats.FromContext(ctx) - streamReaders []*StoreGatewayStreamReader + streamReaders []*storeGatewayStreamReader streams []storegatewaypb.StoreGateway_SeriesClient ) @@ -831,7 +831,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } reqStats.AddFetchedIndexBytes(indexBytesFetched) - var streamReader *StoreGatewayStreamReader + var streamReader *storeGatewayStreamReader if len(mySeries) > 0 { chunksFetched, chunkBytes := countChunksAndBytes(mySeries...) @@ -850,7 +850,7 @@ func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *stor } else if len(myStreamingSeries) > 0 { // FetchedChunks and FetchedChunkBytes are added by the SeriesChunksStreamReader. reqStats.AddFetchedSeries(uint64(len(myStreamingSeries))) - streamReader = NewStoreGatewayStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) + streamReader = newStoreGatewayStreamReader(stream, len(myStreamingSeries), queryLimiter, reqStats, q.logger) level.Debug(spanLog).Log("msg", "received streaming series from store-gateway", "instance", c.RemoteAddress(), "fetched series", len(myStreamingSeries), diff --git a/pkg/querier/streaming_readers.go b/pkg/querier/streaming_readers.go deleted file mode 100644 index 11e08596f5d..00000000000 --- a/pkg/querier/streaming_readers.go +++ /dev/null @@ -1,180 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package querier - -import ( - "fmt" - "io" - - "github.com/go-kit/log" - "github.com/go-kit/log/level" - "github.com/pkg/errors" - - "github.com/grafana/mimir/pkg/querier/stats" - "github.com/grafana/mimir/pkg/storegateway/storegatewaypb" - "github.com/grafana/mimir/pkg/storegateway/storepb" - "github.com/grafana/mimir/pkg/util/limiter" - "github.com/grafana/mimir/pkg/util/validation" -) - -// The code in this file is used by the queriers to read the streaming chunks from the storegateway. - -// StoreGatewayStreamReader is responsible for managing the streaming of chunks from a storegateway and buffering -// chunks in memory until they are consumed by the PromQL engine. -type StoreGatewayStreamReader struct { - client storegatewaypb.StoreGateway_SeriesClient - expectedSeriesCount int - queryLimiter *limiter.QueryLimiter - stats *stats.Stats - log log.Logger - - seriesChunksChan chan *storepb.StreamingChunksBatch - chunksBatch []*storepb.StreamingChunks - errorChan chan error -} - -func NewStoreGatewayStreamReader(client storegatewaypb.StoreGateway_SeriesClient, expectedSeriesCount int, queryLimiter *limiter.QueryLimiter, stats *stats.Stats, log log.Logger) *StoreGatewayStreamReader { - return &StoreGatewayStreamReader{ - client: client, - expectedSeriesCount: expectedSeriesCount, - queryLimiter: queryLimiter, - stats: stats, - log: log, - seriesChunksChan: make(chan *storepb.StreamingChunksBatch, 1), - // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. - errorChan: make(chan error, 1), - } -} - -// Close cleans up all resources associated with this StoreGatewayStreamReader. -// This method should only be called if StartBuffering is not called. -func (s *StoreGatewayStreamReader) Close() { - if err := s.client.CloseSend(); err != nil { - level.Warn(s.log).Log("msg", "closing storegateway client stream failed", "err", err) - } -} - -// StartBuffering begins streaming series' chunks from the storegateway associated with -// this StoreGatewayStreamReader. Once all series have been consumed with GetChunks, all resources -// associated with this StoreGatewayStreamReader are cleaned up. -// If an error occurs while streaming, a subsequent call to GetChunks will return an error. -// To cancel buffering, cancel the context associated with this StoreGatewayStreamReader's storegatewaypb.StoreGateway_SeriesClient. -func (s *StoreGatewayStreamReader) StartBuffering() { - ctxDone := s.client.Context().Done() - - go func() { - defer func() { - s.Close() - close(s.seriesChunksChan) - close(s.errorChan) - }() - - totalSeries := 0 - - for { - msg, err := s.client.Recv() - if err != nil { - if errors.Is(err, io.EOF) { - if totalSeries < s.expectedSeriesCount { - s.errorChan <- fmt.Errorf("expected to receive %v series, but got EOF after receiving %v series", s.expectedSeriesCount, totalSeries) - } - } else { - s.errorChan <- err - } - - return - } - - c := msg.GetStreamingChunks() - if c == nil { - s.errorChan <- fmt.Errorf("expected to receive StreamingSeriesChunks, but got something else") - return - } - - if len(c.Series) == 0 { - continue - } - - totalSeries += len(c.Series) - if totalSeries > s.expectedSeriesCount { - s.errorChan <- fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries) - return - } - - chunkBytes := 0 - numChunks := 0 - for _, s := range c.Series { - numChunks += len(s.Chunks) - for _, ch := range s.Chunks { - chunkBytes += ch.Size() - } - } - - if err := s.queryLimiter.AddChunks(numChunks); err != nil { - s.errorChan <- validation.LimitError(err.Error()) - return - } - if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { - s.errorChan <- validation.LimitError(err.Error()) - return - } - - s.stats.AddFetchedChunks(uint64(numChunks)) - s.stats.AddFetchedChunkBytes(uint64(chunkBytes)) - - select { - case <-ctxDone: - // Why do we abort if the context is done? - // We want to make sure that this goroutine is never leaked. - // This goroutine could be leaked if nothing is reading from the buffer, but this method is still trying to send - // more series to a full buffer: it would block forever. - // So, here, we try to send the series to the buffer if we can, but if the context is cancelled, then we give up. - // This only works correctly if the context is cancelled when the query request is complete or cancelled, - // which is true at the time of writing. - s.errorChan <- s.client.Context().Err() - return - case s.seriesChunksChan <- c: - // Batch enqueued successfully, nothing else to do for this batch. - } - } - }() -} - -// GetChunks returns the chunks for the series with index seriesIndex. -// This method must be called with monotonically increasing values of seriesIndex. -func (s *StoreGatewayStreamReader) GetChunks(seriesIndex uint64) ([]storepb.AggrChunk, error) { - if len(s.chunksBatch) == 0 { - chks, channelOpen := <-s.seriesChunksChan - - if !channelOpen { - // If there's an error, report it. - select { - case err, haveError := <-s.errorChan: - if haveError { - if _, ok := err.(validation.LimitError); ok { - return nil, err - } - return nil, errors.Wrapf(err, "attempted to read series at index %v from stream, but the stream has failed", seriesIndex) - } - default: - } - - return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has already been exhausted", seriesIndex) - } - - s.chunksBatch = chks.Series - } - - chks := s.chunksBatch[0] - if len(s.chunksBatch) > 1 { - s.chunksBatch = s.chunksBatch[1:] - } else { - s.chunksBatch = nil - } - - if chks.SeriesIndex != seriesIndex { - return nil, fmt.Errorf("attempted to read series at index %v from stream, but the stream has series with index %v", seriesIndex, chks.SeriesIndex) - } - - return chks.Chunks, nil -} diff --git a/pkg/querier/streaming_readers_test.go b/pkg/querier/streaming_readers_test.go deleted file mode 100644 index 8fd7aa67598..00000000000 --- a/pkg/querier/streaming_readers_test.go +++ /dev/null @@ -1,319 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only - -package querier - -import ( - "context" - "errors" - "io" - "testing" - "time" - - "github.com/go-kit/log" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/prometheus/tsdb/chunkenc" - "github.com/stretchr/testify/require" - "go.uber.org/atomic" - "google.golang.org/grpc/metadata" - - "github.com/grafana/mimir/pkg/querier/stats" - "github.com/grafana/mimir/pkg/storegateway/storepb" - "github.com/grafana/mimir/pkg/util/limiter" - "github.com/grafana/mimir/pkg/util/test" -) - -func TestStoreGatewayStreamReader_HappyPaths(t *testing.T) { - series0 := []storepb.AggrChunk{createChunk(t, 1000, 1)} - series1 := []storepb.AggrChunk{createChunk(t, 1000, 2)} - series2 := []storepb.AggrChunk{createChunk(t, 1000, 3)} - series3 := []storepb.AggrChunk{createChunk(t, 1000, 4)} - series4 := []storepb.AggrChunk{createChunk(t, 1000, 5)} - - testCases := map[string]struct { - batches []storepb.StreamingChunksBatch - }{ - "single series per batch": { - batches: []storepb.StreamingChunksBatch{ - {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: series0}}}, - {Series: []*storepb.StreamingChunks{{SeriesIndex: 1, Chunks: series1}}}, - {Series: []*storepb.StreamingChunks{{SeriesIndex: 2, Chunks: series2}}}, - {Series: []*storepb.StreamingChunks{{SeriesIndex: 3, Chunks: series3}}}, - {Series: []*storepb.StreamingChunks{{SeriesIndex: 4, Chunks: series4}}}, - }, - }, - "multiple series per batch": { - batches: []storepb.StreamingChunksBatch{ - { - Series: []*storepb.StreamingChunks{ - {SeriesIndex: 0, Chunks: series0}, - {SeriesIndex: 1, Chunks: series1}, - {SeriesIndex: 2, Chunks: series2}, - }, - }, - { - Series: []*storepb.StreamingChunks{ - {SeriesIndex: 3, Chunks: series3}, - {SeriesIndex: 4, Chunks: series4}, - }, - }, - }, - }, - "empty batches": { - batches: []storepb.StreamingChunksBatch{ - { - Series: []*storepb.StreamingChunks{ - {SeriesIndex: 0, Chunks: series0}, - {SeriesIndex: 1, Chunks: series1}, - {SeriesIndex: 2, Chunks: series2}, - }, - }, - {}, - { - Series: []*storepb.StreamingChunks{ - {SeriesIndex: 3, Chunks: series3}, - {SeriesIndex: 4, Chunks: series4}, - }, - }, - {}, - }, - }, - } - - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: testCase.batches} - reader := NewStoreGatewayStreamReader(mockClient, 5, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) - reader.StartBuffering() - - for i, expected := range [][]storepb.AggrChunk{series0, series1, series2, series3, series4} { - actual, err := reader.GetChunks(uint64(i)) - require.NoError(t, err) - require.Equalf(t, expected, actual, "received unexpected chunk for series index %v", i) - } - - require.Eventually(t, func() bool { - return mockClient.closed.Load() - }, time.Second, 10*time.Millisecond) - }) - } -} - -func TestStoreGatewayStreamReader_AbortsWhenContextCancelled(t *testing.T) { - // Ensure that the buffering goroutine is not leaked after context cancellation. - test.VerifyNoLeak(t) - - // Create multiple batches to ensure that the buffering goroutine becomes blocked waiting to send further chunks to GetChunks(). - batches := []storepb.StreamingChunksBatch{ - {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}}}, - {Series: []*storepb.StreamingChunks{{SeriesIndex: 1, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 4.56)}}}}, - {Series: []*storepb.StreamingChunks{{SeriesIndex: 2, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 7.89)}}}}, - } - - ctx, cancel := context.WithCancel(context.Background()) - mockClient := &mockStoreGatewayQueryStreamClient{ctx: ctx, batches: batches} - - reader := NewStoreGatewayStreamReader(mockClient, 3, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) - cancel() - reader.StartBuffering() - - for i := 0; i < 3; i++ { - _, err := reader.GetChunks(uint64(i)) - - if errors.Is(err, context.Canceled) { - break - } - - require.NoError(t, err) - - if i == 2 { - require.Fail(t, "expected GetChunks to report context cancellation error before reaching end of stream") - } - } - - require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after context cancelled") -} - -func TestStoreGatewayStreamReader_ReadingSeriesOutOfOrder(t *testing.T) { - batches := []storepb.StreamingChunksBatch{ - {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}}}, - } - - mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} - reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) - reader.StartBuffering() - - s, err := reader.GetChunks(1) - require.Nil(t, s) - require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has series with index 0") -} - -func TestStoreGatewayStreamReader_ReadingMoreSeriesThanAvailable(t *testing.T) { - firstSeries := []storepb.AggrChunk{createChunk(t, 1000, 1.23)} - batches := []storepb.StreamingChunksBatch{ - {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: firstSeries}}}, - } - - mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} - reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) - reader.StartBuffering() - - s, err := reader.GetChunks(0) - require.NoError(t, err) - require.Equal(t, s, firstSeries) - - s, err = reader.GetChunks(1) - require.Nil(t, s) - require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has already been exhausted") -} - -func TestStoreGatewayStreamReader_ReceivedFewerSeriesThanExpected(t *testing.T) { - firstSeries := []storepb.AggrChunk{createChunk(t, 1000, 1.23)} - batches := []storepb.StreamingChunksBatch{ - {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: firstSeries}}}, - } - - mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} - reader := NewStoreGatewayStreamReader(mockClient, 3, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) - reader.StartBuffering() - - s, err := reader.GetChunks(0) - require.NoError(t, err) - require.Equal(t, s, firstSeries) - - s, err = reader.GetChunks(1) - require.Nil(t, s) - require.EqualError(t, err, "attempted to read series at index 1 from stream, but the stream has failed: expected to receive 3 series, but got EOF after receiving 1 series") - - require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after failure") -} - -func TestStoreGatewayStreamReader_ReceivedMoreSeriesThanExpected(t *testing.T) { - batches := []storepb.StreamingChunksBatch{ - { - Series: []*storepb.StreamingChunks{ - {SeriesIndex: 0, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, - {SeriesIndex: 1, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, - {SeriesIndex: 2, Chunks: []storepb.AggrChunk{createChunk(t, 1000, 1.23)}}, - }, - }, - } - mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} - reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, 0, 0, nil), &stats.Stats{}, log.NewNopLogger()) - reader.StartBuffering() - - s, err := reader.GetChunks(0) - require.Nil(t, s) - require.EqualError(t, err, "attempted to read series at index 0 from stream, but the stream has failed: expected to receive only 1 series, but received at least 3 series") - - require.True(t, mockClient.closed.Load(), "expected gRPC client to be closed after receiving more series than expected") -} - -func TestStoreGatewayStreamReader_ChunksLimits(t *testing.T) { - testCases := map[string]struct { - maxChunks int - maxChunkBytes int - expectedError string - }{ - "query under both limits": { - maxChunks: 4, - maxChunkBytes: 200, - expectedError: "", - }, - "query selects too many chunks": { - maxChunks: 2, - maxChunkBytes: 200, - expectedError: "the query exceeded the maximum number of chunks (limit: 2 chunks) (err-mimir-max-chunks-per-query). Consider reducing the time range and/or number of series selected by the query. One way to reduce the number of selected series is to add more label matchers to the query. Otherwise, to adjust the related per-tenant limit, configure -querier.max-fetched-chunks-per-query, or contact your service administrator.", - }, - "query selects too many chunk bytes": { - maxChunks: 4, - maxChunkBytes: 50, - expectedError: "the query exceeded the aggregated chunks size limit (limit: 50 bytes) (err-mimir-max-chunks-bytes-per-query). Consider reducing the time range and/or number of series selected by the query. One way to reduce the number of selected series is to add more label matchers to the query. Otherwise, to adjust the related per-tenant limit, configure -querier.max-fetched-chunk-bytes-per-query, or contact your service administrator.", - }, - } - - for name, testCase := range testCases { - t.Run(name, func(t *testing.T) { - batches := []storepb.StreamingChunksBatch{ - {Series: []*storepb.StreamingChunks{{SeriesIndex: 0, Chunks: []storepb.AggrChunk{ - createChunk(t, 1000, 1.23), - createChunk(t, 1100, 1.23), - createChunk(t, 1200, 1.23), - }}}}, - } - - mockClient := &mockStoreGatewayQueryStreamClient{ctx: context.Background(), batches: batches} - queryMetrics := stats.NewQueryMetrics(prometheus.NewPedanticRegistry()) - reader := NewStoreGatewayStreamReader(mockClient, 1, limiter.NewQueryLimiter(0, testCase.maxChunkBytes, testCase.maxChunks, queryMetrics), &stats.Stats{}, log.NewNopLogger()) - reader.StartBuffering() - - _, err := reader.GetChunks(0) - - if testCase.expectedError == "" { - require.NoError(t, err) - } else { - require.EqualError(t, err, testCase.expectedError) - } - - require.Eventually(t, mockClient.closed.Load, time.Second, 10*time.Millisecond, "expected gRPC client to be closed") - }) - } -} - -func createChunk(t *testing.T, time int64, value float64) storepb.AggrChunk { - promChunk := chunkenc.NewXORChunk() - app, err := promChunk.Appender() - require.NoError(t, err) - - app.Append(time, value) - - return storepb.AggrChunk{ - MinTime: time, - MaxTime: time, - Raw: &storepb.Chunk{ - Type: storepb.Chunk_XOR, - Data: promChunk.Bytes(), - }, - } -} - -type mockStoreGatewayQueryStreamClient struct { - ctx context.Context - batches []storepb.StreamingChunksBatch - closed atomic.Bool -} - -func (m *mockStoreGatewayQueryStreamClient) Recv() (*storepb.SeriesResponse, error) { - if len(m.batches) == 0 { - return nil, io.EOF - } - - batch := m.batches[0] - m.batches = m.batches[1:] - - return storepb.NewStreamingChunksResponse(&batch), nil -} - -func (m *mockStoreGatewayQueryStreamClient) Header() (metadata.MD, error) { - panic("not supported on mock") -} - -func (m *mockStoreGatewayQueryStreamClient) Trailer() metadata.MD { - panic("not supported on mock") -} - -func (m *mockStoreGatewayQueryStreamClient) CloseSend() error { - m.closed.Store(true) - return nil -} - -func (m *mockStoreGatewayQueryStreamClient) Context() context.Context { - return m.ctx -} - -func (m *mockStoreGatewayQueryStreamClient) SendMsg(interface{}) error { - panic("not supported on mock") -} - -func (m *mockStoreGatewayQueryStreamClient) RecvMsg(interface{}) error { - panic("not supported on mock") -} diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index cb4e3152ba6..07c0c40422a 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -645,7 +645,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return err } - numSeries, err := s.sendStreamingSeriesLabelsAndStats(req, srv, stats, seriesSet, resHints) + numSeries, err := s.sendStreamingSeriesLabelsAndStats(req, srv, stats, seriesSet) if err != nil { return err } @@ -722,7 +722,6 @@ func (s *BucketStore) sendStreamingSeriesLabelsAndStats( srv storepb.Store_SeriesServer, stats *safeQueryStats, seriesSet storepb.SeriesSet, - resHints *hintspb.SeriesResponseHints, ) (numSeries int, err error) { var ( encodeDuration = time.Duration(0) @@ -1006,7 +1005,7 @@ func (s *BucketStore) nonStreamingSeriesSetForBlocks( if s.fineGrainedChunksCachingEnabled { cache = s.chunksCache } - ss := newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, it, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) + ss := newChunksPreloadingIterator(ctx, s.logger, s.userID, cache, *chunkReaders, it, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) set = newSeriesChunksSeriesSet(ss) } else { set = newSeriesSetWithoutChunks(ctx, it, stats) @@ -1067,7 +1066,7 @@ func (s *BucketStore) streamingChunksSetForBlocks( if s.fineGrainedChunksCachingEnabled { cache = s.chunksCache } - scsi := newSeriesSetWithChunks(ctx, s.logger, s.userID, cache, *chunkReaders, it, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) + scsi := newChunksPreloadingIterator(ctx, s.logger, s.userID, cache, *chunkReaders, it, s.maxSeriesPerBatch, stats, req.MinTime, req.MaxTime) return scsi, nil } diff --git a/pkg/storegateway/bucket_e2e_test.go b/pkg/storegateway/bucket_e2e_test.go index 185ec7dd732..a3b28f104ba 100644 --- a/pkg/storegateway/bucket_e2e_test.go +++ b/pkg/storegateway/bucket_e2e_test.go @@ -66,8 +66,11 @@ type storeSuite struct { logger log.Logger } +// When nonOverlappingBlocks is false, prepareTestBlocks creates 2 blocks per block range. +// When nonOverlappingBlocks is true, it shifts the 2nd block ahead by 2hrs for every block range. +// This way the first and the last blocks created have no overlapping blocks. func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt objstore.Bucket, - series []labels.Labels, extLset labels.Labels, shiftedBlocks bool) (minTime, maxTime int64) { + series []labels.Labels, extLset labels.Labels, nonOverlappingBlocks bool) (minTime, maxTime int64) { ctx := context.Background() logger := log.NewNopLogger() @@ -85,10 +88,7 @@ func prepareTestBlocks(t testing.TB, now time.Time, count int, dir string, bkt o // gets created each. This way we can easily verify we got 10 chunks per series below. id1, err := block.CreateBlock(ctx, dir, series[:4], 10, mint, maxt, extLset) assert.NoError(t, err) - if shiftedBlocks { - // This shifts the 2nd block ahead by 2hrs. This way the first and the - // last blocks created have no overlapping blocks. - // This is used to test some case with streaming series. + if nonOverlappingBlocks { mint = maxt maxt = timestamp.FromTime(now.Add(2 * time.Hour)) maxTime = maxt @@ -125,7 +125,10 @@ type prepareStoreConfig struct { chunksCache chunkscache.Cache metricsRegistry *prometheus.Registry postingsStrategy postingsSelectionStrategy - shiftedBlocks bool + // When nonOverlappingBlocks is false, prepare store creates 2 blocks per block range. + // When nonOverlappingBlocks is true, it shifts the 2nd block ahead by 2hrs for every block range. + // This way the first and the last blocks created have no overlapping blocks. + nonOverlappingBlocks bool } func (c *prepareStoreConfig) apply(opts ...prepareStoreConfigOption) *prepareStoreConfig { @@ -173,7 +176,7 @@ func withManyParts() prepareStoreConfigOption { func prepareStoreWithTestBlocks(t testing.TB, bkt objstore.Bucket, cfg *prepareStoreConfig) *storeSuite { extLset := labels.FromStrings("ext1", "value1") - minTime, maxTime := prepareTestBlocks(t, time.Now(), 3, cfg.tempDir, bkt, cfg.series, extLset, cfg.shiftedBlocks) + minTime, maxTime := prepareTestBlocks(t, time.Now(), 3, cfg.tempDir, bkt, cfg.series, extLset, cfg.nonOverlappingBlocks) s := &storeSuite{ logger: log.NewNopLogger(), @@ -527,10 +530,9 @@ func TestBucketStore_e2e(t *testing.T) { MaxSize: 2e5, }) assert.NoError(t, err) - chunksCache, err := chunkscache.NewChunksCache(s.logger, chunkscache.NewMockedCacheClient(nil), nil) assert.NoError(t, err) s.cache.SwapIndexCacheWith(indexCache) - s.cache.SwapChunksCacheWith(chunksCache) + s.cache.SwapChunksCacheWith(newInMemoryChunksCache()) testBucketStore_e2e(t, ctx, s) }) }) @@ -542,7 +544,7 @@ func TestBucketStore_e2e_StreamingEdgeCases(t *testing.T) { defer cancel() s := newSuite(func(config *prepareStoreConfig) { - config.shiftedBlocks = true + config.nonOverlappingBlocks = true }) _, maxt := s.store.TimeRange() @@ -606,10 +608,9 @@ func TestBucketStore_e2e_StreamingEdgeCases(t *testing.T) { MaxSize: 2e5, }) assert.NoError(t, err) - chunksCache, err := chunkscache.NewChunksCache(s.logger, chunkscache.NewMockedCacheClient(nil), nil) assert.NoError(t, err) s.cache.SwapIndexCacheWith(indexCache) - s.cache.SwapChunksCacheWith(chunksCache) + s.cache.SwapChunksCacheWith(newInMemoryChunksCache()) testBucketStore_e2e(t, ctx, s) }) }) diff --git a/pkg/storegateway/chunkscache/cache.go b/pkg/storegateway/chunkscache/cache.go index f539142cd40..055d0b83c03 100644 --- a/pkg/storegateway/chunkscache/cache.go +++ b/pkg/storegateway/chunkscache/cache.go @@ -145,48 +145,3 @@ func (c *ChunksCache) StoreChunks(userID string, ranges map[Range][]byte) { } c.cache.StoreAsync(rangesWithTenant, defaultTTL) } - -// NewMockedCacheClient must be used only for testing. -func NewMockedCacheClient(mockedGetMultiErr error) cache.Cache { - return &mockedCacheClient{ - cache: map[string][]byte{}, - mockedGetMultiErr: mockedGetMultiErr, - } -} - -type mockedCacheClient struct { - cache map[string][]byte - mockedGetMultiErr error -} - -func (c *mockedCacheClient) Fetch(_ context.Context, keys []string, _ ...cache.Option) map[string][]byte { - if c.mockedGetMultiErr != nil { - return nil - } - - hits := map[string][]byte{} - - for _, key := range keys { - if value, ok := c.cache[key]; ok { - hits[key] = value - } - } - - return hits -} - -func (c *mockedCacheClient) StoreAsync(data map[string][]byte, _ time.Duration) { - for key, value := range data { - c.cache[key] = value - } -} - -func (c *mockedCacheClient) Delete(_ context.Context, key string) error { - delete(c.cache, key) - - return nil -} - -func (c *mockedCacheClient) Name() string { - return "mockedCacheClient" -} diff --git a/pkg/storegateway/chunkscache/cache_test.go b/pkg/storegateway/chunkscache/cache_test.go index e1d0051f96e..0c92313b58e 100644 --- a/pkg/storegateway/chunkscache/cache_test.go +++ b/pkg/storegateway/chunkscache/cache_test.go @@ -5,8 +5,10 @@ package chunkscache import ( "context" "testing" + "time" "github.com/go-kit/log" + "github.com/grafana/dskit/cache" "github.com/oklog/ulid" "github.com/pkg/errors" prom_testutil "github.com/prometheus/client_golang/prometheus/testutil" @@ -79,7 +81,7 @@ func TestDskitChunksCache_FetchMultiChunks(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { - cacheClient := NewMockedCacheClient(testData.mockedErr) + cacheClient := newMockedCacheClient(testData.mockedErr) c, err := NewChunksCache(log.NewNopLogger(), cacheClient, nil) assert.NoError(t, err) @@ -125,3 +127,47 @@ type mockedChunks struct { r Range value []byte } + +type mockedCacheClient struct { + cache map[string][]byte + mockedGetMultiErr error +} + +func newMockedCacheClient(mockedGetMultiErr error) *mockedCacheClient { + return &mockedCacheClient{ + cache: map[string][]byte{}, + mockedGetMultiErr: mockedGetMultiErr, + } +} + +func (c *mockedCacheClient) Fetch(_ context.Context, keys []string, _ ...cache.Option) map[string][]byte { + if c.mockedGetMultiErr != nil { + return nil + } + + hits := map[string][]byte{} + + for _, key := range keys { + if value, ok := c.cache[key]; ok { + hits[key] = value + } + } + + return hits +} + +func (c *mockedCacheClient) StoreAsync(data map[string][]byte, _ time.Duration) { + for key, value := range data { + c.cache[key] = value + } +} + +func (c *mockedCacheClient) Delete(_ context.Context, key string) error { + delete(c.cache, key) + + return nil +} + +func (c *mockedCacheClient) Name() string { + return "mockedCacheClient" +} diff --git a/pkg/storegateway/series_chunks.go b/pkg/storegateway/series_chunks.go index 28b59d12ed9..47ffb1fe498 100644 --- a/pkg/storegateway/series_chunks.go +++ b/pkg/storegateway/series_chunks.go @@ -183,7 +183,7 @@ func newSeriesChunksSeriesSet(from seriesChunksSetIterator) storepb.SeriesSet { } } -func newSeriesSetWithChunks( +func newChunksPreloadingIterator( ctx context.Context, logger log.Logger, userID string, diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index f4d49504594..ddd094363e8 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -1000,11 +1000,16 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p series symbolizedSeriesChunkRefs ) series.lset, metas, err = s.loadSeries(id, loadedSeries, stats, symbolizedSet.labelsPool) + if err != nil { + return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "read series") + } + if isNoChunkRefsAndOverlapMintMaxt { overlaps := false for _, m := range metas { if m.MaxTime >= s.minTime && m.MinTime <= s.maxTime { overlaps = true + break } } if !overlaps { @@ -1012,9 +1017,6 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p } } - if err != nil { - return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "read series") - } if !s.strategy.isNoChunkRefs() { clampLastChunkLength(symbolizedSet.series, metas) series.chunksRanges = metasToRanges(partitionChunks(metas, s.chunkRangesPerSeries, minChunksPerRange), s.blockID, s.minTime, s.maxTime) diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 9aaa7405b7f..4bb2a06f2b4 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1117,13 +1117,13 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { }) type testCase struct { - blockFactory func() *bucketBlock // if nil, defaultTestBlockFactory is used - shard *sharding.ShardSelector - matchers []*labels.Matcher - seriesHasher seriesHasher - skipChunks, streamingSeries bool - minT, maxT int64 - batchSize int + blockFactory func() *bucketBlock // if nil, defaultTestBlockFactory is used + shard *sharding.ShardSelector + matchers []*labels.Matcher + seriesHasher seriesHasher + strategy seriesIteratorStrategy + minT, maxT int64 + batchSize int expectedSets []seriesChunkRefsSet } @@ -1177,11 +1177,11 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { }, }, "skips chunks": { - skipChunks: true, - minT: 0, - maxT: 40, - batchSize: 100, - matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, + strategy: noChunkRefs, + minT: 0, + maxT: 40, + batchSize: 100, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, expectedSets: []seriesChunkRefsSet{ {series: []seriesChunkRefs{ {lset: labels.FromStrings("l1", "v1")}, @@ -1251,11 +1251,11 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { }, }, "ignores mixT/maxT when skipping chunks": { - minT: 0, - maxT: 10, - skipChunks: true, - batchSize: 4, - matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, + minT: 0, + maxT: 10, + strategy: noChunkRefs, + batchSize: 4, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, expectedSets: []seriesChunkRefsSet{ {series: []seriesChunkRefs{ {lset: labels.FromStrings("l1", "v1")}, @@ -1269,7 +1269,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { blockFactory: largerTestBlockFactory, minT: 0, maxT: math.MaxInt64, - skipChunks: true, // There is still no easy way to assert on the refs of 100K chunks, so we skip them. + strategy: noChunkRefs, // There is still no easy way to assert on the refs of 100K chunks, so we skip them. batchSize: largerTestBlockSeriesCount, matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", ".*")}, expectedSets: func() []seriesChunkRefsSet { @@ -1288,7 +1288,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { blockFactory: largerTestBlockFactory, minT: 0, maxT: math.MaxInt64, - skipChunks: true, // There is still no easy way to assert on the refs of 100K chunks, so we skip them. + strategy: noChunkRefs, // There is still no easy way to assert on the refs of 100K chunks, so we skip them. batchSize: 5000, matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", ".*")}, expectedSets: func() []seriesChunkRefsSet { @@ -1309,13 +1309,12 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { return sets }(), }, - "skip chunks with streaming on 1": { - minT: 0, - maxT: 25, - batchSize: 100, - skipChunks: true, - streamingSeries: true, // mint and maxt is considered. - matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, + "skip chunks with streaming on block 1": { + minT: 0, + maxT: 25, + batchSize: 100, + strategy: noChunkRefs | overlapMintMaxt, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, expectedSets: []seriesChunkRefsSet{ {series: []seriesChunkRefs{ {lset: labels.FromStrings("l1", "v1")}, @@ -1323,13 +1322,12 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { }}, }, }, - "skip chunks with streaming on 2": { - minT: 15, - maxT: 35, - batchSize: 100, - skipChunks: true, - streamingSeries: true, // mint and maxt is considered. - matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, + "skip chunks with streaming on block 2": { + minT: 15, + maxT: 35, + batchSize: 100, + strategy: noChunkRefs | overlapMintMaxt, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-4]")}, expectedSets: []seriesChunkRefsSet{ {series: []seriesChunkRefs{ {lset: labels.FromStrings("l1", "v2")}, @@ -1357,13 +1355,6 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { if hasher == nil { hasher = cachedSeriesHasher{hashcache.NewSeriesHashCache(100).GetBlockCache("")} } - var strategy seriesIteratorStrategy - if tc.skipChunks { - strategy |= noChunkRefs - } - if tc.streamingSeries { - strategy |= overlapMintMaxt - } loadingIterator := newLoadingSeriesChunkRefsSetIterator( context.Background(), postingsIterator, @@ -1373,7 +1364,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { block.meta, tc.shard, hasher, - strategy, + tc.strategy, tc.minT, tc.maxT, "t1", @@ -1391,20 +1382,7 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { testName, tc := testName, testCase t.Run(testName, func(t *testing.T) { t.Parallel() - if tc.skipChunks { - runTest(tc) - } else { - // We test with both streaming on and off when we are fetching chunks. - for _, streaming := range []bool{true, false} { - tcCopy := tc - streaming := streaming - t.Run(fmt.Sprintf("streaming=%t", streaming), func(t *testing.T) { - t.Parallel() - tcCopy.streamingSeries = streaming - runTest(tcCopy) - }) - } - } + runTest(tc) }) } } diff --git a/pkg/storegateway/stats.go b/pkg/storegateway/stats.go index dad370d8acc..3d56211950a 100644 --- a/pkg/storegateway/stats.go +++ b/pkg/storegateway/stats.go @@ -175,7 +175,7 @@ func (s *safeQueryStats) export() *queryStats { return &copied } -// seriesAndChunksCount the value of mergedSeriesCount and mergedChunksCount fields. +// seriesAndChunksCount return the value of mergedSeriesCount and mergedChunksCount fields. func (s *safeQueryStats) seriesAndChunksCount() (seriesCount, chunksCount int) { s.unsafeStatsMx.Lock() defer s.unsafeStatsMx.Unlock() From e16bfb27f36b2c8f3a8b88739c0162bfefb8001e Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Thu, 6 Jul 2023 16:01:01 +0530 Subject: [PATCH 71/75] Fix preloading metrics Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket_test.go | 4 ++-- pkg/storegateway/series_chunks.go | 4 +++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index 2765785ca8f..078f562b928 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1435,8 +1435,8 @@ func benchBucketSeries(t test.TB, skipChunk bool, samplesPerSeries, totalSeries // Check exposed metrics. assertHistograms := map[string]bool{ "cortex_bucket_store_series_request_stage_duration_seconds": true, - "cortex_bucket_store_series_batch_preloading_load_duration_seconds": st.maxSeriesPerBatch < totalSeries || (!skipChunk && streamingBatchSize > 0), // Tracked only when a request is split in multiple batches. - "cortex_bucket_store_series_batch_preloading_wait_duration_seconds": st.maxSeriesPerBatch < totalSeries || (!skipChunk && streamingBatchSize > 0), // Tracked only when a request is split in multiple batches. + "cortex_bucket_store_series_batch_preloading_load_duration_seconds": st.maxSeriesPerBatch < totalSeries, // Tracked only when a request is split in multiple batches. + "cortex_bucket_store_series_batch_preloading_wait_duration_seconds": st.maxSeriesPerBatch < totalSeries, // Tracked only when a request is split in multiple batches. "cortex_bucket_store_series_refs_fetch_duration_seconds": true, } diff --git a/pkg/storegateway/series_chunks.go b/pkg/storegateway/series_chunks.go index 47ffb1fe498..8ab9b17f53e 100644 --- a/pkg/storegateway/series_chunks.go +++ b/pkg/storegateway/series_chunks.go @@ -307,6 +307,7 @@ func (p *preloadingSetIterator[Set]) Err() error { func newPreloadingAndStatsTrackingSetIterator[Set any](ctx context.Context, preloadedSetsCount int, iterator genericIterator[Set], stats *safeQueryStats) genericIterator[Set] { // Track the time spent loading batches (including preloading). + numBatches := 0 iterator = newNextDurationMeasuringIterator[Set](iterator, func(duration time.Duration, hasNext bool) { stats.update(func(stats *queryStats) { stats.streamingSeriesBatchLoadDuration += duration @@ -314,8 +315,9 @@ func newPreloadingAndStatsTrackingSetIterator[Set any](ctx context.Context, prel // This function is called for each Next() invocation, so we can use it to measure // into how many batches the request has been split. if hasNext { - stats.streamingSeriesBatchCount++ + numBatches++ } + stats.streamingSeriesBatchCount = numBatches }) }) From 7ebf5c0972ec669fc5390e6905fa837ea0c6926b Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Fri, 7 Jul 2023 14:09:02 +0530 Subject: [PATCH 72/75] Fix logs Signed-off-by: Ganesh Vernekar --- pkg/storegateway/bucket.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pkg/storegateway/bucket.go b/pkg/storegateway/bucket.go index 07c0c40422a..5b539e1d642 100644 --- a/pkg/storegateway/bucket.go +++ b/pkg/storegateway/bucket.go @@ -651,10 +651,6 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } level.Debug(spanLogger).Log( "msg", "sent streaming series", - "request min time", time.UnixMilli(req.MinTime).UTC().Format(time.RFC3339Nano), - "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), - "request matchers", storepb.PromMatchersToString(matchers...), - "request shard selector", maybeNilShard(shardSelector).LabelValue(), "num_series", numSeries, "duration", time.Since(seriesLoadStart), ) @@ -697,10 +693,6 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie } level.Debug(spanLogger).Log( "msg", debugMessage, - "request min time", time.UnixMilli(req.MinTime).UTC().Format(time.RFC3339Nano), - "request max time", time.UnixMilli(req.MaxTime).UTC().Format(time.RFC3339Nano), - "request matchers", storepb.PromMatchersToString(matchers...), - "request shard selector", maybeNilShard(shardSelector).LabelValue(), "num_series", numSeries, "num_chunks", numChunks, "duration", time.Since(start), From 6ed86ace60b3e8e49c8d9528c667b8e2fae8254d Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Mon, 10 Jul 2023 11:11:06 +0530 Subject: [PATCH 73/75] Fix comments Signed-off-by: Ganesh Vernekar --- CHANGELOG.md | 4 ++-- pkg/distributor/distributor.go | 2 +- pkg/distributor/distributor_test.go | 2 +- pkg/distributor/query.go | 2 +- pkg/mimir/modules.go | 2 +- pkg/querier/block_streaming.go | 34 ++++++++++++++++++--------- pkg/querier/blocks_store_queryable.go | 10 ++++---- pkg/querier/querier.go | 8 +++---- 8 files changed, 38 insertions(+), 26 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 080f778c2f4..2c8f083c4d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,7 +7,7 @@ * [CHANGE] Store-gateway: skip verifying index header integrity upon loading. To enable verification set `blocks_storage.bucket_store.index_header.verify_on_load: true`. * [CHANGE] Querier: change the default value of the experimental `-querier.streaming-chunks-per-ingester-buffer-size` flag to 256. #5203 * [CHANGE] Querier: only initiate query requests to ingesters in the `ACTIVE` state in the ring. #5342 -* [CHANGE] Querier: Renamed `-querier.prefer-streaming-chunks` to `-querier.prefer-streaming-chunks-for-ingesters` to enable streaming chunks from ingesters to queriers. #5182 +* [CHANGE] Querier: Renamed `-querier.prefer-streaming-chunks` to `-querier.prefer-streaming-chunks-from-ingesters` to enable streaming chunks from ingesters to queriers. #5182 * [CHANGE] Querier: `-query-frontend.cache-unaligned-requests` has been moved from a global flag to a per-tenant override. #5312 * [CHANGE] Ingester: removed `cortex_ingester_shipper_dir_syncs_total` and `cortex_ingester_shipper_dir_sync_failures_total` metrics. The former metric was not much useful, and the latter was never incremented. #5396 * [FEATURE] Cardinality API: Add a new `count_method` parameter which enables counting active series #5136 @@ -43,7 +43,7 @@ * [ENHANCEMENT] Querier: add `cortex_querier_queries_rejected_total` metric that counts the number of queries rejected due to hitting a limit (eg. max series per query or max chunks per query). #5316 #5440 * [ENHANCEMENT] Querier: add experimental `-querier.minimize-ingester-requests-hedging-delay` option to initiate requests to further ingesters when request minimisation is enabled and not all initial requests have completed. #5368 * [ENHANCEMENT] Clarify docs for `-ingester.client.*` flags to make it clear that these are used by both queriers and distributors. #5375 -* [ENHANCEMENT] Querier and Store-gateway: add experimental support for streaming chunks from store-gateways to queriers while evaluating queries. This can be enabled with `-querier.prefer-streaming-chunks-from-store-gateways=true`. #5182 +* [ENHANCEMENT] Querier and store-gateway: add experimental support for streaming chunks from store-gateways to queriers while evaluating queries. This can be enabled with `-querier.prefer-streaming-chunks-from-store-gateways=true`. #5182 * [ENHANCEMENT] Querier: enforce `max-chunks-per-query` limit earlier in query processing when streaming chunks from ingesters to queriers to avoid unnecessarily consuming resources for queries that will be aborted. #5369 * [ENHANCEMENT] Ingester: added `cortex_ingester_shipper_last_successful_upload_timestamp_seconds` metric tracking the last successful TSDB block uploaded to the bucket (unix timestamp in seconds). #5396 * [BUGFIX] Ingester: Handle when previous ring state is leaving and the number of tokens has changed. #5204 diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 1c3142aaafe..9be96f18286 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -166,7 +166,7 @@ type Config struct { // This config is dynamically injected because it is defined in the querier config. ShuffleShardingLookbackPeriod time.Duration `yaml:"-"` - PreferStreamingChunks bool `yaml:"-"` + PreferStreamingChunksFromIngesters bool `yaml:"-"` StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"-"` MinimizeIngesterRequests bool `yaml:"-"` MinimiseIngesterRequestsHedgingDelay time.Duration `yaml:"-"` diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index bc6e529062c..7e09ca73f42 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -3023,7 +3023,7 @@ func prepare(t *testing.T, cfg prepConfig) ([]*Distributor, []mockIngester, []*p distributorCfg.DefaultLimits.MaxInflightPushRequestsBytes = cfg.maxInflightRequestsBytes distributorCfg.DefaultLimits.MaxIngestionRate = cfg.maxIngestionRate distributorCfg.ShuffleShardingLookbackPeriod = time.Hour - distributorCfg.PreferStreamingChunks = cfg.preferStreamingChunks + distributorCfg.PreferStreamingChunksFromIngesters = cfg.preferStreamingChunks distributorCfg.StreamingChunksPerIngesterSeriesBufferSize = 128 cfg.limits.IngestionTenantShardSize = cfg.shuffleShardSize diff --git a/pkg/distributor/query.go b/pkg/distributor/query.go index a72cccb732b..c7d70897164 100644 --- a/pkg/distributor/query.go +++ b/pkg/distributor/query.go @@ -71,7 +71,7 @@ func (d *Distributor) QueryStream(ctx context.Context, from, to model.Time, matc return err } - if d.cfg.PreferStreamingChunks { + if d.cfg.PreferStreamingChunksFromIngesters { req.StreamingChunksBatchSize = d.cfg.StreamingChunksPerIngesterSeriesBufferSize } diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index d1c10467893..ae7765410c9 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -361,7 +361,7 @@ func (t *Mimir) initDistributorService() (serv services.Service, err error) { // ruler's dependency) canJoinDistributorsRing := t.Cfg.isAnyModuleEnabled(Distributor, Write, All) - t.Cfg.Distributor.PreferStreamingChunks = t.Cfg.Querier.PreferStreamingChunksFromIngesters + t.Cfg.Distributor.PreferStreamingChunksFromIngesters = t.Cfg.Querier.PreferStreamingChunksFromIngesters t.Cfg.Distributor.StreamingChunksPerIngesterSeriesBufferSize = t.Cfg.Querier.StreamingChunksPerIngesterSeriesBufferSize t.Cfg.Distributor.MinimizeIngesterRequests = t.Cfg.Querier.MinimizeIngesterRequests t.Cfg.Distributor.MinimiseIngesterRequestsHedgingDelay = t.Cfg.Querier.MinimiseIngesterRequestsHedgingDelay diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index ab8a8d339ce..db4ed2d995a 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -20,6 +20,7 @@ import ( "github.com/grafana/mimir/pkg/storegateway/storegatewaypb" "github.com/grafana/mimir/pkg/storegateway/storepb" "github.com/grafana/mimir/pkg/util/limiter" + "github.com/grafana/mimir/pkg/util/spanlogger" "github.com/grafana/mimir/pkg/util/validation" ) @@ -141,8 +142,6 @@ func newStoreGatewayStreamReader(client storegatewaypb.StoreGateway_SeriesClient stats: stats, log: log, seriesChunksChan: make(chan *storepb.StreamingChunksBatch, 1), - // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. - errorChan: make(chan error, 1), } } @@ -160,26 +159,39 @@ func (s *storeGatewayStreamReader) Close() { // If an error occurs while streaming, a subsequent call to GetChunks will return an error. // To cancel buffering, cancel the context associated with this storeGatewayStreamReader's storegatewaypb.StoreGateway_SeriesClient. func (s *storeGatewayStreamReader) StartBuffering() { - ctxDone := s.client.Context().Done() + // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. + s.errorChan = make(chan error, 1) + ctxDone := s.client.Context().Done() go func() { + log, _ := spanlogger.NewWithLogger(s.client.Context(), s.log, "storeGatewayStreamReader.StartBuffering") + defer func() { s.Close() close(s.seriesChunksChan) close(s.errorChan) + log.Finish() }() + onError := func(err error) { + s.errorChan <- err + log.Error(err) + } + totalSeries := 0 + totalChunks := 0 for { msg, err := s.client.Recv() if err != nil { if errors.Is(err, io.EOF) { if totalSeries < s.expectedSeriesCount { - s.errorChan <- fmt.Errorf("expected to receive %v series, but got EOF after receiving %v series", s.expectedSeriesCount, totalSeries) + onError(fmt.Errorf("expected to receive %v series, but got EOF after receiving %v series", s.expectedSeriesCount, totalSeries)) + } else { + level.Debug(log).Log("msg", "finished streaming", "series", totalSeries, "chunks", totalChunks) } } else { - s.errorChan <- err + onError(err) } return @@ -187,7 +199,7 @@ func (s *storeGatewayStreamReader) StartBuffering() { c := msg.GetStreamingChunks() if c == nil { - s.errorChan <- fmt.Errorf("expected to receive StreamingSeriesChunks, but got something else") + onError(fmt.Errorf("expected to receive StreamingSeriesChunks, but got something else")) return } @@ -197,7 +209,7 @@ func (s *storeGatewayStreamReader) StartBuffering() { totalSeries += len(c.Series) if totalSeries > s.expectedSeriesCount { - s.errorChan <- fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries) + onError(fmt.Errorf("expected to receive only %v series, but received at least %v series", s.expectedSeriesCount, totalSeries)) return } @@ -209,13 +221,13 @@ func (s *storeGatewayStreamReader) StartBuffering() { chunkBytes += ch.Size() } } - + totalChunks += numChunks if err := s.queryLimiter.AddChunks(numChunks); err != nil { - s.errorChan <- validation.LimitError(err.Error()) + onError(validation.LimitError(err.Error())) return } if err := s.queryLimiter.AddChunkBytes(chunkBytes); err != nil { - s.errorChan <- validation.LimitError(err.Error()) + onError(validation.LimitError(err.Error())) return } @@ -231,7 +243,7 @@ func (s *storeGatewayStreamReader) StartBuffering() { // So, here, we try to send the series to the buffer if we can, but if the context is cancelled, then we give up. // This only works correctly if the context is cancelled when the query request is complete or cancelled, // which is true at the time of writing. - s.errorChan <- s.client.Context().Err() + onError(s.client.Context().Err()) return case s.seriesChunksChan <- c: // Batch enqueued successfully, nothing else to do for this batch. diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index 99dee4b2891..0a5b08aa883 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -264,8 +264,8 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa reg, ) - streamingBufferSize := querierCfg.StreamingChunksPerStoregatewaySeriesBufferSize - if !querierCfg.PreferStreamingChunksFromStoregateways { + streamingBufferSize := querierCfg.StreamingChunksPerStoreGatewaySeriesBufferSize + if !querierCfg.PreferStreamingChunksFromStoreGateways { streamingBufferSize = 0 } @@ -694,9 +694,9 @@ func canBlockWithCompactorShardIndexContainQueryShard(queryShardIndex, queryShar // requests to the store-gateways (e.g., if a chunk or series limit is hit) are // considered serious errors. All other errors are not returned, but they give rise to fetch retrials. // -// In case of a successful run, fetchSeriesFromStores returns a streamCloser function if it was a streaming -// call for series+chunks. If you are ending the execution of the query later without iterating through all the series -// and consuming the chunks, the streamCloser MUST be called to avoid leaking goroutines and gRPC connections. +// In case of a successful run, fetchSeriesFromStores returns a startStreamingChunks function to start streaming +// chunks for the fetched series iff it was a streaming call for series+chunks. startStreamingChunks must be called +// before iterating on the series. func (q *blocksStoreQuerier) fetchSeriesFromStores(ctx context.Context, sp *storage.SelectHints, clients map[BlocksStoreClient][]ulid.ULID, minT int64, maxT int64, convertedMatchers []storepb.LabelMatcher) (_ []storage.SeriesSet, _ []ulid.ULID, _ storage.Warnings, startStreamingChunks func(), _ error) { var ( reqCtx = grpc_metadata.AppendToOutgoingContext(ctx, storegateway.GrpcContextMetadataTenantID, q.userID) diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 2cdd78760ab..b93c352ab4a 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -52,9 +52,9 @@ type Config struct { ShuffleShardingIngestersEnabled bool `yaml:"shuffle_sharding_ingesters_enabled" category:"advanced"` PreferStreamingChunksFromIngesters bool `yaml:"prefer_streaming_chunks_from_ingesters" category:"experimental"` - PreferStreamingChunksFromStoregateways bool `yaml:"prefer_streaming_chunks_from_store_gateways" category:"experimental"` + PreferStreamingChunksFromStoreGateways bool `yaml:"prefer_streaming_chunks_from_store_gateways" category:"experimental"` StreamingChunksPerIngesterSeriesBufferSize uint64 `yaml:"streaming_chunks_per_ingester_series_buffer_size" category:"experimental"` - StreamingChunksPerStoregatewaySeriesBufferSize uint64 `yaml:"streaming_chunks_per_store_gateway_series_buffer_size" category:"experimental"` + StreamingChunksPerStoreGatewaySeriesBufferSize uint64 `yaml:"streaming_chunks_per_store_gateway_series_buffer_size" category:"experimental"` MinimizeIngesterRequests bool `yaml:"minimize_ingester_requests" category:"experimental"` MinimiseIngesterRequestsHedgingDelay time.Duration `yaml:"minimize_ingester_requests_hedging_delay" category:"experimental"` @@ -86,7 +86,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.QueryStoreAfter, queryStoreAfterFlag, 12*time.Hour, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.BoolVar(&cfg.ShuffleShardingIngestersEnabled, "querier.shuffle-sharding-ingesters-enabled", true, fmt.Sprintf("Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -%s. If this setting is false or -%s is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", validation.QueryIngestersWithinFlag, validation.QueryIngestersWithinFlag)) f.BoolVar(&cfg.PreferStreamingChunksFromIngesters, "querier.prefer-streaming-chunks-from-ingesters", false, "Request ingesters stream chunks. Ingesters will only respond with a stream of chunks if the target ingester supports this, and this preference will be ignored by ingesters that do not support this.") - f.BoolVar(&cfg.PreferStreamingChunksFromStoregateways, "querier.prefer-streaming-chunks-from-store-gateways", false, "Request store-gateways stream chunks. Store-gateways will only respond with a stream of chunks if the target store-gateway supports this, and this preference will be ignored by store-gateways that do not support this.") + f.BoolVar(&cfg.PreferStreamingChunksFromStoreGateways, "querier.prefer-streaming-chunks-from-store-gateways", false, "Request store-gateways stream chunks. Store-gateways will only respond with a stream of chunks if the target store-gateway supports this, and this preference will be ignored by store-gateways that do not support this.") const minimiseIngesterRequestsFlagName = "querier.minimize-ingester-requests" f.BoolVar(&cfg.MinimizeIngesterRequests, minimiseIngesterRequestsFlagName, false, "If true, when querying ingesters, only the minimum required ingesters required to reach quorum will be queried initially, with other ingesters queried only if needed due to failures from the initial set of ingesters. Enabling this option reduces resource consumption for the happy path at the cost of increased latency for the unhappy path.") @@ -95,7 +95,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // Why 256 series / ingester/store-gateway? // Based on our testing, 256 series / ingester was a good balance between memory consumption and the CPU overhead of managing a batch of series. f.Uint64Var(&cfg.StreamingChunksPerIngesterSeriesBufferSize, "querier.streaming-chunks-per-ingester-buffer-size", 256, "Number of series to buffer per ingester when streaming chunks from ingesters.") - f.Uint64Var(&cfg.StreamingChunksPerStoregatewaySeriesBufferSize, "querier.streaming-chunks-per-store-gateway-buffer-size", 256, "Number of series to buffer per store-gateway when streaming chunks from store-gateways.") + f.Uint64Var(&cfg.StreamingChunksPerStoreGatewaySeriesBufferSize, "querier.streaming-chunks-per-store-gateway-buffer-size", 256, "Number of series to buffer per store-gateway when streaming chunks from store-gateways.") // The querier.query-ingesters-within flag has been moved to the limits.go file // We still need to set a default value for cfg.QueryIngestersWithin since we need to keep supporting the querier yaml field until Mimir 2.11.0 From 93e8d0a86ec3867f637a663a59350be90096db10 Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Mon, 10 Jul 2023 14:10:59 +0200 Subject: [PATCH 74/75] Store-gateway querier streaming: prevent series hash cache pollution (#5459) * Add test for polluted series hash cache with streaming Signed-off-by: Dimitar Dimitrov * Fix series hash cache corruption Signed-off-by: Dimitar Dimitrov * Update pkg/storegateway/series_refs_test.go --------- Signed-off-by: Dimitar Dimitrov Co-authored-by: Ganesh Vernekar --- pkg/storegateway/series_refs.go | 25 ++-- pkg/storegateway/series_refs_test.go | 172 ++++++++++++++++++++------- 2 files changed, 144 insertions(+), 53 deletions(-) diff --git a/pkg/storegateway/series_refs.go b/pkg/storegateway/series_refs.go index ddd094363e8..f1459a67ab3 100644 --- a/pkg/storegateway/series_refs.go +++ b/pkg/storegateway/series_refs.go @@ -993,7 +993,6 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "preload series") } - isNoChunkRefsAndOverlapMintMaxt := s.strategy.isNoChunkRefsAndOverlapMintMaxt() for _, id := range postings { var ( metas []chunks.Meta @@ -1004,7 +1003,8 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p return symbolizedSeriesChunkRefsSet{}, errors.Wrap(err, "read series") } - if isNoChunkRefsAndOverlapMintMaxt { + switch { + case s.strategy.isNoChunkRefsAndOverlapMintMaxt(): overlaps := false for _, m := range metas { if m.MaxTime >= s.minTime && m.MinTime <= s.maxTime { @@ -1013,11 +1013,9 @@ func (s *loadingSeriesChunkRefsSetIterator) symbolizedSet(ctx context.Context, p } } if !overlaps { - continue + series.lset = nil // setting the labels to nil ends up skipping the series } - } - - if !s.strategy.isNoChunkRefs() { + case !s.strategy.isNoChunkRefs(): clampLastChunkLength(symbolizedSet.series, metas) series.chunksRanges = metasToRanges(partitionChunks(metas, s.chunkRangesPerSeries, minChunksPerRange), s.blockID, s.minTime, s.maxTime) } @@ -1069,15 +1067,22 @@ func clampLastChunkLength(series []symbolizedSeriesChunkRefs, nextSeriesChunkMet // filterSeries filters out series that don't belong to this shard (if sharding is configured) or that don't have any // chunk ranges and skipChunks=false. Empty chunks ranges indicates that the series doesn't have any chunk ranges in the -// requested time range. +// requested time range. filterSeries expects that the number of series matches the number of postings. func (s *loadingSeriesChunkRefsSetIterator) filterSeries(set seriesChunkRefsSet, postings []storage.SeriesRef, stats *queryStats) seriesChunkRefsSet { writeIdx := 0 for sIdx, series := range set.series { - // An empty label set means the series had no chunks in this block, so we skip it. - // No chunk ranges means the series doesn't have a single chunk range in the requested range. - if series.lset.IsEmpty() || (!s.strategy.isNoChunkRefs() && len(series.chunksRanges) == 0) { + + // We skip this series under three conditions: + // 1. The series doesn't have any chunks in this block OR the series didn't have any chunks in the requested time range, + // but also the request didn't require the chunks (i.e. s.strategy.isNoChunkRefs()). This is signified by an empty label set. + if series.lset.IsEmpty() { + continue + } + // 2. The series doesn't have any chunks in the requested time range but the request required the chunks (i.e. !s.strategy.isNoChunkRefs()). + if !s.strategy.isNoChunkRefs() && len(series.chunksRanges) == 0 { continue } + // 3. The series doesn't belong to this shard. if !shardOwned(s.shard, s.seriesHasher, postings[sIdx], series.lset, stats) { continue } diff --git a/pkg/storegateway/series_refs_test.go b/pkg/storegateway/series_refs_test.go index 4bb2a06f2b4..d5ed99f1307 100644 --- a/pkg/storegateway/series_refs_test.go +++ b/pkg/storegateway/series_refs_test.go @@ -1128,6 +1128,9 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { expectedSets []seriesChunkRefsSet } + sharedSeriesHasher := cachedSeriesHasher{hashcache.NewSeriesHashCache(1000).GetBlockCache("")} + sharedSeriesHasher2 := cachedSeriesHasher{hashcache.NewSeriesHashCache(1000).GetBlockCache("")} + testCases := map[string]testCase{ "loads one batch": { minT: 0, @@ -1335,54 +1338,137 @@ func TestLoadingSeriesChunkRefsSetIterator(t *testing.T) { }}, }, }, - } - runTest := func(tc testCase) { - // Setup - blockFactory := defaultTestBlockFactory - if tc.blockFactory != nil { - blockFactory = tc.blockFactory - } - block := blockFactory() - indexr := block.indexReader(selectAllStrategy{}) - postings, _, err := indexr.ExpandedPostings(context.Background(), tc.matchers, newSafeQueryStats()) - require.NoError(t, err) - postingsIterator := newPostingsSetsIterator( - postings, - tc.batchSize, - ) - hasher := tc.seriesHasher - if hasher == nil { - hasher = cachedSeriesHasher{hashcache.NewSeriesHashCache(100).GetBlockCache("")} - } - loadingIterator := newLoadingSeriesChunkRefsSetIterator( - context.Background(), - postingsIterator, - indexr, - noopCache{}, - newSafeQueryStats(), - block.meta, - tc.shard, - hasher, - tc.strategy, - tc.minT, - tc.maxT, - "t1", - 1, - log.NewNopLogger(), - ) + // If the first test case stored incorrect hashes in the cache, the second test case would fail. + "doesn't pollute the series hash cache with incorrect hashes (pt. 1)": { + minT: 15, + maxT: 45, + seriesHasher: sharedSeriesHasher, + shard: &sharding.ShardSelector{ShardIndex: 1, ShardCount: 2}, + batchSize: 100, + strategy: noChunkRefs | overlapMintMaxt, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-5]")}, + expectedSets: []seriesChunkRefsSet{ + {series: []seriesChunkRefs{ + {lset: labels.FromStrings("l1", "v2")}, + {lset: labels.FromStrings("l1", "v3")}, + }}, + }, + }, + "doesn't pollute the series hash cache with incorrect hashes (pt. 2)": { + minT: 15, + maxT: 45, + seriesHasher: sharedSeriesHasher, + shard: &sharding.ShardSelector{ShardIndex: 1, ShardCount: 2}, + batchSize: 100, + strategy: noChunkRefs | overlapMintMaxt, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-5]")}, + expectedSets: []seriesChunkRefsSet{ + {series: []seriesChunkRefs{ + {lset: labels.FromStrings("l1", "v2")}, + {lset: labels.FromStrings("l1", "v3")}, + }}, + }, + }, + "doesn't pollute the series hash cache with incorrect hashes (without streaming; pt. 1)": { + minT: 15, + maxT: 45, + seriesHasher: sharedSeriesHasher2, + shard: &sharding.ShardSelector{ShardIndex: 1, ShardCount: 2}, + batchSize: 100, + strategy: overlapMintMaxt, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-5]")}, + expectedSets: []seriesChunkRefsSet{ + {series: []seriesChunkRefs{ + {lset: labels.FromStrings("l1", "v2"), chunksRanges: []seriesChunkRefsRange{{refs: []seriesChunkRef{ + {minTime: 20, maxTime: 20, segFileOffset: 234, length: 208}, + }}}}, + {lset: labels.FromStrings("l1", "v3"), chunksRanges: []seriesChunkRefsRange{{refs: []seriesChunkRef{ + {minTime: 30, maxTime: 30, segFileOffset: 442, length: 208}, + }}}}, + }}, + }, + }, + "doesn't pollute the series hash cache with incorrect hashes (without streaming; pt. 2)": { + minT: 15, + maxT: 45, + seriesHasher: sharedSeriesHasher2, + shard: &sharding.ShardSelector{ShardIndex: 1, ShardCount: 2}, + batchSize: 100, + strategy: overlapMintMaxt, + matchers: []*labels.Matcher{labels.MustNewMatcher(labels.MatchRegexp, "l1", "v[1-5]")}, + expectedSets: []seriesChunkRefsSet{ + {series: []seriesChunkRefs{ + {lset: labels.FromStrings("l1", "v2"), chunksRanges: []seriesChunkRefsRange{{refs: []seriesChunkRef{ + {minTime: 20, maxTime: 20, segFileOffset: 234, length: 208}, + }}}}, + {lset: labels.FromStrings("l1", "v3"), chunksRanges: []seriesChunkRefsRange{{refs: []seriesChunkRef{ + // The chunk length here is different from the one in pt. 1. + // l1=v4 is outside this shard, so it was excluded, and we couldn't use its chunk refs to infer length. + // The next series is l1=v5, and we still don't know its shard, so its chunks refs are fetched, + // and we can use them to infer the chunk sizes of l1=v3 + {minTime: 30, maxTime: 30, segFileOffset: 442, length: 416}, + }}}}, + }}, + }, + }, + } - // Tests - sets := readAllSeriesChunkRefsSet(loadingIterator) - assert.NoError(t, loadingIterator.Err()) - assertSeriesChunkRefsSetsEqual(t, block.meta.ULID, tc.expectedSets, sets) + sortedTestCases := make([]struct { + name string + tc testCase + }, 0, len(testCases)) + for name, tc := range testCases { + sortedTestCases = append(sortedTestCases, struct { + name string + tc testCase + }{name, tc}) } + sort.Slice(sortedTestCases, func(i, j int) bool { + return sortedTestCases[i].name < sortedTestCases[j].name + }) - for testName, testCase := range testCases { - testName, tc := testName, testCase + for _, testCase := range sortedTestCases { + testName, tc := testCase.name, testCase.tc t.Run(testName, func(t *testing.T) { - t.Parallel() - runTest(tc) + // Setup + blockFactory := defaultTestBlockFactory + if tc.blockFactory != nil { + blockFactory = tc.blockFactory + } + block := blockFactory() + indexr := block.indexReader(selectAllStrategy{}) + postings, _, err := indexr.ExpandedPostings(context.Background(), tc.matchers, newSafeQueryStats()) + require.NoError(t, err) + postingsIterator := newPostingsSetsIterator( + postings, + tc.batchSize, + ) + hasher := tc.seriesHasher + if hasher == nil { + hasher = cachedSeriesHasher{hashcache.NewSeriesHashCache(100).GetBlockCache("")} + } + loadingIterator := newLoadingSeriesChunkRefsSetIterator( + context.Background(), + postingsIterator, + indexr, + noopCache{}, + newSafeQueryStats(), + block.meta, + tc.shard, + hasher, + tc.strategy, + tc.minT, + tc.maxT, + "t1", + 1, + log.NewNopLogger(), + ) + + // Tests + sets := readAllSeriesChunkRefsSet(loadingIterator) + assert.NoError(t, loadingIterator.Err()) + assertSeriesChunkRefsSetsEqual(t, block.meta.ULID, tc.expectedSets, sets) }) } } From 020248a8b93ac8b0e8e502340985eb8d4240eab9 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar Date: Tue, 11 Jul 2023 15:59:56 +0530 Subject: [PATCH 75/75] Move initialisation of seriesChunksChan Signed-off-by: Ganesh Vernekar --- pkg/querier/block_streaming.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/querier/block_streaming.go b/pkg/querier/block_streaming.go index db4ed2d995a..803cda8f1fa 100644 --- a/pkg/querier/block_streaming.go +++ b/pkg/querier/block_streaming.go @@ -141,7 +141,6 @@ func newStoreGatewayStreamReader(client storegatewaypb.StoreGateway_SeriesClient queryLimiter: queryLimiter, stats: stats, log: log, - seriesChunksChan: make(chan *storepb.StreamingChunksBatch, 1), } } @@ -161,6 +160,7 @@ func (s *storeGatewayStreamReader) Close() { func (s *storeGatewayStreamReader) StartBuffering() { // Important: to ensure that the goroutine does not become blocked and leak, the goroutine must only ever write to errorChan at most once. s.errorChan = make(chan error, 1) + s.seriesChunksChan = make(chan *storepb.StreamingChunksBatch, 1) ctxDone := s.client.Context().Done() go func() {