diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 8bf70f7..46c7a7d 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -21,12 +21,12 @@ jobs: services: mongodb: - image: mongo:6.0 + image: mongo:latest ports: - 27017:27017 redis: - image: redis:7.0 + image: redis:latest ports: - 6379:6379 diff --git a/.github/workflows/test_engine_compatibility.yaml b/.github/workflows/test_engine_compatibility.yaml new file mode 100644 index 0000000..52092cd --- /dev/null +++ b/.github/workflows/test_engine_compatibility.yaml @@ -0,0 +1,67 @@ +name: Test Engine Compatibility + +on: + pull_request: + branches: + - main + push: + branches: + - main + +env: + GO_VERSION: 1.18 + +permissions: + checks: write + contents: read + +jobs: + test-compatibility: + runs-on: ubuntu-latest + + services: + mongodb: + image: mongo:5.0.18 + ports: + - '27017:27017' + + redis: + image: redis:5.0.14 + ports: + - 6379:6379 + + steps: + - name: Set up Go + uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + + - uses: actions/checkout@v3 + + - name: Cache Go modules + uses: actions/cache@v3 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-golang-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-golang- + + - name: Install mockgen + run: go install github.com/golang/mock/mockgen@latest + + - name: Install gotestsum + run: go install gotest.tools/gotestsum@latest + + - name: Generate Mocks + run: make gen-mocks + + - name: Generate TLS certificates + run: make gen-cert + + - name: Go mod tidy + run: go mod tidy + + - name: Make test + run: make integration-test \ No newline at end of file diff --git a/.golangci.yml b/.golangci.yml new file mode 100644 index 0000000..06ed44c --- /dev/null +++ b/.golangci.yml @@ -0,0 +1,5 @@ +# TODO: configure linter correctly +# Disabling staticcheck since we deprecated proto field +linters: + disable: + - staticcheck diff --git a/CONTRIBUTING.md 
b/CONTRIBUTING.md index 13c5675..8f9306f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -140,6 +140,22 @@ You may also download the latest release from the [releases](https://github.com/ > > To change the default configuration see the [configuration section](/README.md?#configuration). +### Mocks + +We use the [GoMock](https://github.com/golang/mock) project to generate mocks. + +To update the mocks you must run the following command: +```shell +make gen-mocks +``` + +Mocks are generated in the [/internal/mocks](/internal/mocks) folder. + +When creating interfaces with the need to generate mocks, you must add the following directive to the interface file: +```go +//go:generate mockgen -destination=/mock_.go -package=mocks -source=.go +``` + ## Running tests To run project tests you must first generate all mock files with the following command: @@ -202,29 +218,6 @@ make gen-cert > > Check the [generation script](/internal/service/cert/gen.sh) for more details. -### Mocks - -We use the [GoMock](https://github.com/golang/mock) project to generate mocks. - -To update the mocks you must run the following command: -```shell -make gen-mocks -``` - -Mocks are generated in the [/internal/mocks](/internal/mocks) folder. - -When creating interfaces with the need to generate mocks, you must add the following directive to the interface file: -```go -//go:generate mockgen -destination=/mock_.go -package=mocks -source=.go -``` - -## Documentation - -If you made any changes in the `.proto` file, you must generate the documentation with the following command: -```shell -make gen-docs -``` - ## Docker Image To generate the Deckard image we use the [ko](https://github.com/google/ko) tool. 
diff --git a/README.md b/README.md index 7261ccc..0f5d4c0 100644 --- a/README.md +++ b/README.md @@ -8,17 +8,17 @@ [![Slack](https://img.shields.io/badge/slack-Gophers_%28Deckard%29-blue?logo=slack&link=https://gophers.slack.com/archives/C05E1TMS1FY)](https://gophers.slack.com/archives/C05E1TMS1FY) -Deckard is a priority queue system inspired by projects such as Google Cloud PubSub, Nats, Kafka, and others. Its main distinction lies in its ability to associate a priority with each message and have a queue that can be optionally cyclic. This means that messages can be delivered again after a user-managed time. Additionally, Deckard implements a locking mechanism to prevent message processing for a specified duration. +Deckard is a priority queue system inspired by projects such as Google Cloud PubSub, Nats, Kafka, and others. Its main distinction lies in its ability to associate a priority score with each message and have a queue that can be optionally cyclic. This means that messages can be delivered again after a user-managed time. Additionally, Deckard implements a locking mechanism to prevent message processing for a specified duration. ![deckard](docs/deckard_cartoon.webp) Briefly: -- An application inserts a message to be queued and its configuration (TTL, metadata, payload, etc). - - The message will be prioritized with a default timestamp-based algorithm. The priority can also be provided by the application. +- An application inserts a message to be queued and its configuration (TTL, metadata, payload, priority score, etc). + - The message will be prioritized with a default timestamp-based algorithm if the provided score is 0 (the default value). - A worker application pull messages from Deckard at regular intervals and performs any processing. - When it finishes processing a message, the application must notify with the processing result. 
- When notifying, the application may provide a lock time, to lock the message for a certain duration of time before being requeued and delivered again. - - It is also possible to notify a message changing its priority. + - It is also possible to notify a message changing its priority score. - When the message's TTL is reached, it stops being delivered; - For some use cases the TTL can be set as infinite. - An application can also remove the message when notifying. diff --git a/deckard_service.pb.go b/deckard_service.pb.go index 39026de..2153221 100644 --- a/deckard_service.pb.go +++ b/deckard_service.pb.go @@ -1,15 +1,15 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.1 -// protoc v3.15.8 +// protoc-gen-go v1.31.0 +// protoc v3.12.4 // source: deckard_service.proto package deckard import ( + any1 "github.com/golang/protobuf/ptypes/any" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -267,13 +267,31 @@ type PullRequest struct { // Caution: as greater the amount, as more time it will take to process the request. // Max value is 1000 and the default value is 1 Amount int32 `protobuf:"varint,2,opt,name=amount,proto3" json:"amount,omitempty"` + // Prefer using the max_score field instead of this one. + // This field is deprecated and will be removed in the future. // - //Number to subtract to the current time to filter the max score to return. - //Useful to not return a message just moments after it was last used. + // The `score_filter` behaves differently than `max_score` field. + // The `max_score` field is the upper threshold itself, but the `score_filter` will result in a upper score threshold of the current timestamp minus the score_filter value. 
// - //For example if in your queue the score is only based on the time (always acking with score_subtract as 0), - //this parameter will be the number of milliseconds since the message's last usage. + // Useful only when your queue's score is only based on the current timestamp to not return a message just moments after it was last used. + // It will only return messages with score lower than the current timestamp minus the score_filter value. + // + // For example if your queue's score is only based on the current timestamp, this parameter will be the number of milliseconds a message must be in the queue before being returned. + // + // Deprecated: Marked as deprecated in deckard_service.proto. ScoreFilter int64 `protobuf:"varint,3,opt,name=score_filter,json=scoreFilter,proto3" json:"score_filter,omitempty"` + // Sets the upper threshold for the priority score of a message to be returned in the pull request. + // + // Only messages with a priority score equal to or lower than the max_score value will be returned. + // + // The maximum score accepted by Deckard is 9007199254740992, any value higher than this will be capped to the maximum score. + // To set this value to the minimum score accepted by Deckard, use any negative number. + // This parameter will be ignored if set to 0 (default value). + MaxScore float64 `protobuf:"fixed64,4,opt,name=max_score,json=maxScore,proto3" json:"max_score,omitempty"` + // Sets the lower threshold for the priority score required for a message to be returned. + // Only messages with a priority score equal to or higher than the min_score value will be returned. + // The minimum score accepted by Deckard is 0 which is also the default value + MinScore float64 `protobuf:"fixed64,5,opt,name=min_score,json=minScore,proto3" json:"min_score,omitempty"` } func (x *PullRequest) Reset() { @@ -322,6 +340,7 @@ func (x *PullRequest) GetAmount() int32 { return 0 } +// Deprecated: Marked as deprecated in deckard_service.proto. 
func (x *PullRequest) GetScoreFilter() int64 { if x != nil { return x.ScoreFilter @@ -329,6 +348,20 @@ func (x *PullRequest) GetScoreFilter() int64 { return 0 } +func (x *PullRequest) GetMaxScore() float64 { + if x != nil { + return x.MaxScore + } + return 0 +} + +func (x *PullRequest) GetMinScore() float64 { + if x != nil { + return x.MinScore + } + return 0 +} + type PullResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -390,18 +423,22 @@ type Message struct { // Full name of the queue this message belongs (including any prefixes) Queue string `protobuf:"bytes,3,opt,name=queue,proto3" json:"queue,omitempty"` // A payload map with formatted data to be stored and used by clients. - Payload map[string]*anypb.Any `protobuf:"bytes,8,rep,name=payload,proto3" json:"payload,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Payload map[string]*any1.Any `protobuf:"bytes,8,rep,name=payload,proto3" json:"payload,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Metadata is a map of string to be used as a key-value store. // It is a simple way to store data that is not part of the message payload. Metadata map[string]string `protobuf:"bytes,4,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Message string payload. Is responsibility of the caller to know how to encode/decode to a useful format for its purpose. // This field can be used to store simple string data instead of using the payload field. StringPayload string `protobuf:"bytes,5,opt,name=string_payload,json=stringPayload,proto3" json:"string_payload,omitempty"` - // Score is the priority this message currently have in the queue. + // Score represents the priority score the message currently have in the queue. + // The lower the score, the higher the priority. 
+ // The maximum score accepted by Deckard is 9007199254740992 and the minimum is 0 Score float64 `protobuf:"fixed64,6,opt,name=score,proto3" json:"score,omitempty"` // Breakpoint is a field to be used as an auxiliar field for some specific use cases. + // For example if you need to keep a record of the last result processing a message, or want to iteract with a pagination system. // - // For example if you need to keep a record of the last result processing a message, use this field like iteracting with a pagination system. + // Examples: imagine a message representing a web news portal and you want to navigate through the articles. This field could be used to store the last visited article id. + // Or imagine a message representing a user and you want to iterate through the user's publications pages. This field could be used to store the last page number you visited. Breakpoint string `protobuf:"bytes,7,opt,name=breakpoint,proto3" json:"breakpoint,omitempty"` } @@ -458,7 +495,7 @@ func (x *Message) GetQueue() string { return "" } -func (x *Message) GetPayload() map[string]*anypb.Any { +func (x *Message) GetPayload() map[string]*any1.Any { if x != nil { return x.Payload } @@ -643,19 +680,18 @@ type AddMessage struct { // Unique id of this message Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // A payload map with formatted data to be stored and used by clients. - Payload map[string]*anypb.Any `protobuf:"bytes,10,rep,name=payload,proto3" json:"payload,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Payload map[string]*any1.Any `protobuf:"bytes,10,rep,name=payload,proto3" json:"payload,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Non-formatted string payload. // This field can be used to store simple string data instead of using the payload field. 
StringPayload string `protobuf:"bytes,3,opt,name=string_payload,json=stringPayload,proto3" json:"string_payload,omitempty"` // Metadata is a map of string to be used as a key-value store. // It is a simple way to store data that is not part of the message payload. Metadata map[string]string `protobuf:"bytes,11,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Name of the queue to add this message // - //Name of the queue to add this message - // - //Suggestion: to better observability, provide the name of the application using colon as the separator. Example: : + // Suggestion: to better observability, provide the name of the application using colon as the separator. Example: : // - //You may also add a queue prefix to the queue name using two colons as the separator. Example: ::: + // You may also add a queue prefix to the queue name using two colons as the separator. Example: ::: Queue string `protobuf:"bytes,4,opt,name=queue,proto3" json:"queue,omitempty"` // Indicate this message will never expire and will only be deleted from the queue if explicitly removed. Timeless bool `protobuf:"varint,6,opt,name=timeless,proto3" json:"timeless,omitempty"` @@ -664,6 +700,15 @@ type AddMessage struct { TtlMinutes int64 `protobuf:"varint,7,opt,name=ttl_minutes,json=ttlMinutes,proto3" json:"ttl_minutes,omitempty"` // Description of the message, this should be used as a human readable string to be used in diagnostics. Description string `protobuf:"bytes,8,opt,name=description,proto3" json:"description,omitempty"` + // Score represents the priority score the message currently have in the queue. + // The score is used to determine the order of the messages returned in a pull request. + // The lower the score, the higher the priority. + // + // If the score is not set (or set to 0), the score will be set with the current timestamp in milliseconds at the moment of the message creation. 
+ // + // The maximum score accepted by Deckard is 9007199254740992 and the minimum is 0 + // Negative scores will be converted to 0, adding the message with the lowest score (and highest priority) + Score float64 `protobuf:"fixed64,12,opt,name=score,proto3" json:"score,omitempty"` } func (x *AddMessage) Reset() { @@ -705,7 +750,7 @@ func (x *AddMessage) GetId() string { return "" } -func (x *AddMessage) GetPayload() map[string]*anypb.Any { +func (x *AddMessage) GetPayload() map[string]*any1.Any { if x != nil { return x.Payload } @@ -754,6 +799,13 @@ func (x *AddMessage) GetDescription() string { return "" } +func (x *AddMessage) GetScore() float64 { + if x != nil { + return x.Score + } + return 0 +} + type AddResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -820,7 +872,7 @@ type EditQueueRequest struct { // This includes all prefixes and suffixes Queue string `protobuf:"bytes,1,opt,name=queue,proto3" json:"queue,omitempty"` // Configuration to apply to the queue. It will always update the queue with the newer configuration. - // Only available fields will be updated, meaning that previously configured attributes will not be change unless you explicit set it. + // Only available fields will be updated, meaning that previously configured fields will not be change unless you explicit set it. // If you want to change a configuration to its default value, manually set it to its default value following each field documentation. Configuration *QueueConfiguration `protobuf:"bytes,2,opt,name=configuration,proto3" json:"configuration,omitempty"` } @@ -933,14 +985,13 @@ type QueueConfiguration struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Number of max elements the queue can have. // - //Number of max elements the queue can have. - // - //To apply a max elements to a queue, set a value greater than 0. - //To remove the max elements from a queue, set the value to -1. 
- //0 will be always ignored and the queue will not be updated. + // To apply a max elements to a queue, set a value greater than 0. + // To remove the max elements from a queue, set the value to -1. + // 0 will be always ignored and the queue will not be updated. // - //All queues are unlimited by default. + // All queues are unlimited by default. MaxElements int64 `protobuf:"varint,1,opt,name=max_elements,json=maxElements,proto3" json:"max_elements,omitempty"` } @@ -1099,29 +1150,50 @@ type AckRequest struct { // Queue where this message is stored Queue string `protobuf:"bytes,2,opt,name=queue,proto3" json:"queue,omitempty"` // Reason of this result. + // // Useful for audit, mostly on 'nack' signals. Reason string `protobuf:"bytes,5,opt,name=reason,proto3" json:"reason,omitempty"` - // The value to subtract the score and increase final message score. - // For example if you want to make this message to have a better score you can add 10000 which will represent 10s of score benefit. + // This field is deprecated and will be removed in the future. If you need to change the message score, use the 'score' field. + // + // The value to subtract the score and increase final message priority. + // For example if you want to make this message to have a higher priority you can set 10000 which will represent 10s of score benefit in the default score algorithm. // If you want to penalize the message you can send a negative number. // - // IMPORTANT: The message will not be locked by, in the example, 10 seconds. This attribute is used only to increase or decrease the message priority in the priority queue. + // IMPORTANT: The message will not be locked by, in the example, 10 seconds. This field is used only to increase or decrease the message priority in the priority queue. + // + // This field is used only for ack requests (since in nack requests the message will return with the lowest score to the queue). 
+ // It will be ignored if used at the same time of 'score' or 'lock_ms' fields. // - // This attribute is used only for ack requests and can't be used at the same time of 'lock_ms' attribute. + // Deprecated: Marked as deprecated in deckard_service.proto. ScoreSubtract float64 `protobuf:"fixed64,3,opt,name=score_subtract,json=scoreSubtract,proto3" json:"score_subtract,omitempty"` - // Breakpoint to set for this message + // Breakpoint is a field to be used as an auxiliar field for some specific use cases. + // For example if you need to keep a record of the last result processing a message, or want to iteract with a pagination system. + // + // Examples: imagine a message representing a web news portal and you want to navigate through the articles. This field could be used to store the last visited article id. + // Or imagine a message representing a user and you want to iterate through the user's publications pages. This field could be used to store the last page number you visited. Breakpoint string `protobuf:"bytes,4,opt,name=breakpoint,proto3" json:"breakpoint,omitempty"` // Time in milliseconds to lock a message before returning it to the queue. - // For nack requests the message will be locked before returning to first position in the priority queue. - // For ack requests the message will be locked before returning to last position in the priority queue. + // For NACK requests the message will be locked before returning to first position in the priority queue. You can change this behavior using the 'score' field. // - // IMPORTANT: The 'score_subtract' attribute will be ignored if this attribute is different than 0. + // For ACK requests the message will be locked before returning to last position in the priority queue. You can change this behavior using the 'score' field. // - // IMPORTANT: Deckard checks for locked messages in a 1-second delay meaning the lock have a second precision and not milliseconds. 
- // This field is in milliseconds because all scores and duration units on deckard are expressed in milliseconds. + // IMPORTANT: Deckard checks for locked messages in a 1-second precision meaning the lock have a second precision and not milliseconds. + // This field is in milliseconds because all duration units on deckard are expressed in milliseconds and the default score algorithm uses milliseconds as well. LockMs int64 `protobuf:"varint,6,opt,name=lock_ms,json=lockMs,proto3" json:"lock_ms,omitempty"` // Whether the message should be removed when acked/nacked RemoveMessage bool `protobuf:"varint,7,opt,name=removeMessage,proto3" json:"removeMessage,omitempty"` + // Sets the score of the message when ACKed, to override the default score algorithm. + // + // If used at the same time with the 'lock_ms' attribute, the message will be locked for the specified time and then returned to the queue with the specified score. + // + // For ACK requests, if the score is not provided (or set to 0), the message will return to the queue with the default score algorithm which is the current timestamp in milliseconds. + // + // For NACKs requests, if the score is not provided (or set to 0), the message will return to the queue with the minimum score accepted by Deckard which is 0. + // + // Negative values will be converted to 0, which is how to set the highest priority to a message in a ACK/NACK request. + // + // REMEMBER: the maximum score accepted by Deckard is 9007199254740992 and the minimum is 0, so values outside this range will be capped. + Score float64 `protobuf:"fixed64,10,opt,name=score,proto3" json:"score,omitempty"` } func (x *AckRequest) Reset() { @@ -1177,6 +1249,7 @@ func (x *AckRequest) GetReason() string { return "" } +// Deprecated: Marked as deprecated in deckard_service.proto. 
func (x *AckRequest) GetScoreSubtract() float64 { if x != nil { return x.ScoreSubtract @@ -1205,6 +1278,13 @@ func (x *AckRequest) GetRemoveMessage() bool { return false } +func (x *AckRequest) GetScore() float64 { + if x != nil { + return x.Score + } + return 0 +} + type AckResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1383,190 +1463,197 @@ var file_deckard_service_proto_rawDesc = []byte{ 0x28, 0x03, 0x52, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x12, 0x26, 0x0a, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0e, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, - 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x5e, 0x0a, 0x0b, 0x50, 0x75, 0x6c, 0x6c, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, 0x0a, - 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, 0x61, - 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x66, - 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x43, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x69, - 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x73, 0x73, - 0x61, 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xc0, 0x03, - 0x0a, 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, - 0x63, 
0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, - 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, - 0x75, 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, - 0x65, 0x12, 0x3e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, - 0x61, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, - 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, - 0x72, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, - 0x63, 0x6f, 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x72, - 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, - 0x74, 0x1a, 0x50, 0x0a, 0x0c, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, - 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, - 0x22, 0x2a, 0x0a, 0x0c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x25, 0x0a, 0x0d, - 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, - 0x05, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x22, 0x44, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, - 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, - 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xe2, 0x03, 0x0a, 0x0a, 0x41, 0x64, - 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x41, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x6c, 0x69, 0x70, - 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x4d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 
0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, - 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0b, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, - 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, - 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, - 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, - 0x52, 0x08, 0x74, 0x69, 0x6d, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x74, - 0x6c, 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, - 0x0a, 0x74, 0x74, 0x6c, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x12, 0x20, 0x0a, 0x0b, 0x64, - 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, 0x50, 0x0a, - 0x0c, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, - 0x3b, 0x0a, 0x0d, 
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x22, 0x9c, 0x01, 0x0a, 0x0b, 0x50, 0x75, 0x6c, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, + 0x0a, 0x06, 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x06, + 0x61, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0c, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x0b, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x08, 0x6d, 0x61, 0x78, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, + 0x6e, 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x6d, + 0x69, 0x6e, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x43, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x62, 0x6c, 0x69, 0x70, + 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xc0, 0x03, 0x0a, + 0x07, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, + 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, + 0x65, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, + 0x12, 0x3e, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x08, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, + 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x12, 0x41, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, + 0x61, 0x72, 0x64, 0x2e, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, + 0x6f, 0x72, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, + 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x1a, 0x50, 0x0a, 0x0c, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, - 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x4a, 0x04, 0x08, 0x05, - 0x10, 0x06, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, 0x09, 0x10, 0x0a, 0x22, 0x57, - 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 
- 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x75, - 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x75, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x72, 0x0a, 0x10, 0x45, 0x64, 0x69, 0x74, 0x51, - 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, + 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x2a, 0x0a, 0x0c, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x71, 0x75, 0x65, 0x75, 0x65, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x22, 0x25, 0x0a, 0x0d, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x22, 0x44, 0x0a, 0x0a, 0x41, 0x64, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x36, 0x0a, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x62, 
0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, + 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x08, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xf8, 0x03, 0x0a, 0x0a, 0x41, 0x64, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x41, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, + 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, + 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x74, 0x69, 0x6d, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x74, 0x69, 0x6d, 0x65, 0x6c, 0x65, 0x73, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x74, 0x6c, + 0x5f, 0x6d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, + 0x74, 0x74, 0x6c, 0x4d, 0x69, 0x6e, 0x75, 0x74, 0x65, 0x73, 0x12, 0x20, 
0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x73, 0x63, 0x6f, + 0x72, 0x65, 0x1a, 0x50, 0x0a, 0x0c, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3b, 0x0a, 0x0d, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x4a, 0x04, 0x08, 0x05, 0x10, 0x06, 0x4a, 0x04, 0x08, 0x02, 0x10, 0x03, 0x4a, 0x04, 0x08, + 0x09, 0x10, 0x0a, 0x22, 0x57, 0x0a, 0x0b, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x63, 0x6f, + 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, 0x63, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0c, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x72, 0x0a, 0x10, + 0x45, 0x64, 0x69, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x14, 
0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, + 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x51, + 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x43, 0x0a, 0x11, 0x45, 0x64, 0x69, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, + 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x22, 0x37, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, + 0x61, 0x78, 0x5f, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x27, + 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x22, 0x72, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x51, 0x75, + 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x48, 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x22, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x43, 0x0a, 0x11, 0x45, - 0x64, 0x69, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, - 0x22, 0x37, 0x0a, 0x12, 0x51, 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x65, 0x6c, - 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x6d, 0x61, - 0x78, 0x45, 0x6c, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x22, 0x27, 0x0a, 0x0f, 0x47, 0x65, 0x74, - 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, - 0x75, 0x65, 0x22, 0x72, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x48, 0x0a, 0x0d, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, - 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x51, 0x75, 0x65, 0x75, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, - 
0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xd0, 0x01, 0x0a, 0x0a, 0x41, 0x63, 0x6b, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, - 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, - 0x73, 0x6f, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, - 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x73, 0x63, 0x6f, - 0x72, 0x65, 0x53, 0x75, 0x62, 0x74, 0x72, 0x61, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x72, - 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, - 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x6f, - 0x63, 0x6b, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x6f, 0x63, - 0x6b, 0x4d, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x72, 0x0a, 0x0b, 0x41, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, - 0x73, 0x73, 0x12, 0x49, 0x0a, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x62, - 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x52, 
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0e, 0x0a, - 0x0c, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a, - 0x0d, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, - 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x32, 0xcf, 0x05, 0x0a, 0x07, 0x44, 0x65, 0x63, - 0x6b, 0x61, 0x72, 0x64, 0x12, 0x3e, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x1a, 0x2e, 0x62, 0x6c, - 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, - 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1b, 0x2e, 0x62, - 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x50, 0x75, - 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x62, 0x6c, 0x69, 0x70, - 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x03, 0x41, 0x63, 0x6b, 0x12, 0x1a, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xea, 0x01, 0x0a, 0x0a, + 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, + 0x65, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x71, 0x75, 0x65, 0x75, 0x65, + 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x0e, 0x73, 0x63, 0x6f, 0x72, + 0x65, 0x5f, 0x73, 0x75, 0x62, 0x74, 0x72, 0x61, 0x63, 0x74, 0x18, 0x03, 
0x20, 0x01, 0x28, 0x01, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x53, 0x75, 0x62, 0x74, 0x72, + 0x61, 0x63, 0x74, 0x12, 0x1e, 0x0a, 0x0a, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x62, 0x72, 0x65, 0x61, 0x6b, 0x70, 0x6f, + 0x69, 0x6e, 0x74, 0x12, 0x17, 0x0a, 0x07, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6d, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x06, 0x6c, 0x6f, 0x63, 0x6b, 0x4d, 0x73, 0x12, 0x24, 0x0a, 0x0d, + 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0d, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x22, 0x72, 0x0a, 0x0b, 0x41, 0x63, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, + 0x73, 0x12, 0x49, 0x0a, 0x10, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x62, 0x6c, + 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x0f, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x0e, 0x0a, 0x0c, + 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x29, 0x0a, 0x0d, + 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x32, 0xcf, 0x05, 0x0a, 0x07, 0x44, 0x65, 0x63, 0x6b, + 0x61, 0x72, 
0x64, 0x12, 0x3e, 0x0a, 0x03, 0x41, 0x64, 0x64, 0x12, 0x1a, 0x2e, 0x62, 0x6c, 0x69, + 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, + 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x64, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x41, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1b, 0x2e, 0x62, 0x6c, + 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x50, 0x75, 0x6c, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1c, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, + 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3e, 0x0a, 0x03, 0x41, 0x63, 0x6b, 0x12, 0x1a, 0x2e, + 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, + 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x62, 0x6c, 0x69, 0x70, + 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x63, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x4e, 0x61, 0x63, 0x6b, 0x12, 0x1a, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x63, 0x6b, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x04, 0x4e, 0x61, 0x63, 0x6b, 0x12, - 0x1a, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, - 0x2e, 0x41, 0x63, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x62, 0x6c, - 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x41, 0x63, 0x6b, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x44, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x12, 0x1c, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, - 0x72, 0x64, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, - 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, - 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, - 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, - 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x05, 0x46, 0x6c, 0x75, 0x73, 0x68, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1c, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, - 0x64, 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, + 0x64, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, - 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, - 0x07, 0x47, 0x65, 0x74, 0x42, 0x79, 0x49, 0x64, 0x12, 0x1e, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, - 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x49, - 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, - 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x49, - 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x50, 0x0a, 0x09, 0x45, 0x64, 0x69, - 
0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x20, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, - 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, - 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x51, 0x75, - 0x65, 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x08, 0x47, - 0x65, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x1f, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x47, 0x0a, + 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x12, 0x1d, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, + 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, + 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x05, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x12, + 0x1c, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, + 0x2e, 0x46, 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x46, + 0x6c, 0x75, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4a, 0x0a, 0x07, + 0x47, 0x65, 0x74, 0x42, 0x79, 0x49, 0x64, 0x12, 0x1e, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, + 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x49, 0x64, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, + 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x79, 0x49, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, 0x50, 0x0a, 0x09, 0x45, 0x64, 0x69, 0x74, + 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x20, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, + 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, + 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x45, 0x64, 0x69, 0x74, 0x51, 0x75, 0x65, + 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4d, 0x0a, 0x08, 0x47, 0x65, + 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x1f, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, + 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x75, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, 0x75, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, - 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, 0x2e, 0x47, 0x65, 0x74, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x0a, 0x15, 0x62, 0x72, - 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, - 0x61, 0x72, 0x64, 0x50, 0x01, 0x5a, 0x1a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x74, 0x61, 0x6b, 0x65, 0x6e, 0x65, 0x74, 0x2f, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, - 0x64, 0xaa, 0x02, 0x0f, 0x54, 0x61, 0x6b, 0x65, 0x6e, 0x65, 0x74, 0x2e, 0x44, 0x65, 0x63, 0x6b, - 0x61, 0x72, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x47, 0x0a, 0x15, 0x62, 0x72, 0x2e, + 0x63, 0x6f, 0x6d, 0x2e, 0x62, 0x6c, 0x69, 0x70, 0x61, 0x69, 0x2e, 0x64, 0x65, 0x63, 0x6b, 0x61, + 0x72, 0x64, 0x50, 0x01, 0x5a, 0x1a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 
0x74, 0x61, 0x6b, 0x65, 0x6e, 0x65, 0x74, 0x2f, 0x64, 0x65, 0x63, 0x6b, 0x61, 0x72, 0x64, + 0xaa, 0x02, 0x0f, 0x54, 0x61, 0x6b, 0x65, 0x6e, 0x65, 0x74, 0x2e, 0x44, 0x65, 0x63, 0x6b, 0x61, + 0x72, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1609,7 +1696,7 @@ var file_deckard_service_proto_goTypes = []interface{}{ nil, // 23: blipai.deckard.Message.MetadataEntry nil, // 24: blipai.deckard.AddMessage.PayloadEntry nil, // 25: blipai.deckard.AddMessage.MetadataEntry - (*anypb.Any)(nil), // 26: google.protobuf.Any + (*any1.Any)(nil), // 26: google.protobuf.Any } var file_deckard_service_proto_depIdxs = []int32{ 6, // 0: blipai.deckard.GetByIdResponse.message:type_name -> blipai.deckard.Message diff --git a/deckard_service_grpc.pb.go b/deckard_service_grpc.pb.go index 5915cec..791c46f 100644 --- a/deckard_service_grpc.pb.go +++ b/deckard_service_grpc.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.2.0 -// - protoc v3.15.8 +// - protoc-gen-go-grpc v1.3.0 +// - protoc v3.12.4 // source: deckard_service.proto package deckard @@ -18,6 +18,19 @@ import ( // Requires gRPC-Go v1.32.0 or later. const _ = grpc.SupportPackageIsVersion7 +const ( + Deckard_Add_FullMethodName = "/blipai.deckard.Deckard/Add" + Deckard_Pull_FullMethodName = "/blipai.deckard.Deckard/Pull" + Deckard_Ack_FullMethodName = "/blipai.deckard.Deckard/Ack" + Deckard_Nack_FullMethodName = "/blipai.deckard.Deckard/Nack" + Deckard_Count_FullMethodName = "/blipai.deckard.Deckard/Count" + Deckard_Remove_FullMethodName = "/blipai.deckard.Deckard/Remove" + Deckard_Flush_FullMethodName = "/blipai.deckard.Deckard/Flush" + Deckard_GetById_FullMethodName = "/blipai.deckard.Deckard/GetById" + Deckard_EditQueue_FullMethodName = "/blipai.deckard.Deckard/EditQueue" + Deckard_GetQueue_FullMethodName = "/blipai.deckard.Deckard/GetQueue" +) + // DeckardClient is the client API for Deckard service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. @@ -56,7 +69,7 @@ func NewDeckardClient(cc grpc.ClientConnInterface) DeckardClient { func (c *deckardClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.CallOption) (*AddResponse, error) { out := new(AddResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Add", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Add_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -65,7 +78,7 @@ func (c *deckardClient) Add(ctx context.Context, in *AddRequest, opts ...grpc.Ca func (c *deckardClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) { out := new(PullResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Pull", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Pull_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -74,7 +87,7 @@ func (c *deckardClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc. func (c *deckardClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*AckResponse, error) { out := new(AckResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Ack", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Ack_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -83,7 +96,7 @@ func (c *deckardClient) Ack(ctx context.Context, in *AckRequest, opts ...grpc.Ca func (c *deckardClient) Nack(ctx context.Context, in *AckRequest, opts ...grpc.CallOption) (*AckResponse, error) { out := new(AckResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Nack", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Nack_FullMethodName, in, out, opts...) 
if err != nil { return nil, err } @@ -92,7 +105,7 @@ func (c *deckardClient) Nack(ctx context.Context, in *AckRequest, opts ...grpc.C func (c *deckardClient) Count(ctx context.Context, in *CountRequest, opts ...grpc.CallOption) (*CountResponse, error) { out := new(CountResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Count", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Count_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -101,7 +114,7 @@ func (c *deckardClient) Count(ctx context.Context, in *CountRequest, opts ...grp func (c *deckardClient) Remove(ctx context.Context, in *RemoveRequest, opts ...grpc.CallOption) (*RemoveResponse, error) { out := new(RemoveResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Remove", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Remove_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -110,7 +123,7 @@ func (c *deckardClient) Remove(ctx context.Context, in *RemoveRequest, opts ...g func (c *deckardClient) Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error) { out := new(FlushResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/Flush", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_Flush_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -119,7 +132,7 @@ func (c *deckardClient) Flush(ctx context.Context, in *FlushRequest, opts ...grp func (c *deckardClient) GetById(ctx context.Context, in *GetByIdRequest, opts ...grpc.CallOption) (*GetByIdResponse, error) { out := new(GetByIdResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/GetById", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_GetById_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -128,7 +141,7 @@ func (c *deckardClient) GetById(ctx context.Context, in *GetByIdRequest, opts .. 
func (c *deckardClient) EditQueue(ctx context.Context, in *EditQueueRequest, opts ...grpc.CallOption) (*EditQueueResponse, error) { out := new(EditQueueResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/EditQueue", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_EditQueue_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -137,7 +150,7 @@ func (c *deckardClient) EditQueue(ctx context.Context, in *EditQueueRequest, opt func (c *deckardClient) GetQueue(ctx context.Context, in *GetQueueRequest, opts ...grpc.CallOption) (*GetQueueResponse, error) { out := new(GetQueueResponse) - err := c.cc.Invoke(ctx, "/blipai.deckard.Deckard/GetQueue", in, out, opts...) + err := c.cc.Invoke(ctx, Deckard_GetQueue_FullMethodName, in, out, opts...) if err != nil { return nil, err } @@ -230,7 +243,7 @@ func _Deckard_Add_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Add", + FullMethod: Deckard_Add_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Add(ctx, req.(*AddRequest)) @@ -248,7 +261,7 @@ func _Deckard_Pull_Handler(srv interface{}, ctx context.Context, dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Pull", + FullMethod: Deckard_Pull_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Pull(ctx, req.(*PullRequest)) @@ -266,7 +279,7 @@ func _Deckard_Ack_Handler(srv interface{}, ctx context.Context, dec func(interfa } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Ack", + FullMethod: Deckard_Ack_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Ack(ctx, req.(*AckRequest)) @@ -284,7 +297,7 @@ func _Deckard_Nack_Handler(srv interface{}, ctx context.Context, 
dec func(interf } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Nack", + FullMethod: Deckard_Nack_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Nack(ctx, req.(*AckRequest)) @@ -302,7 +315,7 @@ func _Deckard_Count_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Count", + FullMethod: Deckard_Count_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Count(ctx, req.(*CountRequest)) @@ -320,7 +333,7 @@ func _Deckard_Remove_Handler(srv interface{}, ctx context.Context, dec func(inte } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Remove", + FullMethod: Deckard_Remove_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Remove(ctx, req.(*RemoveRequest)) @@ -338,7 +351,7 @@ func _Deckard_Flush_Handler(srv interface{}, ctx context.Context, dec func(inter } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/Flush", + FullMethod: Deckard_Flush_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).Flush(ctx, req.(*FlushRequest)) @@ -356,7 +369,7 @@ func _Deckard_GetById_Handler(srv interface{}, ctx context.Context, dec func(int } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/GetById", + FullMethod: Deckard_GetById_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).GetById(ctx, req.(*GetByIdRequest)) @@ -374,7 +387,7 @@ func _Deckard_EditQueue_Handler(srv interface{}, ctx context.Context, dec func(i } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/EditQueue", 
+ FullMethod: Deckard_EditQueue_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).EditQueue(ctx, req.(*EditQueueRequest)) @@ -392,7 +405,7 @@ func _Deckard_GetQueue_Handler(srv interface{}, ctx context.Context, dec func(in } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/blipai.deckard.Deckard/GetQueue", + FullMethod: Deckard_GetQueue_FullMethodName, } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(DeckardServer).GetQueue(ctx, req.(*GetQueueRequest)) diff --git a/docker/docker-compose.yml b/docker/docker-compose.yml index 56a12c1..8ce63c6 100644 --- a/docker/docker-compose.yml +++ b/docker/docker-compose.yml @@ -2,7 +2,7 @@ version: "3.8" services: redis: - image: "redis:alpine" + image: "redis:5.0.14" ports: - "$REDIS_PORT:$REDIS_PORT" diff --git a/docs/components.md b/docs/components.md index 028a135..3340659 100644 --- a/docs/components.md +++ b/docs/components.md @@ -7,6 +7,7 @@ The storage is responsible for persisting the messages and queue configurations. Deckard currently supports the following Storage engines: - Memory (default) - MongoDB + - Supported versions: `5.0.18` to `6.0.6` (we always run our integration tests against the `5.0.18` and the latest version) The memory implementation is mainly used in tests and local development and is not recommended for production use. @@ -17,6 +18,7 @@ The cache is currently the main component of Deckard. It is used to manage messa Deckard currently supports the following Cache engines: - Memory (default) - Redis + - Supported versions: `5.0.14` to `7.0.11` (we always run our integration tests against the `5.0.14` and the latest version) The memory implementation is mainly used in tests and local development and is not recommended for production use. 
diff --git a/go.mod b/go.mod index da4863a..db2f64f 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/go-redis/redis/v8 v8.11.5 github.com/gobwas/glob v0.2.3 github.com/golang/mock v1.6.0 + github.com/golang/protobuf v1.5.2 github.com/meirf/gopart v0.0.0-20180520194036-37e9492a85a8 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/prometheus/client_golang v1.14.0 @@ -42,7 +43,6 @@ require ( github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-redis/redis/extra/rediscmd/v8 v8.11.5 // indirect - github.com/golang/protobuf v1.5.2 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.15.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect diff --git a/internal/audit/audit.go b/internal/audit/audit.go index 4c59fa5..1263d61 100644 --- a/internal/audit/audit.go +++ b/internal/audit/audit.go @@ -14,11 +14,11 @@ import ( "time" "github.com/takenet/deckard/internal/config" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/metrics" "github.com/takenet/deckard/internal/project" - "github.com/takenet/deckard/internal/queue/entities" - "github.com/takenet/deckard/internal/queue/utils" + "github.com/takenet/deckard/internal/queue/message" "go.uber.org/zap" "github.com/elastic/go-elasticsearch/v7" @@ -36,7 +36,7 @@ type Entry struct { Queue string `json:"queue"` QueuePrefix string `json:"queue_prefix"` QueueSuffix string `json:"queue_suffix"` - LastScoreSubtract float64 `json:"last_score_subtract"` + LastScoreSubtract float64 `json:"last_score_subtract"` // deprecated Timestamp time.Time `json:"timestamp"` Breakpoint string `json:"breakpoint"` Signal Signal `json:"signal"` @@ -146,9 +146,9 @@ func (a *AuditorImpl) send(ctx context.Context, entries ...Entry) { return } - start := time.Now() + start := 
dtime.Now() defer func() { - metrics.AuditorStoreLatency.Record(ctx, utils.ElapsedTime(start)) + metrics.AuditorStoreLatency.Record(ctx, dtime.ElapsedTime(start)) }() body := "" @@ -192,9 +192,9 @@ func (a *AuditorImpl) Store(ctx context.Context, entry Entry) { return } - entry.Timestamp = time.Now() + entry.Timestamp = dtime.Now() - queuePrefix, queueSuffix := entities.GetQueueParts(entry.Queue) + queuePrefix, queueSuffix := message.GetQueueParts(entry.Queue) entry.QueuePrefix = queuePrefix @@ -203,7 +203,7 @@ func (a *AuditorImpl) Store(ctx context.Context, entry Entry) { } defer func() { - metrics.AuditorAddToStoreLatency.Record(ctx, utils.ElapsedTime(entry.Timestamp)) + metrics.AuditorAddToStoreLatency.Record(ctx, dtime.ElapsedTime(entry.Timestamp)) }() a.entries <- entry diff --git a/internal/cmd/deckard/main.go b/internal/cmd/deckard/main.go index 5980f12..9efea4c 100644 --- a/internal/cmd/deckard/main.go +++ b/internal/cmd/deckard/main.go @@ -10,12 +10,12 @@ import ( "github.com/takenet/deckard/internal/audit" "github.com/takenet/deckard/internal/config" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/metrics" "github.com/takenet/deckard/internal/queue" "github.com/takenet/deckard/internal/queue/cache" "github.com/takenet/deckard/internal/queue/storage" - "github.com/takenet/deckard/internal/queue/utils" "github.com/takenet/deckard/internal/service" "github.com/takenet/deckard/internal/shutdown" "github.com/takenet/deckard/internal/trace" @@ -189,7 +189,7 @@ func startHouseKeeperJobs(pool *queue.Queue) { shutdown.WaitGroup, config.HousekeeperTaskTTLDelay.GetDuration(), func() bool { - now := time.Now() + now := dtime.Now() metrify, _ := queue.RemoveTTLMessages(ctx, pool, &now) @@ -222,15 +222,15 @@ func scheduleTask(taskName string, lock *sync.Mutex, taskWaitGroup *sync.WaitGro } func executeTask(taskName string, fn func() bool) { - now := 
time.Now() + now := dtime.Now() var metrify bool defer func() { if metrify { - metrics.HousekeeperTaskLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("task", taskName)) + metrics.HousekeeperTaskLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("task", taskName)) } }() metrify = fn() - logger.S(ctx).Debug("Finished ", taskName, " task. Took ", utils.ElapsedTime(now), ".") + logger.S(ctx).Debug("Finished ", taskName, " task. Took ", dtime.ElapsedTime(now), ".") } diff --git a/internal/cmd/deckard/main_test.go b/internal/cmd/deckard/main_test.go index a6da7f1..5c42102 100644 --- a/internal/cmd/deckard/main_test.go +++ b/internal/cmd/deckard/main_test.go @@ -16,14 +16,73 @@ import ( "google.golang.org/grpc/credentials/insecure" ) -func TestLoadDeckardDefaultSettingsShouldLoadSuccessfullyIntegration(t *testing.T) { +func TestLoadMemoryDeckardDefaultSettingsShouldLoadSuccessfullyIntegration(t *testing.T) { if testing.Short() { return } shutdown.Reset() - os.Setenv(config.GrpcPort.GetKey(), "8050") - defer os.Unsetenv(config.GrpcPort.GetKey()) + + config.Configure(true) + config.GrpcPort.Set(8050) + + go main() + + // Blocks here until deckard is started + for { + if server != nil { + conn, err := dial() + + if err == nil { + conn.Close() + break + } + } + time.Sleep(10 * time.Millisecond) + } + + defer shutdown.PerformShutdown(ctx, cancel, server) + + // Set up a connection to the server. 
+ conn, err := dial() + if err != nil { + log.Fatalf("did not connect: %v", err) + } + defer conn.Close() + + client := deckard.NewDeckardClient(conn) + + response, err := client.Add(ctx, &deckard.AddRequest{ + Messages: []*deckard.AddMessage{ + { + Id: "1", + Queue: "queue_main_test", + Timeless: true, + }, + }, + }) + + require.NoError(t, err) + require.Equal(t, int64(1), response.CreatedCount) + require.Equal(t, int64(0), response.UpdatedCount) + + getResponse, err := client.Pull(ctx, &deckard.PullRequest{Queue: "queue_main_test"}) + require.NoError(t, err) + require.Len(t, getResponse.Messages, 1) + require.Equal(t, "1", getResponse.Messages[0].Id) +} + +func TestLoadRedisAndMongoDBDeckardShouldLoadSuccessfullyIntegration(t *testing.T) { + if testing.Short() { + return + } + + shutdown.Reset() + + config.Configure(true) + config.CacheType.Set("REDIS") + config.StorageType.Set("MONGODB") + config.GrpcPort.Set(8050) go main() @@ -51,11 +110,17 @@ func TestLoadDeckardDefaultSettingsShouldLoadSuccessfullyIntegration(t *testing. client := deckard.NewDeckardClient(conn) + _, err = client.Remove(ctx, &deckard.RemoveRequest{ + Ids: []string{"1"}, + Queue: "queue_main_test", + }) + require.NoError(t, err) + response, err := client.Add(ctx, &deckard.AddRequest{ Messages: []*deckard.AddMessage{ { Id: "1", - Queue: "queue", + Queue: "queue_main_test", Timeless: true, }, }, @@ -65,7 +130,7 @@ func TestLoadDeckardDefaultSettingsShouldLoadSuccessfullyIntegration(t *testing. 
require.Equal(t, int64(1), response.CreatedCount) require.Equal(t, int64(0), response.UpdatedCount) - getResponse, err := client.Pull(ctx, &deckard.PullRequest{Queue: "queue"}) + getResponse, err := client.Pull(ctx, &deckard.PullRequest{Queue: "queue_main_test"}) require.NoError(t, err) require.Len(t, getResponse.Messages, 1) require.Equal(t, "1", getResponse.Messages[0].Id) diff --git a/internal/dtime/deckard_time.go b/internal/dtime/deckard_time.go new file mode 100644 index 0000000..5f77a61 --- /dev/null +++ b/internal/dtime/deckard_time.go @@ -0,0 +1,81 @@ +package dtime + +import ( + "time" +) + +const ( + millisPerSecond = int64(time.Second / time.Millisecond) + nanosPerMillisecond = int64(time.Millisecond / time.Nanosecond) +) + +var ( + nowProvider = func() time.Time { + return time.Now() + } + internalTimeMocking []time.Time +) + +func resetNowProvider() { + nowProvider = func() time.Time { + return time.Now() + } + internalTimeMocking = nil +} + +// Visible for testing +// Returns the reset function to restore the default provider +func SetNowProvider(provider func() time.Time) func() { + nowProvider = provider + + return resetNowProvider +} + +// Visible for testing +// It will mock the now provider to return each element of the slice +// The last element will be returned if there are no more elements +// Returns the reset function to restore the default provider +// Will panic if the slice is empty +func SetNowProviderValues(value ...time.Time) func() { + internalTimeMocking = value + + nowProvider = func() time.Time { + if len(internalTimeMocking) == 1 { + return internalTimeMocking[0] + } + + t := internalTimeMocking[0] + internalTimeMocking = internalTimeMocking[1:] + + return t + } + + return resetNowProvider +} + +func Now() time.Time { + return nowProvider() +} + +func NowMs() int64 { + t := Now() + + return TimeToMs(&t) +} + +func MsPrecision(t *time.Time) time.Time { + return MsToTime(TimeToMs(t)) +} + +func TimeToMs(t *time.Time) int64 { + 
return t.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) +} + +func MsToTime(msInt int64) time.Time { + return time.Unix(msInt/millisPerSecond, (msInt%millisPerSecond)*nanosPerMillisecond) +} + +// Time in milliseconds elapsed since a time +func ElapsedTime(since time.Time) int64 { + return int64(time.Since(since) / time.Millisecond) +} diff --git a/internal/dtime/deckard_time_test.go b/internal/dtime/deckard_time_test.go new file mode 100644 index 0000000..b64ed1a --- /dev/null +++ b/internal/dtime/deckard_time_test.go @@ -0,0 +1,168 @@ +package dtime + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestSetNowProvider(t *testing.T) { + t.Run("Call default provider", func(t *testing.T) { + now := time.Now() + + require.Equal(t, true, now.Before(nowProvider())) + }) + + t.Run("Set provider to a fixed time", func(t *testing.T) { + mockTime := time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC) + reset := SetNowProvider(func() time.Time { + return mockTime + }) + defer reset() + + // Verify that the provider returns the mock time + got := nowProvider() + if got != mockTime { + t.Errorf("Unexpected time: got %v, want %v", got, mockTime) + } + + // Verify that the provider returns the current time after being reset + reset() + now := time.Now() + <-time.After(time.Millisecond * 1) + require.True(t, Now().After(now)) + }) + + t.Run("Set provider to a time that changes over time", func(t *testing.T) { + defer SetNowProvider(func() time.Time { + return time.Now().Add(time.Second) + })() + + before := time.Now() + + // Verify that the provider returns a time that is one second ahead of the current time + got := nowProvider() + want := time.Now().Add(time.Second * 2) + if before.Before(got) && want.After(want) { + t.Errorf("Unexpected time: got %v, want a time after %v", got, want) + } + }) +} + +func TestSetNowProviderValues(t *testing.T) { + t.Run("Single value", func(t *testing.T) { + mockTime := time.Date(2022, 
time.January, 1, 0, 0, 0, 0, time.UTC) + reset := SetNowProviderValues(mockTime) + defer reset() + + // Verify that the provider returns the mock time + got := nowProvider() + if got != mockTime { + t.Errorf("Unexpected time: got %v, want %v", got, mockTime) + } + + // Reset the provider and verify that it returns the current time + reset() + got = nowProvider() + if got.After(time.Now()) { + t.Errorf("Unexpected time: got %v, want a time before %v", got, time.Now()) + } + }) + + t.Run("Multiple values", func(t *testing.T) { + mockTimes := []time.Time{ + time.Date(2022, time.January, 1, 0, 0, 0, 0, time.UTC), + time.Date(2022, time.January, 2, 0, 0, 0, 0, time.UTC), + time.Date(2022, time.January, 3, 0, 0, 0, 0, time.UTC), + } + reset := SetNowProviderValues(mockTimes...) + defer reset() + + // Verify that the provider returns the mock times in order + for _, want := range mockTimes { + got := nowProvider() + if got != want { + t.Errorf("Unexpected time: got %v, want %v", got, want) + } + } + + // Verify that the provider returns the last mock time when there are no more values + got := nowProvider() + if got != mockTimes[len(mockTimes)-1] { + t.Errorf("Unexpected time: got %v, want %v", got, mockTimes[len(mockTimes)-1]) + } + + // Reset the provider and verify that it returns the current time + reset() + got = nowProvider() + if got.After(time.Now()) { + t.Errorf("Unexpected time: got %v, want a time before %v", got, time.Now()) + } + }) + + // Test case 3: Empty slice (should panic) + t.Run("Empty slice", func(t *testing.T) { + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic, but no panic occurred") + } + }() + + defer SetNowProviderValues()() + + nowProvider() + }) +} +func TestElapsedTime(t *testing.T) { + // Create a time object that is 1 second in the past + since := time.Now().Add(-time.Second) + + // Call the ElapsedTime function + got := ElapsedTime(since) + + // Check if the result is within 1 millisecond of 1000 + if got < 999 
|| got > 1001 { + t.Errorf("ElapsedTime() = %d, expected 1000 +/- 1", got) + } +} + +func TestNowMs(t *testing.T) { + // Get the current time in milliseconds + // This is the exact implementation of NowMS, this test only guarantees that the implementation result doesn't change + now := time.Now().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) + + // Call the NowMs function + got := NowMs() + + // Check if the result is within 1 millisecond of the current time + if got < now-1 || got > now+1 { + t.Errorf("NowMs() = %d, expected %d +/- 1", got, now) + } +} + +func TestMsPrecision(t *testing.T) { + t.Parallel() + + fixedTime := time.Unix(1610578652, 894654759) + + require.Equal(t, int64(1610578652894654759), fixedTime.UnixNano()) + + msPrecision := MsPrecision(&fixedTime) + + require.Equal(t, int64(1610578652894000000), msPrecision.UnixNano()) +} + +func TestTimeToMs(t *testing.T) { + t.Parallel() + + fixedTime := time.Unix(1610578652, 894654759) + + require.Equal(t, int64(1610578652894), TimeToMs(&fixedTime)) +} + +func TestMsToTime(t *testing.T) { + t.Parallel() + + require.Equal(t, int64(1610578652894000000), MsToTime(int64(1610578652894)).UnixNano()) +} diff --git a/internal/project/project_test.go b/internal/project/project_test.go new file mode 100644 index 0000000..b1da84b --- /dev/null +++ b/internal/project/project_test.go @@ -0,0 +1,30 @@ +package project_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "github.com/takenet/deckard/internal/project" +) + +func TestProject(t *testing.T) { + t.Run("Verify project name", func(t *testing.T) { + want := "deckard" + + got := project.Name + + require.Equal(t, want, got, "Unexpected project name") + }) + + t.Run("Verify project display name", func(t *testing.T) { + want := "Deckard" + + got := project.DisplayName + + require.Equal(t, want, got, "Unexpected project display name") + }) + + t.Run("Verify project version is a correct version", func(t *testing.T) { + 
require.Regexp(t, `^\d+\.\d+\.\d+(-SNAPSHOT)?$`, project.Version, "Unexpected project version") + }) +} diff --git a/internal/queue/cache/cache.go b/internal/queue/cache/cache.go index ebf7c5e..bfd2fe1 100644 --- a/internal/queue/cache/cache.go +++ b/internal/queue/cache/cache.go @@ -7,7 +7,8 @@ import ( "errors" "time" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/pool" ) type Type string @@ -27,23 +28,23 @@ const ( ) type Cache interface { - MakeAvailable(ctx context.Context, message *entities.Message) (bool, error) + MakeAvailable(ctx context.Context, message *message.Message) (bool, error) IsProcessing(ctx context.Context, queue string, id string) (bool, error) - PullMessages(ctx context.Context, queue string, n int64, scoreFilter int64) (ids []string, err error) + PullMessages(ctx context.Context, queue string, n int64, minScore *float64, maxScore *float64) (ids []string, err error) TimeoutMessages(ctx context.Context, queue string, timeout time.Duration) (ids []string, err error) // Locks a message for message.LockMs milliseconds. - LockMessage(ctx context.Context, message *entities.Message, lockType LockType) (bool, error) + LockMessage(ctx context.Context, message *message.Message, lockType LockType) (bool, error) // Unlocks all messages from a queue UnlockMessages(ctx context.Context, queue string, lockType LockType) (messages []string, err error) // Lists all queues from a pool using a pattern search. Only glob-style pattern is supported. - ListQueues(ctx context.Context, pattern string, poolType entities.PoolType) (queues []string, err error) + ListQueues(ctx context.Context, pattern string, poolType pool.PoolType) (queues []string, err error) // Inserts 1..n elements to cache and return the number of new elements. // Elements already in cache should have its score updated. 
- Insert(ctx context.Context, queue string, messages ...*entities.Message) (insertions []string, err error) + Insert(ctx context.Context, queue string, messages ...*message.Message) (insertions []string, err error) Remove(ctx context.Context, queue string, ids ...string) (removed int64, err error) Flush(ctx context.Context) diff --git a/internal/queue/cache/cache_suite_test.go b/internal/queue/cache/cache_suite_test.go index fd0732f..ac9d054 100644 --- a/internal/queue/cache/cache_suite_test.go +++ b/internal/queue/cache/cache_suite_test.go @@ -4,12 +4,15 @@ import ( "context" "fmt" "sort" + "testing" "time" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" - "github.com/takenet/deckard/internal/queue/entities" - "github.com/takenet/deckard/internal/queue/utils" + "github.com/takenet/deckard/internal/dtime" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/pool" + "github.com/takenet/deckard/internal/queue/score" ) var ctx, cancel = context.WithCancel(context.Background()) @@ -32,14 +35,14 @@ func (suite *CacheIntegrationTestSuite) BeforeTest(_, _ string) { } func (suite *CacheIntegrationTestSuite) TestTimeoutMessagesShouldMakeAvailableWithMaxScore() { - _, insertError := suite.cache.Insert(ctx, "queue", &entities.Message{ + _, insertError := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", }) require.NoError(suite.T(), insertError) - _, pullError := suite.cache.PullMessages(ctx, "queue", 1, 0) + _, pullError := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullError) select { @@ -54,7 +57,7 @@ func (suite *CacheIntegrationTestSuite) TestTimeoutMessagesShouldMakeAvailableWi // Insert lower score now := time.Now() - _, insertError := suite.cache.Insert(ctx, "queue", &entities.Message{ + _, insertError := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id2", Description: "desc", 
Queue: "queue", @@ -62,8 +65,10 @@ func (suite *CacheIntegrationTestSuite) TestTimeoutMessagesShouldMakeAvailableWi }) require.NoError(suite.T(), insertError) + currentScore := float64(time.Now().Unix()) + // Guarantee timeout message has the maximum score - messages, pullAfterTimeout := suite.cache.PullMessages(ctx, "queue", 1, time.Now().Unix()) + messages, pullAfterTimeout := suite.cache.PullMessages(ctx, "queue", 1, nil, ¤tScore) require.NoError(suite.T(), pullAfterTimeout) require.Len(suite.T(), messages, 1) require.Equal(suite.T(), "id1", messages[0]) @@ -73,7 +78,7 @@ func (suite *CacheIntegrationTestSuite) TestTimeoutMessagesShouldMakeAvailableWi func (suite *CacheIntegrationTestSuite) TestFlush() { _ = suite.cache.Set(ctx, RECOVERY_STORAGE_BREAKPOINT_KEY, "asdf") - _, insertError := suite.cache.Insert(ctx, "queue", &entities.Message{ + _, insertError := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -86,7 +91,7 @@ func (suite *CacheIntegrationTestSuite) TestFlush() { require.NoError(suite.T(), err) require.Equal(suite.T(), "", breakpoint) - messages, pullError := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, pullError := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullError) require.Len(suite.T(), messages, 0) } @@ -94,26 +99,26 @@ func (suite *CacheIntegrationTestSuite) TestFlush() { func (suite *CacheIntegrationTestSuite) TestTimeoutMessagesShouldNotMakeAvailable() { threeMinutesTime := time.Now().Add(-3 * time.Minute) - _, insertError := suite.cache.Insert(ctx, "queue", &entities.Message{ + _, insertError := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", - Score: float64(utils.TimeToMs(&threeMinutesTime)), + Score: float64(dtime.TimeToMs(&threeMinutesTime)), }) require.NoError(suite.T(), insertError) - _, insertError2 := suite.cache.Insert(ctx, "queue", &entities.Message{ + _, insertError2 := 
suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id2", Description: "desc", Queue: "queue", }) require.NoError(suite.T(), insertError2) - messages, pullError := suite.cache.PullMessages(ctx, "queue", 2, 0) + messages, pullError := suite.cache.PullMessages(ctx, "queue", 2, nil, nil) require.NoError(suite.T(), pullError) require.Len(suite.T(), messages, 2) - noMessages, pullError := suite.cache.PullMessages(ctx, "queue", 1, 0) + noMessages, pullError := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullError) require.Len(suite.T(), noMessages, 0) @@ -121,7 +126,7 @@ func (suite *CacheIntegrationTestSuite) TestTimeoutMessagesShouldNotMakeAvailabl require.NoError(suite.T(), err) require.Equal(suite.T(), 0, len(result)) - messages, pullAfterTimeout := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, pullAfterTimeout := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullAfterTimeout) require.Len(suite.T(), messages, 0) } @@ -143,7 +148,7 @@ func (suite *CacheIntegrationTestSuite) TestSetStorageBreakpointShouldReturnValu } func (suite *CacheIntegrationTestSuite) TestInsertOneOkShouldNotBeAvailableAgain() { - inserts, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -151,13 +156,13 @@ func (suite *CacheIntegrationTestSuite) TestInsertOneOkShouldNotBeAvailableAgain require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id1"}, inserts) - messages, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 1) require.Equal(suite.T(), []string{"id1"}, messages) - messagesAgain, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + messagesAgain, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), 
err) require.Len(suite.T(), messagesAgain, 0) @@ -165,17 +170,17 @@ func (suite *CacheIntegrationTestSuite) TestInsertOneOkShouldNotBeAvailableAgain func (suite *CacheIntegrationTestSuite) TestPullShouldResultMaxScore() { // Older last usage = bigger score - inserts, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", Score: float64(50), - }, &entities.Message{ + }, &message.Message{ ID: "id2", Description: "desc", Queue: "queue", Score: float64(5), - }, &entities.Message{ + }, &message.Message{ ID: "id3", Description: "desc", Queue: "queue", @@ -185,7 +190,7 @@ func (suite *CacheIntegrationTestSuite) TestPullShouldResultMaxScore() { require.Equal(suite.T(), []string{"id1", "id2", "id3"}, inserts) // Result should be id 2, id 3 and then id 1 - messages, err := suite.cache.PullMessages(ctx, "queue", 100, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 100, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 3) @@ -193,17 +198,17 @@ func (suite *CacheIntegrationTestSuite) TestPullShouldResultMaxScore() { } func (suite *CacheIntegrationTestSuite) TestInsertOrderedPullShouldResultMaxScore() { - inserts, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", Score: float64(50), - }, &entities.Message{ + }, &message.Message{ ID: "id2", Description: "desc", Queue: "queue", Score: float64(10), - }, &entities.Message{ + }, &message.Message{ ID: "id3", Description: "desc", Queue: "queue", @@ -212,7 +217,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertOrderedPullShouldResultMaxScor require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id1", "id2", "id3"}, inserts) - messages, err := suite.cache.PullMessages(ctx, "queue", 100, 0) + messages, err := suite.cache.PullMessages(ctx, 
"queue", 100, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 3) @@ -220,7 +225,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertOrderedPullShouldResultMaxScor } func (suite *CacheIntegrationTestSuite) TestInsertSameObjectTwiceShouldNotUpdateScore() { - first, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + first, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -229,7 +234,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectTwiceShouldNotUpdate require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id1"}, first) - second, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + second, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -241,7 +246,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectTwiceShouldNotUpdate require.Equal(suite.T(), 0, len(second)) // insert element with a lower score - third, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + third, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id2", Description: "desc", Queue: "queue", @@ -251,7 +256,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectTwiceShouldNotUpdate require.Equal(suite.T(), []string{"id2"}, third) // Now the element id2 has the best score - messages, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 1) @@ -259,12 +264,12 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectTwiceShouldNotUpdate } func (suite *CacheIntegrationTestSuite) TestInsertSameObjectInSameRequestShouldPreserveLastScore() { - first, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + first, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", Score: 
float64(50), - }, &entities.Message{ + }, &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -274,7 +279,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectInSameRequestShouldP require.Equal(suite.T(), []string{"id1", "id1"}, first) // insert element with a higher score - third, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + third, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id2", Description: "desc", Queue: "queue", @@ -284,7 +289,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectInSameRequestShouldP require.Equal(suite.T(), []string{"id2"}, third) // Now the element id2 has the best score - messages, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 1) @@ -293,25 +298,25 @@ func (suite *CacheIntegrationTestSuite) TestInsertSameObjectInSameRequestShouldP func (suite *CacheIntegrationTestSuite) TestCacheShouldSupportLowScoreDifferences() { now := time.Now() - firstInsert, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + firstInsert, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", - Score: float64(utils.TimeToMs(&now)), + Score: float64(dtime.TimeToMs(&now)), }) require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id1"}, firstInsert) - secondInsert, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + secondInsert, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id2", Description: "desc", Queue: "queue", - Score: float64(utils.TimeToMs(&now) - 1), + Score: float64(dtime.TimeToMs(&now) - 1), }) require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id2"}, secondInsert) - messages, err := suite.cache.PullMessages(ctx, "queue", 2, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 2, nil, nil) require.NoError(suite.T(), err) 
require.Len(suite.T(), messages, 2) @@ -319,15 +324,15 @@ func (suite *CacheIntegrationTestSuite) TestCacheShouldSupportLowScoreDifference } func (suite *CacheIntegrationTestSuite) TestPullMoreThanAvailable() { - inserts, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", - }, &entities.Message{ + }, &message.Message{ ID: "id2", Description: "desc", Queue: "queue", - }, &entities.Message{ + }, &message.Message{ ID: "id3", Description: "desc", Queue: "queue", @@ -335,7 +340,7 @@ func (suite *CacheIntegrationTestSuite) TestPullMoreThanAvailable() { require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id1", "id2", "id3"}, inserts) - messages, err := suite.cache.PullMessages(ctx, "queue", 100, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 100, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 3) @@ -347,7 +352,7 @@ func (suite *CacheIntegrationTestSuite) TestPullMoreThanAvailable() { } func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteFromProcessingQueue() { - inserts, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -355,7 +360,7 @@ func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteFromProcessingQueu require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id1"}, inserts) - _, pullError := suite.cache.PullMessages(ctx, "queue", 1, 0) + _, pullError := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullError) result, err := suite.cache.IsProcessing(ctx, "queue", "id1") @@ -371,11 +376,11 @@ func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteFromProcessingQueu } func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteOnlyCorrectId() { - inserts, err := suite.cache.Insert(ctx, "queue", 
&entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", - }, &entities.Message{ + }, &message.Message{ ID: "id2", Description: "desc", Queue: "queue", @@ -387,18 +392,18 @@ func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteOnlyCorrectId() { _, opErr := suite.cache.Remove(ctx, "queue", "id1") require.NoError(suite.T(), opErr) - messages, pullError := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, pullError := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullError) require.Len(suite.T(), messages, 1) require.Equal(suite.T(), "id2", messages[0]) - noResult, pullError := suite.cache.PullMessages(ctx, "queue", 1, 0) + noResult, pullError := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), pullError) require.Len(suite.T(), noResult, 0) } func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteFromActiveQueue() { - _, opErr := suite.cache.Insert(ctx, "queue", &entities.Message{ + _, opErr := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -408,20 +413,20 @@ func (suite *CacheIntegrationTestSuite) TestRemoveShouldDeleteFromActiveQueue() _, removeErr := suite.cache.Remove(ctx, "queue", "id1") require.NoError(suite.T(), removeErr) - pullResult, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + pullResult, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), pullResult, 0) } func (suite *CacheIntegrationTestSuite) TestBulkElementsShouldNotError() { ids := make([]string, 100) - data := make([]*entities.Message, 100) + data := make([]*message.Message, 100) for i := 0; i < 100; i++ { id := fmt.Sprintf("giganicstringtoconsumelotofmemoryofredisscript%d", i) ids[i] = id - data[i] = &entities.Message{ + data[i] = &message.Message{ ID: id, Description: "desc", Queue: "queue", @@ -433,7 +438,7 @@ 
func (suite *CacheIntegrationTestSuite) TestBulkElementsShouldNotError() { require.NoError(suite.T(), opErr) require.Equal(suite.T(), ids, insertedElements) - pullResult, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + pullResult, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), pullResult, 1) @@ -441,13 +446,13 @@ func (suite *CacheIntegrationTestSuite) TestBulkElementsShouldNotError() { require.NoError(suite.T(), removeErr) require.Equal(suite.T(), int64(100), count) - pullResult, err = suite.cache.PullMessages(ctx, "queue", 1, 0) + pullResult, err = suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), pullResult, 0) } func (suite *CacheIntegrationTestSuite) TestInsertOneWithInvalidQueue() { - inserts, err := suite.cache.Insert(ctx, "queue", &entities.Message{ + inserts, err := suite.cache.Insert(ctx, "queue", &message.Message{ ID: "id1", Description: "desc", Queue: "other_queue", @@ -458,7 +463,7 @@ func (suite *CacheIntegrationTestSuite) TestInsertOneWithInvalidQueue() { } func (suite *CacheIntegrationTestSuite) TestMakeAvailableAfterPull() { - message := &entities.Message{ + message := &message.Message{ ID: "id1", Description: "desc", Queue: "queue", @@ -467,7 +472,7 @@ func (suite *CacheIntegrationTestSuite) TestMakeAvailableAfterPull() { _, opErr := suite.cache.Insert(ctx, "queue", message) require.NoError(suite.T(), opErr) - messages, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + messages, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), messages, 1) @@ -477,7 +482,7 @@ func (suite *CacheIntegrationTestSuite) TestMakeAvailableAfterPull() { require.NoError(suite.T(), availableErr) require.True(suite.T(), result) - messagesAgain, err := suite.cache.PullMessages(ctx, "queue", 1, 0) + messagesAgain, err := suite.cache.PullMessages(ctx, "queue", 1, nil, nil) 
require.NoError(suite.T(), err) require.Len(suite.T(), messagesAgain, 1) @@ -485,7 +490,7 @@ func (suite *CacheIntegrationTestSuite) TestMakeAvailableAfterPull() { } func (suite *CacheIntegrationTestSuite) TestMakeAvailableWithoutQueue() { - message := &entities.Message{ + message := &message.Message{ ID: "id1", Description: "desc", } @@ -497,15 +502,17 @@ func (suite *CacheIntegrationTestSuite) TestMakeAvailableWithoutQueue() { } func (suite *CacheIntegrationTestSuite) TestLockMessageAck() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.PullMessages(ctx, "q1", 1, 0) + val, pullErr := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.NoError(suite.T(), pullErr) + require.Len(suite.T(), val, 1) - result, makeErr := suite.cache.LockMessage(ctx, &entities.Message{ + result, makeErr := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -519,26 +526,26 @@ func (suite *CacheIntegrationTestSuite) TestLockMessageAck() { require.NoError(suite.T(), err) require.False(suite.T(), result) - queues, err := suite.cache.ListQueues(ctx, "*", entities.LOCK_ACK_POOL) + queues, err := suite.cache.ListQueues(ctx, "*", pool.LOCK_ACK_POOL) require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"q1"}, queues) // Check if all other pools are empty - queues, err = suite.cache.ListQueues(ctx, "*", entities.PRIMARY_POOL) + queues, err = suite.cache.ListQueues(ctx, "*", pool.PRIMARY_POOL) require.NoError(suite.T(), err) require.Empty(suite.T(), queues) - queues, err = suite.cache.ListQueues(ctx, "*", entities.PROCESSING_POOL) + queues, err = suite.cache.ListQueues(ctx, "*", pool.PROCESSING_POOL) require.NoError(suite.T(), err) require.Empty(suite.T(), queues) - queues, err = suite.cache.ListQueues(ctx, "*", entities.LOCK_NACK_POOL) + queues, err = suite.cache.ListQueues(ctx, "*", 
pool.LOCK_NACK_POOL) require.NoError(suite.T(), err) require.Empty(suite.T(), queues) } func (suite *CacheIntegrationTestSuite) TestLockWithInvalidLockMs() { - result, makeErr := suite.cache.LockMessage(ctx, &entities.Message{ + result, makeErr := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -551,7 +558,7 @@ func (suite *CacheIntegrationTestSuite) TestLockWithInvalidLockMs() { func (suite *CacheIntegrationTestSuite) TestLockWithoutLockMsShouldError() { // Add one of them to lock ACK - lockAckResult, err := suite.cache.LockMessage(ctx, &entities.Message{ + lockAckResult, err := suite.cache.LockMessage(ctx, &message.Message{ ID: "1", Queue: "q1", }, LOCK_ACK) @@ -560,15 +567,15 @@ func (suite *CacheIntegrationTestSuite) TestLockWithoutLockMsShouldError() { } func (suite *CacheIntegrationTestSuite) TestLockMessageNack() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.PullMessages(ctx, "q1", 1, 0) + _, _ = suite.cache.PullMessages(ctx, "q1", 1, nil, nil) - result, makeErr := suite.cache.LockMessage(ctx, &entities.Message{ + result, makeErr := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -582,32 +589,32 @@ func (suite *CacheIntegrationTestSuite) TestLockMessageNack() { require.NoError(suite.T(), err) require.False(suite.T(), result) - queues, err := suite.cache.ListQueues(ctx, "*", entities.LOCK_NACK_POOL) + queues, err := suite.cache.ListQueues(ctx, "*", pool.LOCK_NACK_POOL) require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"q1"}, queues) // Check if all other pools are empty - queues, err = suite.cache.ListQueues(ctx, "*", entities.PRIMARY_POOL) + queues, err = suite.cache.ListQueues(ctx, "*", pool.PRIMARY_POOL) require.NoError(suite.T(), err) require.Empty(suite.T(), queues) - queues, err = suite.cache.ListQueues(ctx, 
"*", entities.PROCESSING_POOL) + queues, err = suite.cache.ListQueues(ctx, "*", pool.PROCESSING_POOL) require.NoError(suite.T(), err) require.Empty(suite.T(), queues) - queues, err = suite.cache.ListQueues(ctx, "*", entities.LOCK_ACK_POOL) + queues, err = suite.cache.ListQueues(ctx, "*", pool.LOCK_ACK_POOL) require.NoError(suite.T(), err) require.Empty(suite.T(), queues) } func (suite *CacheIntegrationTestSuite) TestLockMessageWithoutQueueShouldReturnError() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - result, makeErr := suite.cache.LockMessage(ctx, &entities.Message{ + result, makeErr := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", LockMs: 10, @@ -618,13 +625,13 @@ func (suite *CacheIntegrationTestSuite) TestLockMessageWithoutQueueShouldReturnE } func (suite *CacheIntegrationTestSuite) TestLockAckWithoutProcessingReturnFalse() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - result, makeErr := suite.cache.LockMessage(ctx, &entities.Message{ + result, makeErr := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -635,13 +642,13 @@ func (suite *CacheIntegrationTestSuite) TestLockAckWithoutProcessingReturnFalse( require.False(suite.T(), result) } func (suite *CacheIntegrationTestSuite) TestLockNackWithoutProcessingReturnFalse() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - result, makeErr := suite.cache.LockMessage(ctx, &entities.Message{ + result, makeErr := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -653,13 +660,13 @@ func (suite *CacheIntegrationTestSuite) 
TestLockNackWithoutProcessingReturnFalse } func (suite *CacheIntegrationTestSuite) TestMakeAvailableMessageWithoutProcessingReturnFalse() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - result, makeErr := suite.cache.MakeAvailable(ctx, &entities.Message{ + result, makeErr := suite.cache.MakeAvailable(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -670,19 +677,19 @@ func (suite *CacheIntegrationTestSuite) TestMakeAvailableMessageWithoutProcessin } func (suite *CacheIntegrationTestSuite) TestListQueuesPrimaryPool() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q2", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q2", &message.Message{ ID: "id2", Description: "desc", Queue: "q2", }) - result, makeErr := suite.cache.ListQueues(ctx, "*", entities.PRIMARY_POOL) + result, makeErr := suite.cache.ListQueues(ctx, "*", pool.PRIMARY_POOL) sort.Strings(result) @@ -691,43 +698,45 @@ func (suite *CacheIntegrationTestSuite) TestListQueuesPrimaryPool() { } func (suite *CacheIntegrationTestSuite) TestRemoveShouldRemoveFromAllPools() { - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id2", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id3", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id4", Queue: "q1", }) // Remove 3 elements from active - ids, err := suite.cache.PullMessages(ctx, "q1", 3, 0) + ids, 
err := suite.cache.PullMessages(ctx, "q1", 3, nil, nil) require.NoError(suite.T(), err) require.Len(suite.T(), ids, 3) // Add one of them to lock ACK - lockAckResult, err := suite.cache.LockMessage(ctx, &entities.Message{ + lockAckResult, err := suite.cache.LockMessage(ctx, &message.Message{ ID: ids[0], Queue: "q1", LockMs: 10000, + Score: score.Undefined, }, LOCK_ACK) require.NoError(suite.T(), err) require.True(suite.T(), lockAckResult) - // Add other to lock ACK - lockNackResult, err := suite.cache.LockMessage(ctx, &entities.Message{ + // Add other to lock NACK + lockNackResult, err := suite.cache.LockMessage(ctx, &message.Message{ ID: ids[1], Queue: "q1", LockMs: 10000, - }, LOCK_ACK) + Score: score.Undefined, + }, LOCK_NACK) require.NoError(suite.T(), err) require.True(suite.T(), lockNackResult) @@ -736,50 +745,50 @@ func (suite *CacheIntegrationTestSuite) TestRemoveShouldRemoveFromAllPools() { require.Equal(suite.T(), int64(4), removed) // Asserts all pools are empty - result, makeErr := suite.cache.ListQueues(ctx, "*", entities.PRIMARY_POOL) + result, makeErr := suite.cache.ListQueues(ctx, "*", pool.PRIMARY_POOL) require.NoError(suite.T(), makeErr) require.Empty(suite.T(), result) - result, makeErr = suite.cache.ListQueues(ctx, "*", entities.PROCESSING_POOL) + result, makeErr = suite.cache.ListQueues(ctx, "*", pool.PROCESSING_POOL) require.NoError(suite.T(), makeErr) require.Empty(suite.T(), result) - result, makeErr = suite.cache.ListQueues(ctx, "*", entities.LOCK_ACK_POOL) + result, makeErr = suite.cache.ListQueues(ctx, "*", pool.LOCK_ACK_POOL) require.NoError(suite.T(), makeErr) require.Empty(suite.T(), result) - result, makeErr = suite.cache.ListQueues(ctx, "*", entities.LOCK_NACK_POOL) + result, makeErr = suite.cache.ListQueues(ctx, "*", pool.LOCK_NACK_POOL) require.NoError(suite.T(), makeErr) require.Empty(suite.T(), result) } func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromAckPool() { // Insert data - _, _ = suite.cache.Insert(ctx, "q1", 
&entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id2", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q2", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q2", &message.Message{ ID: "id3", Description: "desc", Queue: "q2", }) // Pull data - ids, _ := suite.cache.PullMessages(ctx, "q1", 2, 0) + ids, _ := suite.cache.PullMessages(ctx, "q1", 2, nil, nil) require.Len(suite.T(), ids, 2) - ids, _ = suite.cache.PullMessages(ctx, "q2", 1, 0) + ids, _ = suite.cache.PullMessages(ctx, "q2", 1, nil, nil) require.Len(suite.T(), ids, 1) // Lock data - result, err := suite.cache.LockMessage(ctx, &entities.Message{ + result, err := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -788,7 +797,7 @@ func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromAckPool() { require.NoError(suite.T(), err) require.True(suite.T(), result) - result, err = suite.cache.LockMessage(ctx, &entities.Message{ + result, err = suite.cache.LockMessage(ctx, &message.Message{ ID: "id2", Description: "desc", Queue: "q1", @@ -797,7 +806,7 @@ func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromAckPool() { require.NoError(suite.T(), err) require.True(suite.T(), result) - result, err = suite.cache.LockMessage(ctx, &entities.Message{ + result, err = suite.cache.LockMessage(ctx, &message.Message{ ID: "id3", Description: "desc", Queue: "q2", @@ -814,33 +823,253 @@ func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromAckPool() { require.Equal(suite.T(), []string{"id2"}, messages) } +func (suite *CacheIntegrationTestSuite) TestAckLockWithUndefinedScoreShouldUnlockDefaultScore() { + // Insert data + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + }) + + // Pull data + 
ids, _ := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.Len(suite.T(), ids, 1) + + // Lock data + result, err := suite.cache.LockMessage(ctx, &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + LockMs: 1, + Score: score.Undefined, + }, LOCK_ACK) + require.NoError(suite.T(), err) + require.True(suite.T(), result) + + <-time.After(1 * time.Millisecond) + + beforeUnlock := score.GetScoreByDefaultAlgorithm() + // Check for unlocked data + messages, err := suite.cache.UnlockMessages(ctx, "q1", LOCK_ACK) + require.NoError(suite.T(), err) + require.Equal(suite.T(), []string{"id1"}, messages) + afterUnlock := score.GetScoreByDefaultAlgorithm() + + // Check for score pulling with defined score + message, err := suite.cache.PullMessages(ctx, "q1", 1, &beforeUnlock, &afterUnlock) + require.NoError(suite.T(), err) + require.Len(suite.T(), message, 1) +} + +func (suite *CacheIntegrationTestSuite) TestAckLockWithScoreShouldUnlockPreservingScore() { + // Insert data + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + }) + + // Pull data + ids, _ := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.Len(suite.T(), ids, 1) + + // Lock data + result, err := suite.cache.LockMessage(ctx, &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + LockMs: 1, + Score: 1234, + }, LOCK_ACK) + require.NoError(suite.T(), err) + require.True(suite.T(), result) + + <-time.After(1 * time.Millisecond) + + // Check for unlocked data + messages, err := suite.cache.UnlockMessages(ctx, "q1", LOCK_ACK) + require.NoError(suite.T(), err) + require.Equal(suite.T(), []string{"id1"}, messages) + + // Check for score pulling with min and max score as the predefined score + score := float64(1234) + message, err := suite.cache.PullMessages(ctx, "q1", 1, &score, &score) + require.NoError(suite.T(), err) + require.Len(suite.T(), message, 1) +} + +func (suite *CacheIntegrationTestSuite) 
TestAckLockWithMinScoreShouldUnlockMinScore() { + // Insert data + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + }) + + // Pull data + ids, _ := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.Len(suite.T(), ids, 1) + + // Lock data + result, err := suite.cache.LockMessage(ctx, &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + LockMs: 1, + Score: score.Min, + }, LOCK_ACK) + require.NoError(suite.T(), err) + require.True(suite.T(), result) + + <-time.After(1 * time.Millisecond) + + // Check for unlocked data + messages, err := suite.cache.UnlockMessages(ctx, "q1", LOCK_ACK) + require.NoError(suite.T(), err) + require.Equal(suite.T(), []string{"id1"}, messages) + + // Check for score pulling with min and max score as the predefined score + message, err := suite.cache.PullMessages(ctx, "q1", 1, &score.Min, &score.Min) + require.NoError(suite.T(), err) + require.Len(suite.T(), message, 1) +} + +func (suite *CacheIntegrationTestSuite) TestNackLockWithMinScoreShouldUnlockMinScore() { + // Insert data + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + }) + + // Pull data + ids, _ := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.Len(suite.T(), ids, 1) + + // Lock data + result, err := suite.cache.LockMessage(ctx, &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + LockMs: 1, + Score: score.Min, + }, LOCK_NACK) + require.NoError(suite.T(), err) + require.True(suite.T(), result) + + <-time.After(1 * time.Millisecond) + + // Check for unlocked data + messages, err := suite.cache.UnlockMessages(ctx, "q1", LOCK_NACK) + require.NoError(suite.T(), err) + require.Equal(suite.T(), []string{"id1"}, messages) + + // Check for score pulling with min and max score as the predefined score + message, err := suite.cache.PullMessages(ctx, "q1", 1, &score.Min, &score.Min) + require.NoError(suite.T(), err) + 
require.Len(suite.T(), message, 1) +} + +func (suite *CacheIntegrationTestSuite) TestNackLockWithScoreShouldUnlockPreservingScore() { + // Insert data + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + }) + + // Pull data + ids, _ := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.Len(suite.T(), ids, 1) + + // Lock data + result, err := suite.cache.LockMessage(ctx, &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + LockMs: 1, + Score: 1234, + }, LOCK_NACK) + require.NoError(suite.T(), err) + require.True(suite.T(), result) + + <-time.After(1 * time.Millisecond) + + // Check for unlocked data + messages, err := suite.cache.UnlockMessages(ctx, "q1", LOCK_NACK) + require.NoError(suite.T(), err) + require.Equal(suite.T(), []string{"id1"}, messages) + + // Check for score pulling with min and max score as the predefined score + score := float64(1234) + message, err := suite.cache.PullMessages(ctx, "q1", 1, &score, &score) + require.NoError(suite.T(), err) + require.Len(suite.T(), message, 1) +} + +func (suite *CacheIntegrationTestSuite) TestNackLockWithUndefinedScoreShouldUnlockMinScore() { + // Insert data + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + }) + + // Pull data + ids, _ := suite.cache.PullMessages(ctx, "q1", 1, nil, nil) + require.Len(suite.T(), ids, 1) + + // Lock data + result, err := suite.cache.LockMessage(ctx, &message.Message{ + ID: "id1", + Description: "desc", + Queue: "q1", + LockMs: 1, + Score: score.Undefined, + }, LOCK_NACK) + require.NoError(suite.T(), err) + require.True(suite.T(), result) + + <-time.After(1 * time.Millisecond) + + // Check for unlocked data + messages, err := suite.cache.UnlockMessages(ctx, "q1", LOCK_NACK) + require.NoError(suite.T(), err) + require.Equal(suite.T(), []string{"id1"}, messages) + + // Check for score pulling with defined score + message, err := 
suite.cache.PullMessages(ctx, "q1", 1, &score.Min, &score.Min) + require.NoError(suite.T(), err) + require.Len(suite.T(), message, 1) +} + func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromNackPool() { // Insert data - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id2", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q2", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q2", &message.Message{ ID: "id3", Description: "desc", Queue: "q2", }) // Pull data - ids, _ := suite.cache.PullMessages(ctx, "q1", 2, 0) + ids, _ := suite.cache.PullMessages(ctx, "q1", 2, nil, nil) require.Len(suite.T(), ids, 2) - ids, _ = suite.cache.PullMessages(ctx, "q2", 1, 0) + ids, _ = suite.cache.PullMessages(ctx, "q2", 1, nil, nil) require.Len(suite.T(), ids, 1) // Lock data - result, err := suite.cache.LockMessage(ctx, &entities.Message{ + result, err := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -849,7 +1078,7 @@ func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromNackPool() { require.NoError(suite.T(), err) require.True(suite.T(), result) - result, err = suite.cache.LockMessage(ctx, &entities.Message{ + result, err = suite.cache.LockMessage(ctx, &message.Message{ ID: "id2", Description: "desc", Queue: "q1", @@ -858,7 +1087,7 @@ func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromNackPool() { require.NoError(suite.T(), err) require.True(suite.T(), result) - result, err = suite.cache.LockMessage(ctx, &entities.Message{ + result, err = suite.cache.LockMessage(ctx, &message.Message{ ID: "id3", Description: "desc", Queue: "q2", @@ -876,31 +1105,31 @@ func (suite *CacheIntegrationTestSuite) TestUnlockMessagesFromNackPool() { func (suite 
*CacheIntegrationTestSuite) TestUnlockTiming() { // Insert data - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id1", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q1", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q1", &message.Message{ ID: "id2", Description: "desc", Queue: "q1", }) - _, _ = suite.cache.Insert(ctx, "q2", &entities.Message{ + _, _ = suite.cache.Insert(ctx, "q2", &message.Message{ ID: "id3", Description: "desc", Queue: "q2", }) // Pull data - ids, _ := suite.cache.PullMessages(ctx, "q1", 2, 0) + ids, _ := suite.cache.PullMessages(ctx, "q1", 2, nil, nil) require.Len(suite.T(), ids, 2) - ids, _ = suite.cache.PullMessages(ctx, "q2", 1, 0) + ids, _ = suite.cache.PullMessages(ctx, "q2", 1, nil, nil) require.Len(suite.T(), ids, 1) // Lock data - result, err := suite.cache.LockMessage(ctx, &entities.Message{ + result, err := suite.cache.LockMessage(ctx, &message.Message{ ID: "id1", Description: "desc", Queue: "q1", @@ -909,7 +1138,7 @@ func (suite *CacheIntegrationTestSuite) TestUnlockTiming() { require.NoError(suite.T(), err) require.True(suite.T(), result) - result, err = suite.cache.LockMessage(ctx, &entities.Message{ + result, err = suite.cache.LockMessage(ctx, &message.Message{ ID: "id2", Description: "desc", Queue: "q1", @@ -918,7 +1147,7 @@ func (suite *CacheIntegrationTestSuite) TestUnlockTiming() { require.NoError(suite.T(), err) require.True(suite.T(), result) - result, err = suite.cache.LockMessage(ctx, &entities.Message{ + result, err = suite.cache.LockMessage(ctx, &message.Message{ ID: "id3", Description: "desc", Queue: "q2", @@ -947,3 +1176,64 @@ func (suite *CacheIntegrationTestSuite) TestUnlockTiming() { require.NoError(suite.T(), err) require.Equal(suite.T(), []string{"id3"}, messages) } + +func (suite *CacheIntegrationTestSuite) TestPullMessagesScoreFiltering() { + tests := []struct { + name string + minScore *float64 + maxScore 
*float64 + wantIDs []string + }{ + { + name: "no minScore or maxScore", + minScore: nil, + maxScore: nil, + wantIDs: []string{"0", "1", "2", "3", "4", "5", "6", "7", "8", "9"}, + }, + { + name: "minScore only", + minScore: float64Ptr(5), + maxScore: nil, + wantIDs: []string{"5", "6", "7", "8", "9"}, + }, + { + name: "maxScore only", + minScore: nil, + maxScore: float64Ptr(5), + wantIDs: []string{"0", "1", "2", "3", "4", "5"}, + }, + { + name: "minScore and maxScore", + minScore: float64Ptr(3), + maxScore: float64Ptr(7), + wantIDs: []string{"3", "4", "5", "6", "7"}, + }, + } + + for _, tt := range tests { + suite.T().Run(tt.name, func(t *testing.T) { + for i := 0; i < 10; i++ { + _, _ = suite.cache.Insert(context.Background(), "test_queue", &message.Message{ + ID: fmt.Sprintf("%d", i), + Queue: "test_queue", + Score: float64(i), + }) + } + + defer suite.cache.Flush(ctx) + + gotIDs, err := suite.cache.PullMessages(context.Background(), "test_queue", 10, tt.minScore, tt.maxScore) + require.NoError(t, err) + + require.Equal(t, len(tt.wantIDs), len(gotIDs)) + + for _, wantID := range tt.wantIDs { + require.Contains(t, gotIDs, wantID) + } + }) + } +} + +func float64Ptr(f float64) *float64 { + return &f +} diff --git a/internal/queue/cache/memory_cache.go b/internal/queue/cache/memory_cache.go index f5ae4fa..5f05ef7 100644 --- a/internal/queue/cache/memory_cache.go +++ b/internal/queue/cache/memory_cache.go @@ -7,11 +7,15 @@ import ( "sync" "time" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/dtime" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/pool" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/utils" ) // MemoryStorage is an implementation of the Storage Interface using memory. +// It was not made to be performant, but to be used in integration tests and in development. 
// Currently only insert and pull functions are implemented. type MemoryCache struct { queues map[string]*list.List @@ -25,6 +29,8 @@ type MemoryCache struct { type MemoryMessageEntry struct { score float64 id string + + lockUntil float64 } var _ Cache = &MemoryCache{} @@ -111,7 +117,7 @@ func (cache *MemoryCache) removeFromSlice(_ context.Context, data *list.List, id return count, data, nil } -func (cache *MemoryCache) MakeAvailable(_ context.Context, message *entities.Message) (bool, error) { +func (cache *MemoryCache) MakeAvailable(_ context.Context, message *message.Message) (bool, error) { if message.Queue == "" { return false, errors.New("invalid message queue") } @@ -131,7 +137,7 @@ func (cache *MemoryCache) MakeAvailable(_ context.Context, message *entities.Mes return result, nil } -func (cache *MemoryCache) ListQueues(ctx context.Context, pattern string, poolType entities.PoolType) (queues []string, err error) { +func (cache *MemoryCache) ListQueues(ctx context.Context, pattern string, poolType pool.PoolType) (queues []string, err error) { result := make([]string, 0) cache.lock.Lock() @@ -139,13 +145,13 @@ func (cache *MemoryCache) ListQueues(ctx context.Context, pattern string, poolTy var queueMap *map[string]*list.List switch poolType { - case entities.PRIMARY_POOL: + case pool.PRIMARY_POOL: queueMap = &cache.queues - case entities.PROCESSING_POOL: + case pool.PROCESSING_POOL: queueMap = &cache.processingQueues - case entities.LOCK_ACK_POOL: + case pool.LOCK_ACK_POOL: queueMap = &cache.lockAckQueues - case entities.LOCK_NACK_POOL: + case pool.LOCK_NACK_POOL: queueMap = &cache.lockNackQueues } @@ -162,7 +168,7 @@ func (cache *MemoryCache) ListQueues(ctx context.Context, pattern string, poolTy return result, nil } -func (cache *MemoryCache) LockMessage(_ context.Context, message *entities.Message, lockType LockType) (bool, error) { +func (cache *MemoryCache) LockMessage(_ context.Context, message *message.Message, lockType LockType) (bool, error) { 
cache.lock.Lock() defer cache.lock.Unlock() @@ -184,7 +190,7 @@ func (cache *MemoryCache) LockMessage(_ context.Context, message *entities.Messa cache.processingQueues[message.Queue], result = removeEntry(entry, cache.processingQueues[message.Queue]) if result { - entry.score = float64(utils.NowMs() + message.LockMs) + entry.lockUntil = float64(dtime.NowMs() + message.LockMs) switch lockType { case LOCK_ACK: @@ -201,18 +207,18 @@ func (cache *MemoryCache) UnlockMessages(ctx context.Context, queue string, lock cache.lock.Lock() defer cache.lock.Unlock() - var lockScore float64 + var defaultScore float64 var data map[string]*list.List if lockType == LOCK_ACK { data = cache.lockAckQueues - lockScore = float64(utils.NowMs()) + defaultScore = score.GetScoreByDefaultAlgorithm() } else { data = cache.lockNackQueues - lockScore = 0 + defaultScore = score.Min } - nowMs := utils.NowMs() + nowMs := dtime.NowMs() unlockedMessages := make([]string, 0) for queue := range data { @@ -225,10 +231,12 @@ func (cache *MemoryCache) UnlockMessages(ctx context.Context, queue string, lock for e := list.Front(); e != nil; e = e.Next() { value := e.Value.(*MemoryMessageEntry) - if nowMs >= int64(value.score) { + if nowMs >= int64(value.lockUntil) { list.Remove(e) - value.score = lockScore + if score.IsUndefined(value.score) { + value.score = defaultScore + } cache.queues[queue], _ = insertEntry(value, cache.queues[queue]) @@ -240,7 +248,7 @@ func (cache *MemoryCache) UnlockMessages(ctx context.Context, queue string, lock return unlockedMessages, nil } -func (cache *MemoryCache) PullMessages(ctx context.Context, queue string, n int64, scoreFilter int64) (ids []string, err error) { +func (cache *MemoryCache) PullMessages(ctx context.Context, queue string, n int64, minScore *float64, maxScore *float64) (ids []string, err error) { cache.lock.Lock() defer cache.lock.Unlock() @@ -252,14 +260,7 @@ func (cache *MemoryCache) PullMessages(ctx context.Context, queue string, n int6 result := 
make([]string, 0, utils.MinInt64(n, total)) - // TODO implement score filter - //now := time.Now().Unix() - // - //score := int64(0) - //if scoreFilter > 0 { - // score = now - scoreFilter - //} - + filteredOut := make([]*MemoryMessageEntry, 0) for i := int64(0); i < n && i < total && cache.queues[queue].Len() != 0; i++ { element := cache.queues[queue].Front() @@ -267,9 +268,15 @@ func (cache *MemoryCache) PullMessages(ctx context.Context, queue string, n int6 cache.queues[queue], _ = removeEntry(entry, cache.queues[queue]) + if minScore != nil && entry.score < *minScore || maxScore != nil && entry.score > *maxScore { + filteredOut = append(filteredOut, entry) + + continue + } + processingEntry := &MemoryMessageEntry{ id: entry.id, - score: float64(time.Now().Unix()), + score: float64(dtime.Now().Unix()), } cache.processingQueues[queue], _ = insertEntry(processingEntry, cache.processingQueues[queue]) @@ -277,6 +284,10 @@ func (cache *MemoryCache) PullMessages(ctx context.Context, queue string, n int6 result = append(result, entry.id) } + for _, entry := range filteredOut { + cache.queues[queue], _ = insertEntry(entry, cache.queues[queue]) + } + return result, nil } @@ -288,7 +299,7 @@ func (cache *MemoryCache) TimeoutMessages(_ context.Context, queue string, timeo return nil, nil } - timeoutTime := time.Now().Add(-1 * timeout).Unix() + timeoutTime := dtime.Now().Add(-1 * timeout).Unix() count := int64(0) @@ -297,7 +308,7 @@ func (cache *MemoryCache) TimeoutMessages(_ context.Context, queue string, timeo value := e.Value.(*MemoryMessageEntry) if value.score <= float64(timeoutTime) { - value.score = entities.MaxScore() + value.score = score.Min result = append(result, value.id) @@ -310,7 +321,7 @@ func (cache *MemoryCache) TimeoutMessages(_ context.Context, queue string, timeo return result, nil } -func (cache *MemoryCache) Insert(ctx context.Context, queue string, messages ...*entities.Message) ([]string, error) { +func (cache *MemoryCache) Insert(ctx 
context.Context, queue string, messages ...*message.Message) ([]string, error) { cache.lock.Lock() defer cache.lock.Unlock() @@ -321,7 +332,7 @@ func (cache *MemoryCache) Insert(ctx context.Context, queue string, messages ... } insertIds := make([]string, 0) - insertions := make([]*entities.Message, 0) + insertions := make([]*message.Message, 0) for _, message := range messages { isPresent := cache.isPresentOnAnyPool(ctx, queue, message.ID) @@ -340,7 +351,7 @@ func (cache *MemoryCache) Insert(ctx context.Context, queue string, messages ... return insertIds, nil } -func createEntry(message *entities.Message) *MemoryMessageEntry { +func createEntry(message *message.Message) *MemoryMessageEntry { return &MemoryMessageEntry{ id: message.ID, score: message.Score, diff --git a/internal/queue/cache/redis_cache.go b/internal/queue/cache/redis_cache.go index 8cac2aa..41f3917 100644 --- a/internal/queue/cache/redis_cache.go +++ b/internal/queue/cache/redis_cache.go @@ -12,10 +12,12 @@ import ( "github.com/go-redis/redis/v8" "github.com/meirf/gopart" "github.com/takenet/deckard/internal/config" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/metrics" - "github.com/takenet/deckard/internal/queue/entities" - "github.com/takenet/deckard/internal/queue/utils" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/pool" + "github.com/takenet/deckard/internal/queue/score" "go.opentelemetry.io/otel/attribute" ) @@ -30,14 +32,18 @@ const ( pullElement = "pull" removeElement = "remove" moveElement = "move" + lockElement = "lock" addElements = "add" containsElement = "contains" moveFilteredElements = "move_primary_pool" + unlockElements = "unlock_elements" POOL_PREFIX = "deckard:queue:" PROCESSING_POOL_SUFFIX = ":tmp" LOCK_ACK_SUFFIX = ":" + string(LOCK_ACK) LOCK_NACK_SUFFIX = ":" + string(LOCK_NACK) + LOCK_ACK_SCORE_SUFFIX = ":" + 
string(LOCK_ACK) + ":score" + LOCK_NACK_SCORE_SUFFIX = ":" + string(LOCK_NACK) + ":score" ) var PROCESSING_POOL_REGEX = regexp.MustCompile("(.+)" + PROCESSING_POOL_SUFFIX + "$") @@ -67,7 +73,7 @@ func NewRedisCache(ctx context.Context) (*RedisCache, error) { logger.S(ctx).Debug("Connecting to ", options.Addr, " Redis instance") - start := time.Now() + start := dtime.Now() redisClient, err := waitForClient(options) if err != nil { @@ -82,7 +88,9 @@ func NewRedisCache(ctx context.Context) (*RedisCache, error) { removeElement: redis.NewScript(removeElementScript), pullElement: redis.NewScript(pullElementsScript), moveElement: redis.NewScript(moveElementScript), + lockElement: redis.NewScript(lockElementScript), moveFilteredElements: redis.NewScript(moveFilteredElementsScript), + unlockElements: redis.NewScript(unlockElementsScript), addElements: redis.NewScript(addElementsScript), containsElement: redis.NewScript(containsElementScript), }, @@ -114,9 +122,9 @@ func waitForClient(options *redis.Options) (*redis.Client, error) { } func (cache *RedisCache) Flush(ctx context.Context) { - now := time.Now() + now := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "flush")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "flush")) }() cache.Client.FlushDB(ctx) @@ -125,9 +133,9 @@ func (cache *RedisCache) Flush(ctx context.Context) { func (cache *RedisCache) Remove(ctx context.Context, queue string, ids ...string) (removed int64, err error) { total := int64(0) - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "remove")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "remove")) }() idList := make([]interface{}, len(ids)) @@ -139,7 +147,14 @@ func (cache *RedisCache) Remove(ctx context.Context, queue string, ids ...string cmd := 
cache.scripts[removeElement].Run( context.Background(), cache.Client, - []string{cache.activePool(queue), cache.processingPool(queue), cache.lockPool(queue, LOCK_ACK), cache.lockPool(queue, LOCK_NACK)}, + []string{ + cache.activePool(queue), + cache.processingPool(queue), + cache.lockPool(queue, LOCK_ACK), + cache.lockPool(queue, LOCK_NACK), + cache.lockPoolScore(queue, LOCK_ACK), + cache.lockPoolScore(queue, LOCK_NACK), + }, idList[index.Low:index.High]..., ) @@ -157,25 +172,27 @@ func (cache *RedisCache) Remove(ctx context.Context, queue string, ids ...string return total, nil } -func (cache *RedisCache) ListQueues(ctx context.Context, pattern string, poolType entities.PoolType) (queues []string, err error) { - execStart := time.Now() +// TODO: This should be optimized. +// TODO: We should list queues using storage with iterator, and not redis. Rethink this usage +func (cache *RedisCache) ListQueues(ctx context.Context, pattern string, poolType pool.PoolType) (queues []string, err error) { + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "list_queue")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "list_queue")) }() var searchPattern string switch poolType { - case entities.PRIMARY_POOL: + case pool.PRIMARY_POOL: searchPattern = cache.activePool(pattern) - case entities.PROCESSING_POOL: + case pool.PROCESSING_POOL: searchPattern = cache.processingPool(pattern) - case entities.LOCK_ACK_POOL: + case pool.LOCK_ACK_POOL: searchPattern = cache.lockPool(pattern, LOCK_ACK) - case entities.LOCK_NACK_POOL: + case pool.LOCK_NACK_POOL: searchPattern = cache.lockPool(pattern, LOCK_NACK) } @@ -189,13 +206,13 @@ func (cache *RedisCache) ListQueues(ctx context.Context, pattern string, poolTyp var regex *regexp.Regexp switch poolType { - case entities.PROCESSING_POOL: + case pool.PROCESSING_POOL: regex = PROCESSING_POOL_REGEX - case entities.LOCK_ACK_POOL: + 
case pool.LOCK_ACK_POOL: regex = LOCK_ACK_POOL_REGEX - case entities.LOCK_NACK_POOL: + case pool.LOCK_NACK_POOL: regex = LOCK_NACK_POOL_REGEX } @@ -209,7 +226,7 @@ func (cache *RedisCache) ListQueues(ctx context.Context, pattern string, poolTyp data[i] = queue } - if entities.PRIMARY_POOL == poolType { + if pool.PRIMARY_POOL == poolType { return filterQueueSuffix(data), nil } @@ -232,20 +249,28 @@ func filterQueueSuffix(data []string) []string { continue } + if strings.HasSuffix(data[i], LOCK_NACK_SCORE_SUFFIX) { + continue + } + + if strings.HasSuffix(data[i], LOCK_ACK_SCORE_SUFFIX) { + continue + } + result = append(result, data[i]) } return result } -func (cache *RedisCache) MakeAvailable(ctx context.Context, message *entities.Message) (bool, error) { +func (cache *RedisCache) MakeAvailable(ctx context.Context, message *message.Message) (bool, error) { if message.Queue == "" { return false, errors.New("invalid message queue") } - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "make_available")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "make_available")) }() cmd := cache.scripts[moveElement].Run( @@ -263,31 +288,29 @@ func (cache *RedisCache) MakeAvailable(ctx context.Context, message *entities.Me return cmd.Val().(int64) == 1, nil } -func (cache *RedisCache) LockMessage(ctx context.Context, message *entities.Message, lockType LockType) (bool, error) { +func (cache *RedisCache) LockMessage(ctx context.Context, message *message.Message, lockType LockType) (bool, error) { if message.Queue == "" { return false, errors.New("invalid queue to lock") } if message.LockMs <= 0 { - return false, errors.New("invalid lock seconds") + return false, errors.New("invalid lock time") } - now := time.Now() - nowMs := utils.TimeToMs(&now) + lockScore := dtime.NowMs() + message.LockMs - score := nowMs + message.LockMs - - execStart 
:= time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "lock")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "lock")) }() - cmd := cache.scripts[moveElement].Run( + cmd := cache.scripts[lockElement].Run( context.Background(), cache.Client, - []string{cache.lockPool(message.Queue, lockType), cache.processingPool(message.Queue)}, - score, + []string{cache.processingPool(message.Queue), cache.lockPool(message.Queue, lockType), cache.lockPoolScore(message.Queue, lockType)}, + lockScore, message.ID, + message.Score, ) if cmd.Err() != nil { @@ -298,50 +321,56 @@ func (cache *RedisCache) LockMessage(ctx context.Context, message *entities.Mess } func (cache *RedisCache) UnlockMessages(ctx context.Context, queue string, lockType LockType) ([]string, error) { - now := time.Now() - nowTime := utils.TimeToMs(&now) + defaultScore := score.Min - newScore := entities.MaxScore() if lockType == LOCK_ACK { - newScore = entities.GetScore(&now, 0) + defaultScore = score.GetScoreByDefaultAlgorithm() } - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "unlock_messages")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "unlock_messages")) }() - cmd := cache.scripts[moveFilteredElements].Run( + cmd := cache.scripts[unlockElements].Run( context.Background(), cache.Client, - []string{cache.activePool(queue), cache.lockPool(queue, lockType)}, - nowTime, newScore, 1000, + []string{cache.lockPool(queue, lockType), cache.activePool(queue), cache.lockPoolScore(queue, lockType)}, + 1000, dtime.NowMs(), defaultScore, ) return parseResult(cmd) } -func (cache *RedisCache) PullMessages(ctx context.Context, queue string, n int64, scoreFilter int64) (ids []string, err error) { +func (cache *RedisCache) PullMessages(ctx 
context.Context, queue string, n int64, minScore *float64, maxScore *float64) (ids []string, err error) { var cmd *redis.Cmd - now := time.Now() - nowMs := utils.TimeToMs(&now) + now := dtime.Now() + defer func() { + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "pull")) + }() - score := int64(entities.MaxScore()) - if scoreFilter > 0 { - score = nowMs - scoreFilter + args := []any{ + n, dtime.TimeToMs(&now), } - execStart := time.Now() - defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "pull")) - }() + if minScore != nil { + args = append(args, *minScore) + } else { + args = append(args, "-inf") + } + + if maxScore != nil { + args = append(args, *maxScore) + } else { + args = append(args, "+inf") + } cmd = cache.scripts[pullElement].Run( context.Background(), cache.Client, []string{cache.activePool(queue), cache.processingPool(queue)}, - n, nowMs, score, + args..., ) return parseResult(cmd) @@ -382,25 +411,25 @@ func resultToIds(result interface{}) []string { } func (cache *RedisCache) TimeoutMessages(ctx context.Context, queue string, timeout time.Duration) ([]string, error) { - nowMinusTimeout := time.Now().Add(-1 * timeout) - timeoutTime := utils.TimeToMs(&nowMinusTimeout) + nowMinusTimeout := dtime.Now().Add(-1 * timeout) + timeoutTime := dtime.TimeToMs(&nowMinusTimeout) - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "timeout")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "timeout")) }() cmd := cache.scripts[moveFilteredElements].Run( context.Background(), cache.Client, []string{cache.activePool(queue), cache.processingPool(queue)}, - timeoutTime, entities.MaxScore(), 1000, + timeoutTime, score.Min, 1000, ) return parseResult(cmd) } -func (cache *RedisCache) Insert(ctx context.Context, queue string, messages 
...*entities.Message) ([]string, error) { +func (cache *RedisCache) Insert(ctx context.Context, queue string, messages ...*message.Message) ([]string, error) { for i := range messages { if messages[i].Queue != queue { return nil, errors.New("invalid queue to insert data") @@ -423,9 +452,9 @@ func (cache *RedisCache) Insert(ctx context.Context, queue string, messages ...* var cmd *redis.Cmd func() { - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "insert")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "insert")) }() cmd = cache.scripts[addElements].Run( @@ -449,9 +478,9 @@ func (cache *RedisCache) Insert(ctx context.Context, queue string, messages ...* } func (cache *RedisCache) IsProcessing(ctx context.Context, queue string, id string) (bool, error) { - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "is_processing")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "is_processing")) }() return cache.containsElement(ctx, cache.processingPool(queue), id) @@ -472,9 +501,9 @@ func (cache *RedisCache) containsElement(ctx context.Context, queuePool string, } func (cache *RedisCache) Get(ctx context.Context, key string) (string, error) { - execStart := time.Now() + execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "get")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "get")) }() cmd := cache.Client.Get(context.Background(), fmt.Sprint("deckard:", key)) @@ -492,9 +521,9 @@ func (cache *RedisCache) Get(ctx context.Context, key string) (string, error) { } func (cache *RedisCache) Set(ctx context.Context, key string, value string) error { - execStart := time.Now() + 
execStart := dtime.Now() defer func() { - metrics.CacheLatency.Record(ctx, utils.ElapsedTime(execStart), attribute.String("op", "set")) + metrics.CacheLatency.Record(ctx, dtime.ElapsedTime(execStart), attribute.String("op", "set")) }() cmd := cache.Client.Set(context.Background(), fmt.Sprint("deckard:", key), value, 0) @@ -524,3 +553,10 @@ func (cache *RedisCache) processingPool(queue string) string { func (cache *RedisCache) lockPool(queue string, lockType LockType) string { return POOL_PREFIX + queue + ":" + string(lockType) } + +// lockPool returns the name of the lock pool scores of messages. +// +// used to unlock messages with a predefined score. +func (cache *RedisCache) lockPoolScore(queue string, lockType LockType) string { + return POOL_PREFIX + queue + ":" + string(lockType) + ":score" +} diff --git a/internal/queue/cache/redis_cache_scripts.go b/internal/queue/cache/redis_cache_scripts.go index 51ac18b..bd87634 100644 --- a/internal/queue/cache/redis_cache_scripts.go +++ b/internal/queue/cache/redis_cache_scripts.go @@ -34,6 +34,36 @@ end return 0 ` +// The lock script is used to move an element from a sorted set to another setting a specific score. +// It also add the score to the lock pool score to be able to unlock the element with a proper score. 
+// +// KEYS[1] -> active sorted set +// +// KEYS[2] -> lock sorted set +// +// KEYS[3] -> lock score sorted set +// +// ARGV[1] -> the score +// +// ARGV[2] -> the element to be moved +// +// ARGV[3] -> the result score to be appended to the message id with a comma +const lockElementScript = ` +local removed = redis.call('ZREM', KEYS[1], ARGV[2]) + +if removed > 0 then + local result = redis.call('ZADD', KEYS[2], ARGV[1], ARGV[2]) + + if tostring(ARGV[3]) == "-1" then + return result + end + + return redis.call('ZADD', KEYS[3], ARGV[3], ARGV[2]) +end + +return 0 +` + // Adds all elements into a sorted set if they are not present in any other sorted set // // KEYS[1] -> destination sorted set @@ -107,7 +137,7 @@ return 0 const moveFilteredElementsScript = ` local elements = redis.call('ZREVRANGEBYSCORE', KEYS[2], ARGV[1], '0', 'LIMIT', '0', tostring(ARGV[3])) if next(elements) == nil then - return '' + return '' end for i, key in ipairs(elements) do redis.call('ZADD', KEYS[1], ARGV[2], key) @@ -117,6 +147,46 @@ redis.call('ZREM', KEYS[2], unpack(elements)) return elements ` +// Unlocks elements from the lock sorted set filtered by a score (usually the current timestamp) into the destination sorted set +// +// KEYS[1] -> lock sorted set +// +// KEYS[2] -> destination sorted set +// +// KEYS[3] -> lock score sorted set +// +// ARGV[1] -> number of elements to move +// +// ARGV[2] -> current timestamp to filter elements to unlock +// +// ARGV[3] -> the default score if no score is present in the score lock sorted set +const unlockElementsScript = ` +local elements = redis.call('ZREVRANGEBYSCORE', KEYS[1], ARGV[2], '0', 'LIMIT', '0', tostring(ARGV[1])) +if next(elements) == nil then + return '' +end + +local scores = redis.call('ZMSCORE', KEYS[3], unpack(elements)) + +local bulkData = {} +for i, key in ipairs(elements) do + local lockScore = scores[i] + + if lockScore == nil or not lockScore then + lockScore = ARGV[3] + end + + table.insert(bulkData, lockScore) + 
table.insert(bulkData, key) +end + +redis.call('ZADD', KEYS[2], unpack(bulkData)) +redis.call('ZREM', KEYS[1], unpack(elements)) +redis.call('ZREM', KEYS[3], unpack(elements)) + +return elements +` + // Moves n elements from a sorted set into another sorted set and return moved elements. // // KEYS[1] -> sorted set to move from @@ -127,16 +197,18 @@ return elements // // ARGV[2] -> value to use as new score in the destination sorted set // -// ARGV[3] -> value to filter elements by score +// ARGV[3] -> min score to filter +// +// ARGV[4] -> max score to filter const pullElementsScript = ` local elements -if ARGV[3] == '0' then +if ARGV[3] == '-inf' and ARGV[4] == '+inf' then elements = redis.call('ZRANGE', KEYS[1], '0', tostring(tonumber(ARGV[1]) - 1)) else - elements = redis.call('ZRANGEBYSCORE', KEYS[1], '0', ARGV[3], 'LIMIT', '0', tostring(ARGV[1])) + elements = redis.call('ZRANGEBYSCORE', KEYS[1], ARGV[3], ARGV[4], 'LIMIT', '0', tostring(ARGV[1])) end if next(elements) == nil then - return '' + return '' end for i, key in ipairs(elements) do redis.call('ZADD', KEYS[2], ARGV[2], key) diff --git a/internal/queue/cache/redis_cache_test.go b/internal/queue/cache/redis_cache_test.go index d08751e..fc556ef 100644 --- a/internal/queue/cache/redis_cache_test.go +++ b/internal/queue/cache/redis_cache_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/takenet/deckard/internal/config" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/message" ) func TestRedisCacheIntegration(t *testing.T) { @@ -54,16 +54,16 @@ func TestInsertShouldInsertWithCorrectScoreIntegration(t *testing.T) { cache.Flush(ctx) - data := make([]*entities.Message, 2) + data := make([]*message.Message, 2) - data[0] = &entities.Message{ + data[0] = &message.Message{ ID: "123", Description: "desc", Queue: "queue", Score: 654231, } - data[1] = &entities.Message{ + data[1] = 
&message.Message{ ID: "234", Description: "desc", Queue: "queue", @@ -112,8 +112,8 @@ func TestConnectWithRedisUsingConnectionURI(t *testing.T) { cache.Flush(ctx) - data := make([]*entities.Message, 1) - data[0] = &entities.Message{ + data := make([]*message.Message, 1) + data[0] = &message.Message{ ID: "234", Description: "desc", Queue: "queue", diff --git a/internal/queue/entities/queue_configuration.go b/internal/queue/configuration/queue_configuration.go similarity index 92% rename from internal/queue/entities/queue_configuration.go rename to internal/queue/configuration/queue_configuration.go index 5a0487d..9eeff2c 100644 --- a/internal/queue/entities/queue_configuration.go +++ b/internal/queue/configuration/queue_configuration.go @@ -1,4 +1,4 @@ -package entities +package configuration type QueueConfiguration struct { // Queue is the identifier of this message. diff --git a/internal/queue/entities/message_test.go b/internal/queue/entities/message_test.go deleted file mode 100644 index 8bef693..0000000 --- a/internal/queue/entities/message_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package entities - -import ( - "testing" - - "github.com/stretchr/testify/require" - "github.com/takenet/deckard/internal/queue/utils" -) - -func TestMaxScore(t *testing.T) { - t.Parallel() - - require.Equal(t, float64(0), MaxScore()) -} - -func TestUpdateScoreWithoutUsageShouldResultMaxScore(t *testing.T) { - t.Parallel() - - message := Message{} - - message.UpdateScore() - - require.Equal(t, MaxScore(), message.Score) -} - -func TestUpdateScoreWithOtherScoreWithoutUsageShouldResultMaxScore(t *testing.T) { - t.Parallel() - - message := Message{ - Score: 10, - } - - message.UpdateScore() - - require.Equal(t, MaxScore(), message.Score) -} - -func TestUpdateScoreWithLastUsageWithoutSubtractShouldResultLastUsage(t *testing.T) { - t.Parallel() - - fixedTime := utils.MsToTime(1610576986705) - message := Message{ - LastUsage: &fixedTime, - } - - message.UpdateScore() - - 
require.Equal(t, float64(1610576986705), message.Score) -} - -func TestUpdateScoreWithLastUsageWithSubtractShouldResultLastUsageMinusSubtract(t *testing.T) { - t.Parallel() - - fixedTime := utils.MsToTime(1610576986705) - message := Message{ - LastUsage: &fixedTime, - LastScoreSubtract: 1000, - } - - message.UpdateScore() - - require.Equal(t, float64(1610576986705-1000), message.Score) -} diff --git a/internal/queue/entities/message.go b/internal/queue/message/message.go similarity index 70% rename from internal/queue/entities/message.go rename to internal/queue/message/message.go index f1d3e29..98c7ed8 100644 --- a/internal/queue/entities/message.go +++ b/internal/queue/message/message.go @@ -1,24 +1,13 @@ -package entities +package message import ( "strings" "time" - "github.com/takenet/deckard/internal/queue/utils" + "github.com/takenet/deckard/internal/queue/pool" "google.golang.org/protobuf/types/known/anypb" ) -type PoolType string - -const ( - PRIMARY_POOL PoolType = "primary_pool" - PROCESSING_POOL PoolType = "processing_pool" - LOCK_ACK_POOL PoolType = "lock_ack_pool" - LOCK_NACK_POOL PoolType = "lock_nack_pool" -) - -const QUEUE_SEPARATOR = "::" - // Message contains all data related to a single message. Including telemetry data. type Message struct { ID string `json:"id" bson:"id"` @@ -56,12 +45,14 @@ type Message struct { // Internal fields are managed by the Deckard itself and should not manually inserted on storage. // The internal storage id for this message + // For MongoDB storage it is the _id field as primitive.ObjectID + // For Memory storage it is an int64 InternalId interface{} `json:"_id,omitempty" bson:"_id,omitempty"` - // Score defines the priority of this message and is calculate with UpdateScore method. + // Score defines the score priority of this message. Score float64 `json:"score" bson:"score"` - // Represents the result from the last time this message has been processed. 
+ // Represents the result from the last time this message has been processed successfully (acknowledged). LastUsage *time.Time `json:"last_usage,omitempty" bson:"last_usage,omitempty"` Breakpoint string `json:"breakpoint" bson:"breakpoint"` LastScoreSubtract float64 `json:"last_score_subtract" bson:"last_score_subtract"` @@ -77,42 +68,16 @@ type Message struct { QueueSuffix string `json:"queue_suffix" bson:"queue_suffix"` } -// MaxScore is the biggest possible score a message could have. -// Since our base implementation is the Redis, its sorted set are ascendant and 0 is the biggest score. -func MaxScore() float64 { - return 0 -} - -func (q *Message) UpdateScore() { - q.Score = GetScore(q.LastUsage, q.LastScoreSubtract) -} - -// GetScore returns the seconds since the unix epoch of the last usage minus the last score subtract. -// The idea here is to give priority to messages that have not been used for a long time and also allow users to modify personalize the priority algorithm. -func GetScore(usageTime *time.Time, scoreSubtract float64) float64 { - if usageTime == nil { - return MaxScore() - } - - usageMillis := float64(utils.TimeToMs(usageTime)) - - if scoreSubtract == 0 { - return usageMillis - } - - return usageMillis - scoreSubtract -} - func (q *Message) GetQueueParts() (prefix string, suffix string) { return GetQueueParts(q.Queue) } func GetQueueParts(queue string) (prefix string, suffix string) { - if !strings.Contains(queue, QUEUE_SEPARATOR) { + if !strings.Contains(queue, pool.QUEUE_SEPARATOR) { return queue, "" } - data := strings.SplitN(queue, QUEUE_SEPARATOR, 2) + data := strings.SplitN(queue, pool.QUEUE_SEPARATOR, 2) return data[0], data[1] } diff --git a/internal/queue/message/message_test.go b/internal/queue/message/message_test.go new file mode 100644 index 0000000..e4e344f --- /dev/null +++ b/internal/queue/message/message_test.go @@ -0,0 +1,76 @@ +package message_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + 
"github.com/takenet/deckard/internal/queue/message" +) + +func TestGetQueueParts(t *testing.T) { + t.Run("Queue with separator", func(t *testing.T) { + queue := "prefix::suffix" + wantPrefix := "prefix" + wantSuffix := "suffix" + + gotPrefix, gotSuffix := message.GetQueueParts(queue) + + require.Equal(t, wantPrefix, gotPrefix, "Unexpected prefix") + require.Equal(t, wantSuffix, gotSuffix, "Unexpected suffix") + }) + + t.Run("Queue without separator", func(t *testing.T) { + queue := "queue" + wantPrefix := "queue" + wantSuffix := "" + + gotPrefix, gotSuffix := message.GetQueueParts(queue) + + require.Equal(t, wantPrefix, gotPrefix, "Unexpected prefix") + require.Equal(t, wantSuffix, gotSuffix, "Unexpected suffix") + }) +} + +func TestGetQueuePrefix(t *testing.T) { + t.Run("Queue with separator", func(t *testing.T) { + queue := "prefix::suffix" + want := "prefix" + + got := message.GetQueuePrefix(queue) + + require.Equal(t, want, got, "Unexpected prefix") + }) + + t.Run("Queue without separator", func(t *testing.T) { + queue := "queue" + want := "queue" + + got := message.GetQueuePrefix(queue) + + require.Equal(t, want, got, "Unexpected prefix") + }) +} + +func TestMessage_GetQueueParts(t *testing.T) { + t.Run("Queue with separator", func(t *testing.T) { + message := message.Message{Queue: "prefix::suffix"} + wantPrefix := "prefix" + wantSuffix := "suffix" + + gotPrefix, gotSuffix := message.GetQueueParts() + + require.Equal(t, wantPrefix, gotPrefix, "Unexpected prefix") + require.Equal(t, wantSuffix, gotSuffix, "Unexpected suffix") + }) + + t.Run("Queue without separator", func(t *testing.T) { + message := message.Message{Queue: "queue"} + wantPrefix := "queue" + wantSuffix := "" + + gotPrefix, gotSuffix := message.GetQueueParts() + + require.Equal(t, wantPrefix, gotPrefix, "Unexpected prefix") + require.Equal(t, wantSuffix, gotSuffix, "Unexpected suffix") + }) +} diff --git a/internal/queue/pool/pool.go b/internal/queue/pool/pool.go new file mode 100644 index 
0000000..098a0f3 --- /dev/null +++ b/internal/queue/pool/pool.go @@ -0,0 +1,12 @@ +package pool + +type PoolType string + +const ( + PRIMARY_POOL PoolType = "primary_pool" + PROCESSING_POOL PoolType = "processing_pool" + LOCK_ACK_POOL PoolType = "lock_ack_pool" + LOCK_NACK_POOL PoolType = "lock_nack_pool" +) + +const QUEUE_SEPARATOR = "::" diff --git a/internal/queue/queue.go b/internal/queue/queue.go index 7a19b4a..f297c4d 100644 --- a/internal/queue/queue.go +++ b/internal/queue/queue.go @@ -14,21 +14,22 @@ import ( "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/metrics" "github.com/takenet/deckard/internal/queue/cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" "go.opentelemetry.io/otel/attribute" ) type DeckardQueue interface { - AddMessagesToCache(ctx context.Context, messages ...*entities.Message) (int64, error) - AddMessagesToStorage(ctx context.Context, messages ...*entities.Message) (inserted int64, updated int64, err error) - Nack(ctx context.Context, message *entities.Message, timestamp time.Time, reason string) (bool, error) - Ack(ctx context.Context, message *entities.Message, timestamp time.Time, reason string) (bool, error) + AddMessagesToCache(ctx context.Context, messages ...*message.Message) (int64, error) + AddMessagesToStorage(ctx context.Context, messages ...*message.Message) (inserted int64, updated int64, err error) + Nack(ctx context.Context, message *message.Message, timestamp time.Time, reason string) (bool, error) + Ack(ctx context.Context, message *message.Message, reason string) (bool, error) TimeoutMessages(ctx context.Context, queue string) ([]string, error) - Pull(ctx context.Context, queue string, n int64, scoreFilter int64) (*[]entities.Message, error) + Pull(ctx context.Context, queue string, n int64, 
minScore *float64, maxScore *float64) (*[]message.Message, error) Remove(ctx context.Context, queue string, reason string, ids ...string) (cacheRemoved int64, storageRemoved int64, err error) Count(ctx context.Context, opts *storage.FindOptions) (int64, error) - GetStorageMessages(ctx context.Context, opt *storage.FindOptions) ([]entities.Message, error) + GetStorageMessages(ctx context.Context, opt *storage.FindOptions) ([]message.Message, error) // Flushes all deckard content from cache and storage. // Used only for memory instance. @@ -73,7 +74,7 @@ func (pool *Queue) Count(ctx context.Context, opts *storage.FindOptions) (int64, return result, nil } -func (pool *Queue) GetStorageMessages(ctx context.Context, opt *storage.FindOptions) ([]entities.Message, error) { +func (pool *Queue) GetStorageMessages(ctx context.Context, opt *storage.FindOptions) ([]message.Message, error) { result, err := pool.storage.Find(ctx, opt) if err != nil { @@ -85,7 +86,7 @@ func (pool *Queue) GetStorageMessages(ctx context.Context, opt *storage.FindOpti return result, nil } -func (pool *Queue) AddMessagesToStorage(ctx context.Context, messages ...*entities.Message) (inserted int64, updated int64, err error) { +func (pool *Queue) AddMessagesToStorage(ctx context.Context, messages ...*message.Message) (inserted int64, updated int64, err error) { queues := make(map[string]bool) for i := range messages { @@ -109,16 +110,16 @@ func (pool *Queue) AddMessagesToStorage(ctx context.Context, messages ...*entiti return insertions, updates, err } -func (pool *Queue) AddMessagesToCache(ctx context.Context, messages ...*entities.Message) (int64, error) { +func (pool *Queue) AddMessagesToCache(ctx context.Context, messages ...*message.Message) (int64, error) { return pool.AddMessagesToCacheWithAuditReason(ctx, "", messages...) 
} -func (pool *Queue) AddMessagesToCacheWithAuditReason(ctx context.Context, reason string, messages ...*entities.Message) (int64, error) { - membersByQueue := make(map[string][]*entities.Message) +func (pool *Queue) AddMessagesToCacheWithAuditReason(ctx context.Context, reason string, messages ...*message.Message) (int64, error) { + membersByQueue := make(map[string][]*message.Message) for i := range messages { queueMessages, ok := membersByQueue[messages[i].Queue] if !ok { - queueMessages = make([]*entities.Message, 0) + queueMessages = make([]*message.Message, 0) } queueMessages = append(queueMessages, messages[i]) @@ -152,25 +153,25 @@ func (pool *Queue) AddMessagesToCacheWithAuditReason(ctx context.Context, reason return count, nil } -func (pool *Queue) Nack(ctx context.Context, message *entities.Message, timestamp time.Time, reason string) (bool, error) { - if message == nil { +func (pool *Queue) Nack(ctx context.Context, msg *message.Message, timestamp time.Time, reason string) (bool, error) { + if msg == nil { return false, nil } - if message.Queue == "" { + if msg.Queue == "" { return false, fmt.Errorf("message has a invalid queue") } - if message.ID == "" { + if msg.ID == "" { return false, fmt.Errorf("message has a invalid ID") } defer func() { - metrics.QueueNack.Add(ctx, 1, attribute.String("queue", entities.GetQueuePrefix(message.Queue)), attribute.String("reason", reason)) + metrics.QueueNack.Add(ctx, 1, attribute.String("queue", message.GetQueuePrefix(msg.Queue)), attribute.String("reason", reason)) }() - if message.LockMs > 0 { - result, err := pool.cache.LockMessage(ctx, message, cache.LOCK_NACK) + if msg.LockMs > 0 { + result, err := pool.cache.LockMessage(ctx, msg, cache.LOCK_NACK) if err != nil { logger.S(ctx).Error("Error locking message: ", err) @@ -179,21 +180,21 @@ func (pool *Queue) Nack(ctx context.Context, message *entities.Message, timestam } pool.auditor.Store(ctx, audit.Entry{ - ID: message.ID, - Queue: message.Queue, - 
LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.NACK, Reason: reason, - LockMs: message.LockMs, + LockMs: msg.LockMs, }) return result, nil } - message.Score = entities.MaxScore() + msg.Score = score.Min - result, err := pool.cache.MakeAvailable(ctx, message) + result, err := pool.cache.MakeAvailable(ctx, msg) if err != nil { logger.S(ctx).Error("Error making element available: ", err) @@ -202,10 +203,10 @@ func (pool *Queue) Nack(ctx context.Context, message *entities.Message, timestam } pool.auditor.Store(ctx, audit.Entry{ - ID: message.ID, - Queue: message.Queue, - LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.NACK, Reason: reason, }) @@ -213,23 +214,20 @@ func (pool *Queue) Nack(ctx context.Context, message *entities.Message, timestam return result, nil } -func (pool *Queue) Ack(ctx context.Context, message *entities.Message, timestamp time.Time, reason string) (bool, error) { - if message == nil { +func (pool *Queue) Ack(ctx context.Context, msg *message.Message, reason string) (bool, error) { + if msg == nil { return false, nil } - if message.Queue == "" { + if msg.Queue == "" { return false, fmt.Errorf("message has a invalid queue") } - if message.ID == "" { + if msg.ID == "" { return false, fmt.Errorf("message has a invalid ID") } - message.LastUsage = ×tamp - message.UpdateScore() - - _, err := pool.storage.Ack(ctx, message) + _, err := pool.storage.Ack(ctx, msg) if err != nil { logger.S(ctx).Error("Error acking element on storage: ", err) @@ -237,10 +235,10 @@ func (pool *Queue) Ack(ctx context.Context, message *entities.Message, timestamp return false, err } - metrics.QueueAck.Add(ctx, 1, attribute.String("queue", 
entities.GetQueuePrefix(message.Queue)), attribute.String("reason", reason)) + metrics.QueueAck.Add(ctx, 1, attribute.String("queue", message.GetQueuePrefix(msg.Queue)), attribute.String("reason", reason)) - if message.LockMs > 0 { - result, err := pool.cache.LockMessage(ctx, message, cache.LOCK_ACK) + if msg.LockMs > 0 { + result, err := pool.cache.LockMessage(ctx, msg, cache.LOCK_ACK) if err != nil { logger.S(ctx).Error("Error locking element: ", err) @@ -249,19 +247,19 @@ func (pool *Queue) Ack(ctx context.Context, message *entities.Message, timestamp } pool.auditor.Store(ctx, audit.Entry{ - ID: message.ID, - Queue: message.Queue, - LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.ACK, Reason: reason, - LockMs: message.LockMs, + LockMs: msg.LockMs, }) return result, nil } - result, availableErr := pool.cache.MakeAvailable(ctx, message) + result, availableErr := pool.cache.MakeAvailable(ctx, msg) if availableErr != nil { logger.S(ctx).Error("Error making element available: ", availableErr) @@ -269,10 +267,10 @@ func (pool *Queue) Ack(ctx context.Context, message *entities.Message, timestamp } pool.auditor.Store(ctx, audit.Entry{ - ID: message.ID, - Queue: message.Queue, - LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.ACK, Reason: reason, }) @@ -290,7 +288,7 @@ func (pool *Queue) TimeoutMessages(ctx context.Context, queue string) ([]string, } if len(ids) > 0 { - metrics.QueueTimeout.Add(ctx, int64(len(ids)), attribute.String("queue", entities.GetQueuePrefix(queue))) + metrics.QueueTimeout.Add(ctx, int64(len(ids)), attribute.String("queue", message.GetQueuePrefix(queue))) for _, id := range ids { pool.auditor.Store(ctx, audit.Entry{ @@ -304,8 +302,8 @@ func 
(pool *Queue) TimeoutMessages(ctx context.Context, queue string) ([]string, return ids, nil } -func (pool *Queue) Pull(ctx context.Context, queue string, n int64, scoreFilter int64) (*[]entities.Message, error) { - ids, err := pool.cache.PullMessages(ctx, queue, n, scoreFilter) +func (pool *Queue) Pull(ctx context.Context, queue string, n int64, minScore *float64, maxScore *float64) (*[]message.Message, error) { + ids, err := pool.cache.PullMessages(ctx, queue, n, minScore, maxScore) if err != nil { logger.S(ctx).Error("Error pulling cache elements: ", err) @@ -313,7 +311,7 @@ func (pool *Queue) Pull(ctx context.Context, queue string, n int64, scoreFilter } if len(ids) == 0 { - metrics.QueueEmptyQueue.Add(ctx, 1, attribute.String("queue", entities.GetQueuePrefix(queue))) + metrics.QueueEmptyQueue.Add(ctx, 1, attribute.String("queue", message.GetQueuePrefix(queue))) return nil, nil } @@ -337,7 +335,7 @@ func (pool *Queue) Pull(ctx context.Context, queue string, n int64, scoreFilter } if len(retryNotFound) > 0 { - metrics.QueueNotFoundInStorage.Add(ctx, int64(len(notFound)), attribute.String("queue", entities.GetQueuePrefix(queue))) + metrics.QueueNotFoundInStorage.Add(ctx, int64(len(notFound)), attribute.String("queue", message.GetQueuePrefix(queue))) for _, id := range retryNotFound { pool.auditor.Store(ctx, audit.Entry{ @@ -362,7 +360,7 @@ func (pool *Queue) Pull(ctx context.Context, queue string, n int64, scoreFilter } if len(messages) == 0 { - metrics.QueueEmptyQueueStorage.Add(ctx, 1, attribute.String("queue", entities.GetQueuePrefix(queue))) + metrics.QueueEmptyQueueStorage.Add(ctx, 1, attribute.String("queue", message.GetQueuePrefix(queue))) return nil, nil } @@ -370,7 +368,7 @@ func (pool *Queue) Pull(ctx context.Context, queue string, n int64, scoreFilter return &messages, nil } -func (pool *Queue) getFromStorage(ctx context.Context, ids []string, queue string, sort *orderedmap.OrderedMap[string, int], retry bool) ([]entities.Message, []string, error) { 
+func (pool *Queue) getFromStorage(ctx context.Context, ids []string, queue string, sort *orderedmap.OrderedMap[string, int], retry bool) ([]message.Message, []string, error) { messages, err := pool.storage.Find(ctx, &storage.FindOptions{ Sort: sort, InternalFilter: &storage.InternalFilter{ @@ -435,7 +433,7 @@ func (pool *Queue) Flush(ctx context.Context) (bool, error) { } // notFoundIds calculate the difference between the ids and the found messages. -func notFoundIds(ids []string, messages []entities.Message) []string { +func notFoundIds(ids []string, messages []message.Message) []string { notFound := make([]string, 0) found := make(map[string]struct{}) diff --git a/internal/queue/queue_configuration_service.go b/internal/queue/queue_configuration_service.go index ff8bf86..fe02e30 100644 --- a/internal/queue/queue_configuration_service.go +++ b/internal/queue/queue_configuration_service.go @@ -5,13 +5,13 @@ import ( "time" "github.com/patrickmn/go-cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" "github.com/takenet/deckard/internal/queue/storage" ) type QueueConfigurationService interface { - EditQueueConfiguration(ctx context.Context, configuration *entities.QueueConfiguration) error - GetQueueConfiguration(ctx context.Context, queue string) (*entities.QueueConfiguration, error) + EditQueueConfiguration(ctx context.Context, configuration *configuration.QueueConfiguration) error + GetQueueConfiguration(ctx context.Context, queue string) (*configuration.QueueConfiguration, error) } type DefaultQueueConfigurationService struct { @@ -30,7 +30,7 @@ func NewQueueConfigurationService(_ context.Context, storage storage.Storage) *D var _ QueueConfigurationService = &DefaultQueueConfigurationService{} -func (queueService *DefaultQueueConfigurationService) EditQueueConfiguration(ctx context.Context, cfg *entities.QueueConfiguration) error { +func (queueService 
*DefaultQueueConfigurationService) EditQueueConfiguration(ctx context.Context, cfg *configuration.QueueConfiguration) error { if cfg == nil { return nil } @@ -39,13 +39,13 @@ func (queueService *DefaultQueueConfigurationService) EditQueueConfiguration(ctx return nil } - configuration, found := queueService.localCache.Get(cfg.Queue) + config, found := queueService.localCache.Get(cfg.Queue) if !found { return queueService.storage.EditQueueConfiguration(ctx, cfg) } - cacheConfiguration := configuration.(*entities.QueueConfiguration) + cacheConfiguration := config.(*configuration.QueueConfiguration) // Check if the new configuration is different if cacheConfiguration.MaxElements != cfg.MaxElements { @@ -57,29 +57,29 @@ func (queueService *DefaultQueueConfigurationService) EditQueueConfiguration(ctx return nil } -func (queueService *DefaultQueueConfigurationService) GetQueueConfiguration(ctx context.Context, queue string) (*entities.QueueConfiguration, error) { +func (queueService *DefaultQueueConfigurationService) GetQueueConfiguration(ctx context.Context, queue string) (*configuration.QueueConfiguration, error) { cacheConfig, found := queueService.localCache.Get(queue) if found { - return cacheConfig.(*entities.QueueConfiguration), nil + return cacheConfig.(*configuration.QueueConfiguration), nil } var err error - var configuration *entities.QueueConfiguration + var config *configuration.QueueConfiguration - configuration, err = queueService.storage.GetQueueConfiguration(ctx, queue) + config, err = queueService.storage.GetQueueConfiguration(ctx, queue) if err != nil { return nil, err } - if configuration == nil { - configuration = &entities.QueueConfiguration{ + if config == nil { + config = &configuration.QueueConfiguration{ Queue: queue, } } - queueService.localCache.Set(queue, configuration, cache.DefaultExpiration) + queueService.localCache.Set(queue, config, cache.DefaultExpiration) - return configuration, nil + return config, nil } diff --git 
a/internal/queue/queue_configuration_service_test.go b/internal/queue/queue_configuration_service_test.go index 421c8d3..05cbf2c 100644 --- a/internal/queue/queue_configuration_service_test.go +++ b/internal/queue/queue_configuration_service_test.go @@ -9,7 +9,7 @@ import ( "github.com/patrickmn/go-cache" "github.com/stretchr/testify/require" "github.com/takenet/deckard/internal/mocks" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" ) var configurationCtx = context.Background() @@ -39,9 +39,9 @@ func TestEditConfigurationNilConfigurationShouldDoNothing(t *testing.T) { func TestEditConfigurationMaxElementsZeroShouldDoNothing(t *testing.T) { t.Parallel() - configuration := NewQueueConfigurationService(configurationCtx, nil) + configurationService := NewQueueConfigurationService(configurationCtx, nil) - require.NoError(t, configuration.EditQueueConfiguration(configurationCtx, &entities.QueueConfiguration{MaxElements: 0})) + require.NoError(t, configurationService.EditQueueConfiguration(configurationCtx, &configuration.QueueConfiguration{MaxElements: 0})) } func TestEditConfigurationCacheNotFoundShouldCallStorageEdit(t *testing.T) { @@ -50,7 +50,7 @@ func TestEditConfigurationCacheNotFoundShouldCallStorageEdit(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - config := &entities.QueueConfiguration{MaxElements: 321, Queue: "q1"} + config := &configuration.QueueConfiguration{MaxElements: 321, Queue: "q1"} mockStorage := mocks.NewMockStorage(mockCtrl) mockStorage.EXPECT().EditQueueConfiguration(configurationCtx, config).Return(nil) @@ -62,13 +62,13 @@ func TestEditConfigurationCacheNotFoundShouldCallStorageEdit(t *testing.T) { func TestEditConfigurationCacheFoundWithSameConfigShouldDoNothing(t *testing.T) { t.Parallel() - configuration := NewQueueConfigurationService(configurationCtx, nil) + configurationService := NewQueueConfigurationService(configurationCtx, 
nil) - config := &entities.QueueConfiguration{MaxElements: 321, Queue: "q1"} + config := &configuration.QueueConfiguration{MaxElements: 321, Queue: "q1"} - configuration.localCache.Set("q1", config, cache.DefaultExpiration) + configurationService.localCache.Set("q1", config, cache.DefaultExpiration) - require.NoError(t, configuration.EditQueueConfiguration(configurationCtx, config)) + require.NoError(t, configurationService.EditQueueConfiguration(configurationCtx, config)) } func TestEditConfigurationCacheFoundWithDifferentConfigShouldCallStorageAndDeleteCache(t *testing.T) { @@ -77,18 +77,18 @@ func TestEditConfigurationCacheFoundWithDifferentConfigShouldCallStorageAndDelet mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - config := &entities.QueueConfiguration{MaxElements: 321, Queue: "q1"} + config := &configuration.QueueConfiguration{MaxElements: 321, Queue: "q1"} mockStorage := mocks.NewMockStorage(mockCtrl) mockStorage.EXPECT().EditQueueConfiguration(configurationCtx, config).Return(nil) - configuration := NewQueueConfigurationService(configurationCtx, mockStorage) + configurationService := NewQueueConfigurationService(configurationCtx, mockStorage) - configuration.localCache.Set("q1", &entities.QueueConfiguration{MaxElements: 123, Queue: "q1"}, cache.DefaultExpiration) + configurationService.localCache.Set("q1", &configuration.QueueConfiguration{MaxElements: 123, Queue: "q1"}, cache.DefaultExpiration) - require.NoError(t, configuration.EditQueueConfiguration(configurationCtx, config)) + require.NoError(t, configurationService.EditQueueConfiguration(configurationCtx, config)) - result, found := configuration.localCache.Get("q1") + result, found := configurationService.localCache.Get("q1") require.False(t, found) require.Nil(t, result) } @@ -96,7 +96,7 @@ func TestEditConfigurationCacheFoundWithDifferentConfigShouldCallStorageAndDelet func TestGetConfigurationFromCacheShouldResultFromCache(t *testing.T) { t.Parallel() - config := 
&entities.QueueConfiguration{MaxElements: 321, Queue: "q1"} + config := &configuration.QueueConfiguration{MaxElements: 321, Queue: "q1"} configuration := NewQueueConfigurationService(configurationCtx, nil) @@ -132,18 +132,18 @@ func TestGetConfigurationCacheMissStorageNotFoundShouldResultDefaultConfiguratio mockStorage := mocks.NewMockStorage(mockCtrl) mockStorage.EXPECT().GetQueueConfiguration(configurationCtx, "q1").Return(nil, nil) - configuration := NewQueueConfigurationService(configurationCtx, mockStorage) + configurationService := NewQueueConfigurationService(configurationCtx, mockStorage) - _, found := configuration.localCache.Get("q1") + _, found := configurationService.localCache.Get("q1") require.False(t, found) - result, err := configuration.GetQueueConfiguration(configurationCtx, "q1") + result, err := configurationService.GetQueueConfiguration(configurationCtx, "q1") require.NoError(t, err) - require.Equal(t, &entities.QueueConfiguration{ + require.Equal(t, &configuration.QueueConfiguration{ Queue: "q1", }, result) - cacheResult, found := configuration.localCache.Get("q1") + cacheResult, found := configurationService.localCache.Get("q1") require.True(t, found) require.Same(t, result, cacheResult) @@ -157,7 +157,7 @@ func TestGetConfigurationCacheMissStorageFoundShouldResultStorageConfigurationAn mockStorage := mocks.NewMockStorage(mockCtrl) - storageConfig := &entities.QueueConfiguration{ + storageConfig := &configuration.QueueConfiguration{ Queue: "q1", MaxElements: 534, } diff --git a/internal/queue/queue_housekeeper.go b/internal/queue/queue_housekeeper.go index 3d685af..f5ff5bf 100644 --- a/internal/queue/queue_housekeeper.go +++ b/internal/queue/queue_housekeeper.go @@ -6,12 +6,15 @@ import ( "github.com/elliotchance/orderedmap/v2" "github.com/takenet/deckard/internal/audit" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/metrics" 
"github.com/takenet/deckard/internal/queue/cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/pool" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" - "github.com/takenet/deckard/internal/queue/utils" "github.com/takenet/deckard/internal/shutdown" "go.opentelemetry.io/otel/attribute" ) @@ -21,13 +24,13 @@ import ( // TODO: allow each queue to have its own deadline for timeout. // TODO: change the behavior of this so it doesn't need to load all queue names in memory, we could use the storage to list queues with a cursor // TODO: we could even change the timeout mechanism to be not based on the queue name -func ProcessTimeoutMessages(ctx context.Context, pool *Queue) error { - t := time.Now() +func ProcessTimeoutMessages(ctx context.Context, queue *Queue) error { + t := dtime.Now() - queues, err := pool.cache.ListQueues(ctx, "*", entities.PROCESSING_POOL) + queues, err := queue.cache.ListQueues(ctx, "*", pool.PROCESSING_POOL) if err != nil { - logger.S(ctx).Error("Error list processing pool queues: ", err) + logger.S(ctx).Error("Error list processing queues: ", err) return err } @@ -46,7 +49,7 @@ func ProcessTimeoutMessages(ctx context.Context, pool *Queue) error { break } - result, err := pool.TimeoutMessages(ctx, queueName) + result, err := queue.TimeoutMessages(ctx, queueName) if err != nil { logger.S(ctx).Errorf("Error processing timeouts for queue %s: %v", queueName, err) @@ -70,15 +73,15 @@ func ProcessTimeoutMessages(ctx context.Context, pool *Queue) error { } // processLockPool moves messages from the lock message pool to the message pool. 
-func ProcessLockPool(ctx context.Context, pool *Queue) { - lockAckQueues, err := pool.cache.ListQueues(ctx, "*", entities.LOCK_ACK_POOL) +func ProcessLockPool(ctx context.Context, queue *Queue) { + lockAckQueues, err := queue.cache.ListQueues(ctx, "*", pool.LOCK_ACK_POOL) if err != nil { logger.S(ctx).Error("Error getting lock_ack queue names: ", err) return } - unlockMessages(ctx, pool, lockAckQueues, cache.LOCK_ACK) + unlockMessages(ctx, queue, lockAckQueues, cache.LOCK_ACK) if shutdown.Ongoing() { logger.S(ctx).Info("Shutdown started. Stopping unlock process.") @@ -86,14 +89,14 @@ func ProcessLockPool(ctx context.Context, pool *Queue) { return } - lockNackQueues, err := pool.cache.ListQueues(ctx, "*", entities.LOCK_NACK_POOL) + lockNackQueues, err := queue.cache.ListQueues(ctx, "*", pool.LOCK_NACK_POOL) if err != nil { logger.S(ctx).Error("Error getting lock_nack queue names: ", err) return } - unlockMessages(ctx, pool, lockNackQueues, cache.LOCK_NACK) + unlockMessages(ctx, queue, lockNackQueues, cache.LOCK_NACK) } func unlockMessages(ctx context.Context, pool *Queue, queues []string, lockType cache.LockType) { @@ -107,7 +110,7 @@ func unlockMessages(ctx context.Context, pool *Queue, queues []string, lockType ids, err := pool.cache.UnlockMessages(ctx, queues[i], lockType) if err != nil { - logger.S(ctx).Errorf("Error processing locks for queue %s: %v", queues[i], err.Error()) + logger.S(ctx).Errorf("Error processing locks for queue '%s': %v", queues[i], err.Error()) continue } @@ -121,7 +124,7 @@ func unlockMessages(ctx context.Context, pool *Queue, queues []string, lockType }) } - metrics.HousekeeperUnlock.Add(ctx, int64(len(ids)), attribute.String("queue", entities.GetQueuePrefix(queues[i])), attribute.String("lock_type", string(lockType))) + metrics.HousekeeperUnlock.Add(ctx, int64(len(ids)), attribute.String("queue", message.GetQueuePrefix(queues[i])), attribute.String("lock_type", string(lockType))) } } @@ -138,7 +141,7 @@ func isRecovering(ctx 
context.Context, pool *Queue) (bool, error) { // RecoveryMessagesPool recover messages pool sending all storage data to cache func RecoveryMessagesPool(ctx context.Context, pool *Queue) (metrify bool) { - t := time.Now() + t := dtime.Now() breakpoint, err := pool.cache.Get(ctx, cache.RECOVERY_STORAGE_BREAKPOINT_KEY) if err != nil { @@ -196,9 +199,16 @@ func RecoveryMessagesPool(ctx context.Context, pool *Queue) (metrify bool) { } if len(messages) > 0 { - addMessages := make([]*entities.Message, len(messages)) + addMessages := make([]*message.Message, len(messages)) for i := range messages { addMessages[i] = &messages[i] + + // TODO: + if messages[i].Score < score.Min { + messages[i].Score = score.Min + } else if messages[i].Score > score.Max { + messages[i].Score = score.Max + } } _, err := pool.AddMessagesToCacheWithAuditReason(ctx, "recovery", addMessages...) @@ -322,8 +332,8 @@ func RemoveTTLMessages(ctx context.Context, pool *Queue, filterDate *time.Time) return true, err } - metrics.HousekeeperTTLCacheRemoved.Add(ctx, cacheRemoved, attribute.String("queue", entities.GetQueuePrefix(queue))) - metrics.HousekeeperTTLStorageRemoved.Add(ctx, storageRemoved, attribute.String("queue", entities.GetQueuePrefix(queue))) + metrics.HousekeeperTTLCacheRemoved.Add(ctx, cacheRemoved, attribute.String("queue", message.GetQueuePrefix(queue))) + metrics.HousekeeperTTLStorageRemoved.Add(ctx, storageRemoved, attribute.String("queue", message.GetQueuePrefix(queue))) } return true, nil @@ -358,7 +368,7 @@ func RemoveExceedingMessages(ctx context.Context, pool *Queue) (bool, error) { return true, nil } -func (pool *Queue) removeExceedingMessagesFromQueue(ctx context.Context, queueConfiguration *entities.QueueConfiguration) error { +func (pool *Queue) removeExceedingMessagesFromQueue(ctx context.Context, queueConfiguration *configuration.QueueConfiguration) error { if queueConfiguration == nil || queueConfiguration.MaxElements <= 0 { return nil } @@ -414,8 +424,8 @@ func (pool 
*Queue) removeExceedingMessagesFromQueue(ctx context.Context, queueCo return err } - metrics.HousekeeperExceedingCacheRemoved.Add(ctx, cacheRemoved, attribute.String("queue", entities.GetQueuePrefix(queue))) - metrics.HousekeeperExceedingStorageRemoved.Add(ctx, storageRemoved, attribute.String("queue", entities.GetQueuePrefix(queue))) + metrics.HousekeeperExceedingCacheRemoved.Add(ctx, cacheRemoved, attribute.String("queue", message.GetQueuePrefix(queue))) + metrics.HousekeeperExceedingStorageRemoved.Add(ctx, storageRemoved, attribute.String("queue", message.GetQueuePrefix(queue))) return nil } @@ -457,7 +467,7 @@ func ComputeMetrics(ctx context.Context, pool *Queue) { } if len(message) == 1 && message[0].LastUsage != nil { - oldestElement[queue] = utils.ElapsedTime(*message[0].LastUsage) + oldestElement[queue] = dtime.ElapsedTime(*message[0].LastUsage) } total, err := pool.Count(ctx, &storage.FindOptions{ diff --git a/internal/queue/queue_housekeeper_test.go b/internal/queue/queue_housekeeper_test.go index 219f797..4b5348c 100644 --- a/internal/queue/queue_housekeeper_test.go +++ b/internal/queue/queue_housekeeper_test.go @@ -15,7 +15,10 @@ import ( "github.com/takenet/deckard/internal/metrics" "github.com/takenet/deckard/internal/mocks" "github.com/takenet/deckard/internal/queue/cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/pool" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -44,7 +47,7 @@ func TestUpdateOldestQueueMap(t *testing.T) { InternalFilter: &storage.InternalFilter{ QueuePrefix: "a", }, - }).Return([]entities.Message{ + }).Return([]message.Message{ {LastUsage: &now}, }, nil) @@ -56,7 +59,7 @@ func TestUpdateOldestQueueMap(t *testing.T) { 
InternalFilter: &storage.InternalFilter{ QueuePrefix: "b", }, - }).Return([]entities.Message{ + }).Return([]message.Message{ {LastUsage: &nowMinusTenSeconds}, }, nil) @@ -91,8 +94,8 @@ func TestProcessLockPool(t *testing.T) { ctx = context.Background() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_ACK_POOL).Return([]string{"a", "b"}, nil) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_NACK_POOL).Return([]string{"c", "d"}, nil) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_ACK_POOL).Return([]string{"a", "b"}, nil) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_NACK_POOL).Return([]string{"c", "d"}, nil) mockCache.EXPECT().UnlockMessages(ctx, "a", cache.LOCK_ACK) mockCache.EXPECT().UnlockMessages(ctx, "b", cache.LOCK_ACK) @@ -116,7 +119,7 @@ func TestProcessLockPoolAckListErrorShouldDoNothing(t *testing.T) { ctx = context.Background() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_ACK_POOL).Return(nil, fmt.Errorf("error")) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_ACK_POOL).Return(nil, fmt.Errorf("error")) mockStorage := mocks.NewMockStorage(mockCtrl) mockAuditor := mocks.NewMockAuditor(mockCtrl) @@ -135,8 +138,8 @@ func TestProcessLockPoolNackAckListErrorShouldDoNothing(t *testing.T) { ctx = context.Background() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_ACK_POOL).Return([]string{}, nil) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_NACK_POOL).Return(nil, fmt.Errorf("error")) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_ACK_POOL).Return([]string{}, nil) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_NACK_POOL).Return(nil, fmt.Errorf("error")) mockStorage := mocks.NewMockStorage(mockCtrl) mockAuditor := mocks.NewMockAuditor(mockCtrl) @@ -155,8 +158,8 @@ func TestProcessUnlockErrorShouldUnlockOthers(t *testing.T) { ctx = context.Background() mockCache := 
mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_ACK_POOL).Return([]string{"a", "b"}, nil) - mockCache.EXPECT().ListQueues(ctx, "*", entities.LOCK_NACK_POOL).Return([]string{"c", "d"}, nil) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_ACK_POOL).Return([]string{"a", "b"}, nil) + mockCache.EXPECT().ListQueues(ctx, "*", pool.LOCK_NACK_POOL).Return([]string{"c", "d"}, nil) mockCache.EXPECT().UnlockMessages(ctx, "a", cache.LOCK_ACK).Return(nil, fmt.Errorf("error")) mockCache.EXPECT().UnlockMessages(ctx, "b", cache.LOCK_ACK) @@ -179,20 +182,20 @@ func TestRecoveryMessagesCacheError(t *testing.T) { now := time.Now() - cacheMessages := []*entities.Message{{ + cacheMessages := []*message.Message{{ ID: "id", Queue: "queue", InternalId: 4321, ExpiryDate: time.Time{}, LastUsage: &now, - Score: entities.GetScore(&now, 54321), + Score: score.GetScoreFromTime(&now) - 54321, LastScoreSubtract: 54321, }, { ID: "id2", InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreFromTime(&now) - 23457, LastUsage: &now, LastScoreSubtract: 23457, }} @@ -203,12 +206,12 @@ func TestRecoveryMessagesCacheError(t *testing.T) { mockCache.EXPECT().Get(ctx, cache.RECOVERY_BREAKPOINT_KEY).Return("65456", nil) mockCache.EXPECT().Insert(ctx, "queue", cacheMessages).Return(nil, errors.New("cache error")) - storageMessages := []entities.Message{{ + storageMessages := []message.Message{{ ID: "id", InternalId: 4321, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 54321), + Score: score.GetScoreFromTime(&now) - 54321, LastUsage: &now, LastScoreSubtract: 54321, }, { @@ -216,7 +219,7 @@ func TestRecoveryMessagesCacheError(t *testing.T) { InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreFromTime(&now) - 23457, LastUsage: &now, LastScoreSubtract: 23457, }} @@ -289,20 +292,20 @@ func 
TestRecoveryMessagesPoolShouldAddMessagesAfterBreakpoint(t *testing.T) { now := time.Now() - cacheMessages := []*entities.Message{{ + cacheMessages := []*message.Message{{ ID: "id", Queue: "queue", InternalId: 45456, ExpiryDate: time.Time{}, LastUsage: &now, - Score: entities.GetScore(&now, 54321), + Score: score.GetScoreByDefaultAlgorithm() - 54321, LastScoreSubtract: 54321, }, { ID: "id2", InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreByDefaultAlgorithm() - 23457, LastUsage: &now, LastScoreSubtract: 23457, }} @@ -316,12 +319,12 @@ func TestRecoveryMessagesPoolShouldAddMessagesAfterBreakpoint(t *testing.T) { mockCache.EXPECT().Set(ctx, cache.RECOVERY_RUNNING, "false") mockCache.EXPECT().Insert(ctx, "queue", cacheMessages).Return([]string{"id", "id2"}, nil) - storageMessages := []entities.Message{{ + storageMessages := []message.Message{{ ID: "id", InternalId: 45456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 54321), + Score: score.GetScoreByDefaultAlgorithm() - 54321, LastUsage: &now, LastScoreSubtract: 54321, }, { @@ -329,7 +332,7 @@ func TestRecoveryMessagesPoolShouldAddMessagesAfterBreakpoint(t *testing.T) { InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreByDefaultAlgorithm() - 23457, LastUsage: &now, LastScoreSubtract: 23457, }} @@ -354,12 +357,12 @@ func TestRecoveryMessagesPoolShouldAddMessagesAfterBreakpoint(t *testing.T) { Sort: sort, Limit: int64(4000), }).Return(storageMessages, nil) - mockStorage.EXPECT().GetStringInternalId(ctx, &entities.Message{ + mockStorage.EXPECT().GetStringInternalId(ctx, &message.Message{ ID: "id2", InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreByDefaultAlgorithm() - 23457, LastUsage: &now, LastScoreSubtract: 23457, }).Return("65456") @@ -392,7 +395,7 @@ func 
TestRecoveryMessagesPoolInitWithEmptyStorageShouldNotStartRecovery(t *testi }, Sort: sort, Limit: int64(1), - }).Return([]entities.Message{}, nil) + }).Return([]message.Message{}, nil) q := NewQueue(&audit.AuditorImpl{}, mockStorage, nil, mockCache) @@ -426,19 +429,19 @@ func TestRecoveryMessagesPoolInitNonEmptyStorageShouldStartRecovery(t *testing.T }, Sort: sort, Limit: int64(1), - }).Return([]entities.Message{ + }).Return([]message.Message{ {InternalId: storageLast}, }, nil) - mockStorage.EXPECT().GetStringInternalId(ctx, &entities.Message{ + mockStorage.EXPECT().GetStringInternalId(ctx, &message.Message{ InternalId: storageLast, }).Return(storageLast.Hex()) - storageMessages := make([]entities.Message, 4000) - cacheMessages := make([]*entities.Message, 4000) + storageMessages := make([]message.Message, 4000) + cacheMessages := make([]*message.Message, 4000) insertedIds := make([]string, 4000) for i := 0; i < len(storageMessages); i++ { - storageMessages[i] = entities.Message{ + storageMessages[i] = message.Message{ Queue: "queue", ID: strconv.Itoa(i), InternalId: primitive.NewObjectID(), @@ -484,20 +487,20 @@ func TestRecoveryMessagesPoolAlreadyRunning(t *testing.T) { now := time.Now() - cacheMessages := []*entities.Message{{ + cacheMessages := []*message.Message{{ ID: "id", Queue: "queue", InternalId: 4321, ExpiryDate: time.Time{}, LastUsage: &now, - Score: entities.GetScore(&now, 54321), + Score: score.GetScoreByDefaultAlgorithm() - 54321, LastScoreSubtract: 54321, }, { ID: "id2", InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreByDefaultAlgorithm() - 23457, LastUsage: &now, LastScoreSubtract: 23457, }} @@ -513,12 +516,12 @@ func TestRecoveryMessagesPoolAlreadyRunning(t *testing.T) { mockCache.EXPECT().Insert(ctx, "queue", cacheMessages).Return([]string{"id", "id2"}, nil) - storageMessages := []entities.Message{{ + storageMessages := []message.Message{{ ID: "id", InternalId: 4321, 
Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 54321), + Score: score.GetScoreByDefaultAlgorithm() - 54321, LastUsage: &now, LastScoreSubtract: 54321, }, { @@ -526,7 +529,7 @@ func TestRecoveryMessagesPoolAlreadyRunning(t *testing.T) { InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreByDefaultAlgorithm() - 23457, LastUsage: &now, LastScoreSubtract: 23457, }} @@ -551,12 +554,12 @@ func TestRecoveryMessagesPoolAlreadyRunning(t *testing.T) { Sort: sort, Limit: int64(4000), }).Return(storageMessages, nil) - mockStorage.EXPECT().GetStringInternalId(ctx, &entities.Message{ + mockStorage.EXPECT().GetStringInternalId(ctx, &message.Message{ ID: "id2", InternalId: 65456, Queue: "queue", ExpiryDate: time.Time{}, - Score: entities.GetScore(&now, 23457), + Score: score.GetScoreByDefaultAlgorithm() - 23457, LastUsage: &now, LastScoreSubtract: 23457, }).Return("65456") @@ -677,10 +680,10 @@ func TestRemoveTTLMessagesShouldRemoveExpiredElements(t *testing.T) { }, Sort: sort, }).DoAndReturn( - func(ctx context.Context, opt *storage.FindOptions) ([]entities.Message, error) { + func(ctx context.Context, opt *storage.FindOptions) ([]message.Message, error) { expiryDate = *opt.InternalFilter.ExpiryDate - return []entities.Message{{ID: "1", Queue: "q1"}, {ID: "2", Queue: "q2"}}, nil + return []message.Message{{ID: "1", Queue: "q1"}, {ID: "2", Queue: "q2"}}, nil }, ) @@ -727,7 +730,7 @@ func TestRemoveExceedingMessagesNoQueuesShouldDoNothing(t *testing.T) { defer mockCtrl.Finish() mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().ListQueueConfigurations(gomock.Any()).Return([]*entities.QueueConfiguration{}, nil) + mockStorage.EXPECT().ListQueueConfigurations(gomock.Any()).Return([]*configuration.QueueConfiguration{}, nil) mockCache := mocks.NewMockCache(mockCtrl) mockCache.EXPECT().Get(gomock.Any(), cache.RECOVERY_RUNNING).Return("", nil) @@ -761,7 +764,7 @@ func 
TestRemoveExceedingMessagesNoQueuesShouldCallRemoveMethodToEachQueue(t *tes defer mockCtrl.Finish() mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().ListQueueConfigurations(gomock.Any()).Return([]*entities.QueueConfiguration{{Queue: "q1"}, {Queue: "q2"}}, nil) + mockStorage.EXPECT().ListQueueConfigurations(gomock.Any()).Return([]*configuration.QueueConfiguration{{Queue: "q1"}, {Queue: "q2"}}, nil) mockCache := mocks.NewMockCache(mockCtrl) mockCache.EXPECT().Get(gomock.Any(), cache.RECOVERY_RUNNING).Return("", nil) diff --git a/internal/queue/queue_test.go b/internal/queue/queue_test.go index b811fb1..79a655c 100644 --- a/internal/queue/queue_test.go +++ b/internal/queue/queue_test.go @@ -11,9 +11,12 @@ import ( "github.com/golang/mock/gomock" "github.com/stretchr/testify/require" "github.com/takenet/deckard/internal/audit" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/mocks" "github.com/takenet/deckard/internal/queue/cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" ) @@ -36,7 +39,7 @@ func TestPull(t *testing.T) { Queue: "test", }, Limit: int64(1), - }).Return([]entities.Message{ + }).Return([]message.Message{ { ID: "123", Queue: "test", @@ -44,15 +47,15 @@ func TestPull(t *testing.T) { }, nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(1), int64(0)).Return([]string{"123"}, nil) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(1), nil, nil).Return([]string{"123"}, nil) q := NewQueue(nil, mockStorage, nil, mockCache) - messages, err := q.Pull(ctx, "test", 1, 0) + messages, err := q.Pull(ctx, "test", 1, nil, nil) require.NoError(t, err) require.Len(t, *messages, 1, "expected one 
message") - require.Equal(t, (*messages)[0], entities.Message{ID: "123", Queue: "test"}) + require.Equal(t, (*messages)[0], message.Message{ID: "123", Queue: "test"}) } func TestAckStorageErrorShouldResultError(t *testing.T) { @@ -61,23 +64,19 @@ func TestAckStorageErrorShouldResultError(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - now := time.Now() - mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().Ack(gomock.Any(), &entities.Message{ - ID: "id", - Queue: "queue", - LastUsage: &now, - Score: entities.GetScore(&now, 0), + mockStorage.EXPECT().Ack(gomock.Any(), &message.Message{ + ID: "id", + Queue: "queue", }).Return(int64(0), errors.New("ack_error")) mockCache := mocks.NewMockCache(mockCtrl) q := NewQueue(nil, mockStorage, nil, mockCache) - result, err := q.Ack(ctx, &entities.Message{ + result, err := q.Ack(ctx, &message.Message{ ID: "id", Queue: "queue", - }, now, "") + }, "") require.Error(t, err) require.False(t, result) @@ -89,25 +88,26 @@ func TestAckMakeAvailableErrorShouldResultError(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - now := time.Now() + testTime := time.UnixMilli(1688060713537) + defer dtime.SetNowProviderValues(testTime)() - message := &entities.Message{ + msg := &message.Message{ ID: "id", Queue: "queue", - LastUsage: &now, - Score: entities.GetScore(&now, 0), + LastUsage: &testTime, } mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().Ack(gomock.Any(), message).Return(int64(1), nil) + mockStorage.EXPECT().Ack(gomock.Any(), msg).Return(int64(1), nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().MakeAvailable(gomock.Any(), message).Return(false, errors.New("make available error")) + mockCache.EXPECT().MakeAvailable(gomock.Any(), msg).Return(false, errors.New("make available error")) q := NewQueue(nil, mockStorage, nil, mockCache) - result, err := q.Ack(ctx, &entities.Message{ - ID: "id", - Queue: "queue", - }, now, "") + 
result, err := q.Ack(ctx, &message.Message{ + ID: "id", + LastUsage: &testTime, + Queue: "queue", + }, "") require.Error(t, err) require.False(t, result) @@ -119,36 +119,32 @@ func TestAckSuccessfulShouldAudit(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - now := time.Now() - - message := &entities.Message{ - ID: "id", - Queue: "queue", - LastUsage: &now, - Score: entities.GetScore(&now, 0), + msg := &message.Message{ + ID: "id", + Queue: "queue", } mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().Ack(gomock.Any(), message).Return(int64(1), nil) + mockStorage.EXPECT().Ack(gomock.Any(), msg).Return(int64(1), nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().MakeAvailable(gomock.Any(), message).Return(true, nil) + mockCache.EXPECT().MakeAvailable(gomock.Any(), msg).Return(true, nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) mockAuditor.EXPECT().Store(gomock.Any(), audit.Entry{ - ID: message.ID, - Queue: message.Queue, - LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.ACK, Reason: "reason", }) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - result, err := q.Ack(ctx, &entities.Message{ + result, err := q.Ack(ctx, &message.Message{ ID: "id", Queue: "queue", - }, now, "reason") + }, "reason") require.NoError(t, err) require.True(t, result) @@ -159,7 +155,7 @@ func TestAckNilMessage(t *testing.T) { q := NewQueue(nil, nil, nil, nil) - result, err := q.Ack(ctx, nil, time.Time{}, "") + result, err := q.Ack(ctx, nil, "") require.NoError(t, err) require.False(t, result) @@ -170,7 +166,7 @@ func TestAckWithoutQueue(t *testing.T) { q := NewQueue(nil, nil, nil, nil) - result, err := q.Ack(ctx, &entities.Message{ID: "1"}, time.Time{}, "") + result, err := q.Ack(ctx, &message.Message{ID: "1"}, "") require.Error(t, err) require.False(t, result) @@ 
-181,7 +177,7 @@ func TestAckWithoutId(t *testing.T) { q := NewQueue(nil, nil, nil, nil) - result, err := q.Ack(ctx, &entities.Message{Queue: "queue"}, time.Time{}, "") + result, err := q.Ack(ctx, &message.Message{Queue: "queue"}, "") require.Error(t, err) require.False(t, result) @@ -195,19 +191,19 @@ func TestNackMakeAvailableErrorShouldResultError(t *testing.T) { now := time.Now() - message := &entities.Message{ + msg := &message.Message{ ID: "id", Queue: "queue", - Score: entities.MaxScore(), + Score: score.Min, } mockStorage := mocks.NewMockStorage(mockCtrl) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().MakeAvailable(gomock.Any(), message).Return(false, errors.New("make available error")) + mockCache.EXPECT().MakeAvailable(gomock.Any(), msg).Return(false, errors.New("make available error")) q := NewQueue(nil, mockStorage, nil, mockCache) - result, err := q.Nack(ctx, &entities.Message{ + result, err := q.Nack(ctx, &message.Message{ ID: "id", Queue: "queue", }, now, "") @@ -224,10 +220,10 @@ func TestNackSuccessfulShouldAudit(t *testing.T) { now := time.Now() - expectCall := &entities.Message{ + expectCall := &message.Message{ ID: "id", Queue: "queue", - Score: entities.MaxScore(), + Score: score.Min, } mockStorage := mocks.NewMockStorage(mockCtrl) mockCache := mocks.NewMockCache(mockCtrl) @@ -246,7 +242,7 @@ func TestNackSuccessfulShouldAudit(t *testing.T) { q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - result, err := q.Nack(ctx, &entities.Message{ + result, err := q.Nack(ctx, &message.Message{ ID: "id", Queue: "queue", }, now, "reason") @@ -271,7 +267,7 @@ func TestNackWithoutQueue(t *testing.T) { q := NewQueue(nil, nil, nil, nil) - result, err := q.Nack(ctx, &entities.Message{ID: "1"}, time.Now(), "") + result, err := q.Nack(ctx, &message.Message{ID: "1"}, time.Now(), "") require.Error(t, err) require.False(t, result) @@ -282,7 +278,7 @@ func TestNackWithoutId(t *testing.T) { q := NewQueue(nil, nil, nil, nil) - result, err := 
q.Nack(ctx, &entities.Message{Queue: "queue"}, time.Now(), "") + result, err := q.Nack(ctx, &message.Message{Queue: "queue"}, time.Now(), "") require.Error(t, err) require.False(t, result) @@ -305,7 +301,7 @@ func TestPullShouldDeleteNotFoundInStorageAndReturnRemaining(t *testing.T) { Queue: "test", }, Limit: int64(3), - }).Return([]entities.Message{ + }).Return([]message.Message{ { ID: "1", Queue: "test", @@ -319,10 +315,10 @@ func TestPullShouldDeleteNotFoundInStorageAndReturnRemaining(t *testing.T) { }, Limit: int64(2), Retry: true, - }).Return([]entities.Message{}, nil) + }).Return([]message.Message{}, nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), int64(0)).Return([]string{"1", "2", "3"}, nil) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), nil, nil).Return([]string{"1", "2", "3"}, nil) mockCache.EXPECT().Remove(gomock.Any(), "test", "2", "3").Return(int64(2), nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) @@ -338,11 +334,11 @@ func TestPullShouldDeleteNotFoundInStorageAndReturnRemaining(t *testing.T) { }) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - messages, err := q.Pull(ctx, "test", 3, 0) + messages, err := q.Pull(ctx, "test", 3, nil, nil) require.NoError(t, err) require.Len(t, *messages, 1, "expected one message") - require.Equal(t, (*messages)[0], entities.Message{ID: "1", Queue: "test"}) + require.Equal(t, (*messages)[0], message.Message{ID: "1", Queue: "test"}) } func TestPullElementsFromRetryShouldNotAuditMissingElements(t *testing.T) { @@ -372,7 +368,7 @@ func TestPullElementsFromRetryShouldNotAuditMissingElements(t *testing.T) { }, Limit: int64(3), Retry: true, - }).Return([]entities.Message{ + }).Return([]message.Message{ { ID: "1", Queue: "test", @@ -386,13 +382,13 @@ func TestPullElementsFromRetryShouldNotAuditMissingElements(t *testing.T) { }, nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", 
int64(3), int64(0)).Return([]string{"1", "2", "3"}, nil) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), nil, nil).Return([]string{"1", "2", "3"}, nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - messages, err := q.Pull(ctx, "test", 3, 0) + messages, err := q.Pull(ctx, "test", 3, nil, nil) require.NoError(t, err) require.Len(t, *messages, 3) @@ -415,7 +411,7 @@ func TestPullElementsFromBothFirstTryAndRetryShouldMergeElementsAndKeepScoreOrde Queue: "test", }, Limit: int64(3), - }).Return([]entities.Message{ + }).Return([]message.Message{ { ID: "2", Queue: "test", @@ -431,7 +427,7 @@ func TestPullElementsFromBothFirstTryAndRetryShouldMergeElementsAndKeepScoreOrde }, Limit: int64(2), Retry: true, - }).Return([]entities.Message{ + }).Return([]message.Message{ { ID: "1", Queue: "test", @@ -444,13 +440,13 @@ func TestPullElementsFromBothFirstTryAndRetryShouldMergeElementsAndKeepScoreOrde }, nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), int64(0)).Return([]string{"1", "2", "3"}, nil) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), nil, nil).Return([]string{"1", "2", "3"}, nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - messages, err := q.Pull(ctx, "test", 3, 0) + messages, err := q.Pull(ctx, "test", 3, nil, nil) require.NoError(t, err) require.Len(t, *messages, 3) @@ -490,7 +486,7 @@ func TestPullNothingFoundOnStorage(t *testing.T) { }).Return(nil, nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), int64(0)).Return([]string{"1", "2", "3"}, nil) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(3), nil, nil).Return([]string{"1", "2", "3"}, nil) mockCache.EXPECT().Remove(gomock.Any(), "test", "1", "2", "3").Return(int64(3), nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) @@ 
-511,7 +507,7 @@ func TestPullNothingFoundOnStorage(t *testing.T) { }) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - messages, err := q.Pull(ctx, "test", 3, 0) + messages, err := q.Pull(ctx, "test", 3, nil, nil) require.NoError(t, err) require.Nil(t, messages) @@ -524,11 +520,11 @@ func TestPullCacheError(t *testing.T) { defer mockCtrl.Finish() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(1), int64(0)).Return(nil, errors.New("cache_error")) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(1), nil, nil).Return(nil, errors.New("cache_error")) q := NewQueue(nil, nil, nil, mockCache) - messages, err := q.Pull(ctx, "test", 1, 0) + messages, err := q.Pull(ctx, "test", 1, nil, nil) require.Error(t, err) require.Nil(t, messages) @@ -541,11 +537,11 @@ func TestPullCacheNoResults(t *testing.T) { defer mockCtrl.Finish() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(1), int64(0)).Return(nil, nil) + mockCache.EXPECT().PullMessages(gomock.Any(), "test", int64(1), nil, nil).Return(nil, nil) q := NewQueue(nil, nil, nil, mockCache) - messages, err := q.Pull(ctx, "test", 1, 0) + messages, err := q.Pull(ctx, "test", 1, nil, nil) require.NoError(t, err) require.Nil(t, messages) @@ -702,26 +698,26 @@ func TestAddMessagesToCacheSameIdInSameRequestShouldSetLastElementScore(t *testi now := time.Now() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().Insert(gomock.Any(), "queue", []*entities.Message{ + mockCache.EXPECT().Insert(gomock.Any(), "queue", []*message.Message{ { ID: "id", Queue: "queue", LastUsage: &now, - Score: entities.GetScore(&now, 0), + Score: score.GetScoreByDefaultAlgorithm(), }, { // No last usage ID: "id", Queue: "queue", - Score: entities.MaxScore(), + Score: score.Min, }, }).Return([]string{"id", "id"}, nil) - mockCache.EXPECT().Insert(gomock.Any(), "queue2", []*entities.Message{ + 
mockCache.EXPECT().Insert(gomock.Any(), "queue2", []*message.Message{ { ID: "id", Queue: "queue2", LastUsage: &now, - Score: entities.GetScore(&now, 0), + Score: score.GetScoreByDefaultAlgorithm(), }, }).Return([]string{"id"}, nil) @@ -739,22 +735,22 @@ func TestAddMessagesToCacheSameIdInSameRequestShouldSetLastElementScore(t *testi q := NewQueue(mockAuditor, nil, nil, mockCache) - messages := []*entities.Message{{ + messages := []*message.Message{{ ID: "id", Queue: "queue", LastUsage: &now, - Score: entities.GetScore(&now, 0), + Score: score.GetScoreByDefaultAlgorithm(), }, { // No last usage ID: "id", Queue: "queue", - Score: entities.MaxScore(), + Score: score.Min, }, { // Different queue with score ID: "id", Queue: "queue2", LastUsage: &now, - Score: entities.GetScore(&now, 0), + Score: score.GetScoreByDefaultAlgorithm(), }} count, err := q.AddMessagesToCache(ctx, messages...) @@ -770,7 +766,7 @@ func TestAddMessagesToStorageWithoutEditingQueueConfiguration(t *testing.T) { now := time.Now() - messages := []*entities.Message{{ + messages := []*message.Message{{ ID: "id", Queue: "queue", LastUsage: &now, @@ -807,22 +803,22 @@ func TestAddMessagesError(t *testing.T) { now := time.Now() mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().Insert(gomock.Any(), "queue", []*entities.Message{ + mockCache.EXPECT().Insert(gomock.Any(), "queue", []*message.Message{ { ID: "id", Queue: "queue", LastUsage: &now, - Score: entities.GetScore(&now, 0), + Score: score.GetScoreByDefaultAlgorithm(), }, }).Return(nil, errors.New("insert error")) q := NewQueue(nil, nil, nil, mockCache) - messages := []*entities.Message{{ + messages := []*message.Message{{ ID: "id", Queue: "queue", LastUsage: &now, - Score: entities.GetScore(&now, 0), + Score: score.GetScoreByDefaultAlgorithm(), }} _, err := q.AddMessagesToCache(ctx, messages...) 
@@ -840,7 +836,7 @@ func TestRemoveExceedingMessagesQueueZeroMaxElementsShouldDoNothing(t *testing.T q := NewQueue(&audit.AuditorImpl{}, mockStorage, NewQueueConfigurationService(ctx, mockStorage), mockCache) - require.NoError(t, q.removeExceedingMessagesFromQueue(ctx, &entities.QueueConfiguration{MaxElements: 0, Queue: "q1"})) + require.NoError(t, q.removeExceedingMessagesFromQueue(ctx, &configuration.QueueConfiguration{MaxElements: 0, Queue: "q1"})) } func TestRemoveExceedingMessagesEmptyQueueShouldDoNothing(t *testing.T) { @@ -851,7 +847,7 @@ func TestRemoveExceedingMessagesEmptyQueueShouldDoNothing(t *testing.T) { mockStorage := mocks.NewMockStorage(mockCtrl) - queueConfiguration := &entities.QueueConfiguration{MaxElements: 2, Queue: "q1"} + queueConfiguration := &configuration.QueueConfiguration{MaxElements: 2, Queue: "q1"} mockStorage.EXPECT().Count(gomock.Any(), &storage.FindOptions{InternalFilter: &storage.InternalFilter{Queue: "q1"}}).Return(int64(0), nil) mockCache := mocks.NewMockCache(mockCtrl) @@ -867,7 +863,7 @@ func TestRemoveExceedingMessagesErrorCountingShouldReturnError(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - queueConfiguration := &entities.QueueConfiguration{MaxElements: 2, Queue: "q1"} + queueConfiguration := &configuration.QueueConfiguration{MaxElements: 2, Queue: "q1"} mockStorage := mocks.NewMockStorage(mockCtrl) mockStorage.EXPECT().Count(gomock.Any(), &storage.FindOptions{InternalFilter: &storage.InternalFilter{Queue: "q1"}}).Return(int64(0), fmt.Errorf("anyerr")) @@ -890,7 +886,7 @@ func TestRemoveExceedingMessagesShouldRemoveExceedingElements(t *testing.T) { maxElements := int64(2) count := int64(5) - queueConfiguration := &entities.QueueConfiguration{MaxElements: maxElements, Queue: "q1"} + queueConfiguration := &configuration.QueueConfiguration{MaxElements: maxElements, Queue: "q1"} mockStorage.EXPECT().Count(gomock.Any(), &storage.FindOptions{InternalFilter: &storage.InternalFilter{Queue: 
"q1"}}).Return(count, nil) @@ -907,7 +903,7 @@ func TestRemoveExceedingMessagesShouldRemoveExceedingElements(t *testing.T) { "_id": 0, }, Sort: sort, - }).Return([]entities.Message{{ID: "1"}, {ID: "2"}}, nil) + }).Return([]message.Message{{ID: "1"}, {ID: "2"}}, nil) mockStorage.EXPECT().Remove(gomock.Any(), "q1", []string{"1", "2"}).Return(int64(2), nil) @@ -930,7 +926,7 @@ func TestRemoveExceedingMessagesFindErrorShouldRemoveResultError(t *testing.T) { maxElements := int64(2) count := int64(5) - queueConfiguration := &entities.QueueConfiguration{MaxElements: maxElements, Queue: "q1"} + queueConfiguration := &configuration.QueueConfiguration{MaxElements: maxElements, Queue: "q1"} mockStorage.EXPECT().Count(gomock.Any(), &storage.FindOptions{InternalFilter: &storage.InternalFilter{Queue: "q1"}}).Return(count, nil) @@ -957,14 +953,14 @@ func TestRemoveExceedingMessagesRemoveErrorShouldResultError(t *testing.T) { maxElements := int64(2) count := int64(5) - queueConfiguration := &entities.QueueConfiguration{MaxElements: maxElements, Queue: "q1"} + queueConfiguration := &configuration.QueueConfiguration{MaxElements: maxElements, Queue: "q1"} mockStorage.EXPECT().Count(gomock.Any(), &storage.FindOptions{InternalFilter: &storage.InternalFilter{Queue: "q1"}}).Return(count, nil) sort := orderedmap.NewOrderedMap[string, int]() sort.Set("expiry_date", 1) - mockStorage.EXPECT().Find(gomock.Any(), gomock.Any()).Return([]entities.Message{{ID: "1"}, {ID: "2"}}, nil) + mockStorage.EXPECT().Find(gomock.Any(), gomock.Any()).Return([]message.Message{{ID: "1"}, {ID: "2"}}, nil) mockStorage.EXPECT().Remove(gomock.Any(), "q1", []string{"1", "2"}).Return(int64(0), fmt.Errorf("anyerror")) mockCache := mocks.NewMockCache(mockCtrl) @@ -1041,27 +1037,64 @@ func TestAckWithLockShouldLock(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - now := time.Now() + msg := &message.Message{ + ID: "id", + Queue: "queue", + LockMs: 10, + } + mockStorage := 
mocks.NewMockStorage(mockCtrl) + mockStorage.EXPECT().Ack(gomock.Any(), msg).Return(int64(1), nil) + mockCache := mocks.NewMockCache(mockCtrl) + mockCache.EXPECT().LockMessage(gomock.Any(), msg, cache.LOCK_ACK).Return(true, nil) - message := &entities.Message{ - ID: "id", - Queue: "queue", - LastUsage: &now, - Score: entities.GetScore(&now, 0), - LockMs: 10, + mockAuditor := mocks.NewMockAuditor(mockCtrl) + + mockAuditor.EXPECT().Store(gomock.Any(), audit.Entry{ + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, + Signal: audit.ACK, + Reason: "reason", + LockMs: 10, + }) + + q := NewQueue(mockAuditor, mockStorage, nil, mockCache) + + result, err := q.Ack(ctx, &message.Message{ + ID: "id", + Queue: "queue", + LockMs: 10, + }, "reason") + + require.NoError(t, err) + require.True(t, result) +} + +func TestAckWithLockAndScoreShouldLockWithScore(t *testing.T) { + t.Parallel() + + mockCtrl := gomock.NewController(t) + defer mockCtrl.Finish() + + msg := &message.Message{ + ID: "id", + Queue: "queue", + LockMs: 10, + Score: 1234, } mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().Ack(gomock.Any(), message).Return(int64(1), nil) + mockStorage.EXPECT().Ack(gomock.Any(), msg).Return(int64(1), nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().LockMessage(gomock.Any(), message, cache.LOCK_ACK).Return(true, nil) + mockCache.EXPECT().LockMessage(gomock.Any(), msg, cache.LOCK_ACK).Return(true, nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) mockAuditor.EXPECT().Store(gomock.Any(), audit.Entry{ - ID: message.ID, - Queue: message.Queue, - LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.ACK, Reason: "reason", LockMs: 10, @@ -1069,12 +1102,12 @@ func TestAckWithLockShouldLock(t *testing.T) { q := NewQueue(mockAuditor, mockStorage, nil, 
mockCache) - result, err := q.Ack(ctx, &entities.Message{ - ID: "id", - Queue: "queue", - LastUsage: &now, - LockMs: 10, - }, now, "reason") + result, err := q.Ack(ctx, &message.Message{ + ID: "id", + Queue: "queue", + LockMs: 10, + Score: 1234, + }, "reason") require.NoError(t, err) require.True(t, result) @@ -1086,31 +1119,26 @@ func TestAckWithLockErrorShouldResultError(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() - now := time.Now() - - message := &entities.Message{ - ID: "id", - Queue: "queue", - LastUsage: &now, - Score: entities.GetScore(&now, 0), - LockMs: 10, + msg := &message.Message{ + ID: "id", + Queue: "queue", + LockMs: 10, } mockStorage := mocks.NewMockStorage(mockCtrl) - mockStorage.EXPECT().Ack(gomock.Any(), message).Return(int64(1), nil) + mockStorage.EXPECT().Ack(gomock.Any(), msg).Return(int64(1), nil) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().LockMessage(gomock.Any(), message, cache.LOCK_ACK).Return(false, fmt.Errorf("error")) + mockCache.EXPECT().LockMessage(gomock.Any(), msg, cache.LOCK_ACK).Return(false, fmt.Errorf("error")) mockAuditor := mocks.NewMockAuditor(mockCtrl) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - result, err := q.Ack(ctx, &entities.Message{ - ID: "id", - Queue: "queue", - LastUsage: &now, - LockMs: 10, - }, now, "reason") + result, err := q.Ack(ctx, &message.Message{ + ID: "id", + Queue: "queue", + LockMs: 10, + }, "reason") require.Error(t, err) require.False(t, result) @@ -1124,7 +1152,7 @@ func TestNackWithLockShouldLock(t *testing.T) { now := time.Now() - message := &entities.Message{ + msg := &message.Message{ ID: "id", Queue: "queue", LockMs: 10, @@ -1133,14 +1161,14 @@ func TestNackWithLockShouldLock(t *testing.T) { mockStorage := mocks.NewMockStorage(mockCtrl) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().LockMessage(gomock.Any(), message, cache.LOCK_NACK).Return(true, nil) + mockCache.EXPECT().LockMessage(gomock.Any(), msg, 
cache.LOCK_NACK).Return(true, nil) mockAuditor := mocks.NewMockAuditor(mockCtrl) mockAuditor.EXPECT().Store(gomock.Any(), audit.Entry{ - ID: message.ID, - Queue: message.Queue, - LastScoreSubtract: message.LastScoreSubtract, - Breakpoint: message.Breakpoint, + ID: msg.ID, + Queue: msg.Queue, + LastScoreSubtract: msg.LastScoreSubtract, + Breakpoint: msg.Breakpoint, Signal: audit.NACK, Reason: "reason", LockMs: 10, @@ -1148,7 +1176,7 @@ func TestNackWithLockShouldLock(t *testing.T) { q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - result, err := q.Nack(ctx, &entities.Message{ + result, err := q.Nack(ctx, &message.Message{ ID: "id", Queue: "queue", LockMs: 10, @@ -1166,7 +1194,7 @@ func TestNackWithLockErrorShouldResultError(t *testing.T) { now := time.Now() - message := &entities.Message{ + msg := &message.Message{ ID: "id", Queue: "queue", LockMs: 10, @@ -1175,13 +1203,13 @@ func TestNackWithLockErrorShouldResultError(t *testing.T) { mockStorage := mocks.NewMockStorage(mockCtrl) mockCache := mocks.NewMockCache(mockCtrl) - mockCache.EXPECT().LockMessage(gomock.Any(), message, cache.LOCK_NACK).Return(false, fmt.Errorf("error")) + mockCache.EXPECT().LockMessage(gomock.Any(), msg, cache.LOCK_NACK).Return(false, fmt.Errorf("error")) mockAuditor := mocks.NewMockAuditor(mockCtrl) q := NewQueue(mockAuditor, mockStorage, nil, mockCache) - result, err := q.Nack(ctx, &entities.Message{ + result, err := q.Nack(ctx, &message.Message{ ID: "id", Queue: "queue", LockMs: 10, diff --git a/internal/queue/score/score.go b/internal/queue/score/score.go new file mode 100644 index 0000000..18f1242 --- /dev/null +++ b/internal/queue/score/score.go @@ -0,0 +1,98 @@ +package score + +import ( + "time" + + "github.com/takenet/deckard/internal/dtime" +) + +// TODO: keeping these as variables to get their reference and use less memory. Is this really necessary? 
+// TODO: benchmark this +var Max float64 = 9007199254740992 +var Min float64 = 0 +var Undefined float64 = -1 + +// GetPullMaxScore returns the upper threshold priority filter for a pull request. +// If the score is zero, it returns nil. +// If the score is greater than Max, it returns Max. +// If the score is less than Min, it returns Min. +// Otherwise, it returns the score. +func GetPullMaxScore(score float64) *float64 { + if score == 0 { + return nil + } + + if score > Max { + return &Max + } + + if score < Min { + return &Min + } + + return &score +} + +// GetPullMinScore returns the lower threshold priority filter for a pull request. +// If the score is zero, it returns nil. +// If the score is less than or equal to Min, it returns Min. +// If the score is greater than Max, it returns Max. +// Otherwise, it returns the score. +func GetPullMinScore(score float64) *float64 { + if score == 0 { + return nil + } + + if score > Max { + return &Max + } + + if score <= Min { + return &Min + } + + return &score +} + +// Results in the score to be used when a new message is added to the queue +// +// requestScore: the requested score for the message +// +// If the requestScore is 0, the value will be set with the current timestamp in milliseconds. +// +// The maximum score accepted by Deckard is 9007199254740992 and the minimum is 0, so the requestScore will be capped to these values. +// +// Negative scores are not allowed and will be converted to 0 +func GetAddScore(addRequestScore float64) float64 { + if addRequestScore == 0 { + return GetScoreByDefaultAlgorithm() + } + + if addRequestScore < Min { + return Min + } + + if addRequestScore > Max { + return Max + } + + return addRequestScore +} + +// GetScoreByDefaultAlgorithm returns the score using the default algorithm. +// The default score is the current timestamp in milliseconds. 
+func GetScoreByDefaultAlgorithm() float64 { + return float64(dtime.NowMs()) +} + +// GetScoreFromTime returns the score for a given time. +// The score is milliseconds representation of the time. +func GetScoreFromTime(time *time.Time) float64 { + return float64(dtime.TimeToMs(time)) +} + +// IsUndefined returns true if the score is equal to Undefined +// A score is undefined if it is equal to -1 +func IsUndefined(score float64) bool { + return score == Undefined +} diff --git a/internal/queue/score/score_test.go b/internal/queue/score/score_test.go new file mode 100644 index 0000000..d5bc691 --- /dev/null +++ b/internal/queue/score/score_test.go @@ -0,0 +1,176 @@ +package score_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/takenet/deckard/internal/dtime" + "github.com/takenet/deckard/internal/queue/score" +) + +func TestGetPullMaxScore(t *testing.T) { + t.Run("Score is zero should return nil", func(t *testing.T) { + scoreValue := 0.0 + + got := score.GetPullMaxScore(scoreValue) + + require.Nil(t, got, "Unexpected result for score = 0") + }) + + t.Run("Score is greater than Max should return Max", func(t *testing.T) { + scoreValue := score.Max + 10 + + got := score.GetPullMaxScore(scoreValue) + + require.Equal(t, score.Max, *got, "Unexpected result for score > Max") + }) + + t.Run("Score is less than Min should return Min", func(t *testing.T) { + scoreValue := score.Min - 10 + + got := score.GetPullMaxScore(scoreValue) + + require.Equal(t, score.Min, *got, "Unexpected result for score < Min") + }) + + t.Run("Score is within range shuold return score", func(t *testing.T) { + scoreValue := 100.0 + + got := score.GetPullMaxScore(scoreValue) + + require.Equal(t, scoreValue, *got, "Unexpected result for score within range") + }) +} + +func TestGetPullMinScore(t *testing.T) { + t.Run("Score is zero shuold return nil", func(t *testing.T) { + scoreValue := 0.0 + + got := score.GetPullMinScore(scoreValue) + + 
require.Nil(t, got, "Unexpected result for score = 0") + }) + + t.Run("Score is greater than Max should return Max", func(t *testing.T) { + scoreValue := score.Max + 10 + + got := score.GetPullMinScore(scoreValue) + + require.Equal(t, score.Max, *got, "Unexpected result for score > Max") + }) + + t.Run("Score is lower than Min should return Min", func(t *testing.T) { + scoreValue := score.Min - 10 + + got := score.GetPullMinScore(scoreValue) + + require.Equal(t, score.Min, *got, "Unexpected result for score > Max") + }) + + t.Run("Score is equal to zero should return nil", func(t *testing.T) { + got := score.GetPullMinScore(0) + + require.Nil(t, got, "Unexpected result for score <= Min") + }) + + t.Run("Score is within range should return score", func(t *testing.T) { + scoreValue := 100.0 + + got := score.GetPullMinScore(scoreValue) + + require.Equal(t, scoreValue, *got, "Unexpected result for score within range") + }) +} + +func TestGetAddScore(t *testing.T) { + t.Run("Request score is zero should return default score", func(t *testing.T) { + scoreValue := 0.0 + + got := score.GetAddScore(scoreValue) + + want := score.GetScoreByDefaultAlgorithm() + + require.Equal(t, want, got, "Unexpected result for request score = 0") + }) + + t.Run("Request score is less than Min should return Min", func(t *testing.T) { + scoreValue := -10.0 + + got := score.GetAddScore(scoreValue) + + require.Equal(t, score.Min, got, "Unexpected result for request score < Min") + }) + + t.Run("Request score is greater than Max should return Max", func(t *testing.T) { + scoreValue := score.Max + 10 + + got := score.GetAddScore(scoreValue) + + require.Equal(t, score.Max, got, "Unexpected result for request score > Max") + }) + + t.Run("Request score is within range should return request score", func(t *testing.T) { + scoreValue := 100.0 + + got := score.GetAddScore(scoreValue) + + require.Equal(t, scoreValue, got, "Unexpected result for request score within range") + }) +} + +func 
TestGetScoreByDefaultAlgorithm(t *testing.T) { + t.Run("Verify score is not zero", func(t *testing.T) { + got := score.GetScoreByDefaultAlgorithm() + + require.NotEqual(t, 0.0, got, "Unexpected score value") + }) + + t.Run("Verify score is whitin times", func(t *testing.T) { + before := time.Now().Add(-time.Second) + got := score.GetScoreByDefaultAlgorithm() + after := time.Now().Add(time.Second) + + require.True(t, float64(dtime.TimeToMs(&before)) < got && float64(dtime.TimeToMs(&after)) > got, "Unexpected score value") + }) +} + +func TestGetScoreFromTime(t *testing.T) { + t.Run("Verify score for 0 Unix epoch should be zero", func(t *testing.T) { + unixEpoch := time.Unix(0, 0) + + got := score.GetScoreFromTime(&unixEpoch) + + require.Equal(t, 0.0, got, "Unexpected score value for Unix epoch") + }) + + t.Run("Verify score for defined Unix epoch should be milliseconds representation", func(t *testing.T) { + unixEpoch := time.UnixMilli(432141234) + + got := score.GetScoreFromTime(&unixEpoch) + + require.Equal(t, float64(432141234), got, "Unexpected score value for Unix epoch") + }) + + t.Run("Verify score for current time should not be zero", func(t *testing.T) { + now := time.Now() + + got := score.GetScoreFromTime(&now) + + require.NotEqual(t, 0.0, got, "Unexpected score value for current time") + }) +} + +func TestIsUndefined(t *testing.T) { + t.Run("Score is undefined should return true", func(t *testing.T) { + got := score.IsUndefined(score.Undefined) + + require.True(t, got, "Unexpected result for undefined score") + }) + + t.Run("Score is defined should return false", func(t *testing.T) { + got := score.IsUndefined(100) + + require.False(t, got, "Unexpected result for defined score") + }) +} diff --git a/internal/queue/storage/memory_storage.go b/internal/queue/storage/memory_storage.go index 748819f..1372bba 100644 --- a/internal/queue/storage/memory_storage.go +++ b/internal/queue/storage/memory_storage.go @@ -9,15 +9,17 @@ import ( "sync" "time" - 
"github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/dtime" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" "github.com/takenet/deckard/internal/queue/utils" ) // MemoryStorage is an implementation of the Storage Interface using memory. // Currently only insert and pull functions are implemented. type MemoryStorage struct { - docs map[string]*entities.Message - configurations map[string]*entities.QueueConfiguration + docs map[string]*message.Message + configurations map[string]*configuration.QueueConfiguration lock *sync.RWMutex internalCounter int64 } @@ -26,8 +28,8 @@ var _ Storage = &MemoryStorage{} func NewMemoryStorage(ctx context.Context) *MemoryStorage { storage := &MemoryStorage{ - docs: make(map[string]*entities.Message), - configurations: make(map[string]*entities.QueueConfiguration), + docs: make(map[string]*message.Message), + configurations: make(map[string]*configuration.QueueConfiguration), lock: &sync.RWMutex{}, internalCounter: int64(0), @@ -36,8 +38,8 @@ func NewMemoryStorage(ctx context.Context) *MemoryStorage { return storage } -func (storage *MemoryStorage) ListQueueConfigurations(ctx context.Context) ([]*entities.QueueConfiguration, error) { - configurations := make([]*entities.QueueConfiguration, len(storage.configurations)) +func (storage *MemoryStorage) ListQueueConfigurations(ctx context.Context) ([]*configuration.QueueConfiguration, error) { + configurations := make([]*configuration.QueueConfiguration, len(storage.configurations)) configurationIndex := 0 for i := range storage.configurations { @@ -49,7 +51,7 @@ func (storage *MemoryStorage) ListQueueConfigurations(ctx context.Context) ([]*e return configurations, nil } -func (storage *MemoryStorage) EditQueueConfiguration(_ context.Context, configuration *entities.QueueConfiguration) error { +func (storage *MemoryStorage) EditQueueConfiguration(_ context.Context, 
configuration *configuration.QueueConfiguration) error { if configuration.MaxElements == 0 { return nil } @@ -63,7 +65,7 @@ func (storage *MemoryStorage) EditQueueConfiguration(_ context.Context, configur return nil } -func (storage *MemoryStorage) GetQueueConfiguration(_ context.Context, queue string) (*entities.QueueConfiguration, error) { +func (storage *MemoryStorage) GetQueueConfiguration(_ context.Context, queue string) (*configuration.QueueConfiguration, error) { return storage.configurations[queue], nil } @@ -72,15 +74,15 @@ func (storage *MemoryStorage) Flush(_ context.Context) (deletedCount int64, err count := int64(len(storage.docs)) count += int64(len(storage.configurations)) - storage.docs = make(map[string]*entities.Message) - storage.configurations = make(map[string]*entities.QueueConfiguration) + storage.docs = make(map[string]*message.Message) + storage.configurations = make(map[string]*configuration.QueueConfiguration) storage.lock.Unlock() return count, nil } -func (storage *MemoryStorage) Insert(_ context.Context, messages ...*entities.Message) (int64, int64, error) { +func (storage *MemoryStorage) Insert(_ context.Context, messages ...*message.Message) (int64, int64, error) { inserted := int64(0) modified := int64(0) @@ -113,7 +115,7 @@ func (storage *MemoryStorage) Insert(_ context.Context, messages ...*entities.Me storage.internalCounter += 1 messages[i].InternalId = storage.internalCounter - now := time.Now() + now := dtime.Now() messages[i].LastUsage = &now storage.docs[key] = messages[i] } @@ -124,7 +126,7 @@ func (storage *MemoryStorage) Insert(_ context.Context, messages ...*entities.Me return inserted, modified, nil } -func (storage *MemoryStorage) GetStringInternalId(_ context.Context, message *entities.Message) string { +func (storage *MemoryStorage) GetStringInternalId(_ context.Context, message *message.Message) string { if message.InternalId == nil { return "" } @@ -132,7 +134,7 @@ func (storage *MemoryStorage) 
GetStringInternalId(_ context.Context, message *en return strconv.FormatInt(message.InternalId.(int64), 10) } -func getKey(message *entities.Message) string { +func getKey(message *message.Message) string { return message.Queue + ":" + message.ID } @@ -156,7 +158,7 @@ func (storage *MemoryStorage) Count(_ context.Context, opts *FindOptions) (int64 return count, nil } -func messageMatchesFilter(q *entities.Message, opts *FindOptions) (bool, error) { +func messageMatchesFilter(q *message.Message, opts *FindOptions) (bool, error) { if opts == nil { return true, nil } @@ -166,7 +168,7 @@ func messageMatchesFilter(q *entities.Message, opts *FindOptions) (bool, error) return matchesInternal, err } -func matchesInternalFilter(message *entities.Message, filter *InternalFilter) (bool, error) { +func matchesInternalFilter(message *message.Message, filter *InternalFilter) (bool, error) { if filter == nil { return true, nil } @@ -226,8 +228,8 @@ func matchesInternalFilter(message *entities.Message, filter *InternalFilter) (b return true, nil } -func (storage *MemoryStorage) Find(_ context.Context, opt *FindOptions) ([]entities.Message, error) { - var messages []entities.Message +func (storage *MemoryStorage) Find(_ context.Context, opt *FindOptions) ([]message.Message, error) { + var messages []message.Message storage.lock.RLock() defer storage.lock.RUnlock() @@ -288,8 +290,8 @@ func (storage *MemoryStorage) Find(_ context.Context, opt *FindOptions) ([]entit return messages, nil } -func isExpired(message *entities.Message) bool { - return !message.Timeless && (message.ExpiryDate.Before(time.Now()) || message.ExpiryDate.Equal(time.Now())) +func isExpired(message *message.Message) bool { + return !message.Timeless && (message.ExpiryDate.Before(dtime.Now()) || message.ExpiryDate.Equal(dtime.Now())) } func (storage *MemoryStorage) Remove(_ context.Context, queue string, ids ...string) (deleted int64, err error) { @@ -314,7 +316,7 @@ func (storage *MemoryStorage) Remove(_ 
context.Context, queue string, ids ...str return count, nil } -func (storage *MemoryStorage) Ack(_ context.Context, message *entities.Message) (modifiedCount int64, err error) { +func (storage *MemoryStorage) Ack(_ context.Context, message *message.Message) (modifiedCount int64, err error) { storage.lock.RLock() value, contains := storage.docs[getKey(message)] storage.lock.RUnlock() diff --git a/internal/queue/storage/memory_storage_test.go b/internal/queue/storage/memory_storage_test.go index 586b765..eeebce2 100644 --- a/internal/queue/storage/memory_storage_test.go +++ b/internal/queue/storage/memory_storage_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" "github.com/takenet/deckard/internal/config" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/message" ) func TestMemoryStorage(t *testing.T) { @@ -28,7 +28,7 @@ func TestInternalIdIncrement(t *testing.T) { storage := NewMemoryStorage(context.Background()) for i := 1; i < 10; i++ { - message := &entities.Message{ + message := &message.Message{ ID: strconv.Itoa(i), Queue: "q", ExpiryDate: time.Now().Add(10 * time.Hour), diff --git a/internal/queue/storage/mongo_storage.go b/internal/queue/storage/mongo_storage.go index b74bfb2..3c25476 100644 --- a/internal/queue/storage/mongo_storage.go +++ b/internal/queue/storage/mongo_storage.go @@ -11,11 +11,12 @@ import ( "github.com/elliotchance/orderedmap/v2" "github.com/takenet/deckard/internal/config" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/metrics" "github.com/takenet/deckard/internal/project" - "github.com/takenet/deckard/internal/queue/entities" - "github.com/takenet/deckard/internal/queue/utils" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" 
"go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" "go.mongodb.org/mongo-driver/mongo" @@ -41,7 +42,7 @@ func NewMongoStorage(ctx context.Context) (*MongoStorage, error) { logger.S(ctx).Debug("Connecting to ", mongoSecondaryOpts.Hosts, " MongoDB instance(s).") - start := time.Now() + start := dtime.Now() mongoSecondaryOpts.SetReadPreference(readpref.SecondaryPreferred()) clientSecondaryPreference, err := waitForClient(ctx, mongoSecondaryOpts) @@ -157,7 +158,7 @@ func createClient(ctx context.Context, opts *options.ClientOptions) (*mongo.Clie return client, nil } -func (storage *MongoStorage) EditQueueConfiguration(ctx context.Context, configuration *entities.QueueConfiguration) error { +func (storage *MongoStorage) EditQueueConfiguration(ctx context.Context, configuration *configuration.QueueConfiguration) error { set := bson.M{} maxElements := configuration.MaxElements @@ -173,9 +174,9 @@ func (storage *MongoStorage) EditQueueConfiguration(ctx context.Context, configu return nil } - now := time.Now() + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "edit_configuration")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "edit_configuration")) }() upsert := true @@ -195,10 +196,10 @@ func (storage *MongoStorage) EditQueueConfiguration(ctx context.Context, configu return updateErr } -func (storage *MongoStorage) ListQueueConfigurations(ctx context.Context) ([]*entities.QueueConfiguration, error) { - now := time.Now() +func (storage *MongoStorage) ListQueueConfigurations(ctx context.Context) ([]*configuration.QueueConfiguration, error) { + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "list_configuration")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "list_configuration")) }() cursor, err := 
storage.queueConfigurationCollection.Find(context.Background(), bson.M{}) @@ -207,7 +208,7 @@ func (storage *MongoStorage) ListQueueConfigurations(ctx context.Context) ([]*en return nil, fmt.Errorf("error finding queue configurations: %w", err) } - configurations := make([]*entities.QueueConfiguration, 0) + configurations := make([]*configuration.QueueConfiguration, 0) cursorErr := cursor.All(context.Background(), &configurations) @@ -218,13 +219,13 @@ func (storage *MongoStorage) ListQueueConfigurations(ctx context.Context) ([]*en return configurations, nil } -func (storage *MongoStorage) GetQueueConfiguration(ctx context.Context, queue string) (*entities.QueueConfiguration, error) { - now := time.Now() +func (storage *MongoStorage) GetQueueConfiguration(ctx context.Context, queue string) (*configuration.QueueConfiguration, error) { + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "find_configuration")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "find_configuration")) }() - var configuration entities.QueueConfiguration + var configuration configuration.QueueConfiguration err := storage.queueConfigurationCollection.FindOne( context.Background(), @@ -245,9 +246,9 @@ func (storage *MongoStorage) GetQueueConfiguration(ctx context.Context, queue st } func (storage *MongoStorage) Flush(ctx context.Context) (int64, error) { - now := time.Now() + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "flush")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "flush")) }() result, err := storage.messagesCollection.DeleteMany(context.Background(), bson.M{}) @@ -268,9 +269,9 @@ func (storage *MongoStorage) Flush(ctx context.Context) (int64, error) { } func (storage *MongoStorage) Count(ctx context.Context, opt *FindOptions) (int64, error) { - now := time.Now() + now := 
dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "count")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "count")) }() mongoFilter, err := getMongoMessage(opt) @@ -291,18 +292,18 @@ func (storage *MongoStorage) Count(ctx context.Context, opt *FindOptions) (int64 } func (storage *MongoStorage) ListQueueNames(ctx context.Context) (queues []string, err error) { - now := time.Now() + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "list_queue")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "list_queue")) }() return storage.distinct(ctx, "queue") } func (storage *MongoStorage) ListQueuePrefixes(ctx context.Context) (queues []string, err error) { - now := time.Now() + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "list_queue_prefix")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "list_queue_prefix")) }() return storage.distinct(ctx, "queue_prefix") @@ -330,7 +331,7 @@ func (storage *MongoStorage) distinct(ctx context.Context, field string) (data [ // Find returns a cursor with the specified projection for fetching // all valid messages sorted by its ascending insertion date. 
-func (storage *MongoStorage) Find(ctx context.Context, opt *FindOptions) ([]entities.Message, error) { +func (storage *MongoStorage) Find(ctx context.Context, opt *FindOptions) ([]message.Message, error) { if opt == nil { opt = &FindOptions{} } @@ -361,9 +362,9 @@ func (storage *MongoStorage) Find(ctx context.Context, opt *FindOptions) ([]enti "sort", findOptions.Sort, "projection", findOptions.Projection) - now := time.Now() + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "find"), attribute.String("retry", strconv.FormatBool(opt.Retry))) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "find"), attribute.String("retry", strconv.FormatBool(opt.Retry))) }() collection := storage.messagesCollection @@ -376,7 +377,7 @@ func (storage *MongoStorage) Find(ctx context.Context, opt *FindOptions) ([]enti return nil, fmt.Errorf("error finding storage elements: %w", err) } - messages := make([]entities.Message, 0, opt.Limit) + messages := make([]message.Message, 0, opt.Limit) cursorErr := cursor.All(context.Background(), &messages) @@ -402,9 +403,9 @@ func (storage *MongoStorage) Remove(ctx context.Context, queue string, ids ...st logger.S(ctx).Debugw("Storage operation: delete many operation.", "filter", filter) - now := time.Now() + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "remove")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "remove")) }() res, err := storage.messagesCollection.DeleteMany(context.Background(), filter) @@ -415,10 +416,10 @@ func (storage *MongoStorage) Remove(ctx context.Context, queue string, ids ...st return res.DeletedCount, nil } -func (storage *MongoStorage) Insert(ctx context.Context, messages ...*entities.Message) (insertedCount int64, modifiedCount int64, err error) { +func (storage *MongoStorage) Insert(ctx context.Context, 
messages ...*message.Message) (insertedCount int64, modifiedCount int64, err error) { updates := make([]mongo.WriteModel, 0, len(messages)) - now := time.Now() + now := dtime.Now() upsert := true for _, q := range messages { @@ -474,7 +475,7 @@ func (storage *MongoStorage) Insert(ctx context.Context, messages ...*entities.M } defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "insert")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "insert")) }() res, err := storage.messagesCollection.BulkWrite(context.Background(), updates, options.BulkWrite().SetOrdered(false)) @@ -486,10 +487,10 @@ func (storage *MongoStorage) Insert(ctx context.Context, messages ...*entities.M } // Ack updates the messages on mongostorage with updated status and score. -func (storage *MongoStorage) Ack(ctx context.Context, message *entities.Message) (modifiedCount int64, err error) { - now := time.Now() +func (storage *MongoStorage) Ack(ctx context.Context, message *message.Message) (modifiedCount int64, err error) { + now := dtime.Now() defer func() { - metrics.StorageLatency.Record(ctx, utils.ElapsedTime(now), attribute.String("op", "ack")) + metrics.StorageLatency.Record(ctx, dtime.ElapsedTime(now), attribute.String("op", "ack")) }() filter := bson.M{ @@ -608,7 +609,7 @@ func getMongoMessage(opt *FindOptions) (bson.M, error) { return mongoFilter, nil } -func (storage *MongoStorage) GetStringInternalId(_ context.Context, message *entities.Message) string { +func (storage *MongoStorage) GetStringInternalId(_ context.Context, message *message.Message) string { if message.InternalId == nil { return "" } diff --git a/internal/queue/storage/mongo_storage_test.go b/internal/queue/storage/mongo_storage_test.go index 2531beb..f744984 100644 --- a/internal/queue/storage/mongo_storage_test.go +++ b/internal/queue/storage/mongo_storage_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" 
"github.com/stretchr/testify/suite" "github.com/takenet/deckard/internal/config" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/message" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/bson/primitive" ) @@ -53,7 +53,7 @@ func TestMongoConnectionWithURIIntegration(t *testing.T) { defer storage.Flush(context.Background()) - insert, updated, err := storage.Insert(context.Background(), &entities.Message{ + insert, updated, err := storage.Insert(context.Background(), &message.Message{ ID: "123", Queue: "queue", }) diff --git a/internal/queue/storage/storage.go b/internal/queue/storage/storage.go index bd858d5..9b70dc8 100644 --- a/internal/queue/storage/storage.go +++ b/internal/queue/storage/storage.go @@ -9,7 +9,8 @@ import ( "github.com/elliotchance/orderedmap/v2" "github.com/takenet/deckard/internal/logger" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" ) type Type string @@ -22,21 +23,21 @@ const ( // Storage is an interface that stores the messages that have to be routed. // It contains all Data of the message and is used as a storage only. 
type Storage interface { - Insert(ctx context.Context, messages ...*entities.Message) (inserted int64, updated int64, err error) + Insert(ctx context.Context, messages ...*message.Message) (inserted int64, updated int64, err error) - Find(ctx context.Context, opt *FindOptions) ([]entities.Message, error) + Find(ctx context.Context, opt *FindOptions) ([]message.Message, error) Remove(ctx context.Context, queue string, ids ...string) (deleted int64, err error) - Ack(ctx context.Context, message *entities.Message) (modifiedCount int64, err error) + Ack(ctx context.Context, message *message.Message) (modifiedCount int64, err error) ListQueueNames(ctx context.Context) (queues []string, err error) ListQueuePrefixes(ctx context.Context) (queues []string, err error) Count(ctx context.Context, opt *FindOptions) (int64, error) - GetStringInternalId(ctx context.Context, message *entities.Message) string + GetStringInternalId(ctx context.Context, message *message.Message) string - EditQueueConfiguration(ctx context.Context, configuration *entities.QueueConfiguration) error - GetQueueConfiguration(ctx context.Context, queue string) (*entities.QueueConfiguration, error) - ListQueueConfigurations(ctx context.Context) ([]*entities.QueueConfiguration, error) + EditQueueConfiguration(ctx context.Context, configuration *configuration.QueueConfiguration) error + GetQueueConfiguration(ctx context.Context, queue string) (*configuration.QueueConfiguration, error) + ListQueueConfigurations(ctx context.Context) ([]*configuration.QueueConfiguration, error) // Available to cleanup tests Flush(ctx context.Context) (deletedCount int64, err error) diff --git a/internal/queue/storage/storage_suite_test.go b/internal/queue/storage/storage_suite_test.go index aa900dc..b4f5f26 100644 --- a/internal/queue/storage/storage_suite_test.go +++ b/internal/queue/storage/storage_suite_test.go @@ -10,8 +10,9 @@ import ( "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" 
"github.com/takenet/deckard" - "github.com/takenet/deckard/internal/queue/entities" - "github.com/takenet/deckard/internal/queue/utils" + "github.com/takenet/deckard/internal/dtime" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" "google.golang.org/protobuf/types/known/anypb" "google.golang.org/protobuf/types/known/wrapperspb" ) @@ -39,7 +40,7 @@ func (suite *StorageTestSuite) BeforeTest(_, _ string) { } } -func (suite *StorageTestSuite) insertDataNoError(messages ...*entities.Message) { +func (suite *StorageTestSuite) insertDataNoError(messages ...*message.Message) { i, m, err := suite.storage.Insert(ctx, messages...) require.NoError(suite.T(), err) @@ -48,7 +49,7 @@ func (suite *StorageTestSuite) insertDataNoError(messages ...*entities.Message) } func (suite *StorageTestSuite) TestWithInternalFilterIdsOk() { - messages := []*entities.Message{{ + messages := []*message.Message{{ Queue: "q", ID: "id", ExpiryDate: futureTime(), @@ -78,7 +79,7 @@ func (suite *StorageTestSuite) TestWithInternalFilterIdsOk() { } func (suite *StorageTestSuite) TestWithInternalFilterExpiryDateOk() { - messages := []*entities.Message{{ + messages := []*message.Message{{ Queue: "q", ID: "id", ExpiryDate: futureTime(), @@ -109,7 +110,7 @@ func (suite *StorageTestSuite) TestWithInternalFilterExpiryDateOk() { } func (suite *StorageTestSuite) TestWithInternalFilterQueueOk() { - messages := []*entities.Message{{ + messages := []*message.Message{{ Queue: "q1", ID: "id", ExpiryDate: futureTime(), @@ -143,7 +144,7 @@ func (suite *StorageTestSuite) TestWithInternalFilterQueueOk() { } func (suite *StorageTestSuite) TestFindWithInternalFilterBreakpointOk() { - messages := []*entities.Message{{ + messages := []*message.Message{{ Queue: "q1", ID: "id", ExpiryDate: futureTime(), @@ -186,15 +187,15 @@ func (suite *StorageTestSuite) TestFindWithInternalFilterBreakpointOk() { func (suite *StorageTestSuite) 
TestInsertTwiceShouldReplaceMessageKeepingFields() { now := time.Now() - message := entities.Message{ + msg := message.Message{ Queue: "q1", ID: "id", ExpiryDate: futureTime(), } - suite.insertDataNoError(&message) + suite.insertDataNoError(&msg) - ackModified, err := suite.storage.Ack(ctx, &entities.Message{ + ackModified, err := suite.storage.Ack(ctx, &message.Message{ Queue: "q1", ID: "id", LastScoreSubtract: 1234.1, @@ -207,7 +208,7 @@ func (suite *StorageTestSuite) TestInsertTwiceShouldReplaceMessageKeepingFields( require.Equal(suite.T(), int64(1), ackModified) newDate := futureTime().Add(time.Hour) - newMessage := entities.Message{ + newMessage := message.Message{ Queue: "q1", ID: "id", ExpiryDate: newDate, @@ -233,20 +234,20 @@ func (suite *StorageTestSuite) TestInsertTwiceShouldReplaceMessageKeepingFields( require.Equal(suite.T(), 1234.1, data.TotalScoreSubtract) require.Equal(suite.T(), 1234.1, data.LastScoreSubtract) require.Equal(suite.T(), 12345.1, data.Score) - require.Equal(suite.T(), utils.MsPrecision(&now).Local(), utils.MsPrecision(data.LastUsage).Local()) + require.Equal(suite.T(), dtime.MsPrecision(&now).Local(), dtime.MsPrecision(data.LastUsage).Local()) require.Equal(suite.T(), "12345", data.Breakpoint) // New Data require.Equal(suite.T(), "newStringData", data.StringPayload) require.Equal(suite.T(), map[string]string{"new": "1234"}, data.Metadata) require.Equal(suite.T(), "newDescription", data.Description) - require.Equal(suite.T(), utils.MsPrecision(&newDate).Local(), utils.MsPrecision(&data.ExpiryDate).Local()) + require.Equal(suite.T(), dtime.MsPrecision(&newDate).Local(), dtime.MsPrecision(&data.ExpiryDate).Local()) } func (suite *StorageTestSuite) TestInsertWithoutQueueShouldError() { - messages := make([]*entities.Message, 10) + messages := make([]*message.Message, 10) for i := range messages { - messages[i] = &entities.Message{ID: "123"} + messages[i] = &message.Message{ID: "123"} } _, _, err := suite.storage.Insert(ctx, messages...) 
@@ -255,9 +256,9 @@ func (suite *StorageTestSuite) TestInsertWithoutQueueShouldError() { } func (suite *StorageTestSuite) TestInsertWithoutIDShouldError() { - messages := make([]*entities.Message, 10) + messages := make([]*message.Message, 10) for i := range messages { - messages[i] = &entities.Message{Queue: "123"} + messages[i] = &message.Message{Queue: "123"} } _, _, err := suite.storage.Insert(ctx, messages...) @@ -266,17 +267,17 @@ func (suite *StorageTestSuite) TestInsertWithoutIDShouldError() { } func (suite *StorageTestSuite) TestUpdateOk() { - message := entities.Message{ + msg := message.Message{ ID: "Id", Queue: "Queue", ExpiryDate: futureTime(), } - suite.insertDataNoError(&message) + suite.insertDataNoError(&msg) now := time.Now() - newTime := utils.MsToTime(int64(1610300607851)) - firstAckModified, err := suite.storage.Ack(ctx, &entities.Message{ + newTime := dtime.MsToTime(int64(1610300607851)) + firstAckModified, err := suite.storage.Ack(ctx, &message.Message{ ID: "Id", Queue: "Queue", LastScoreSubtract: 123, @@ -287,7 +288,7 @@ func (suite *StorageTestSuite) TestUpdateOk() { require.NoError(suite.T(), err) require.Equal(suite.T(), int64(1), firstAckModified) - secondAckModified, err := suite.storage.Ack(ctx, &entities.Message{ + secondAckModified, err := suite.storage.Ack(ctx, &message.Message{ ID: "Id", Queue: "Queue", LastScoreSubtract: 54325, @@ -303,18 +304,18 @@ func (suite *StorageTestSuite) TestUpdateOk() { require.NoError(suite.T(), err) require.Len(suite.T(), messages, 1) - msTime := utils.MsPrecision(&newTime).Local() + msTime := dtime.MsPrecision(&newTime).Local() - require.Equal(suite.T(), msTime, utils.MsPrecision(messages[0].LastUsage).Local()) + require.Equal(suite.T(), msTime, dtime.MsPrecision(messages[0].LastUsage).Local()) messages[0].LastUsage = nil - message.LastUsage = nil + msg.LastUsage = nil - messages[0].ExpiryDate = utils.MsPrecision(&messages[0].ExpiryDate).Local() + messages[0].ExpiryDate = 
dtime.MsPrecision(&messages[0].ExpiryDate).Local() - require.Equal(suite.T(), entities.Message{ + require.Equal(suite.T(), message.Message{ ID: "Id", Queue: "Queue", - ExpiryDate: utils.MsPrecision(&message.ExpiryDate).Local(), + ExpiryDate: dtime.MsPrecision(&msg.ExpiryDate).Local(), InternalId: messages[0].InternalId, Breakpoint: "breakpoint2", LastScoreSubtract: 54325, @@ -324,8 +325,8 @@ func (suite *StorageTestSuite) TestUpdateOk() { } func (suite *StorageTestSuite) TestListQueueNamesOk() { - messages := make([]entities.Message, 100) - toInsert := make([]*entities.Message, 100) + messages := make([]message.Message, 100) + toInsert := make([]*message.Message, 100) queues := make([]string, 100) @@ -350,11 +351,11 @@ func (suite *StorageTestSuite) TestListQueueNamesOk() { } func (suite *StorageTestSuite) TestListQueueNamesShouldNotResultDeletedMessageQueue() { - toInsert := make([]*entities.Message, 100) + toInsert := make([]*message.Message, 100) queues := make([]string, 100) for i := 0; i < 100; i++ { - message := entities.Message{ + message := message.Message{ Queue: strconv.Itoa(i), ID: strconv.Itoa(i), } @@ -388,8 +389,8 @@ func (suite *StorageTestSuite) TestListQueueNamesShouldNotResultDeletedMessageQu } func (suite *StorageTestSuite) TestClearOk() { - messages := make([]entities.Message, 100) - toInsert := make([]*entities.Message, 100) + messages := make([]message.Message, 100) + toInsert := make([]*message.Message, 100) for i := range messages { messages[i].Queue = "test" @@ -417,7 +418,7 @@ func (suite *StorageTestSuite) TestClearOk() { } func (suite *StorageTestSuite) TestClearShouldClearQueueConfigurations() { - err := suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{ + err := suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{ Queue: "queue", MaxElements: 1234, }) @@ -435,7 +436,7 @@ func (suite *StorageTestSuite) TestClearShouldClearQueueConfigurations() { } func (suite *StorageTestSuite) 
TestInsertOneOk() { - message := entities.Message{ + message := message.Message{ ID: "id", Queue: "test", ExpiryDate: time.Now().Add(10 * time.Hour), @@ -479,7 +480,7 @@ func (suite *StorageTestSuite) TestInsertWithPayloadOk() { boolData, _ := anypb.New(wrapperspb.Bool(true)) boolFalseData, _ := anypb.New(wrapperspb.Bool(false)) - message := entities.Message{ + message := message.Message{ ID: "id", Queue: "test", Payload: map[string]*anypb.Any{ @@ -544,10 +545,10 @@ func (suite *StorageTestSuite) TestInsertWithPayloadOk() { } func (suite *StorageTestSuite) TestInsertManyOk() { - messages := make([]*entities.Message, 200) + messages := make([]*message.Message, 200) for i := range messages { - messages[i] = &entities.Message{ + messages[i] = &message.Message{ Queue: "test", ID: strconv.Itoa(i), ExpiryDate: time.Now().Add(time.Duration(i+1) * time.Hour), @@ -572,9 +573,9 @@ func (suite *StorageTestSuite) TestInsertManyOk() { messages[i].InternalId = data[i].InternalId messages[i].LastUsage = data[i].LastUsage - storageTime := utils.MsPrecision(&messages[i].ExpiryDate).Local() + storageTime := dtime.MsPrecision(&messages[i].ExpiryDate).Local() messages[i].ExpiryDate = storageTime - data[i].ExpiryDate = utils.MsPrecision(&data[i].ExpiryDate).Local() + data[i].ExpiryDate = dtime.MsPrecision(&data[i].ExpiryDate).Local() require.Equal(suite.T(), *messages[i], data[i]) } @@ -588,7 +589,7 @@ func (suite *StorageTestSuite) TestGetConfigurationNotExistsShouldReturnNil() { } func (suite *StorageTestSuite) TestEditConfigurationShouldEditConfiguration() { - err := suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{MaxElements: 2, Queue: "queue"}) + err := suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{MaxElements: 2, Queue: "queue"}) require.NoError(suite.T(), err) @@ -599,10 +600,10 @@ func (suite *StorageTestSuite) TestEditConfigurationShouldEditConfiguration() { } func (suite *StorageTestSuite) TestListAllQueueConfigurations() { 
- err := suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{MaxElements: 2, Queue: "queue"}) + err := suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{MaxElements: 2, Queue: "queue"}) require.NoError(suite.T(), err) - err = suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{MaxElements: 43, Queue: "queue2"}) + err = suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{MaxElements: 43, Queue: "queue2"}) require.NoError(suite.T(), err) queues, err := suite.storage.ListQueueConfigurations(ctx) @@ -611,18 +612,18 @@ func (suite *StorageTestSuite) TestListAllQueueConfigurations() { require.Equal(suite.T(), 2, len(queues)) if queues[0].Queue == "queue" { - require.Equal(suite.T(), entities.QueueConfiguration{MaxElements: 2, Queue: "queue"}, *queues[0]) - require.Equal(suite.T(), entities.QueueConfiguration{MaxElements: 43, Queue: "queue2"}, *queues[1]) + require.Equal(suite.T(), configuration.QueueConfiguration{MaxElements: 2, Queue: "queue"}, *queues[0]) + require.Equal(suite.T(), configuration.QueueConfiguration{MaxElements: 43, Queue: "queue2"}, *queues[1]) } else { - require.Equal(suite.T(), entities.QueueConfiguration{MaxElements: 2, Queue: "queue"}, *queues[1]) - require.Equal(suite.T(), entities.QueueConfiguration{MaxElements: 43, Queue: "queue2"}, *queues[0]) + require.Equal(suite.T(), configuration.QueueConfiguration{MaxElements: 2, Queue: "queue"}, *queues[1]) + require.Equal(suite.T(), configuration.QueueConfiguration{MaxElements: 43, Queue: "queue2"}, *queues[0]) } } func (suite *StorageTestSuite) TestEditConfigurationWithNegativeNumberShouldMakeMaxElementsAsZero() { - err := suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{MaxElements: -1, Queue: "queue"}) + err := suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{MaxElements: -1, Queue: "queue"}) require.NoError(suite.T(), err) @@ -633,11 +634,11 @@ func (suite 
*StorageTestSuite) TestEditConfigurationWithNegativeNumberShouldMake } func (suite *StorageTestSuite) TestEditConfigurationWithMaxElementsZeroShouldDoNothing() { - err := suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{MaxElements: 2, Queue: "queue"}) + err := suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{MaxElements: 2, Queue: "queue"}) require.NoError(suite.T(), err) - err = suite.storage.EditQueueConfiguration(ctx, &entities.QueueConfiguration{MaxElements: 0, Queue: "queue"}) + err = suite.storage.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{MaxElements: 0, Queue: "queue"}) require.NoError(suite.T(), err) @@ -648,7 +649,7 @@ func (suite *StorageTestSuite) TestEditConfigurationWithMaxElementsZeroShouldDoN } func (suite *StorageTestSuite) TestFindWithNilOptionsOk() { - message := entities.Message{ + message := message.Message{ ID: "id", Queue: "test", ExpiryDate: time.Now().Add(10 * time.Hour), diff --git a/internal/queue/utils/primitive_conversion.go b/internal/queue/utils/primitive_conversion.go index dff3267..3b53f05 100644 --- a/internal/queue/utils/primitive_conversion.go +++ b/internal/queue/utils/primitive_conversion.go @@ -2,12 +2,6 @@ package utils import ( "strconv" - "time" -) - -const ( - millisPerSecond = int64(time.Second / time.Millisecond) - nanosPerMillisecond = int64(time.Millisecond / time.Nanosecond) ) var StrToBool = strconv.ParseBool @@ -19,26 +13,3 @@ func StrToInt64(data string) (int64, error) { func StrToFloat64(data string) (float64, error) { return strconv.ParseFloat(data, 64) } - -func MsPrecision(t *time.Time) time.Time { - return MsToTime(TimeToMs(t)) -} - -func TimeToMs(t *time.Time) int64 { - return t.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) -} - -func NowMs() int64 { - t := time.Now() - - return t.UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) -} - -func MsToTime(msInt int64) time.Time { - return 
time.Unix(msInt/millisPerSecond, (msInt%millisPerSecond)*nanosPerMillisecond) -} - -// Time in millliseconds elapsed since a time -func ElapsedTime(since time.Time) int64 { - return int64(time.Since(since) / time.Millisecond) -} diff --git a/internal/queue/utils/primitive_conversion_test.go b/internal/queue/utils/primitive_conversion_test.go index 4dd91b8..0d7e6cf 100644 --- a/internal/queue/utils/primitive_conversion_test.go +++ b/internal/queue/utils/primitive_conversion_test.go @@ -2,7 +2,6 @@ package utils import ( "testing" - "time" "github.com/stretchr/testify/require" ) @@ -50,33 +49,6 @@ func TestStrToBool(t *testing.T) { } } -func TestElapsedTime(t *testing.T) { - // Create a time object that is 1 second in the past - since := time.Now().Add(-time.Second) - - // Call the ElapsedTime function - got := ElapsedTime(since) - - // Check if the result is within 1 millisecond of 1000 - if got < 999 || got > 1001 { - t.Errorf("ElapsedTime() = %d, expected 1000 +/- 1", got) - } -} - -func TestNowMs(t *testing.T) { - // Get the current time in milliseconds - // This is the exact implementation of NowMS, this test only guarantees that the implementation result doesn't change - now := time.Now().UnixNano() / (int64(time.Millisecond) / int64(time.Nanosecond)) - - // Call the NowMs function - got := NowMs() - - // Check if the result is within 1 millisecond of the current time - if got < now-1 || got > now+1 { - t.Errorf("NowMs() = %d, expected %d +/- 1", got, now) - } -} - func TestStrToFloat64(t *testing.T) { t.Parallel() @@ -133,29 +105,3 @@ func TestStrToInt32TypeError(t *testing.T) { require.Error(t, err) } - -func TestMsPrecision(t *testing.T) { - t.Parallel() - - fixedTime := time.Unix(1610578652, 894654759) - - require.Equal(t, int64(1610578652894654759), fixedTime.UnixNano()) - - msPrecision := MsPrecision(&fixedTime) - - require.Equal(t, int64(1610578652894000000), msPrecision.UnixNano()) -} - -func TestTimeToMs(t *testing.T) { - t.Parallel() - - fixedTime 
:= time.Unix(1610578652, 894654759) - - require.Equal(t, int64(1610578652894), TimeToMs(&fixedTime)) -} - -func TestMsToTime(t *testing.T) { - t.Parallel() - - require.Equal(t, int64(1610578652894000000), MsToTime(int64(1610578652894)).UnixNano()) -} diff --git a/internal/service/deckard_service.go b/internal/service/deckard_service.go index 0150540..f437086 100644 --- a/internal/service/deckard_service.go +++ b/internal/service/deckard_service.go @@ -12,13 +12,17 @@ import ( "github.com/takenet/deckard" "github.com/takenet/deckard/internal/config" + "github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/logger" "github.com/takenet/deckard/internal/queue" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/configuration" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" + "github.com/takenet/deckard/internal/trace" "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" + oteltrace "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -30,7 +34,7 @@ import ( ) type Deckard struct { - pool queue.DeckardQueue + queue queue.DeckardQueue queueConfigurationService queue.QueueConfigurationService memoryInstance bool @@ -44,7 +48,7 @@ var _ deckard.DeckardServer = (*Deckard)(nil) func NewDeckardInstance(qpool queue.DeckardQueue, queueConfigurationService queue.QueueConfigurationService, memoryInstance bool) *Deckard { return &Deckard{ - pool: qpool, + queue: qpool, queueConfigurationService: queueConfigurationService, memoryInstance: memoryInstance, healthServer: health.NewServer(), @@ -223,7 +227,7 @@ func (d *Deckard) EditQueue(ctx context.Context, request *deckard.EditQueueReque }, nil } - 
err := d.queueConfigurationService.EditQueueConfiguration(ctx, &entities.QueueConfiguration{ + err := d.queueConfigurationService.EditQueueConfiguration(ctx, &configuration.QueueConfiguration{ Queue: request.Queue, MaxElements: request.Configuration.MaxElements, }) @@ -264,10 +268,13 @@ func (d *Deckard) Remove(ctx context.Context, request *deckard.RemoveRequest) (* }, nil } - addTransactionIds(ctx, request.Ids) - addTransactionQueue(ctx, request.Queue) + addSpanAttributes( + ctx, + attribute.StringSlice(trace.Id, request.Ids), + attribute.StringSlice(trace.Queue, []string{request.Queue}), + ) - cacheRemoved, storageRemoved, err := d.pool.Remove(ctx, request.Queue, "REQUEST", request.Ids...) + cacheRemoved, storageRemoved, err := d.queue.Remove(ctx, request.Queue, "REQUEST", request.Ids...) if err != nil { return nil, status.Error(codes.Internal, "error removing messages from pool") @@ -280,9 +287,12 @@ func (d *Deckard) Remove(ctx context.Context, request *deckard.RemoveRequest) (* } func (d *Deckard) Count(ctx context.Context, request *deckard.CountRequest) (*deckard.CountResponse, error) { - addTransactionQueue(ctx, request.Queue) + addSpanAttributes( + ctx, + attribute.StringSlice(trace.Queue, []string{request.Queue}), + ) - result, err := d.pool.Count(ctx, &storage.FindOptions{ + result, err := d.queue.Count(ctx, &storage.FindOptions{ InternalFilter: &storage.InternalFilter{ Queue: request.Queue, }, @@ -298,15 +308,13 @@ func (d *Deckard) Count(ctx context.Context, request *deckard.CountRequest) (*de } func (d *Deckard) Add(ctx context.Context, request *deckard.AddRequest) (*deckard.AddResponse, error) { - messages := make([]*entities.Message, len(request.Messages)) + messages := make([]*message.Message, len(request.Messages)) ids := make([]string, len(request.Messages)) queues := make(map[string]int) for i, m := range request.Messages { - t := time.Now() - - score := entities.GetScore(&t, 0) + t := dtime.Now() if m.TtlMinutes != 0 { t = t.Add(time.Minute * 
time.Duration(m.TtlMinutes)) @@ -319,11 +327,11 @@ func (d *Deckard) Add(ctx context.Context, request *deckard.AddRequest) (*deckar ids[i] = m.Id queues[m.Queue] = 1 - message := entities.Message{ + msg := message.Message{ ID: m.Id, Queue: m.Queue, Timeless: m.Timeless, - Score: score, + Score: score.GetAddScore(m.Score), Description: m.Description, ExpiryDate: t, Payload: m.Payload, @@ -331,16 +339,14 @@ func (d *Deckard) Add(ctx context.Context, request *deckard.AddRequest) (*deckar Metadata: m.Metadata, } - queuePrefix, queueSuffix := entities.GetQueueParts(m.Queue) + queuePrefix, queueSuffix := message.GetQueueParts(m.Queue) - message.QueuePrefix = queuePrefix - message.QueueSuffix = queueSuffix + msg.QueuePrefix = queuePrefix + msg.QueueSuffix = queueSuffix - messages[i] = &message + messages[i] = &msg } - addTransactionIds(ctx, ids) - queueNames := make([]string, len(queues)) i := 0 for key := range queues { @@ -348,14 +354,18 @@ func (d *Deckard) Add(ctx context.Context, request *deckard.AddRequest) (*deckar i++ } - addTransactionQueues(ctx, queueNames) + addSpanAttributes( + ctx, + attribute.StringSlice(trace.Id, ids), + attribute.StringSlice(trace.Queue, queueNames), + ) - inserted, updated, err := d.pool.AddMessagesToStorage(ctx, messages...) + inserted, updated, err := d.queue.AddMessagesToStorage(ctx, messages...) if err != nil { return nil, status.Error(codes.Internal, "error adding messages") } - cacheInserted, err := d.pool.AddMessagesToCache(ctx, messages...) + cacheInserted, err := d.queue.AddMessagesToCache(ctx, messages...) 
if err != nil { return nil, status.Error(codes.Internal, "error adding messages") } @@ -369,18 +379,34 @@ func (d *Deckard) Add(ctx context.Context, request *deckard.AddRequest) (*deckar } func (d *Deckard) Pull(ctx context.Context, request *deckard.PullRequest) (*deckard.PullResponse, error) { - addTransactionQueue(ctx, request.Queue) - addTransactionLabel(ctx, "amount", fmt.Sprint(request.Amount)) + amount := int64(request.Amount) + + addSpanAttributes( + ctx, + attribute.StringSlice(trace.Queue, []string{request.Queue}), + attribute.Int64(trace.Amount, amount), + attribute.Float64(trace.MaxScore, request.MaxScore), + attribute.Float64(trace.MinScore, request.MinScore), + attribute.Int64(trace.ScoreFilter, request.ScoreFilter), + ) + + if amount <= 0 { + amount = 1 + } - if request.Amount <= 0 { - request.Amount = 1 + if amount > 1000 { + amount = 1000 } - if request.Amount > 1000 { - request.Amount = 1000 + // Compatibility with old clients using the deprecated ScoreFilter + if request.MaxScore == 0 && request.ScoreFilter > 0 { + request.MaxScore = float64(dtime.NowMs() - request.ScoreFilter) } - messages, err := d.pool.Pull(ctx, request.Queue, int64(request.Amount), request.ScoreFilter) + minScore := score.GetPullMinScore(request.MinScore) + maxScore := score.GetPullMaxScore(request.MaxScore) + + messages, err := d.queue.Pull(ctx, request.Queue, amount, minScore, maxScore) if err != nil { return nil, status.Error(codes.Internal, "error pulling messages") } @@ -421,7 +447,7 @@ func (d *Deckard) Pull(ctx context.Context, request *deckard.PullRequest) (*deck ids[i] = res.Messages[i].Id } - addTransactionIds(ctx, ids) + addSpanAttributes(ctx, attribute.StringSlice(trace.Id, ids)) return &res, nil } @@ -435,10 +461,13 @@ func (d *Deckard) GetById(ctx context.Context, request *deckard.GetByIdRequest) return nil, status.Error(codes.InvalidArgument, "invalid queue") } - addTransactionId(ctx, request.Id) - addTransactionQueue(ctx, request.Queue) + addSpanAttributes( + 
ctx, + attribute.StringSlice(trace.Id, []string{request.Id}), + attribute.StringSlice(trace.Queue, []string{request.Queue}), + ) - messages, err := d.pool.GetStorageMessages(ctx, &storage.FindOptions{ + messages, err := d.queue.GetStorageMessages(ctx, &storage.FindOptions{ Limit: 1, InternalFilter: &storage.InternalFilter{ Ids: &[]string{request.Id}, @@ -487,18 +516,42 @@ func convertAnyDataToString(anyData map[string]*anypb.Any) map[string]string { } func (d *Deckard) Ack(ctx context.Context, request *deckard.AckRequest) (*deckard.AckResponse, error) { - addTransactionId(ctx, request.Id) - addTransactionQueue(ctx, request.Queue) + addSpanAttributes( + ctx, + attribute.StringSlice(trace.Id, []string{request.Id}), + attribute.StringSlice(trace.Queue, []string{request.Queue}), + attribute.Float64(trace.ScoreSubtract, request.ScoreSubtract), + attribute.Float64(trace.Score, request.Score), + ) + + // Should set the new score only when not locking the message or if score is provided + // On unlock process the score is computed by the default algorithm if the score is not set + newScore := score.Undefined + if request.Score != 0 { + newScore = request.Score + + } else if request.LockMs == 0 { + newScore = score.GetScoreByDefaultAlgorithm() - request.ScoreSubtract + + if newScore < score.Min { + newScore = score.Min + } else if newScore > score.Max { + newScore = score.Max + } + } - message := entities.Message{ + now := dtime.Now() + message := message.Message{ ID: request.Id, Queue: request.Queue, LastScoreSubtract: request.ScoreSubtract, + LastUsage: &now, + Score: newScore, Breakpoint: request.Breakpoint, LockMs: request.LockMs, } - result, err := d.pool.Ack(ctx, &message, time.Now(), request.Reason) + result, err := d.queue.Ack(ctx, &message, request.Reason) response := &deckard.AckResponse{Success: result} @@ -514,18 +567,32 @@ func (d *Deckard) Ack(ctx context.Context, request *deckard.AckRequest) (*deckar } func (d *Deckard) Nack(ctx context.Context, request 
*deckard.AckRequest) (*deckard.AckResponse, error) { - addTransactionId(ctx, request.Id) - addTransactionQueue(ctx, request.Queue) + addSpanAttributes( + ctx, + attribute.StringSlice(trace.Id, []string{request.Id}), + attribute.StringSlice(trace.Queue, []string{request.Queue}), + ) + + // Should set the new score only when not locking the message or if score is provided + // On unlock process the minimum score will be used if the score is not set + newScore := score.Undefined + if request.Score != 0 { + newScore = request.Score + + } else if request.LockMs == 0 { + newScore = score.Min + } - message := entities.Message{ + message := message.Message{ ID: request.Id, Queue: request.Queue, LastScoreSubtract: request.ScoreSubtract, Breakpoint: request.Breakpoint, LockMs: request.LockMs, + Score: newScore, } - result, err := d.pool.Nack(ctx, &message, time.Now(), request.Reason) + result, err := d.queue.Nack(ctx, &message, dtime.Now(), request.Reason) response := &deckard.AckResponse{Success: result} @@ -554,12 +621,12 @@ func (d *Deckard) removeMessageFromAckNack(ctx context.Context, request *deckard } func (d *Deckard) Flush(ctx context.Context, request *deckard.FlushRequest) (*deckard.FlushResponse, error) { - // Flush is only available for in-memory data layer, to prevent accidental flushes + // Flush is only available for in-memory data layer, to prevent accidental flushes of persistent data if !d.memoryInstance { return &deckard.FlushResponse{Success: false}, nil } - result, err := d.pool.Flush(ctx) + result, err := d.queue.Flush(ctx) response := &deckard.FlushResponse{Success: result} @@ -570,34 +637,16 @@ func (d *Deckard) Flush(ctx context.Context, request *deckard.FlushRequest) (*de return response, nil } -func addTransactionQueue(ctx context.Context, queue string) { - addTransactionLabels(ctx, map[string]string{"queue": queue}) -} - -func addTransactionQueues(ctx context.Context, queue []string) { - addTransactionLabels(ctx, map[string]string{"queue": 
strings.Join(queue, "")}) -} - -func addTransactionId(ctx context.Context, id string) { - addTransactionLabels(ctx, map[string]string{"id": id}) -} - -func addTransactionIds(ctx context.Context, ids []string) { - addTransactionLabels(ctx, map[string]string{"id": strings.Join(ids, ",")}) -} - -func addTransactionLabel(ctx context.Context, key string, value string) { - addTransactionLabels(ctx, map[string]string{key: value}) -} +func addSpanAttributes(ctx context.Context, attributes ...attribute.KeyValue) { + if len(attributes) == 0 { + return + } -func addTransactionLabels(ctx context.Context, labels map[string]string) { - span := trace.SpanFromContext(ctx) + span := oteltrace.SpanFromContext(ctx) if !span.SpanContext().HasTraceID() { return } - for key := range labels { - span.SetAttributes(attribute.String(key, labels[key])) - } + span.SetAttributes(attributes...) } diff --git a/internal/service/deckard_service_suite_test.go b/internal/service/deckard_service_suite_test.go index 0bb0f6a..ccff006 100644 --- a/internal/service/deckard_service_suite_test.go +++ b/internal/service/deckard_service_suite_test.go @@ -9,7 +9,7 @@ import ( "github.com/takenet/deckard" "github.com/takenet/deckard/internal/queue" "github.com/takenet/deckard/internal/queue/cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" ) @@ -31,7 +31,7 @@ func (suite *DeckardIntegrationTestSuite) BeforeTest(_, _ string) { suite.deckardStorage.Flush(ctx) } -func (suite *DeckardIntegrationTestSuite) TestAddMessageIntegration() { +func (suite *DeckardIntegrationTestSuite) TestAddMessageDefaultScoreIntegration() { start := time.Now() response, err := suite.deckard.Add(ctx, &deckard.AddRequest{ @@ -63,11 +63,8 @@ func (suite *DeckardIntegrationTestSuite) TestAddMessageIntegration() { require.NoError(suite.T(), err) message := result.Messages[0] - score := message.Score - 
require.GreaterOrEqual(suite.T(), score, entities.GetScore(&start, 0)) - - after := time.Now() - require.LessOrEqual(suite.T(), score, entities.GetScore(&after, 0)) + require.GreaterOrEqual(suite.T(), message.Score, score.GetScoreFromTime(&start)) + require.LessOrEqual(suite.T(), message.Score, score.GetScoreByDefaultAlgorithm()) message.Score = 0 @@ -78,6 +75,48 @@ func (suite *DeckardIntegrationTestSuite) TestAddMessageIntegration() { }, message) } +func (suite *DeckardIntegrationTestSuite) TestAddMessageWithScoreIntegration() { + response, err := suite.deckard.Add(ctx, &deckard.AddRequest{ + Messages: []*deckard.AddMessage{ + { + Id: "123", + Queue: "test", + Score: 100, + Timeless: true, + }, + }, + }) + require.NoError(suite.T(), err) + require.Equal(suite.T(), int64(1), response.CreatedCount) + + // Validate stored message + messages, err := suite.deckardQueue.GetStorageMessages(ctx, &storage.FindOptions{ + InternalFilter: &storage.InternalFilter{ + Queue: "test", + Ids: &[]string{"123"}, + }, + }) + require.NoError(suite.T(), err) + require.Len(suite.T(), messages, 1) + require.Equal(suite.T(), float64(100), messages[0].Score) + + result, err := suite.deckard.Pull(ctx, &deckard.PullRequest{ + Queue: "test", + Amount: 1, + }) + + require.NoError(suite.T(), err) + + message := result.Messages[0] + require.Equal(suite.T(), float64(100), message.Score) + + message.Score = 0 + require.Equal(suite.T(), &deckard.Message{ + Id: "123", + Queue: "test", + }, message) +} + func (suite *DeckardIntegrationTestSuite) TestGetMessageIntegration() { start := time.Now() diff --git a/internal/service/deckard_service_test.go b/internal/service/deckard_service_test.go index 54f6af3..f044f06 100644 --- a/internal/service/deckard_service_test.go +++ b/internal/service/deckard_service_test.go @@ -17,10 +17,12 @@ import ( "github.com/takenet/deckard" "github.com/takenet/deckard/internal/audit" "github.com/takenet/deckard/internal/config" + 
"github.com/takenet/deckard/internal/dtime" "github.com/takenet/deckard/internal/mocks" "github.com/takenet/deckard/internal/queue" "github.com/takenet/deckard/internal/queue/cache" - "github.com/takenet/deckard/internal/queue/entities" + "github.com/takenet/deckard/internal/queue/message" + "github.com/takenet/deckard/internal/queue/score" "github.com/takenet/deckard/internal/queue/storage" "google.golang.org/grpc" "google.golang.org/grpc/credentials" @@ -303,13 +305,13 @@ func TestGetQueueError(t *testing.T) { ctx, "queue", int64(1000), - int64(34), + nil, + nil, ).Return(nil, errors.New("pool error")) _, err := NewDeckardService(mockQueue, nil).Pull(ctx, &deckard.PullRequest{ - Queue: "queue", - Amount: 1234, - ScoreFilter: 34, + Queue: "queue", + Amount: 1234, }) require.Error(t, err) @@ -324,13 +326,13 @@ func TestGetQueueNoMessages(t *testing.T) { ctx, "queue", int64(1000), - int64(34), + nil, + nil, ).Return(nil, nil) response, err := NewDeckardService(mockQueue, nil).Pull(ctx, &deckard.PullRequest{ - Queue: "queue", - Amount: 1234, - ScoreFilter: 34, + Queue: "queue", + Amount: 1234, }) require.NoError(t, err) @@ -341,16 +343,20 @@ func TestAck(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() + testTime := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + defer dtime.SetNowProviderValues(testTime)() + mockQueue := mocks.NewMockDeckardQueue(mockCtrl) mockQueue.EXPECT().Ack( ctx, - &entities.Message{ + &message.Message{ ID: "1234567", Queue: "queue", LastScoreSubtract: 431, Breakpoint: "54325345", + LastUsage: &testTime, + Score: score.GetScoreFromTime(&testTime) - 431, }, - gomock.AssignableToTypeOf(time.Time{}), "reason_test", ).Return(true, nil) @@ -370,16 +376,21 @@ func TestAckPoolError(t *testing.T) { mockCtrl := gomock.NewController(t) defer mockCtrl.Finish() + testTime := time.Date(2020, 1, 1, 0, 0, 0, 0, time.UTC) + defer dtime.SetNowProviderValues(testTime)() + + // FIXME: mock internal time.Now and add 
to the message expected time mockQueue := mocks.NewMockDeckardQueue(mockCtrl) mockQueue.EXPECT().Ack( ctx, - &entities.Message{ + &message.Message{ ID: "1234567", Queue: "queue", LastScoreSubtract: 431, + LastUsage: &testTime, Breakpoint: "54325345", + Score: score.GetScoreFromTime(&testTime) - 431, }, - gomock.AssignableToTypeOf(time.Time{}), "reason_test", ).Return(false, errors.New("pool error")) @@ -402,7 +413,7 @@ func TestNack(t *testing.T) { mockQueue := mocks.NewMockDeckardQueue(mockCtrl) mockQueue.EXPECT().Nack( ctx, - &entities.Message{ + &message.Message{ ID: "1234567", Queue: "queue", LastScoreSubtract: 431, @@ -431,7 +442,7 @@ func TestNackPoolError(t *testing.T) { mockQueue := mocks.NewMockDeckardQueue(mockCtrl) mockQueue.EXPECT().Nack( ctx, - &entities.Message{ + &message.Message{ ID: "1234567", Queue: "queue", LastScoreSubtract: 431, @@ -579,7 +590,7 @@ func TestGetMessageById(t *testing.T) { Ids: &[]string{"123"}, Queue: "queue", }, - }).Return([]entities.Message{{ + }).Return([]message.Message{{ ID: "123", Queue: "queue", StringPayload: "test", diff --git a/internal/trace/attributes.go b/internal/trace/attributes.go new file mode 100644 index 0000000..b7e9be7 --- /dev/null +++ b/internal/trace/attributes.go @@ -0,0 +1,12 @@ +package trace + +const Id = "deckard.id" +const Queue = "deckard.queue" +const Amount = "deckard.amount" + +const MaxScore = "deckard.max_score" +const MinScore = "deckard.min_score" +const ScoreFilter = "deckard.score_filter" + +const Score = "deckard.score" +const ScoreSubtract = "deckard.score_subtract" diff --git a/proto/deckard_service.proto b/proto/deckard_service.proto index 9a638ed..b87964e 100644 --- a/proto/deckard_service.proto +++ b/proto/deckard_service.proto @@ -102,14 +102,31 @@ message PullRequest { // Max value is 1000 and the default value is 1 int32 amount = 2; - /* - Number to subtract to the current time to filter the max score to return. 
- Useful to not return a message just moments after it was last used. + // Prefer using the max_score field instead of this one. + // This field is deprecated and will be removed in the future. + // + // The `score_filter` behaves differently from the `max_score` field. + // The `max_score` field is the upper threshold itself, but the `score_filter` will result in an upper score threshold of the current timestamp minus the score_filter value. + // + // Useful only when your queue's score is only based on the current timestamp to not return a message just moments after it was last used. + // It will only return messages with score lower than the current timestamp minus the score_filter value. + // + // For example if your queue's score is only based on the current timestamp, this parameter will be the number of milliseconds a message must be in the queue before being returned. int64 score_filter = 3 [deprecated=true]; - For example if in your queue the score is only based on the time (always acking with score_subtract as 0), - this parameter will be the number of milliseconds since the message's last usage. - */ - int64 score_filter = 3; + // Sets the upper threshold for the priority score of a message to be returned in the pull request. + // + // Only messages with a priority score equal to or lower than the max_score value will be returned. + // + // The maximum score accepted by Deckard is 9007199254740992, any value higher than this will be capped to the maximum score. + // To set this value to the minimum score accepted by Deckard, use any negative number. + // This parameter will be ignored if set to 0 (default value). + double max_score = 4; + + // Sets the lower threshold for the priority score required for a message to be returned. + // Only messages with a priority score equal to or higher than the min_score value will be returned. 
+ // The minimum score accepted by Deckard is 0 which is also the default value + double min_score = 5; } message PullResponse { @@ -139,12 +156,16 @@ message Message { // This field can be used to store simple string data instead of using the payload field. string string_payload = 5; - // Score is the priority this message currently have in the queue. + // Score represents the priority score the message currently has in the queue. + // The lower the score, the higher the priority. + // The maximum score accepted by Deckard is 9007199254740992 and the minimum is 0 double score = 6; // Breakpoint is a field to be used as an auxiliar field for some specific use cases. + // For example if you need to keep a record of the last result processing a message, or want to interact with a pagination system. + // - // For example if you need to keep a record of the last result processing a message, use this field like iteracting with a pagination system. + // Examples: imagine a message representing a web news portal and you want to navigate through the articles. This field could be used to store the last visited article id. + // Or imagine a message representing a user and you want to iterate through the user's publications pages. This field could be used to store the last page number you visited. string breakpoint = 7; } @@ -206,6 +227,16 @@ message AddMessage { // Description of the message, this should be used as a human readable string to be used in diagnostics. string description = 8; + + // Score represents the priority score the message currently has in the queue. + // The score is used to determine the order of the messages returned in a pull request. + // The lower the score, the higher the priority. + // + // If the score is not set (or set to 0), the score will be set with the current timestamp in milliseconds at the moment of the message creation. 
+ // + // The maximum score accepted by Deckard is 9007199254740992 and the minimum is 0 + // Negative scores will be converted to 0, adding the message with the lowest score (and highest priority) + double score = 12; } message AddResponse { @@ -226,7 +257,7 @@ message EditQueueRequest { string queue = 1; // Configuration to apply to the queue. It will always update the queue with the newer configuration. - // Only available fields will be updated, meaning that previously configured attributes will not be change unless you explicit set it. + // Only available fields will be updated, meaning that previously configured fields will not be changed unless you explicitly set it. // If you want to change a configuration to its default value, manually set it to its default value following each field documentation. QueueConfiguration configuration = 2; } @@ -240,15 +271,18 @@ message EditQueueResponse { // The queue configuration does not change instantly and can take up to 10 minutes to complete update. message QueueConfiguration { - /* - Number of max elements the queue can have. - - To apply a max elements to a queue, set a value greater than 0. - To remove the max elements from a queue, set the value to -1. - 0 will be always ignored and the queue will not be updated. - - All queues are unlimited by default. - */ + // Number of max elements the queue can have. + // + // To apply a max elements to a queue, set a value greater than 0. + // To remove the max elements from a queue, set the value to -1. + // 0 will always be ignored and the queue will not be updated. + // + // All queues are unlimited by default. + // + // The exclusion policy will be applied to the queue when the max elements is reached: + // + // Messages are excluded ordered by their TTL, where the closest to expire will be excluded first. + // If all messages have the same TTL, the oldest message will be excluded first. 
int64 max_elements = 1; } @@ -267,7 +301,7 @@ message GetQueueResponse { } /* - Ack /Nack + Ack/Nack */ message AckRequest { @@ -278,33 +312,53 @@ message AckRequest { string queue = 2; // Reason of this result. + // // Useful for audit, mostly on 'nack' signals. string reason = 5; - // The value to subtract the score and increase final message score. - // For example if you want to make this message to have a better score you can add 10000 which will represent 10s of score benefit. + // This field is deprecated and will be removed in the future. If you need to change the message score, use the 'score' field. + // + // The value to subtract the score and increase final message priority. + // For example if you want to make this message to have a higher priority you can set 10000 which will represent 10s of score benefit in the default score algorithm. // If you want to penalize the message you can send a negative number. - // - // IMPORTANT: The message will not be locked by, in the example, 10 seconds. This attribute is used only to increase or decrease the message priority in the priority queue. // - // This attribute is used only for ack requests and can't be used at the same time of 'lock_ms' attribute. - double score_subtract = 3; + // IMPORTANT: The message will not be locked by, in the example, 10 seconds. This field is used only to increase or decrease the message priority in the priority queue. + // + // This field is used only for ack requests (since in nack requests the message will return with the lowest score to the queue). + // It will be ignored if used at the same time of 'score' or 'lock_ms' fields. + double score_subtract = 3 [deprecated = true]; - // Breakpoint to set for this message + // Breakpoint is a field to be used as an auxiliar field for some specific use cases. + // For example if you need to keep a record of the last result processing a message, or want to iteract with a pagination system. 
+ // + // Examples: imagine a message representing a web news portal and you want to navigate through the articles. This field could be used to store the last visited article id. + // Or imagine a message representing a user and you want to iterate through the user's publications pages. This field could be used to store the last page number you visited. string breakpoint = 4; // Time in milliseconds to lock a message before returning it to the queue. - // For nack requests the message will be locked before returning to first position in the priority queue. - // For ack requests the message will be locked before returning to last position in the priority queue. + // For NACK requests the message will be locked before returning to first position in the priority queue. You can change this behavior using the 'score' field. // - // IMPORTANT: The 'score_subtract' attribute will be ignored if this attribute is different than 0. - // - // IMPORTANT: Deckard checks for locked messages in a 1-second delay meaning the lock have a second precision and not milliseconds. - // This field is in milliseconds because all scores and duration units on deckard are expressed in milliseconds. + // For ACK requests the message will be locked before returning to last position in the priority queue. You can change this behavior using the 'score' field. + // + // IMPORTANT: Deckard checks for locked messages with a 1-second precision, meaning the lock has a second precision and not milliseconds. + // This field is in milliseconds because all duration units on deckard are expressed in milliseconds and the default score algorithm uses milliseconds as well. int64 lock_ms = 6; // Whether the message should be removed when acked/nacked bool removeMessage = 7; + + // Sets the score of the message when ACKed, to override the default score algorithm. 
+ // + // If used at the same time as the 'lock_ms' attribute, the message will be locked for the specified time and then returned to the queue with the specified score. + // + // For ACK requests, if the score is not provided (or set to 0), the message will return to the queue with the default score algorithm which is the current timestamp in milliseconds. + // + // For NACK requests, if the score is not provided (or set to 0), the message will return to the queue with the minimum score accepted by Deckard which is 0. + // + // Negative values will be converted to 0, which is how to set the highest priority to a message in an ACK/NACK request. + // + // REMEMBER: the maximum score accepted by Deckard is 9007199254740992 and the minimum is 0, so values outside this range will be capped. + double score = 10; } message AckResponse {