diff --git a/.circleci/config.yml b/.circleci/config.yml
index 083cbd7d080..dddca475fab 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -8,6 +8,7 @@ jobs:
steps:
- checkout
- setup_remote_docker
+ - run: make test
- run: make build
- run: make publish-edge-chart
- store_artifacts:
diff --git a/Gopkg.lock b/Gopkg.lock
index ac94abbc2db..b57ff9c5d77 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -337,6 +337,22 @@
revision = "b3e2804c8535ee0d1b89320afd98474d5b8e9e3b"
version = "v0.19.0"
+[[projects]]
+ digest = "1:de44b2850368432628bec4f956a629e943af9ebe7449c1d80a3a5b6556869bb8"
+ name = "github.com/go-redis/redis"
+ packages = [
+ ".",
+ "internal",
+ "internal/consistenthash",
+ "internal/hashtag",
+ "internal/pool",
+ "internal/proto",
+ "internal/util",
+ ]
+ pruneopts = "NUT"
+ revision = "75795aa4236dc7341eefac3bbe945e68c99ef9df"
+ version = "v6.15.3"
+
[[projects]]
digest = "1:37234906013da82d4c05666262eda5bdec8f736bafa7d4ec1fb3314e965b476f"
name = "github.com/gogo/protobuf"
@@ -1297,8 +1313,10 @@
"github.com/Shopify/sarama",
"github.com/Sirupsen/logrus",
"github.com/aws/aws-sdk-go/aws",
+ "github.com/aws/aws-sdk-go/aws/credentials",
"github.com/aws/aws-sdk-go/aws/session",
"github.com/aws/aws-sdk-go/service/sqs",
+ "github.com/go-redis/redis",
"github.com/golang/glog",
"github.com/kelseyhightower/envconfig",
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd",
diff --git a/README.md b/README.md
index 4fcaecd1317..af89bd24fc0 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,11 @@ helm repo update
helm install kedacore/keda-edge --devel --set logLevel=debug --namespace keda --name keda
```
+#### Install keda-edge chart with ARM image
+```cli
+helm install kedacore/keda-edge --devel --set logLevel=debug --namespace keda --name keda --set image.tag=arm
+```
+
### Deploying with the [Azure Functions Core Tools](https://github.com/Azure/azure-functions-core-tools)
```
func kubernetes install --namespace keda
diff --git a/chart/keda/README.md b/chart/keda/README.md
index e3db64c49de..85fdd0bea13 100644
--- a/chart/keda/README.md
+++ b/chart/keda/README.md
@@ -45,6 +45,21 @@ $ helm install kedacore/keda-edge --devel --set logLevel=debug --namespace keda
func kubernetes install --namespace keda
```
+## Configuration
+
+| Parameter | Description | Default |
+|:----------------------------------|:------------------------------------|:---------------------|
+| `image.repository` | Repository which provides the image | `kedacore/keda` |
+| `image.tag` | Tag of the image to use | `latest` |
+| `image.pullPolicy` | Policy for pulling the image | `Always` |
+| `replicaCount` | Number of replicas to run | `1` |
+| `customResourceDefinition.create` | Whether to create the custom resource definition | `true` |
+| `rbac.create` | Whether to use role-based access control | `true` |
+| `serviceAccount.create` | Whether a service account should be created | `true` |
+| `serviceAccount.name` | Name of the service account to use | `` |
+| `logLevel` | Log granularity for KEDA, covering the scale controller and metric adapter | `info` |
+| `glogLevel` | Log granularity for the metric adapter components outside KEDA's scope | `2` |
+
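+Any of these parameters can be overridden with `--set` at install time; for example (an illustrative combination, adjust values to your environment):
+
+```
+helm install kedacore/keda-edge --devel --namespace keda --name keda \
+  --set image.pullPolicy=IfNotPresent \
+  --set logLevel=debug
+```
+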
## Uninstalling the Chart
To uninstall/delete the `keda` deployment:
diff --git a/examples/redis_scaledobject.yaml b/examples/redis_scaledobject.yaml
new file mode 100644
index 00000000000..a325e9db86e
--- /dev/null
+++ b/examples/redis_scaledobject.yaml
@@ -0,0 +1,17 @@
+apiVersion: keda.k8s.io/v1alpha1
+kind: ScaledObject
+metadata:
+ name: redis-scaledobject
+ namespace: keda-redis-test
+ labels:
+ deploymentName: keda-redis-node
+spec:
+ scaleTargetRef:
+ deploymentName: keda-redis-node
+ triggers:
+ - type: redis
+ metadata:
+    address: REDIS_HOST # Required: name of an env var on the deployment whose value is host:port
+    password: REDIS_PASSWORD # Optional: name of an env var holding the Redis password
+    listName: mylist # Required
+    listLength: "5" # Required
diff --git a/pkg/client/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go b/pkg/client/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go
index 621859b73eb..da9d2932b31 100644
--- a/pkg/client/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go
+++ b/pkg/client/clientset/versioned/typed/keda/v1alpha1/fake/fake_scaledobject.go
@@ -139,7 +139,7 @@ func (c *FakeScaledObjects) DeleteCollection(options *v1.DeleteOptions, listOpti
// Patch applies the patch and returns the patched scaledObject.
func (c *FakeScaledObjects) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ScaledObject, err error) {
obj, err := c.Fake.
- Invokes(testing.NewPatchSubresourceAction(scaledobjectsResource, c.ns, name, pt, data, subresources...), &v1alpha1.ScaledObject{})
+ Invokes(testing.NewPatchSubresourceAction(scaledobjectsResource, c.ns, name, data, subresources...), &v1alpha1.ScaledObject{})
if obj == nil {
return nil, err
diff --git a/pkg/handler/scale_handler.go b/pkg/handler/scale_handler.go
index ae90af5c2a5..9d60eb9798b 100644
--- a/pkg/handler/scale_handler.go
+++ b/pkg/handler/scale_handler.go
@@ -28,6 +28,8 @@ type ScaleHandler struct {
kubeClient kubernetes.Interface
externalMetricNames map[string]int
metricNamesLock sync.RWMutex
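+	// hpasToCreate tracks HPA names whose create or update failed and should be
+	// retried from the scale loop; access is guarded by hpaCreateLock.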
+ hpasToCreate map[string]bool
+ hpaCreateLock sync.RWMutex
}
const (
@@ -45,6 +47,7 @@ func NewScaleHandler(kedaClient clientset.Interface, kubeClient kubernetes.Inter
kedaClient: kedaClient,
kubeClient: kubeClient,
externalMetricNames: make(map[string]int),
+ hpasToCreate: make(map[string]bool),
}
return handler
@@ -53,7 +56,7 @@ func NewScaleHandler(kedaClient clientset.Interface, kubeClient kubernetes.Inter
// TODO confusing naming switching from isUpdate (controller) -> isDue (here)[]
// WatchScaledObjectWithContext runs a handleScaleLoop go-routine for the scaledObject
func (h *ScaleHandler) WatchScaledObjectWithContext(ctx context.Context, scaledObject *keda_v1alpha1.ScaledObject, isDue bool) {
- h.createOrUpdateHPAForScaledObject(ctx, scaledObject)
+ h.createHPAWithRetry(scaledObject, true)
go h.handleScaleLoop(ctx, scaledObject, isDue)
}
@@ -77,7 +80,6 @@ func (h *ScaleHandler) GetExternalMetricNames() []string {
// GetScaledObjectMetrics is used by the metric adapter in provider.go to get the value for a metric for a scaled object
func (h *ScaleHandler) GetScaledObjectMetrics(namespace string, metricSelector labels.Selector, metricName string) ([]external_metrics.ExternalMetricValue, error) {
// get the scaled objects matching namespace and labels
- log.Debugf("Getting metrics for namespace %s MetricName %s Metric Selector %s", namespace, metricName, metricSelector.String())
scaledObjectQuerier := h.kedaClient.KedaV1alpha1().ScaledObjects(namespace)
scaledObjects, err := scaledObjectQuerier.List(meta_v1.ListOptions{LabelSelector: metricSelector.String()})
if err != nil {
@@ -88,7 +90,11 @@ func (h *ScaleHandler) GetScaledObjectMetrics(namespace string, metricSelector l
scaledObject := &scaledObjects.Items[0]
matchingMetrics := []external_metrics.ExternalMetricValue{}
- scalers, _ := h.getScalers(scaledObject)
+ scalers, _, err := h.getScalers(scaledObject)
+ if err != nil {
+ return nil, fmt.Errorf("Error when getting scalers %s", err)
+ }
+
for _, scaler := range scalers {
metrics, err := scaler.GetMetrics(context.TODO(), metricName, metricSelector)
if err != nil {
@@ -103,6 +109,41 @@ func (h *ScaleHandler) GetScaledObjectMetrics(namespace string, metricSelector l
return matchingMetrics, nil
}
+func (h *ScaleHandler) createHPAWithRetry(scaledObject *keda_v1alpha1.ScaledObject, createUpdateOverride bool) {
+ deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName
+ if deploymentName == "" {
+ log.Errorf("Notified about ScaledObject with missing deployment name: %s", scaledObject.GetName())
+ return
+ }
+ hpaName := fmt.Sprintf("keda-hpa-%s", deploymentName)
+ existsInRetryList := h.doesHPAExistInRetryList(hpaName)
+ if existsInRetryList || createUpdateOverride {
+ err := h.createOrUpdateHPAForScaledObject(scaledObject)
+ if err != nil {
+ log.Errorf("Error creating or updating HPA for scaled object %s: %s", scaledObject.GetName(), err)
+ }
+
+ h.hpaCreateLock.Lock()
+ defer h.hpaCreateLock.Unlock()
+ if err != nil {
+ h.hpasToCreate[hpaName] = true
+ if !existsInRetryList {
+ log.Debugf("createHPAWithRetry ScaledObject %s is added to retry list", scaledObject.GetName())
+ }
+ } else if existsInRetryList {
+ delete(h.hpasToCreate, hpaName)
+ log.Debugf("createHPAWithRetry ScaledObject %s is removed from retry list", scaledObject.GetName())
+ }
+ }
+}
+
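+// doesHPAExistInRetryList reports whether the named HPA is queued for a retried create.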
+func (h *ScaleHandler) doesHPAExistInRetryList(hpaName string) bool {
+ h.hpaCreateLock.RLock()
+ defer h.hpaCreateLock.RUnlock()
+ _, found := h.hpasToCreate[hpaName]
+ return found
+}
+
func (h *ScaleHandler) deleteHPAForScaledObject(scaledObject *keda_v1alpha1.ScaledObject) {
deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName
if deploymentName == "" {
@@ -111,7 +152,11 @@ func (h *ScaleHandler) deleteHPAForScaledObject(scaledObject *keda_v1alpha1.Scal
}
scaledObjectNamespace := scaledObject.GetNamespace()
- scalers, _ := h.getScalers(scaledObject)
+ scalers, _, err := h.getScalers(scaledObject)
+ if err != nil {
+ log.Errorf("Error when getting scalers %s", err)
+ }
+
for _, scaler := range scalers {
metricSpecs := scaler.GetMetricSpecForScaling()
for _, metricSpec := range metricSpecs {
@@ -122,7 +167,7 @@ func (h *ScaleHandler) deleteHPAForScaledObject(scaledObject *keda_v1alpha1.Scal
hpaName := fmt.Sprintf("keda-hpa-%s", deploymentName)
deleteOptions := &meta_v1.DeleteOptions{}
- err := h.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(scaledObjectNamespace).Delete(hpaName, deleteOptions)
+ err = h.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(scaledObjectNamespace).Delete(hpaName, deleteOptions)
if apierrors.IsNotFound(err) {
log.Warnf("HPA with namespace %s and name %s is not found", scaledObjectNamespace, hpaName)
} else if err != nil {
@@ -130,6 +175,10 @@ func (h *ScaleHandler) deleteHPAForScaledObject(scaledObject *keda_v1alpha1.Scal
} else {
log.Infof("Deleted HPA with namespace %s and name %s", scaledObjectNamespace, hpaName)
}
+
+ h.hpaCreateLock.Lock()
+ defer h.hpaCreateLock.Unlock()
+ delete(h.hpasToCreate, hpaName)
}
func (h *ScaleHandler) addExternalMetricName(metricName string) {
@@ -150,15 +199,17 @@ func (h *ScaleHandler) removeExternalMetricName(metricName string) {
}
}
-func (h *ScaleHandler) createOrUpdateHPAForScaledObject(ctx context.Context, scaledObject *keda_v1alpha1.ScaledObject) {
+func (h *ScaleHandler) createOrUpdateHPAForScaledObject(scaledObject *keda_v1alpha1.ScaledObject) error {
deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName
if deploymentName == "" {
- log.Errorf("Notified about ScaledObject with missing deployment name: %s", scaledObject.GetName())
- return
+ return fmt.Errorf("Notified about ScaledObject with missing deployment name: %s", scaledObject.GetName())
}
var scaledObjectMetricSpecs []v2beta1.MetricSpec
- scalers, _ := h.getScalers(scaledObject)
+ scalers, _, err := h.getScalers(scaledObject)
+ if err != nil {
+ return fmt.Errorf("Error getting scalers %s", err)
+ }
for _, scaler := range scalers {
metricSpecs := scaler.GetMetricSpecForScaling()
@@ -208,29 +259,29 @@ func (h *ScaleHandler) createOrUpdateHPAForScaledObject(ctx context.Context, sca
},
}
- _, err := h.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(scaledObjectNamespace).Create(hpa)
+ _, err = h.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(scaledObjectNamespace).Create(hpa)
if apierrors.IsAlreadyExists(err) {
log.Infof("HPA with namespace %s and name %s already exists.Updating..", scaledObjectNamespace, hpaName)
_, err := h.kubeClient.AutoscalingV2beta1().HorizontalPodAutoscalers(scaledObjectNamespace).Update(hpa)
if err != nil {
- log.Errorf("Error updating HPA with namespace %s and name %s : %s\n", scaledObjectNamespace, hpaName, err)
+ return fmt.Errorf("error updating HPA with namespace %s and name %s : %s", scaledObjectNamespace, hpaName, err)
} else {
log.Infof("Updated HPA with namespace %s and name %s", scaledObjectNamespace, hpaName)
}
} else if err != nil {
- log.Errorf("Error creating HPA with namespace %s and name %s : %s\n", scaledObjectNamespace, hpaName, err)
+ return fmt.Errorf("error creating HPA with namespace %s and name %s : %s", scaledObjectNamespace, hpaName, err)
} else {
log.Infof("Created HPA with namespace %s and name %s", scaledObjectNamespace, hpaName)
}
+
+ return nil
}
// This method blocks forever and checks the scaledObject based on its pollingInterval
// if isDue is set to true, the method will check the scaledObject right away. Otherwise
// it'll wait for pollingInterval then check.
func (h *ScaleHandler) handleScaleLoop(ctx context.Context, scaledObject *keda_v1alpha1.ScaledObject, isDue bool) {
-
h.handleScale(ctx, scaledObject)
-
var pollingInterval time.Duration
if scaledObject.Spec.PollingInterval != nil {
pollingInterval = time.Second * time.Duration(*scaledObject.Spec.PollingInterval)
@@ -251,6 +302,7 @@ func (h *ScaleHandler) handleScaleLoop(ctx context.Context, scaledObject *keda_v
for {
select {
case <-time.After(getPollingInterval()):
+ h.createHPAWithRetry(scaledObject, false)
h.handleScale(ctx, scaledObject)
case <-ctx.Done():
log.Debugf("context for scaledObject (%s/%s) canceled", scaledObject.GetNamespace(), scaledObject.GetName())
@@ -262,10 +314,14 @@ func (h *ScaleHandler) handleScaleLoop(ctx context.Context, scaledObject *keda_v
// handleScale contains the main logic for the ScaleHandler scaling logic.
// It'll check each trigger active status then call scaleDeployment
func (h *ScaleHandler) handleScale(ctx context.Context, scaledObject *keda_v1alpha1.ScaledObject) {
- scalers, deployment := h.getScalers(scaledObject)
+	scalers, deployment, err := h.getScalers(scaledObject)
+	if err != nil {
+		log.Errorf("Error getting scalers: %s", err)
+		return
+	}
	if deployment == nil {
		return
	}
isScaledObjectActive := false
@@ -420,7 +476,7 @@ func (h *ScaleHandler) resolveEnv(deployment *apps_v1.Deployment, containerName
resolved[k] = v
}
} else {
- log.Errorf("Error reading config ref %s on deployment %s/%s: %s", source.ConfigMapRef, deployment.GetNamespace(), deployment.GetName(), err)
+ return nil, fmt.Errorf("error reading config ref %s on deployment %s/%s: %s", source.ConfigMapRef, deployment.GetNamespace(), deployment.GetName(), err)
}
} else if source.SecretRef != nil {
if secretsMap, err := h.resolveSecretMap(source.SecretRef, deployment.GetNamespace()); err == nil {
@@ -428,7 +484,7 @@ func (h *ScaleHandler) resolveEnv(deployment *apps_v1.Deployment, containerName
resolved[k] = v
}
} else {
- log.Errorf("Error reading secret ref %s on deployment %s/%s: %s", source.SecretRef, deployment.GetNamespace(), deployment.GetName(), err)
+ return nil, fmt.Errorf("error reading secret ref %s on deployment %s/%s: %s", source.SecretRef, deployment.GetNamespace(), deployment.GetName(), err)
}
}
}
@@ -447,25 +503,24 @@ func (h *ScaleHandler) resolveEnv(deployment *apps_v1.Deployment, containerName
// env is a secret selector
value, err = h.resolveSecretValue(envVar.ValueFrom.SecretKeyRef, envVar.ValueFrom.SecretKeyRef.Key, deployment.GetNamespace())
if err != nil {
- log.Errorf("Error resolving secret name %s for env %s in deployment %s/%s. Skipping",
+ return nil, fmt.Errorf("error resolving secret name %s for env %s in deployment %s/%s",
envVar.ValueFrom.SecretKeyRef,
envVar.Name,
deployment.GetNamespace(),
deployment.GetName())
- continue
}
} else if envVar.ValueFrom.ConfigMapKeyRef != nil {
// env is a configMap selector
value, err = h.resolveConfigValue(envVar.ValueFrom.ConfigMapKeyRef, envVar.ValueFrom.ConfigMapKeyRef.Key, deployment.GetNamespace())
if err != nil {
- log.Errorf("Error resolving config %s for env %s in deployment %s/%s. Skippking",
+ return nil, fmt.Errorf("error resolving config %s for env %s in deployment %s/%s",
envVar.ValueFrom.ConfigMapKeyRef,
envVar.Name,
deployment.GetName(),
deployment.GetNamespace())
}
} else {
- log.Errorf("Cannot resolve env %s to a value. fieldRef and resourceFieldRef env are skipped. Skipping", envVar.Name)
+ return nil, fmt.Errorf("cannot resolve env %s to a value. fieldRef and resourceFieldRef env are skipped", envVar.Name)
}
}
resolved[envVar.Name] = value
@@ -518,37 +573,33 @@ func (h *ScaleHandler) resolveConfigValue(configKeyRef *core_v1.ConfigMapKeySele
return string(configCollection.Data[keyName]), nil
}
-func (h *ScaleHandler) getScalers(scaledObject *keda_v1alpha1.ScaledObject) ([]scalers.Scaler, *apps_v1.Deployment) {
+func (h *ScaleHandler) getScalers(scaledObject *keda_v1alpha1.ScaledObject) ([]scalers.Scaler, *apps_v1.Deployment, error) {
scalers := []scalers.Scaler{}
deploymentName := scaledObject.Spec.ScaleTargetRef.DeploymentName
if deploymentName == "" {
- log.Errorf("Notified about ScaledObject with missing deployment name: %s", scaledObject.GetName())
- return scalers, nil
+ return scalers, nil, fmt.Errorf("notified about ScaledObject with missing deployment name: %s", scaledObject.GetName())
}
deployment, err := h.kubeClient.AppsV1().Deployments(scaledObject.GetNamespace()).Get(deploymentName, meta_v1.GetOptions{})
if err != nil {
- log.Errorf("Error getting deployment: %s", err)
- return scalers, nil
+ return scalers, nil, fmt.Errorf("error getting deployment: %s", err)
}
resolvedEnv, err := h.resolveEnv(deployment, scaledObject.Spec.ScaleTargetRef.ContainerName)
if err != nil {
- log.Errorf("Error resolving secrets for deployment: %s", err)
- return scalers, nil
+ return scalers, nil, fmt.Errorf("error resolving secrets for deployment: %s", err)
}
for i, trigger := range scaledObject.Spec.Triggers {
scaler, err := h.getScaler(trigger, resolvedEnv)
if err != nil {
- log.Debugf("error for trigger #%d: %s", i, err)
- continue
+ return scalers, nil, fmt.Errorf("error getting scaler for trigger #%d: %s", i, err)
}
scalers = append(scalers, scaler)
}
- return scalers, deployment
+ return scalers, deployment, nil
}
func (h *ScaleHandler) getScaler(trigger keda_v1alpha1.ScaleTriggers, resolvedEnv map[string]string) (scalers.Scaler, error) {
@@ -565,6 +616,8 @@ func (h *ScaleHandler) getScaler(trigger keda_v1alpha1.ScaleTriggers, resolvedEn
return scalers.NewRabbitMQScaler(resolvedEnv, trigger.Metadata)
case "prometheus":
return scalers.NewPrometheusScaler(resolvedEnv, trigger.Metadata)
+ case "redis":
+ return scalers.NewRedisScaler(resolvedEnv, trigger.Metadata)
default:
return nil, fmt.Errorf("no scaler found for type: %s", trigger.Type)
}
diff --git a/pkg/provider/provider.go b/pkg/provider/provider.go
index eaa3662a577..19b2df4b2fe 100644
--- a/pkg/provider/provider.go
+++ b/pkg/provider/provider.go
@@ -50,6 +50,7 @@ func (p *KedaProvider) GetExternalMetric(namespace string, metricSelector labels
log.Debugf("Received request for namespace: %s, metric name: %s, metric selectors: %s", namespace, info.Metric, metricSelector.String())
externalmetrics, err := p.scaleHandler.GetScaledObjectMetrics(namespace, metricSelector, info.Metric)
if err != nil {
+ log.Errorf("Cannot get metrics for Namespace %s MetricSelector %s and Metrics %s. Error: %s", namespace, metricSelector, info.Metric, err)
return nil, err
}
diff --git a/pkg/scalers/rabbitmq_scaler_test.go b/pkg/scalers/rabbitmq_scaler_test.go
new file mode 100644
index 00000000000..4a70ab133f1
--- /dev/null
+++ b/pkg/scalers/rabbitmq_scaler_test.go
@@ -0,0 +1,35 @@
+package scalers
+
+import (
+ "testing"
+)
+
+type parseRabbitMQMetadataTestData struct {
+ metadata map[string]string
+ isError bool
+}
+
+var testRabbitMQMetadata = []parseRabbitMQMetadataTestData{
+ // nothing passed
+ {map[string]string{}, true},
+ // properly formed metadata
+ {map[string]string{"queueLength": "10", "queueName": "sample", "host": "amqp://rabbitmq"}, false},
+ // malformed queueLength
+ {map[string]string{"queueLength": "AA", "queueName": "sample", "host": "amqp://rabbitmq"}, true},
+ // missing host
+ {map[string]string{"queueLength": "AA", "queueName": "sample"}, true},
+ // missing queueName
+ {map[string]string{"queueLength": "10", "host": "amqp://rabbitmq"}, true},
+}
+
+func TestRabbitMQParseMetadata(t *testing.T) {
+ for _, testData := range testRabbitMQMetadata {
+ _, err := parseRabbitMQMetadata(testData.metadata)
+ if err != nil && !testData.isError {
+ t.Error("Expected success but got error", err)
+ }
+ if testData.isError && err == nil {
+ t.Error("Expected error but got success")
+ }
+ }
+}
diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go
new file mode 100644
index 00000000000..5c9d0425b3b
--- /dev/null
+++ b/pkg/scalers/redis_scaler.go
@@ -0,0 +1,144 @@
+package scalers
+
+import (
+ "context"
+ "fmt"
+ "strconv"
+
+ log "github.com/Sirupsen/logrus"
+ "github.com/go-redis/redis"
+ v2beta1 "k8s.io/api/autoscaling/v2beta1"
+ "k8s.io/apimachinery/pkg/api/resource"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/metrics/pkg/apis/external_metrics"
+)
+
+const (
+ listLengthMetricName = "RedisListLength"
+ defaultTargetListLength = 5
+ defaultRedisAddress = "redis-master.default.svc.cluster.local:6379"
+ defaultRedisPassword = ""
+)
+
+type redisScaler struct {
+ metadata *redisMetadata
+}
+
+type redisMetadata struct {
+ targetListLength int
+ listName string
+ address string
+ password string
+}
+
+// NewRedisScaler creates a new redisScaler
+func NewRedisScaler(resolvedEnv, metadata map[string]string) (Scaler, error) {
+ meta, err := parseRedisMetadata(metadata, resolvedEnv)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing redis metadata: %s", err)
+ }
+
+ return &redisScaler{
+ metadata: meta,
+ }, nil
+}
+
+func parseRedisMetadata(metadata, resolvedEnv map[string]string) (*redisMetadata, error) {
+ meta := redisMetadata{}
+ meta.targetListLength = defaultTargetListLength
+
+ if val, ok := metadata["listLength"]; ok {
+ listLength, err := strconv.Atoi(val)
+ if err != nil {
+ return nil, fmt.Errorf("List length parsing error %s", err.Error())
+ } else {
+ meta.targetListLength = listLength
+ }
+ }
+
+ if val, ok := metadata["listName"]; ok {
+ meta.listName = val
+ } else {
+ return nil, fmt.Errorf("no list name given")
+ }
+
+ address := defaultRedisAddress
+ if val, ok := metadata["address"]; ok && val != "" {
+ address = val
+ }
+
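+	// The address value is the name of an env var on the target deployment,
+	// already resolved into resolvedEnv; its value must be host:port.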
+ if val, ok := resolvedEnv[address]; ok {
+ meta.address = val
+ } else {
+ return nil, fmt.Errorf("no address given. Address should be in the format of host:port")
+ }
+
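+	// "password", when set, likewise names an env var holding the Redis password.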
+ meta.password = defaultRedisPassword
+ if val, ok := metadata["password"]; ok && val != "" {
+ if passd, ok := resolvedEnv[val]; ok {
+ meta.password = passd
+ }
+ }
+
+ return &meta, nil
+}
+
+// IsActive checks if there is any element in the Redis list
+func (s *redisScaler) IsActive(ctx context.Context) (bool, error) {
+ length, err := getRedisListLength(
+ ctx, s.metadata.address, s.metadata.password, s.metadata.listName)
+
+ if err != nil {
+ log.Errorf("error %s", err)
+ return false, err
+ }
+
+ return length > 0, nil
+}
+
+func (s *redisScaler) Close() error {
+ return nil
+}
+
+// GetMetricSpecForScaling returns the metric spec for the HPA
+func (s *redisScaler) GetMetricSpecForScaling() []v2beta1.MetricSpec {
+ targetListLengthQty := resource.NewQuantity(int64(s.metadata.targetListLength), resource.DecimalSI)
+ externalMetric := &v2beta1.ExternalMetricSource{MetricName: listLengthMetricName, TargetAverageValue: targetListLengthQty}
+ metricSpec := v2beta1.MetricSpec{External: externalMetric, Type: externalMetricType}
+ return []v2beta1.MetricSpec{metricSpec}
+}
+
+// GetMetrics connects to Redis and finds the length of the list
+func (s *redisScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
+ listLen, err := getRedisListLength(ctx, s.metadata.address, s.metadata.password, s.metadata.listName)
+
+ if err != nil {
+ log.Errorf("error getting list length %s", err)
+ return []external_metrics.ExternalMetricValue{}, err
+ }
+
+ metric := external_metrics.ExternalMetricValue{
+ MetricName: metricName,
+ Value: *resource.NewQuantity(listLen, resource.DecimalSI),
+ Timestamp: metav1.Now(),
+ }
+
+ return append([]external_metrics.ExternalMetricValue{}, metric), nil
+}
+
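+// getRedisListLength opens a short-lived client for the given address and
+// returns the LLEN of listName.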
+func getRedisListLength(ctx context.Context, address string, password string, listName string) (int64, error) {
+	client := redis.NewClient(&redis.Options{
+		Addr:     address,
+		Password: password,
+		DB:       0,
+	})
+	// Close the client when done so each poll does not leak pooled connections.
+	defer client.Close()
+
+	cmd := client.LLen(listName)
+
+ if cmd.Err() != nil {
+ return -1, cmd.Err()
+ }
+
+ return cmd.Result()
+}
diff --git a/pkg/scalers/redis_scaler_test.go b/pkg/scalers/redis_scaler_test.go
new file mode 100644
index 00000000000..cce7f844f74
--- /dev/null
+++ b/pkg/scalers/redis_scaler_test.go
@@ -0,0 +1,40 @@
+package scalers
+
+import (
+ "testing"
+)
+
+var testRedisResolvedEnv = map[string]string{
+ "REDIS_HOST": "none",
+ "REDIS_PASSWORD": "none",
+}
+
+type parseRedisMetadataTestData struct {
+ metadata map[string]string
+ isError bool
+}
+
+var testRedisMetadata = []parseRedisMetadataTestData{
+ // nothing passed
+ {map[string]string{}, true},
+ // properly formed listName
+ {map[string]string{"listName": "mylist", "listLength": "10", "address": "REDIS_HOST", "password": "REDIS_PASSWORD"}, false},
+ // properly formed listName, empty address
+ {map[string]string{"listName": "mylist", "listLength": "10", "address": "", "password": ""}, true},
+ // improperly formed listLength
+ {map[string]string{"listName": "mylist", "listLength": "AA", "address": "REDIS_HOST", "password": ""}, true},
+ // address does not resolve
+ {map[string]string{"listName": "mylist", "listLength": "0", "address": "REDIS_WRONG", "password": ""}, true},
+}
+
+func TestRedisParseMetadata(t *testing.T) {
+ for _, testData := range testRedisMetadata {
+ _, err := parseRedisMetadata(testData.metadata, testRedisResolvedEnv)
+ if err != nil && !testData.isError {
+ t.Error("Expected success but got error", err)
+ }
+ if testData.isError && err == nil {
+ t.Error("Expected error but got success")
+ }
+ }
+}
diff --git a/vendor/github.com/go-redis/redis/LICENSE b/vendor/github.com/go-redis/redis/LICENSE
new file mode 100644
index 00000000000..298bed9beaf
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/cluster.go b/vendor/github.com/go-redis/redis/cluster.go
new file mode 100644
index 00000000000..ab2c76f05e9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/cluster.go
@@ -0,0 +1,1627 @@
+package redis
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "math"
+ "math/rand"
+ "net"
+ "runtime"
+ "sort"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/hashtag"
+ "github.com/go-redis/redis/internal/pool"
+ "github.com/go-redis/redis/internal/proto"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+ // A seed list of host:port addresses of cluster nodes.
+ Addrs []string
+
+ // The maximum number of retries before giving up. Command is retried
+ // on network errors and MOVED/ASK redirects.
+ // Default is 8 retries.
+ MaxRedirects int
+
+ // Enables read-only commands on slave nodes.
+ ReadOnly bool
+ // Allows routing read-only commands to the closest master or slave node.
+ // It automatically enables ReadOnly.
+ RouteByLatency bool
+ // Allows routing read-only commands to the random master or slave node.
+ // It automatically enables ReadOnly.
+ RouteRandomly bool
+
+ // Optional function that returns cluster slots information.
+ // It is useful to manually create cluster of standalone Redis servers
+ // and load-balance read/write operations between master and slaves.
+ // It can use service like ZooKeeper to maintain configuration information
+ // and Cluster.ReloadState to manually trigger state reloading.
+ ClusterSlots func() ([]ClusterSlot, error)
+
+ // Optional hook that is called when a new node is created.
+ OnNewNode func(*Client)
+
+ // Following options are copied from Options struct.
+
+ OnConnect func(*Conn) error
+
+ Password string
+
+ MaxRetries int
+ MinRetryBackoff time.Duration
+ MaxRetryBackoff time.Duration
+
+ DialTimeout time.Duration
+ ReadTimeout time.Duration
+ WriteTimeout time.Duration
+
+ // PoolSize applies per cluster node and not for the whole cluster.
+ PoolSize int
+ MinIdleConns int
+ MaxConnAge time.Duration
+ PoolTimeout time.Duration
+ IdleTimeout time.Duration
+ IdleCheckFrequency time.Duration
+
+ TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+ if opt.MaxRedirects == -1 {
+ opt.MaxRedirects = 0
+ } else if opt.MaxRedirects == 0 {
+ opt.MaxRedirects = 8
+ }
+
+ if (opt.RouteByLatency || opt.RouteRandomly) && opt.ClusterSlots == nil {
+ opt.ReadOnly = true
+ }
+
+ if opt.PoolSize == 0 {
+ opt.PoolSize = 5 * runtime.NumCPU()
+ }
+
+ switch opt.ReadTimeout {
+ case -1:
+ opt.ReadTimeout = 0
+ case 0:
+ opt.ReadTimeout = 3 * time.Second
+ }
+ switch opt.WriteTimeout {
+ case -1:
+ opt.WriteTimeout = 0
+ case 0:
+ opt.WriteTimeout = opt.ReadTimeout
+ }
+
+ switch opt.MinRetryBackoff {
+ case -1:
+ opt.MinRetryBackoff = 0
+ case 0:
+ opt.MinRetryBackoff = 8 * time.Millisecond
+ }
+ switch opt.MaxRetryBackoff {
+ case -1:
+ opt.MaxRetryBackoff = 0
+ case 0:
+ opt.MaxRetryBackoff = 512 * time.Millisecond
+ }
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+ const disableIdleCheck = -1
+
+ return &Options{
+ OnConnect: opt.OnConnect,
+
+ MaxRetries: opt.MaxRetries,
+ MinRetryBackoff: opt.MinRetryBackoff,
+ MaxRetryBackoff: opt.MaxRetryBackoff,
+ Password: opt.Password,
+ readOnly: opt.ReadOnly,
+
+ DialTimeout: opt.DialTimeout,
+ ReadTimeout: opt.ReadTimeout,
+ WriteTimeout: opt.WriteTimeout,
+
+ PoolSize: opt.PoolSize,
+ MinIdleConns: opt.MinIdleConns,
+ MaxConnAge: opt.MaxConnAge,
+ PoolTimeout: opt.PoolTimeout,
+ IdleTimeout: opt.IdleTimeout,
+ IdleCheckFrequency: disableIdleCheck,
+
+ TLSConfig: opt.TLSConfig,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+ Client *Client
+
+ latency uint32 // atomic
+ generation uint32 // atomic
+ loading uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+ opt := clOpt.clientOptions()
+ opt.Addr = addr
+ node := clusterNode{
+ Client: NewClient(opt),
+ }
+
+ node.latency = math.MaxUint32
+ if clOpt.RouteByLatency {
+ go node.updateLatency()
+ }
+
+ if clOpt.OnNewNode != nil {
+ clOpt.OnNewNode(node.Client)
+ }
+
+ return &node
+}
+
+func (n *clusterNode) String() string {
+ return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+ return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+ const probes = 10
+
+ var latency uint32
+ for i := 0; i < probes; i++ {
+ start := time.Now()
+ n.Client.Ping()
+ probe := uint32(time.Since(start) / time.Microsecond)
+ latency = (latency + probe) / 2
+ }
+ atomic.StoreUint32(&n.latency, latency)
+}
+
+func (n *clusterNode) Latency() time.Duration {
+ latency := atomic.LoadUint32(&n.latency)
+ return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsLoading() {
+ atomic.StoreUint32(&n.loading, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Loading() bool {
+ const minute = int64(time.Minute / time.Second)
+
+ loading := atomic.LoadUint32(&n.loading)
+ if loading == 0 {
+ return false
+ }
+ if time.Now().Unix()-int64(loading) < minute {
+ return true
+ }
+ atomic.StoreUint32(&n.loading, 0)
+ return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+ return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+ for {
+ v := atomic.LoadUint32(&n.generation)
+ if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+ break
+ }
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+ opt *ClusterOptions
+
+ mu sync.RWMutex
+ allAddrs []string
+ allNodes map[string]*clusterNode
+ clusterAddrs []string
+ closed bool
+
+ _generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+ return &clusterNodes{
+ opt: opt,
+
+ allAddrs: opt.Addrs,
+ allNodes: make(map[string]*clusterNode),
+ }
+}
+
+func (c *clusterNodes) Close() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil
+ }
+ c.closed = true
+
+ var firstErr error
+ for _, node := range c.allNodes {
+ if err := node.Client.Close(); err != nil && firstErr == nil {
+ firstErr = err
+ }
+ }
+
+ c.allNodes = nil
+ c.clusterAddrs = nil
+
+ return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+ var addrs []string
+ c.mu.RLock()
+ closed := c.closed
+ if !closed {
+ if len(c.clusterAddrs) > 0 {
+ addrs = c.clusterAddrs
+ } else {
+ addrs = c.allAddrs
+ }
+ }
+ c.mu.RUnlock()
+
+ if closed {
+ return nil, pool.ErrClosed
+ }
+ if len(addrs) == 0 {
+ return nil, errClusterNoNodes
+ }
+ return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+ return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+ var collected []*clusterNode
+ c.mu.Lock()
+ for addr, node := range c.allNodes {
+ if node.Generation() >= generation {
+ continue
+ }
+
+ c.clusterAddrs = remove(c.clusterAddrs, addr)
+ delete(c.allNodes, addr)
+ collected = append(collected, node)
+ }
+ c.mu.Unlock()
+
+ for _, node := range collected {
+ _ = node.Client.Close()
+ }
+}
+
+func (c *clusterNodes) Get(addr string) (*clusterNode, error) {
+ var node *clusterNode
+ var err error
+ c.mu.RLock()
+ if c.closed {
+ err = pool.ErrClosed
+ } else {
+ node = c.allNodes[addr]
+ }
+ c.mu.RUnlock()
+ return node, err
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+ node, err := c.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node != nil {
+ return node, nil
+ }
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ node, ok := c.allNodes[addr]
+ if ok {
+ return node, err
+ }
+
+ node = newClusterNode(c.opt, addr)
+
+ c.allAddrs = appendIfNotExists(c.allAddrs, addr)
+ c.clusterAddrs = append(c.clusterAddrs, addr)
+ c.allNodes[addr] = node
+
+ return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+
+ if c.closed {
+ return nil, pool.ErrClosed
+ }
+
+ cp := make([]*clusterNode, 0, len(c.allNodes))
+ for _, node := range c.allNodes {
+ cp = append(cp, node)
+ }
+ return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+ addrs, err := c.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ n := rand.Intn(len(addrs))
+ return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+ start, end int
+ nodes []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+ return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+ return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+ p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+ nodes *clusterNodes
+ Masters []*clusterNode
+ Slaves []*clusterNode
+
+ slots []*clusterSlot
+
+ generation uint32
+ createdAt time.Time
+}
+
+func newClusterState(
+ nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+ c := clusterState{
+ nodes: nodes,
+
+ slots: make([]*clusterSlot, 0, len(slots)),
+
+ generation: nodes.NextGeneration(),
+ createdAt: time.Now(),
+ }
+
+ originHost, _, _ := net.SplitHostPort(origin)
+ isLoopbackOrigin := isLoopback(originHost)
+
+ for _, slot := range slots {
+ var nodes []*clusterNode
+ for i, slotNode := range slot.Nodes {
+ addr := slotNode.Addr
+ if !isLoopbackOrigin {
+ addr = replaceLoopbackHost(addr, originHost)
+ }
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return nil, err
+ }
+
+ node.SetGeneration(c.generation)
+ nodes = append(nodes, node)
+
+ if i == 0 {
+ c.Masters = appendUniqueNode(c.Masters, node)
+ } else {
+ c.Slaves = appendUniqueNode(c.Slaves, node)
+ }
+ }
+
+ c.slots = append(c.slots, &clusterSlot{
+ start: slot.Start,
+ end: slot.End,
+ nodes: nodes,
+ })
+ }
+
+ sort.Sort(clusterSlotSlice(c.slots))
+
+ time.AfterFunc(time.Minute, func() {
+ nodes.GC(c.generation)
+ })
+
+ return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+ nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+ if err != nil {
+ return nodeAddr
+ }
+
+ nodeIP := net.ParseIP(nodeHost)
+ if nodeIP == nil {
+ return nodeAddr
+ }
+
+ if !nodeIP.IsLoopback() {
+ return nodeAddr
+ }
+
+ // Use origin host which is not loopback and node port.
+ return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+ ip := net.ParseIP(host)
+ if ip == nil {
+ return true
+ }
+ return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+ nodes := c.slotNodes(slot)
+ switch len(nodes) {
+ case 0:
+ return c.nodes.Random()
+ case 1:
+ return nodes[0], nil
+ case 2:
+ if slave := nodes[1]; !slave.Loading() {
+ return slave, nil
+ }
+ return nodes[0], nil
+ default:
+ var slave *clusterNode
+ for i := 0; i < 10; i++ {
+ n := rand.Intn(len(nodes)-1) + 1
+ slave = nodes[n]
+ if !slave.Loading() {
+ return slave, nil
+ }
+ }
+
+ // All slaves are loading - use master.
+ return nodes[0], nil
+ }
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+ const threshold = time.Millisecond
+
+ nodes := c.slotNodes(slot)
+ if len(nodes) == 0 {
+ return c.nodes.Random()
+ }
+
+ var node *clusterNode
+ for _, n := range nodes {
+ if n.Loading() {
+ continue
+ }
+ if node == nil || node.Latency()-n.Latency() > threshold {
+ node = n
+ }
+ }
+ return node, nil
+}
+
+func (c *clusterState) slotRandomNode(slot int) *clusterNode {
+ nodes := c.slotNodes(slot)
+ n := rand.Intn(len(nodes))
+ return nodes[n]
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+ i := sort.Search(len(c.slots), func(i int) bool {
+ return c.slots[i].end >= slot
+ })
+ if i >= len(c.slots) {
+ return nil
+ }
+ x := c.slots[i]
+ if slot >= x.start && slot <= x.end {
+ return x.nodes
+ }
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+ load func() (*clusterState, error)
+
+ state atomic.Value
+ reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func() (*clusterState, error)) *clusterStateHolder {
+ return &clusterStateHolder{
+ load: fn,
+ }
+}
+
+func (c *clusterStateHolder) Reload() (*clusterState, error) {
+ state, err := c.load()
+ if err != nil {
+ return nil, err
+ }
+ c.state.Store(state)
+ return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+ if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+ return
+ }
+ go func() {
+ defer atomic.StoreUint32(&c.reloading, 0)
+
+ _, err := c.Reload()
+ if err != nil {
+ return
+ }
+ time.Sleep(100 * time.Millisecond)
+ }()
+}
+
+func (c *clusterStateHolder) Get() (*clusterState, error) {
+ v := c.state.Load()
+ if v != nil {
+ state := v.(*clusterState)
+ if time.Since(state.createdAt) > time.Minute {
+ c.LazyReload()
+ }
+ return state, nil
+ }
+ return c.Reload()
+}
+
+func (c *clusterStateHolder) ReloadOrGet() (*clusterState, error) {
+ state, err := c.Reload()
+ if err == nil {
+ return state, nil
+ }
+ return c.Get()
+}
+
+//------------------------------------------------------------------------------
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+ cmdable
+
+ ctx context.Context
+
+ opt *ClusterOptions
+ nodes *clusterNodes
+ state *clusterStateHolder
+ cmdsInfoCache *cmdsInfoCache
+
+ process func(Cmder) error
+ processPipeline func([]Cmder) error
+ processTxPipeline func([]Cmder) error
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+ opt.init()
+
+ c := &ClusterClient{
+ opt: opt,
+ nodes: newClusterNodes(opt),
+ }
+ c.state = newClusterStateHolder(c.loadState)
+ c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+
+ c.process = c.defaultProcess
+ c.processPipeline = c.defaultProcessPipeline
+ c.processTxPipeline = c.defaultProcessTxPipeline
+
+ c.init()
+ if opt.IdleCheckFrequency > 0 {
+ go c.reaper(opt.IdleCheckFrequency)
+ }
+
+ return c
+}
+
+func (c *ClusterClient) init() {
+ c.cmdable.setProcessor(c.Process)
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState() error {
+ _, err := c.state.Reload()
+ return err
+}
+
+func (c *ClusterClient) Context() context.Context {
+ if c.ctx != nil {
+ return c.ctx
+ }
+ return context.Background()
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+ if ctx == nil {
+ panic("nil context")
+ }
+ c2 := c.clone()
+ c2.ctx = ctx
+ return c2
+}
+
+func (c *ClusterClient) clone() *ClusterClient {
+ cp := *c
+ cp.init()
+ return &cp
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+ return c.opt
+}
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+ return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo() (map[string]*CommandInfo, error) {
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+ for _, addr := range addrs {
+ node, err := c.nodes.Get(addr)
+ if err != nil {
+ return nil, err
+ }
+ if node == nil {
+ continue
+ }
+
+ info, err := node.Client.Command().Result()
+ if err == nil {
+ return info, nil
+ }
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+ cmdsInfo, err := c.cmdsInfoCache.Get()
+ if err != nil {
+ return nil
+ }
+
+ info := cmdsInfo[name]
+ if info == nil {
+ internal.Logf("info for cmd=%s not found", name)
+ }
+ return info
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+ if pos == 0 {
+ return hashtag.RandomSlot()
+ }
+ firstKey := cmd.stringArg(pos)
+ return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+ args := cmd.Args()
+ if args[0] == "cluster" && args[1] == "getkeysinslot" {
+ return args[2].(int)
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func (c *ClusterClient) cmdSlotAndNode(cmd Cmder) (int, *clusterNode, error) {
+ state, err := c.state.Get()
+ if err != nil {
+ return 0, nil, err
+ }
+
+ cmdInfo := c.cmdInfo(cmd.Name())
+ slot := c.cmdSlot(cmd)
+
+ if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+ if c.opt.RouteByLatency {
+ node, err := state.slotClosestNode(slot)
+ return slot, node, err
+ }
+
+ if c.opt.RouteRandomly {
+ node := state.slotRandomNode(slot)
+ return slot, node, nil
+ }
+
+ node, err := state.slotSlaveNode(slot)
+ return slot, node, err
+ }
+
+ node, err := state.slotMasterNode(slot)
+ return slot, node, err
+}
+
+func (c *ClusterClient) slotMasterNode(slot int) (*clusterNode, error) {
+ state, err := c.state.Get()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := state.slotNodes(slot)
+ if len(nodes) > 0 {
+ return nodes[0], nil
+ }
+ return c.nodes.Random()
+}
+
+func (c *ClusterClient) Watch(fn func(*Tx) error, keys ...string) error {
+ if len(keys) == 0 {
+ return fmt.Errorf("redis: Watch requires at least one key")
+ }
+
+ slot := hashtag.Slot(keys[0])
+ for _, key := range keys[1:] {
+ if hashtag.Slot(key) != slot {
+ err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+ return err
+ }
+ }
+
+ node, err := c.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
+ err = node.Client.Watch(fn, keys...)
+ if err == nil {
+ break
+ }
+ if err != Nil {
+ c.state.LazyReload()
+ }
+
+ moved, ask, addr := internal.IsMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if err == pool.ErrClosed || internal.IsReadOnlyError(err) {
+ node, err = c.slotMasterNode(slot)
+ if err != nil {
+ return err
+ }
+ continue
+ }
+
+ if internal.IsRetryableError(err, true) {
+ continue
+ }
+
+ return err
+ }
+
+ return err
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+ return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(args ...interface{}) *Cmd {
+ cmd := NewCmd(args...)
+ c.Process(cmd)
+ return cmd
+}
+
+func (c *ClusterClient) WrapProcess(
+ fn func(oldProcess func(Cmder) error) func(Cmder) error,
+) {
+ c.process = fn(c.process)
+}
+
+func (c *ClusterClient) Process(cmd Cmder) error {
+ return c.process(cmd)
+}
+
+func (c *ClusterClient) defaultProcess(cmd Cmder) error {
+ var node *clusterNode
+ var ask bool
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
+ if node == nil {
+ var err error
+ _, node, err = c.cmdSlotAndNode(cmd)
+ if err != nil {
+ cmd.setErr(err)
+ break
+ }
+ }
+
+ var err error
+ if ask {
+ pipe := node.Client.Pipeline()
+ _ = pipe.Process(NewCmd("ASKING"))
+ _ = pipe.Process(cmd)
+ _, err = pipe.Exec()
+ _ = pipe.Close()
+ ask = false
+ } else {
+ err = node.Client.Process(cmd)
+ }
+
+ // If there is no error - we are done.
+ if err == nil {
+ break
+ }
+ if err != Nil {
+ c.state.LazyReload()
+ }
+
+ // If slave is loading - pick another node.
+ if c.opt.ReadOnly && internal.IsLoadingError(err) {
+ node.MarkAsLoading()
+ node = nil
+ continue
+ }
+
+ var moved bool
+ var addr string
+ moved, ask, addr = internal.IsMovedError(err)
+ if moved || ask {
+ node, err = c.nodes.GetOrCreate(addr)
+ if err != nil {
+ break
+ }
+ continue
+ }
+
+ if err == pool.ErrClosed || internal.IsReadOnlyError(err) {
+ node = nil
+ continue
+ }
+
+ if internal.IsRetryableError(err, true) {
+ // First retry the same node.
+ if attempt == 0 {
+ continue
+ }
+
+ // Second try random node.
+ node, err = c.nodes.Random()
+ if err != nil {
+ break
+ }
+ continue
+ }
+
+ break
+ }
+
+ return cmd.Err()
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(fn func(client *Client) error) error {
+ state, err := c.state.ReloadOrGet()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, master := range state.Masters {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(master)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(fn func(client *Client) error) error {
+ state, err := c.state.ReloadOrGet()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ for _, slave := range state.Slaves {
+ wg.Add(1)
+ go func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }(slave)
+ }
+ wg.Wait()
+
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// ForEachNode concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachNode(fn func(client *Client) error) error {
+ state, err := c.state.ReloadOrGet()
+ if err != nil {
+ return err
+ }
+
+ var wg sync.WaitGroup
+ errCh := make(chan error, 1)
+ worker := func(node *clusterNode) {
+ defer wg.Done()
+ err := fn(node.Client)
+ if err != nil {
+ select {
+ case errCh <- err:
+ default:
+ }
+ }
+ }
+
+ for _, node := range state.Masters {
+ wg.Add(1)
+ go worker(node)
+ }
+ for _, node := range state.Slaves {
+ wg.Add(1)
+ go worker(node)
+ }
+
+ wg.Wait()
+ select {
+ case err := <-errCh:
+ return err
+ default:
+ return nil
+ }
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+ var acc PoolStats
+
+ state, _ := c.state.Get()
+ if state == nil {
+ return &acc
+ }
+
+ for _, node := range state.Masters {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ for _, node := range state.Slaves {
+ s := node.Client.connPool.Stats()
+ acc.Hits += s.Hits
+ acc.Misses += s.Misses
+ acc.Timeouts += s.Timeouts
+
+ acc.TotalConns += s.TotalConns
+ acc.IdleConns += s.IdleConns
+ acc.StaleConns += s.StaleConns
+ }
+
+ return &acc
+}
+
+func (c *ClusterClient) loadState() (*clusterState, error) {
+ if c.opt.ClusterSlots != nil {
+ slots, err := c.opt.ClusterSlots()
+ if err != nil {
+ return nil, err
+ }
+ return newClusterState(c.nodes, slots, "")
+ }
+
+ addrs, err := c.nodes.Addrs()
+ if err != nil {
+ return nil, err
+ }
+
+ var firstErr error
+ for _, addr := range addrs {
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ slots, err := node.Client.ClusterSlots().Result()
+ if err != nil {
+ if firstErr == nil {
+ firstErr = err
+ }
+ continue
+ }
+
+ return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+ }
+
+ return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+ ticker := time.NewTicker(idleCheckFrequency)
+ defer ticker.Stop()
+
+ for range ticker.C {
+ nodes, err := c.nodes.All()
+ if err != nil {
+ break
+ }
+
+ for _, node := range nodes {
+ _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+ if err != nil {
+ internal.Logf("ReapStaleConns failed: %s", err)
+ }
+ }
+ }
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processPipeline,
+ }
+ pipe.statefulCmdable.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *ClusterClient) Pipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.Pipeline().Pipelined(fn)
+}
+
+func (c *ClusterClient) WrapProcessPipeline(
+ fn func(oldProcess func([]Cmder) error) func([]Cmder) error,
+) {
+ c.processPipeline = fn(c.processPipeline)
+ c.processTxPipeline = fn(c.processTxPipeline)
+}
+
+func (c *ClusterClient) defaultProcessPipeline(cmds []Cmder) error {
+ cmdsMap := newCmdsMap()
+ err := c.mapCmdsByNode(cmds, cmdsMap)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap.m {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ cn, err := node.Client.getConn()
+ if err != nil {
+ if err == pool.ErrClosed {
+ c.mapCmdsByNode(cmds, failedCmds)
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ return
+ }
+
+ err = c.pipelineProcessCmds(node, cn, cmds, failedCmds)
+ node.Client.releaseConnStrict(cn, err)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+type cmdsMap struct {
+ mu sync.Mutex
+ m map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+ return &cmdsMap{
+ m: make(map[*clusterNode][]Cmder),
+ }
+}
+
+func (c *ClusterClient) mapCmdsByNode(cmds []Cmder, cmdsMap *cmdsMap) error {
+ state, err := c.state.Get()
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+
+ cmdsAreReadOnly := c.cmdsAreReadOnly(cmds)
+ for _, cmd := range cmds {
+ var node *clusterNode
+ var err error
+ if cmdsAreReadOnly {
+ _, node, err = c.cmdSlotAndNode(cmd)
+ } else {
+ slot := c.cmdSlot(cmd)
+ node, err = state.slotMasterNode(slot)
+ }
+ if err != nil {
+ return err
+ }
+ cmdsMap.mu.Lock()
+ cmdsMap.m[node] = append(cmdsMap.m[node], cmd)
+ cmdsMap.mu.Unlock()
+ }
+ return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+ for _, cmd := range cmds {
+ cmdInfo := c.cmdInfo(cmd.Name())
+ if cmdInfo == nil || !cmdInfo.ReadOnly {
+ return false
+ }
+ }
+ return true
+}
+
+func (c *ClusterClient) pipelineProcessCmds(
+ node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return writeCmd(wr, cmds...)
+ })
+ if err != nil {
+ setCmdsErr(cmds, err)
+ failedCmds.mu.Lock()
+ failedCmds.m[node] = cmds
+ failedCmds.mu.Unlock()
+ return err
+ }
+
+ err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ return c.pipelineReadCmds(node, rd, cmds, failedCmds)
+ })
+ return err
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+ node *clusterNode, rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ var firstErr error
+ for _, cmd := range cmds {
+ err := cmd.readReply(rd)
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(cmd, err, failedCmds) {
+ continue
+ }
+
+ if internal.IsRedisError(err) {
+ continue
+ }
+
+ failedCmds.mu.Lock()
+ failedCmds.m[node] = append(failedCmds.m[node], cmd)
+ failedCmds.mu.Unlock()
+ if firstErr == nil {
+ firstErr = err
+ }
+ }
+ return firstErr
+}
+
+func (c *ClusterClient) checkMovedErr(
+ cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+ moved, ask, addr := internal.IsMovedError(err)
+
+ if moved {
+ c.state.LazyReload()
+
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return false
+ }
+
+ failedCmds.mu.Lock()
+ failedCmds.m[node] = append(failedCmds.m[node], cmd)
+ failedCmds.mu.Unlock()
+ return true
+ }
+
+ if ask {
+ node, err := c.nodes.GetOrCreate(addr)
+ if err != nil {
+ return false
+ }
+
+ failedCmds.mu.Lock()
+ failedCmds.m[node] = append(failedCmds.m[node], NewCmd("ASKING"), cmd)
+ failedCmds.mu.Unlock()
+ return true
+ }
+
+ return false
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+ pipe := Pipeline{
+ exec: c.processTxPipeline,
+ }
+ pipe.statefulCmdable.setProcessor(pipe.Process)
+ return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(fn func(Pipeliner) error) ([]Cmder, error) {
+ return c.TxPipeline().Pipelined(fn)
+}
+
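+// defaultProcessTxPipeline groups commands by hash slot, since a MULTI/EXEC
+// transaction can only span keys in a single slot, and runs each group
+// through the same retry-on-redirect loop as the plain pipeline.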
+func (c *ClusterClient) defaultProcessTxPipeline(cmds []Cmder) error {
+ state, err := c.state.Get()
+ if err != nil {
+ return err
+ }
+
+ cmdsMap := c.mapCmdsBySlot(cmds)
+ for slot, cmds := range cmdsMap {
+ node, err := state.slotMasterNode(slot)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ continue
+ }
+ cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+
+ for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+ if attempt > 0 {
+ time.Sleep(c.retryBackoff(attempt))
+ }
+
+ failedCmds := newCmdsMap()
+ var wg sync.WaitGroup
+
+ for node, cmds := range cmdsMap {
+ wg.Add(1)
+ go func(node *clusterNode, cmds []Cmder) {
+ defer wg.Done()
+
+ cn, err := node.Client.getConn()
+ if err != nil {
+ if err == pool.ErrClosed {
+ c.mapCmdsByNode(cmds, failedCmds)
+ } else {
+ setCmdsErr(cmds, err)
+ }
+ return
+ }
+
+ err = c.txPipelineProcessCmds(node, cn, cmds, failedCmds)
+ node.Client.releaseConnStrict(cn, err)
+ }(node, cmds)
+ }
+
+ wg.Wait()
+ if len(failedCmds.m) == 0 {
+ break
+ }
+ cmdsMap = failedCmds.m
+ }
+ }
+
+ return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+ cmdsMap := make(map[int][]Cmder)
+ for _, cmd := range cmds {
+ slot := c.cmdSlot(cmd)
+ cmdsMap[slot] = append(cmdsMap[slot], cmd)
+ }
+ return cmdsMap
+}
+
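+// txPipelineProcessCmds wraps cmds in MULTI/EXEC, writes the batch to the
+// connection, and reads the queued acknowledgements before the command
+// replies themselves.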
+func (c *ClusterClient) txPipelineProcessCmds(
+ node *clusterNode, cn *pool.Conn, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ err := cn.WithWriter(c.opt.WriteTimeout, func(wr *proto.Writer) error {
+ return txPipelineWriteMulti(wr, cmds)
+ })
+ if err != nil {
+ setCmdsErr(cmds, err)
+ failedCmds.mu.Lock()
+ failedCmds.m[node] = cmds
+ failedCmds.mu.Unlock()
+ return err
+ }
+
+ err = cn.WithReader(c.opt.ReadTimeout, func(rd *proto.Reader) error {
+ err := c.txPipelineReadQueued(rd, cmds, failedCmds)
+ if err != nil {
+ setCmdsErr(cmds, err)
+ return err
+ }
+ return pipelineReadCmds(rd, cmds)
+ })
+ return err
+}
+
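+// txPipelineReadQueued consumes the status reply for MULTI and one QUEUED
+// reply per command, then validates the EXEC reply header, translating a
+// nil reply into TxFailedErr.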
+func (c *ClusterClient) txPipelineReadQueued(
+ rd *proto.Reader, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+ // Parse queued replies.
+ var statusCmd StatusCmd
+ if err := statusCmd.readReply(rd); err != nil {
+ return err
+ }
+
+ for _, cmd := range cmds {
+ err := statusCmd.readReply(rd)
+ if err == nil {
+ continue
+ }
+
+ if c.checkMovedErr(cmd, err, failedCmds) || internal.IsRedisError(err) {
+ continue
+ }
+
+ return err
+ }
+
+ // Parse number of replies.
+ line, err := rd.ReadLine()
+ if err != nil {
+ if err == Nil {
+ err = TxFailedErr
+ }
+ return err
+ }
+
+ switch line[0] {
+ case proto.ErrorReply:
+ err := proto.ParseErrorReply(line)
+ for _, cmd := range cmds {
+ if !c.checkMovedErr(cmd, err, failedCmds) {
+ break
+ }
+ }
+ return err
+ case proto.ArrayReply:
+ // ok
+ default:
+ err := fmt.Errorf("redis: expected '*', but got line %q", line)
+ return err
+ }
+
+ return nil
+}
+
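+// pubSub returns a PubSub bound to a single cluster node: the master that
+// owns the first channel's hash slot, or a random node when no channels
+// are given at connect time.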
+func (c *ClusterClient) pubSub() *PubSub {
+ var node *clusterNode
+ pubsub := &PubSub{
+ opt: c.opt.clientOptions(),
+
+ newConn: func(channels []string) (*pool.Conn, error) {
+ if node != nil {
+ panic("node != nil")
+ }
+
+ var err error
+ if len(channels) > 0 {
+ slot := hashtag.Slot(channels[0])
+ node, err = c.slotMasterNode(slot)
+ } else {
+ node, err = c.nodes.Random()
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ cn, err := node.Client.newConn()
+ if err != nil {
+ node = nil
+
+ return nil, err
+ }
+
+ return cn, nil
+ },
+ closeConn: func(cn *pool.Conn) error {
+ err := node.Client.connPool.CloseConn(cn)
+ node = nil
+ return err
+ },
+ }
+ pubsub.init()
+
+ return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.Subscribe(channels...)
+ }
+ return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(channels ...string) *PubSub {
+ pubsub := c.pubSub()
+ if len(channels) > 0 {
+ _ = pubsub.PSubscribe(channels...)
+ }
+ return pubsub
+}
+
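+// appendUniqueNode appends node to nodes unless it is already present.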
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+ for _, n := range nodes {
+ if n == node {
+ return nodes
+ }
+ }
+ return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+ for _, e := range es {
+ for _, s := range ss {
+ if s == e {
+ continue loop
+ }
+ }
+ ss = append(ss, e)
+ }
+ return ss
+}
+
+func remove(ss []string, es ...string) []string {
+ if len(es) == 0 {
+ return ss[:0]
+ }
+ for _, e := range es {
+ for i, s := range ss {
+ if s == e {
+ ss = append(ss[:i], ss[i+1:]...)
+ break
+ }
+ }
+ }
+ return ss
+}
diff --git a/vendor/github.com/go-redis/redis/cluster_commands.go b/vendor/github.com/go-redis/redis/cluster_commands.go
new file mode 100644
index 00000000000..dff62c902d0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/cluster_commands.go
@@ -0,0 +1,22 @@
+package redis
+
+import "sync/atomic"
+
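+// DBSize returns the total number of keys in the cluster by summing DBSIZE
+// across all master nodes; the counter is updated atomically because the
+// masters are visited concurrently.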
+func (c *ClusterClient) DBSize() *IntCmd {
+ cmd := NewIntCmd("dbsize")
+ var size int64
+ err := c.ForEachMaster(func(master *Client) error {
+ n, err := master.DBSize().Result()
+ if err != nil {
+ return err
+ }
+ atomic.AddInt64(&size, n)
+ return nil
+ })
+ if err != nil {
+ cmd.setErr(err)
+ return cmd
+ }
+ cmd.val = size
+ return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/command.go b/vendor/github.com/go-redis/redis/command.go
new file mode 100644
index 00000000000..dde513be2d1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/command.go
@@ -0,0 +1,1966 @@
+package redis
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+ "github.com/go-redis/redis/internal/proto"
+)
+
+type Cmder interface {
+ Name() string
+ Args() []interface{}
+ stringArg(int) string
+
+ readReply(rd *proto.Reader) error
+ setErr(error)
+
+ readTimeout() *time.Duration
+
+ Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+ for _, cmd := range cmds {
+ if cmd.Err() == nil {
+ cmd.setErr(e)
+ }
+ }
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+ for _, cmd := range cmds {
+ if err := cmd.Err(); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func writeCmd(wr *proto.Writer, cmds ...Cmder) error {
+ for _, cmd := range cmds {
+ err := wr.WriteArgs(cmd.Args())
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+ var ss []string
+ for _, arg := range cmd.Args() {
+ ss = append(ss, fmt.Sprint(arg))
+ }
+ s := strings.Join(ss, " ")
+ if err := cmd.Err(); err != nil {
+ return s + ": " + err.Error()
+ }
+ if val != nil {
+ switch vv := val.(type) {
+ case []byte:
+ return s + ": " + string(vv)
+ default:
+ return s + ": " + fmt.Sprint(val)
+ }
+ }
+ return s
+}
+
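+// cmdFirstKeyPos returns the position of the first key in the command's
+// arguments. EVAL/EVALSHA keep their keys after the numkeys argument, and
+// PUBLISH is routed by its channel name.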
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+ switch cmd.Name() {
+ case "eval", "evalsha":
+ if cmd.stringArg(2) != "0" {
+ return 3
+ }
+
+ return 0
+ case "publish":
+ return 1
+ }
+ if info == nil {
+ return 0
+ }
+ return int(info.FirstKeyPos)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+ _args []interface{}
+ err error
+
+ _readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Err() error {
+ return cmd.err
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+ return cmd._args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+ if pos < 0 || pos >= len(cmd._args) {
+ return ""
+ }
+ s, _ := cmd._args[pos].(string)
+ return s
+}
+
+func (cmd *baseCmd) Name() string {
+ if len(cmd._args) > 0 {
+		// Command name must be lowercase.
+ s := internal.ToLower(cmd.stringArg(0))
+ cmd._args[0] = s
+ return s
+ }
+ return ""
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+ return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+ cmd._readTimeout = &d
+}
+
+func (cmd *baseCmd) setErr(e error) {
+ cmd.err = e
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+ baseCmd
+
+ val interface{}
+}
+
+func NewCmd(args ...interface{}) *Cmd {
+ return &Cmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *Cmd) Val() interface{} {
+ return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) String() (string, error) {
+ if cmd.err != nil {
+ return "", cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case string:
+ return val, nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for String", val)
+ return "", err
+ }
+}
+
+func (cmd *Cmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return int(val), nil
+ case string:
+ return strconv.Atoi(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val, nil
+ case string:
+ return strconv.ParseInt(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return uint64(val), nil
+ case string:
+ return strconv.ParseUint(val, 10, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float32(val), nil
+ case string:
+ f, err := strconv.ParseFloat(val, 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return float64(val), nil
+ case string:
+ return strconv.ParseFloat(val, 64)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+ return 0, err
+ }
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+ if cmd.err != nil {
+ return false, cmd.err
+ }
+ switch val := cmd.val.(type) {
+ case int64:
+ return val != 0, nil
+ case string:
+ return strconv.ParseBool(val)
+ default:
+ err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+ return false, err
+ }
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadReply(sliceParser)
+ return cmd.err
+}
+
+// Implements proto.MultiBulkParse
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ vals := make([]interface{}, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(sliceParser)
+ if err != nil {
+ if err == Nil {
+ vals = append(vals, nil)
+ continue
+ }
+ if err, ok := err.(proto.RedisError); ok {
+ vals = append(vals, err)
+ continue
+ }
+ return nil, err
+ }
+
+		vals = append(vals, v)
+ }
+ return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+ baseCmd
+
+ val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(args ...interface{}) *SliceCmd {
+ return &SliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+ return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(sliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]interface{})
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(args ...interface{}) *StatusCmd {
+ return &StatusCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StatusCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadString()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+ baseCmd
+
+ val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(args ...interface{}) *IntCmd {
+ return &IntCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *IntCmd) Val() int64 {
+ return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadIntReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+ baseCmd
+
+ val time.Duration
+ precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(precision time.Duration, args ...interface{}) *DurationCmd {
+ return &DurationCmd{
+ baseCmd: baseCmd{_args: args},
+ precision: precision,
+ }
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+ return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+ var n int64
+ n, cmd.err = rd.ReadIntReply()
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = time.Duration(n) * cmd.precision
+ return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+ baseCmd
+
+ val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(args ...interface{}) *TimeCmd {
+ return &TimeCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+ return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(timeParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(time.Time)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func timeParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d elements, expected 2", n)
+ }
+
+ sec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ microsec, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ return time.Unix(sec, microsec*1000), nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+ baseCmd
+
+ val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(args ...interface{}) *BoolCmd {
+ return &BoolCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *BoolCmd) Val() bool {
+ return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadReply(nil)
+	// `SET key value NX` returns nil when the key already exists, but
+	// `SETNX key value` returns a bool (0/1), so convert nil to false.
+ // TODO: is this okay?
+ if cmd.err == Nil {
+ cmd.val = false
+ cmd.err = nil
+ return nil
+ }
+ if cmd.err != nil {
+ return cmd.err
+ }
+ switch v := v.(type) {
+ case int64:
+ cmd.val = v == 1
+ return nil
+ case string:
+ cmd.val = v == "OK"
+ return nil
+ default:
+ cmd.err = fmt.Errorf("got %T, wanted int64 or string", v)
+ return cmd.err
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+ baseCmd
+
+ val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(args ...interface{}) *StringCmd {
+ return &StringCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringCmd) Val() string {
+ return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+ return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+ return []byte(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ f, err := strconv.ParseFloat(cmd.Val(), 32)
+ if err != nil {
+ return 0, err
+ }
+ return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+ if cmd.err != nil {
+ return 0, cmd.err
+ }
+ return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+ if cmd.err != nil {
+ return cmd.err
+ }
+ return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadString()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+ baseCmd
+
+ val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(args ...interface{}) *FloatCmd {
+ return &FloatCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *FloatCmd) Val() float64 {
+ return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) error {
+ cmd.val, cmd.err = rd.ReadFloatReply()
+ return cmd.err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+ baseCmd
+
+ val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(args ...interface{}) *StringSliceCmd {
+ return &StringSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+ return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+ return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(stringSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]string)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func stringSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ss := make([]string, 0, n)
+ for i := int64(0); i < n; i++ {
+ switch s, err := rd.ReadString(); {
+ case err == Nil:
+ ss = append(ss, "")
+ case err != nil:
+ return nil, err
+ default:
+ ss = append(ss, s)
+ }
+ }
+ return ss, nil
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+ baseCmd
+
+ val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(args ...interface{}) *BoolSliceCmd {
+ return &BoolSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+ return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(boolSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]bool)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func boolSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ bools := make([]bool, 0, n)
+ for i := int64(0); i < n; i++ {
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ bools = append(bools, n == 1)
+ }
+ return bools, nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+ baseCmd
+
+ val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(args ...interface{}) *StringStringMapCmd {
+ return &StringStringMapCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+ return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(stringStringMapParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]string)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func stringStringMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]string, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+ baseCmd
+
+ val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(args ...interface{}) *StringIntMapCmd {
+ return &StringIntMapCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+ return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(stringIntMapParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]int64)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func stringIntMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]int64, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ n, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = n
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+ baseCmd
+
+ val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(args ...interface{}) *StringStructMapCmd {
+ return &StringStructMapCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+ return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(stringStructMapParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]struct{})
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func stringStructMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]struct{}, n)
+ for i := int64(0); i < n; i++ {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = struct{}{}
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+ ID string
+ Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+ baseCmd
+
+ val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(args ...interface{}) *XMessageSliceCmd {
+ return &XMessageSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+ return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(xMessageSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]XMessage)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func xMessageSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ msgs := make([]XMessage, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+ if err != nil {
+ return nil, err
+ }
+
+ msgs = append(msgs, XMessage{
+ ID: id,
+ Values: v.(map[string]interface{}),
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return msgs, nil
+}
+
+// Implements proto.MultiBulkParse
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]interface{}, n/2)
+ for i := int64(0); i < n; i += 2 {
+ key, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ value, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ m[key] = value
+ }
+ return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+ Stream string
+ Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+ baseCmd
+
+ val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(args ...interface{}) *XStreamSliceCmd {
+ return &XStreamSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+ return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(xStreamSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]XStream)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func xStreamSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ret := make([]XStream, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ stream, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ v, err := rd.ReadArrayReply(xMessageSliceParser)
+ if err != nil {
+ return nil, err
+ }
+
+ ret = append(ret, XStream{
+ Stream: stream,
+ Messages: v.([]XMessage),
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ret, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+ Count int64
+ Lower string
+ Higher string
+ Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+ baseCmd
+ val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(args ...interface{}) *XPendingCmd {
+ return &XPendingCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+ return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+ var info interface{}
+ info, cmd.err = rd.ReadArrayReply(xPendingParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = info.(*XPending)
+ return nil
+}
+
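+// xPendingParser parses the summary form of XPENDING: the pending count,
+// the lowest and highest pending IDs, and a per-consumer pending count map.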
+func xPendingParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ count, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ lower, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ higher, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ pending := &XPending{
+ Count: count,
+ Lower: lower,
+ Higher: higher,
+ }
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ for i := int64(0); i < n; i++ {
+ _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 2 {
+ return nil, fmt.Errorf("got %d, wanted 2", n)
+ }
+
+ consumerName, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumerPending, err := rd.ReadInt()
+ if err != nil {
+ return nil, err
+ }
+
+ if pending.Consumers == nil {
+ pending.Consumers = make(map[string]int64)
+ }
+ pending.Consumers[consumerName] = consumerPending
+
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return nil, nil
+ })
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ return pending, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+ Id string
+ Consumer string
+ Idle time.Duration
+ RetryCount int64
+}
+
+type XPendingExtCmd struct {
+ baseCmd
+ val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(args ...interface{}) *XPendingExtCmd {
+ return &XPendingExtCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+ return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+ var info interface{}
+ info, cmd.err = rd.ReadArrayReply(xPendingExtSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = info.([]XPendingExt)
+ return nil
+}
+
+func xPendingExtSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ ret := make([]XPendingExt, 0, n)
+ for i := int64(0); i < n; i++ {
+ _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 4 {
+ return nil, fmt.Errorf("got %d, wanted 4", n)
+ }
+
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ consumer, err := rd.ReadString()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ idle, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ retryCount, err := rd.ReadIntReply()
+ if err != nil && err != Nil {
+ return nil, err
+ }
+
+ ret = append(ret, XPendingExt{
+ Id: id,
+ Consumer: consumer,
+ Idle: time.Duration(idle) * time.Millisecond,
+ RetryCount: retryCount,
+ })
+ return nil, nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ }
+ return ret, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+ baseCmd
+
+ val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(args ...interface{}) *ZSliceCmd {
+ return &ZSliceCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+ return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+ return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(zSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]Z)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func zSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ zz := make([]Z, n/2)
+ for i := int64(0); i < n; i += 2 {
+ var err error
+
+ z := &zz[i/2]
+
+ z.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ z.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ return zz, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+ baseCmd
+
+ val ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(args ...interface{}) *ZWithKeyCmd {
+ return &ZWithKeyCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *ZWithKeyCmd) Val() ZWithKey {
+ return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (ZWithKey, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(zWithKeyParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(ZWithKey)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func zWithKeyParser(rd *proto.Reader, n int64) (interface{}, error) {
+ if n != 3 {
+ return nil, fmt.Errorf("got %d elements, expected 3", n)
+ }
+
+ var z ZWithKey
+ var err error
+
+ z.Key, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ z.Member, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ z.Score, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ return z, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+ baseCmd
+
+ page []string
+ cursor uint64
+
+ process func(cmd Cmder) error
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(process func(cmd Cmder) error, args ...interface{}) *ScanCmd {
+ return &ScanCmd{
+ baseCmd: baseCmd{_args: args},
+ process: process,
+ }
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+ return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+ return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+ return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) error {
+ cmd.page, cmd.cursor, cmd.err = rd.ReadScanReply()
+ return cmd.err
+}
+
+// Iterator creates a new ScanIterator.
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+ return &ScanIterator{
+ cmd: cmd,
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+ Id string
+ Addr string
+}
+
+type ClusterSlot struct {
+ Start int
+ End int
+ Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+ baseCmd
+
+ val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(args ...interface{}) *ClusterSlotsCmd {
+ return &ClusterSlotsCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+ return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(clusterSlotsParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.([]ClusterSlot)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func clusterSlotsParser(rd *proto.Reader, n int64) (interface{}, error) {
+ slots := make([]ClusterSlot, n)
+ for i := 0; i < len(slots); i++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n < 2 {
+ err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+ return nil, err
+ }
+
+ start, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ end, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes := make([]ClusterNode, n-2)
+ for j := 0; j < len(nodes); j++ {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 && n != 3 {
+ err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+ return nil, err
+ }
+
+ ip, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ port, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ nodes[j].Addr = net.JoinHostPort(ip, port)
+
+ if n == 3 {
+ id, err := rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ nodes[j].Id = id
+ }
+ }
+
+ slots[i] = ClusterSlot{
+ Start: int(start),
+ End: int(end),
+ Nodes: nodes,
+ }
+ }
+ return slots, nil
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add geospatial location.
+type GeoLocation struct {
+ Name string
+ Longitude, Latitude, Dist float64
+ GeoHash int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query geospatial index.
+type GeoRadiusQuery struct {
+ Radius float64
+ // Can be m, km, ft, or mi. Default is km.
+ Unit string
+ WithCoord bool
+ WithDist bool
+ WithGeoHash bool
+ Count int
+ // Can be ASC or DESC. Default is no sort order.
+ Sort string
+ Store string
+ StoreDist string
+}
+
+type GeoLocationCmd struct {
+ baseCmd
+
+ q *GeoRadiusQuery
+ locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
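+// NewGeoLocationCmd builds the GEORADIUS argument list from the query:
+// radius and unit first, then the optional WITHCOORD/WITHDIST/WITHHASH,
+// COUNT, sort order, STORE and STOREDIST modifiers.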
+func NewGeoLocationCmd(q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+ args = append(args, q.Radius)
+ if q.Unit != "" {
+ args = append(args, q.Unit)
+ } else {
+ args = append(args, "km")
+ }
+ if q.WithCoord {
+ args = append(args, "withcoord")
+ }
+ if q.WithDist {
+ args = append(args, "withdist")
+ }
+ if q.WithGeoHash {
+ args = append(args, "withhash")
+ }
+ if q.Count > 0 {
+ args = append(args, "count", q.Count)
+ }
+ if q.Sort != "" {
+ args = append(args, q.Sort)
+ }
+ if q.Store != "" {
+		args = append(args, "store", q.Store)
+ }
+ if q.StoreDist != "" {
+		args = append(args, "storedist", q.StoreDist)
+ }
+ return &GeoLocationCmd{
+ baseCmd: baseCmd{_args: args},
+ q: q,
+ }
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+ return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+ return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+ return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.locations = v.([]GeoLocation)
+ return nil
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ var loc GeoLocation
+ var err error
+
+ loc.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+ if q.WithDist {
+ loc.Dist, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithGeoHash {
+ loc.GeoHash, err = rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+ if q.WithCoord {
+ n, err := rd.ReadArrayLen()
+ if err != nil {
+ return nil, err
+ }
+ if n != 2 {
+ return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+ }
+
+ loc.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ loc.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return &loc, nil
+ }
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+ return func(rd *proto.Reader, n int64) (interface{}, error) {
+ locs := make([]GeoLocation, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(newGeoLocationParser(q))
+ if err != nil {
+ return nil, err
+ }
+ switch vv := v.(type) {
+ case string:
+ locs = append(locs, GeoLocation{
+ Name: vv,
+ })
+ case *GeoLocation:
+ locs = append(locs, *vv)
+ default:
+ return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+ }
+ }
+ return locs, nil
+ }
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+ Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+ baseCmd
+
+ positions []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(args ...interface{}) *GeoPosCmd {
+ return &GeoPosCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+ return cmd.positions
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+ return cmdString(cmd, cmd.positions)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(geoPosSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.positions = v.([]*GeoPos)
+ return nil
+}
+
+func geoPosSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ positions := make([]*GeoPos, 0, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(geoPosParser)
+ if err != nil {
+ if err == Nil {
+ positions = append(positions, nil)
+ continue
+ }
+ return nil, err
+ }
+ switch v := v.(type) {
+ case *GeoPos:
+ positions = append(positions, v)
+ default:
+ return nil, fmt.Errorf("got %T, expected *GeoPos", v)
+ }
+ }
+ return positions, nil
+}
+
+func geoPosParser(rd *proto.Reader, n int64) (interface{}, error) {
+ var pos GeoPos
+ var err error
+
+ pos.Longitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ pos.Latitude, err = rd.ReadFloatReply()
+ if err != nil {
+ return nil, err
+ }
+
+ return &pos, nil
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+ Name string
+ Arity int8
+ Flags []string
+ FirstKeyPos int8
+ LastKeyPos int8
+ StepCount int8
+ ReadOnly bool
+}
+
+type CommandsInfoCmd struct {
+ baseCmd
+
+ val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(args ...interface{}) *CommandsInfoCmd {
+ return &CommandsInfoCmd{
+ baseCmd: baseCmd{_args: args},
+ }
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+ return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+ return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+ return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+ var v interface{}
+ v, cmd.err = rd.ReadArrayReply(commandInfoSliceParser)
+ if cmd.err != nil {
+ return cmd.err
+ }
+ cmd.val = v.(map[string]*CommandInfo)
+ return nil
+}
+
+// Implements proto.MultiBulkParse
+func commandInfoSliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+ m := make(map[string]*CommandInfo, n)
+ for i := int64(0); i < n; i++ {
+ v, err := rd.ReadReply(commandInfoParser)
+ if err != nil {
+ return nil, err
+ }
+ vv := v.(*CommandInfo)
+ m[vv.Name] = vv
+ }
+ return m, nil
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+ var cmd CommandInfo
+ var err error
+
+ if n != 6 {
+ return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6", n)
+ }
+
+ cmd.Name, err = rd.ReadString()
+ if err != nil {
+ return nil, err
+ }
+
+ arity, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.Arity = int8(arity)
+
+ flags, err := rd.ReadReply(stringSliceParser)
+ if err != nil {
+ return nil, err
+ }
+ cmd.Flags = flags.([]string)
+
+ firstKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.FirstKeyPos = int8(firstKeyPos)
+
+ lastKeyPos, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.LastKeyPos = int8(lastKeyPos)
+
+ stepCount, err := rd.ReadIntReply()
+ if err != nil {
+ return nil, err
+ }
+ cmd.StepCount = int8(stepCount)
+
+ for _, flag := range cmd.Flags {
+ if flag == "readonly" {
+ cmd.ReadOnly = true
+ break
+ }
+ }
+
+ return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsInfoCache struct {
+ fn func() (map[string]*CommandInfo, error)
+
+ once internal.Once
+ cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func() (map[string]*CommandInfo, error)) *cmdsInfoCache {
+ return &cmdsInfoCache{
+ fn: fn,
+ }
+}
+
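+// Get loads the command table via fn on first use and caches the result
+// for subsequent calls.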
+func (c *cmdsInfoCache) Get() (map[string]*CommandInfo, error) {
+ err := c.once.Do(func() error {
+ cmds, err := c.fn()
+ if err != nil {
+ return err
+ }
+ c.cmds = cmds
+ return nil
+ })
+ return c.cmds, err
+}
diff --git a/vendor/github.com/go-redis/redis/commands.go b/vendor/github.com/go-redis/redis/commands.go
new file mode 100644
index 00000000000..653e4abe963
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/commands.go
@@ -0,0 +1,2583 @@
+package redis
+
+import (
+ "errors"
+ "io"
+ "time"
+
+ "github.com/go-redis/redis/internal"
+)
+
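+// usePrecise reports whether a duration needs millisecond precision, i.e.
+// it is sub-second or not a whole number of seconds.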
+func usePrecise(dur time.Duration) bool {
+ return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(dur time.Duration) int64 {
+ if dur > 0 && dur < time.Millisecond {
+ internal.Logf(
+ "specified duration is %s, but minimal supported value is %s",
+ dur, time.Millisecond,
+ )
+ }
+ return int64(dur / time.Millisecond)
+}
+
+func formatSec(dur time.Duration) int64 {
+ if dur > 0 && dur < time.Second {
+ internal.Logf(
+ "specified duration is %s, but minimal supported value is %s",
+ dur, time.Second,
+ )
+ }
+ return int64(dur / time.Second)
+}
+
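+// appendArgs flattens a single []string argument into individual values so
+// variadic helpers can accept either form.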
+func appendArgs(dst, src []interface{}) []interface{} {
+ if len(src) == 1 {
+ if ss, ok := src[0].([]string); ok {
+ for _, s := range ss {
+ dst = append(dst, s)
+ }
+ return dst
+ }
+ }
+
+ for _, v := range src {
+ dst = append(dst, v)
+ }
+ return dst
+}
+
+type Cmdable interface {
+ Pipeline() Pipeliner
+ Pipelined(fn func(Pipeliner) error) ([]Cmder, error)
+
+ TxPipelined(fn func(Pipeliner) error) ([]Cmder, error)
+ TxPipeline() Pipeliner
+
+ Command() *CommandsInfoCmd
+ ClientGetName() *StringCmd
+ Echo(message interface{}) *StringCmd
+ Ping() *StatusCmd
+ Quit() *StatusCmd
+ Del(keys ...string) *IntCmd
+ Unlink(keys ...string) *IntCmd
+ Dump(key string) *StringCmd
+ Exists(keys ...string) *IntCmd
+ Expire(key string, expiration time.Duration) *BoolCmd
+ ExpireAt(key string, tm time.Time) *BoolCmd
+ Keys(pattern string) *StringSliceCmd
+ Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd
+ Move(key string, db int64) *BoolCmd
+ ObjectRefCount(key string) *IntCmd
+ ObjectEncoding(key string) *StringCmd
+ ObjectIdleTime(key string) *DurationCmd
+ Persist(key string) *BoolCmd
+ PExpire(key string, expiration time.Duration) *BoolCmd
+ PExpireAt(key string, tm time.Time) *BoolCmd
+ PTTL(key string) *DurationCmd
+ RandomKey() *StringCmd
+ Rename(key, newkey string) *StatusCmd
+ RenameNX(key, newkey string) *BoolCmd
+ Restore(key string, ttl time.Duration, value string) *StatusCmd
+ RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd
+ Sort(key string, sort *Sort) *StringSliceCmd
+ SortStore(key, store string, sort *Sort) *IntCmd
+ SortInterfaces(key string, sort *Sort) *SliceCmd
+ Touch(keys ...string) *IntCmd
+ TTL(key string) *DurationCmd
+ Type(key string) *StatusCmd
+ Scan(cursor uint64, match string, count int64) *ScanCmd
+ SScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ HScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ ZScan(key string, cursor uint64, match string, count int64) *ScanCmd
+ Append(key, value string) *IntCmd
+ BitCount(key string, bitCount *BitCount) *IntCmd
+ BitOpAnd(destKey string, keys ...string) *IntCmd
+ BitOpOr(destKey string, keys ...string) *IntCmd
+ BitOpXor(destKey string, keys ...string) *IntCmd
+ BitOpNot(destKey string, key string) *IntCmd
+ BitPos(key string, bit int64, pos ...int64) *IntCmd
+ Decr(key string) *IntCmd
+ DecrBy(key string, decrement int64) *IntCmd
+ Get(key string) *StringCmd
+ GetBit(key string, offset int64) *IntCmd
+ GetRange(key string, start, end int64) *StringCmd
+ GetSet(key string, value interface{}) *StringCmd
+ Incr(key string) *IntCmd
+ IncrBy(key string, value int64) *IntCmd
+ IncrByFloat(key string, value float64) *FloatCmd
+ MGet(keys ...string) *SliceCmd
+ MSet(pairs ...interface{}) *StatusCmd
+ MSetNX(pairs ...interface{}) *BoolCmd
+ Set(key string, value interface{}, expiration time.Duration) *StatusCmd
+ SetBit(key string, offset int64, value int) *IntCmd
+ SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd
+ SetRange(key string, offset int64, value string) *IntCmd
+ StrLen(key string) *IntCmd
+ HDel(key string, fields ...string) *IntCmd
+ HExists(key, field string) *BoolCmd
+ HGet(key, field string) *StringCmd
+ HGetAll(key string) *StringStringMapCmd
+ HIncrBy(key, field string, incr int64) *IntCmd
+ HIncrByFloat(key, field string, incr float64) *FloatCmd
+ HKeys(key string) *StringSliceCmd
+ HLen(key string) *IntCmd
+ HMGet(key string, fields ...string) *SliceCmd
+ HMSet(key string, fields map[string]interface{}) *StatusCmd
+ HSet(key, field string, value interface{}) *BoolCmd
+ HSetNX(key, field string, value interface{}) *BoolCmd
+ HVals(key string) *StringSliceCmd
+ BLPop(timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPop(timeout time.Duration, keys ...string) *StringSliceCmd
+ BRPopLPush(source, destination string, timeout time.Duration) *StringCmd
+ LIndex(key string, index int64) *StringCmd
+ LInsert(key, op string, pivot, value interface{}) *IntCmd
+ LInsertBefore(key string, pivot, value interface{}) *IntCmd
+ LInsertAfter(key string, pivot, value interface{}) *IntCmd
+ LLen(key string) *IntCmd
+ LPop(key string) *StringCmd
+ LPush(key string, values ...interface{}) *IntCmd
+ LPushX(key string, value interface{}) *IntCmd
+ LRange(key string, start, stop int64) *StringSliceCmd
+ LRem(key string, count int64, value interface{}) *IntCmd
+ LSet(key string, index int64, value interface{}) *StatusCmd
+ LTrim(key string, start, stop int64) *StatusCmd
+ RPop(key string) *StringCmd
+ RPopLPush(source, destination string) *StringCmd
+ RPush(key string, values ...interface{}) *IntCmd
+ RPushX(key string, value interface{}) *IntCmd
+ SAdd(key string, members ...interface{}) *IntCmd
+ SCard(key string) *IntCmd
+ SDiff(keys ...string) *StringSliceCmd
+ SDiffStore(destination string, keys ...string) *IntCmd
+ SInter(keys ...string) *StringSliceCmd
+ SInterStore(destination string, keys ...string) *IntCmd
+ SIsMember(key string, member interface{}) *BoolCmd
+ SMembers(key string) *StringSliceCmd
+ SMembersMap(key string) *StringStructMapCmd
+ SMove(source, destination string, member interface{}) *BoolCmd
+ SPop(key string) *StringCmd
+ SPopN(key string, count int64) *StringSliceCmd
+ SRandMember(key string) *StringCmd
+ SRandMemberN(key string, count int64) *StringSliceCmd
+ SRem(key string, members ...interface{}) *IntCmd
+ SUnion(keys ...string) *StringSliceCmd
+ SUnionStore(destination string, keys ...string) *IntCmd
+ XAdd(a *XAddArgs) *StringCmd
+ XDel(stream string, ids ...string) *IntCmd
+ XLen(stream string) *IntCmd
+ XRange(stream, start, stop string) *XMessageSliceCmd
+ XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd
+ XRevRange(stream string, start, stop string) *XMessageSliceCmd
+ XRevRangeN(stream string, start, stop string, count int64) *XMessageSliceCmd
+ XRead(a *XReadArgs) *XStreamSliceCmd
+ XReadStreams(streams ...string) *XStreamSliceCmd
+ XGroupCreate(stream, group, start string) *StatusCmd
+ XGroupCreateMkStream(stream, group, start string) *StatusCmd
+ XGroupSetID(stream, group, start string) *StatusCmd
+ XGroupDestroy(stream, group string) *IntCmd
+ XGroupDelConsumer(stream, group, consumer string) *IntCmd
+ XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd
+ XAck(stream, group string, ids ...string) *IntCmd
+ XPending(stream, group string) *XPendingCmd
+ XPendingExt(a *XPendingExtArgs) *XPendingExtCmd
+ XClaim(a *XClaimArgs) *XMessageSliceCmd
+ XClaimJustID(a *XClaimArgs) *StringSliceCmd
+ XTrim(key string, maxLen int64) *IntCmd
+ XTrimApprox(key string, maxLen int64) *IntCmd
+ BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd
+ BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd
+ ZAdd(key string, members ...Z) *IntCmd
+ ZAddNX(key string, members ...Z) *IntCmd
+ ZAddXX(key string, members ...Z) *IntCmd
+ ZAddCh(key string, members ...Z) *IntCmd
+ ZAddNXCh(key string, members ...Z) *IntCmd
+ ZAddXXCh(key string, members ...Z) *IntCmd
+ ZIncr(key string, member Z) *FloatCmd
+ ZIncrNX(key string, member Z) *FloatCmd
+ ZIncrXX(key string, member Z) *FloatCmd
+ ZCard(key string) *IntCmd
+ ZCount(key, min, max string) *IntCmd
+ ZLexCount(key, min, max string) *IntCmd
+ ZIncrBy(key string, increment float64, member string) *FloatCmd
+ ZInterStore(destination string, store ZStore, keys ...string) *IntCmd
+ ZPopMax(key string, count ...int64) *ZSliceCmd
+ ZPopMin(key string, count ...int64) *ZSliceCmd
+ ZRange(key string, start, stop int64) *StringSliceCmd
+ ZRangeWithScores(key string, start, stop int64) *ZSliceCmd
+ ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+ ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+ ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+ ZRank(key, member string) *IntCmd
+ ZRem(key string, members ...interface{}) *IntCmd
+ ZRemRangeByRank(key string, start, stop int64) *IntCmd
+ ZRemRangeByScore(key, min, max string) *IntCmd
+ ZRemRangeByLex(key, min, max string) *IntCmd
+ ZRevRange(key string, start, stop int64) *StringSliceCmd
+ ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd
+ ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd
+ ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd
+ ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd
+ ZRevRank(key, member string) *IntCmd
+ ZScore(key, member string) *FloatCmd
+ ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd
+ PFAdd(key string, els ...interface{}) *IntCmd
+ PFCount(keys ...string) *IntCmd
+ PFMerge(dest string, keys ...string) *StatusCmd
+ BgRewriteAOF() *StatusCmd
+ BgSave() *StatusCmd
+ ClientKill(ipPort string) *StatusCmd
+ ClientKillByFilter(keys ...string) *IntCmd
+ ClientList() *StringCmd
+ ClientPause(dur time.Duration) *BoolCmd
+ ClientID() *IntCmd
+ ConfigGet(parameter string) *SliceCmd
+ ConfigResetStat() *StatusCmd
+ ConfigSet(parameter, value string) *StatusCmd
+ ConfigRewrite() *StatusCmd
+ DBSize() *IntCmd
+ FlushAll() *StatusCmd
+ FlushAllAsync() *StatusCmd
+ FlushDB() *StatusCmd
+ FlushDBAsync() *StatusCmd
+ Info(section ...string) *StringCmd
+ LastSave() *IntCmd
+ Save() *StatusCmd
+ Shutdown() *StatusCmd
+ ShutdownSave() *StatusCmd
+ ShutdownNoSave() *StatusCmd
+ SlaveOf(host, port string) *StatusCmd
+ Time() *TimeCmd
+ Eval(script string, keys []string, args ...interface{}) *Cmd
+ EvalSha(sha1 string, keys []string, args ...interface{}) *Cmd
+ ScriptExists(hashes ...string) *BoolSliceCmd
+ ScriptFlush() *StatusCmd
+ ScriptKill() *StatusCmd
+ ScriptLoad(script string) *StringCmd
+ DebugObject(key string) *StringCmd
+ Publish(channel string, message interface{}) *IntCmd
+ PubSubChannels(pattern string) *StringSliceCmd
+ PubSubNumSub(channels ...string) *StringIntMapCmd
+ PubSubNumPat() *IntCmd
+ ClusterSlots() *ClusterSlotsCmd
+ ClusterNodes() *StringCmd
+ ClusterMeet(host, port string) *StatusCmd
+ ClusterForget(nodeID string) *StatusCmd
+ ClusterReplicate(nodeID string) *StatusCmd
+ ClusterResetSoft() *StatusCmd
+ ClusterResetHard() *StatusCmd
+ ClusterInfo() *StringCmd
+ ClusterKeySlot(key string) *IntCmd
+ ClusterGetKeysInSlot(slot int, count int) *StringSliceCmd
+ ClusterCountFailureReports(nodeID string) *IntCmd
+ ClusterCountKeysInSlot(slot int) *IntCmd
+ ClusterDelSlots(slots ...int) *StatusCmd
+ ClusterDelSlotsRange(min, max int) *StatusCmd
+ ClusterSaveConfig() *StatusCmd
+ ClusterSlaves(nodeID string) *StringSliceCmd
+ ClusterFailover() *StatusCmd
+ ClusterAddSlots(slots ...int) *StatusCmd
+ ClusterAddSlotsRange(min, max int) *StatusCmd
+ GeoAdd(key string, geoLocation ...*GeoLocation) *IntCmd
+ GeoPos(key string, members ...string) *GeoPosCmd
+ GeoRadius(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusRO(key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMember(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoRadiusByMemberRO(key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+ GeoDist(key string, member1, member2, unit string) *FloatCmd
+ GeoHash(key string, members ...string) *StringSliceCmd
+ ReadOnly() *StatusCmd
+ ReadWrite() *StatusCmd
+ MemoryUsage(key string, samples ...int) *IntCmd
+}
+
+type StatefulCmdable interface {
+ Cmdable
+ Auth(password string) *StatusCmd
+ Select(index int) *StatusCmd
+ SwapDB(index1, index2 int) *StatusCmd
+ ClientSetName(name string) *BoolCmd
+}
+
+var _ Cmdable = (*Client)(nil)
+var _ Cmdable = (*Tx)(nil)
+var _ Cmdable = (*Ring)(nil)
+var _ Cmdable = (*ClusterClient)(nil)
+
+type cmdable struct {
+ process func(cmd Cmder) error
+}
+
+func (c *cmdable) setProcessor(fn func(Cmder) error) {
+ c.process = fn
+}
+
+type statefulCmdable struct {
+ cmdable
+ process func(cmd Cmder) error
+}
+
+func (c *statefulCmdable) setProcessor(fn func(Cmder) error) {
+ c.process = fn
+ c.cmdable.setProcessor(fn)
+}
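+
+// Illustrative note, not upstream documentation: every command method below
+// follows the same shape. It builds a typed command value, hands it to
+// c.process — the function that Client, Tx, Ring and ClusterClient install
+// via setProcessor — and returns the command so the caller can inspect the
+// result:
+//
+//	cmd := NewStatusCmd("ping")
+//	c.process(cmd) // writes PING, fills in the value or the error
+//	val, err := cmd.Result()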
+
+//------------------------------------------------------------------------------
+
+func (c *statefulCmdable) Auth(password string) *StatusCmd {
+ cmd := NewStatusCmd("auth", password)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Echo(message interface{}) *StringCmd {
+ cmd := NewStringCmd("echo", message)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Ping() *StatusCmd {
+ cmd := NewStatusCmd("ping")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Wait(numSlaves int, timeout time.Duration) *IntCmd {
+ cmd := NewIntCmd("wait", numSlaves, int(timeout/time.Millisecond))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Quit() *StatusCmd {
+ panic("not implemented")
+}
+
+func (c *statefulCmdable) Select(index int) *StatusCmd {
+ cmd := NewStatusCmd("select", index)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *statefulCmdable) SwapDB(index1, index2 int) *StatusCmd {
+ cmd := NewStatusCmd("swapdb", index1, index2)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Command() *CommandsInfoCmd {
+ cmd := NewCommandsInfoCmd("command")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Del(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "del"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Unlink(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "unlink"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Dump(key string) *StringCmd {
+ cmd := NewStringCmd("dump", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Exists(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "exists"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Expire(key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("expire", key, formatSec(expiration))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd("expireat", key, tm.Unix())
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Keys(pattern string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("keys", pattern)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Migrate(host, port, key string, db int64, timeout time.Duration) *StatusCmd {
+ cmd := NewStatusCmd(
+ "migrate",
+ host,
+ port,
+ key,
+ db,
+ formatMs(timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Move(key string, db int64) *BoolCmd {
+ cmd := NewBoolCmd("move", key, db)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ObjectRefCount(key string) *IntCmd {
+ cmd := NewIntCmd("object", "refcount", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ObjectEncoding(key string) *StringCmd {
+ cmd := NewStringCmd("object", "encoding", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ObjectIdleTime(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "object", "idletime", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Persist(key string) *BoolCmd {
+ cmd := NewBoolCmd("persist", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PExpire(key string, expiration time.Duration) *BoolCmd {
+ cmd := NewBoolCmd("pexpire", key, formatMs(expiration))
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PExpireAt(key string, tm time.Time) *BoolCmd {
+ cmd := NewBoolCmd(
+ "pexpireat",
+ key,
+ tm.UnixNano()/int64(time.Millisecond),
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PTTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Millisecond, "pttl", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RandomKey() *StringCmd {
+ cmd := NewStringCmd("randomkey")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Rename(key, newkey string) *StatusCmd {
+ cmd := NewStatusCmd("rename", key, newkey)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RenameNX(key, newkey string) *BoolCmd {
+ cmd := NewBoolCmd("renamenx", key, newkey)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Restore(key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "restore",
+ key,
+ formatMs(ttl),
+ value,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RestoreReplace(key string, ttl time.Duration, value string) *StatusCmd {
+ cmd := NewStatusCmd(
+ "restore",
+ key,
+ formatMs(ttl),
+ value,
+ "replace",
+ )
+ c.process(cmd)
+ return cmd
+}
+
+type Sort struct {
+ By string
+ Offset, Count int64
+ Get []string
+ Order string
+ Alpha bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+ args := []interface{}{"sort", key}
+ if sort.By != "" {
+ args = append(args, "by", sort.By)
+ }
+ if sort.Offset != 0 || sort.Count != 0 {
+ args = append(args, "limit", sort.Offset, sort.Count)
+ }
+ for _, get := range sort.Get {
+ args = append(args, "get", get)
+ }
+ if sort.Order != "" {
+ args = append(args, sort.Order)
+ }
+ if sort.Alpha {
+ args = append(args, "alpha")
+ }
+ return args
+}
+
+func (c *cmdable) Sort(key string, sort *Sort) *StringSliceCmd {
+ cmd := NewStringSliceCmd(sort.args(key)...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SortStore(key, store string, sort *Sort) *IntCmd {
+ args := sort.args(key)
+ if store != "" {
+ args = append(args, "store", store)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SortInterfaces(key string, sort *Sort) *SliceCmd {
+ cmd := NewSliceCmd(sort.args(key)...)
+ c.process(cmd)
+ return cmd
+}
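+
+// exampleSort is an illustrative sketch, not upstream go-redis code: Sort.args
+// assembles SORT key [BY pattern] [LIMIT offset count] [GET pattern ...]
+// [ASC|DESC] [ALPHA] in exactly that order, so this call sends
+// "sort mylist limit 0 10 desc alpha".
+func exampleSort(c Cmdable) ([]string, error) {
+	return c.Sort("mylist", &Sort{Count: 10, Order: "DESC", Alpha: true}).Result()
+}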
+
+func (c *cmdable) Touch(keys ...string) *IntCmd {
+ args := make([]interface{}, len(keys)+1)
+ args[0] = "touch"
+ for i, key := range keys {
+ args[i+1] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) TTL(key string) *DurationCmd {
+ cmd := NewDurationCmd(time.Second, "ttl", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Type(key string) *StatusCmd {
+ cmd := NewStatusCmd("type", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Scan(cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"scan", cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
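+
+// exampleScanAll is an illustrative sketch, not upstream go-redis code: SCAN
+// is cursor-based, so callers loop until the server hands back cursor 0. The
+// match pattern and count here are assumptions for the example.
+func exampleScanAll(c Cmdable) ([]string, error) {
+	var all []string
+	var cursor uint64
+	for {
+		keys, next, err := c.Scan(cursor, "user:*", 100).Result()
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, keys...)
+		if next == 0 {
+			return all, nil
+		}
+		cursor = next
+	}
+}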
+
+func (c *cmdable) SScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"sscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"hscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZScan(key string, cursor uint64, match string, count int64) *ScanCmd {
+ args := []interface{}{"zscan", key, cursor}
+ if match != "" {
+ args = append(args, "match", match)
+ }
+ if count > 0 {
+ args = append(args, "count", count)
+ }
+ cmd := NewScanCmd(c.process, args...)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) Append(key, value string) *IntCmd {
+ cmd := NewIntCmd("append", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+type BitCount struct {
+ Start, End int64
+}
+
+func (c *cmdable) BitCount(key string, bitCount *BitCount) *IntCmd {
+ args := []interface{}{"bitcount", key}
+ if bitCount != nil {
+ args = append(
+ args,
+ bitCount.Start,
+ bitCount.End,
+ )
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
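+
+// exampleBitCount is an illustrative sketch, not upstream go-redis code: a nil
+// *BitCount counts set bits across the whole string, while Start/End restrict
+// the count to a byte range (inclusive; negative offsets count from the end).
+func exampleBitCount(c Cmdable) (int64, error) {
+	return c.BitCount("flags", &BitCount{Start: 0, End: -1}).Result()
+}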
+
+func (c *cmdable) bitOp(op, destKey string, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "bitop"
+ args[1] = op
+ args[2] = destKey
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BitOpAnd(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("and", destKey, keys...)
+}
+
+func (c *cmdable) BitOpOr(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("or", destKey, keys...)
+}
+
+func (c *cmdable) BitOpXor(destKey string, keys ...string) *IntCmd {
+ return c.bitOp("xor", destKey, keys...)
+}
+
+func (c *cmdable) BitOpNot(destKey string, key string) *IntCmd {
+ return c.bitOp("not", destKey, key)
+}
+
+func (c *cmdable) BitPos(key string, bit int64, pos ...int64) *IntCmd {
+ args := make([]interface{}, 3+len(pos))
+ args[0] = "bitpos"
+ args[1] = key
+ args[2] = bit
+ switch len(pos) {
+ case 0:
+ case 1:
+ args[3] = pos[0]
+ case 2:
+ args[3] = pos[0]
+ args[4] = pos[1]
+ default:
+ panic("too many arguments")
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Decr(key string) *IntCmd {
+ cmd := NewIntCmd("decr", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) DecrBy(key string, decrement int64) *IntCmd {
+ cmd := NewIntCmd("decrby", key, decrement)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c *cmdable) Get(key string) *StringCmd {
+ cmd := NewStringCmd("get", key)
+ c.process(cmd)
+ return cmd
+}
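+
+// exampleGet is an illustrative sketch, not upstream go-redis code: it shows
+// the redis.Nil convention from the comment above — a missing key surfaces as
+// the sentinel error Nil, not as an empty value.
+func exampleGet(c Cmdable) (string, bool, error) {
+	val, err := c.Get("some-key").Result()
+	if err == Nil {
+		return "", false, nil // key does not exist
+	}
+	if err != nil {
+		return "", false, err
+	}
+	return val, true, nil
+}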
+
+func (c *cmdable) GetBit(key string, offset int64) *IntCmd {
+ cmd := NewIntCmd("getbit", key, offset)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GetRange(key string, start, end int64) *StringCmd {
+ cmd := NewStringCmd("getrange", key, start, end)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) GetSet(key string, value interface{}) *StringCmd {
+ cmd := NewStringCmd("getset", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) Incr(key string) *IntCmd {
+ cmd := NewIntCmd("incr", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) IncrBy(key string, value int64) *IntCmd {
+ cmd := NewIntCmd("incrby", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) IncrByFloat(key string, value float64) *FloatCmd {
+ cmd := NewFloatCmd("incrbyfloat", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) MGet(keys ...string) *SliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "mget"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) MSet(pairs ...interface{}) *StatusCmd {
+ args := make([]interface{}, 1, 1+len(pairs))
+ args[0] = "mset"
+ args = appendArgs(args, pairs)
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) MSetNX(pairs ...interface{}) *BoolCmd {
+ args := make([]interface{}, 1, 1+len(pairs))
+ args[0] = "msetnx"
+ args = appendArgs(args, pairs)
+ cmd := NewBoolCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration]` command.
+//
+// Use expiration for `SETEX`-like behavior.
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) Set(key string, value interface{}, expiration time.Duration) *StatusCmd {
+ args := make([]interface{}, 3, 4)
+ args[0] = "set"
+ args[1] = key
+ args[2] = value
+ if expiration > 0 {
+ if usePrecise(expiration) {
+ args = append(args, "px", formatMs(expiration))
+ } else {
+ args = append(args, "ex", formatSec(expiration))
+ }
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
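+
+// exampleSet is an illustrative sketch, not upstream go-redis code: a zero
+// expiration stores the key without a TTL, whole seconds map to EX, and
+// sub-second durations map to PX, as implemented above.
+func exampleSet(c Cmdable) error {
+	if err := c.Set("k", "v", 0).Err(); err != nil { // SET k v
+		return err
+	}
+	if err := c.Set("k", "v", 10*time.Second).Err(); err != nil { // SET k v ex 10
+		return err
+	}
+	return c.Set("k", "v", 250*time.Millisecond).Err() // SET k v px 250
+}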
+
+func (c *cmdable) SetBit(key string, offset int64, value int) *IntCmd {
+ cmd := NewIntCmd(
+ "setbit",
+ key,
+ offset,
+ value,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetNX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ if expiration == 0 {
+ // Use old `SETNX` to support old Redis versions.
+ cmd = NewBoolCmd("setnx", key, value)
+ } else {
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "nx")
+ } else {
+ cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "nx")
+ }
+ }
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+func (c *cmdable) SetXX(key string, value interface{}, expiration time.Duration) *BoolCmd {
+ var cmd *BoolCmd
+ if expiration == 0 {
+ cmd = NewBoolCmd("set", key, value, "xx")
+ } else {
+ if usePrecise(expiration) {
+ cmd = NewBoolCmd("set", key, value, "px", formatMs(expiration), "xx")
+ } else {
+ cmd = NewBoolCmd("set", key, value, "ex", formatSec(expiration), "xx")
+ }
+ }
+ c.process(cmd)
+ return cmd
+}
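+
+// exampleGuardedSet is an illustrative sketch, not upstream go-redis code,
+// contrasting the two guarded writes above: SetNX only creates a key and
+// SetXX only overwrites an existing one; the BoolCmd reports whether the
+// write actually happened.
+func exampleGuardedSet(c Cmdable) (created, updated bool, err error) {
+	created, err = c.SetNX("lock", "owner-1", 30*time.Second).Result()
+	if err != nil {
+		return
+	}
+	updated, err = c.SetXX("lock", "owner-2", 30*time.Second).Result()
+	return
+}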
+
+func (c *cmdable) SetRange(key string, offset int64, value string) *IntCmd {
+ cmd := NewIntCmd("setrange", key, offset, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) StrLen(key string) *IntCmd {
+ cmd := NewIntCmd("strlen", key)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) HDel(key string, fields ...string) *IntCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hdel"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HExists(key, field string) *BoolCmd {
+ cmd := NewBoolCmd("hexists", key, field)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HGet(key, field string) *StringCmd {
+ cmd := NewStringCmd("hget", key, field)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HGetAll(key string) *StringStringMapCmd {
+ cmd := NewStringStringMapCmd("hgetall", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HIncrBy(key, field string, incr int64) *IntCmd {
+ cmd := NewIntCmd("hincrby", key, field, incr)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HIncrByFloat(key, field string, incr float64) *FloatCmd {
+ cmd := NewFloatCmd("hincrbyfloat", key, field, incr)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HKeys(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("hkeys", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HLen(key string) *IntCmd {
+ cmd := NewIntCmd("hlen", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HMGet(key string, fields ...string) *SliceCmd {
+ args := make([]interface{}, 2+len(fields))
+ args[0] = "hmget"
+ args[1] = key
+ for i, field := range fields {
+ args[2+i] = field
+ }
+ cmd := NewSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HMSet(key string, fields map[string]interface{}) *StatusCmd {
+ args := make([]interface{}, 2+len(fields)*2)
+ args[0] = "hmset"
+ args[1] = key
+ i := 2
+ for k, v := range fields {
+ args[i] = k
+ args[i+1] = v
+ i += 2
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HSet(key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd("hset", key, field, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HSetNX(key, field string, value interface{}) *BoolCmd {
+ cmd := NewBoolCmd("hsetnx", key, field, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) HVals(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("hvals", key)
+ c.process(cmd)
+ return cmd
+}
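+
+// exampleHash is an illustrative sketch, not upstream go-redis code: HMSet
+// flattens the map into field/value pairs on the wire, and HGetAll reads the
+// whole hash back as a map[string]string.
+func exampleHash(c Cmdable) (map[string]string, error) {
+	fields := map[string]interface{}{"name": "Ada", "age": 36}
+	if err := c.HMSet("user:1", fields).Err(); err != nil {
+		return nil, err
+	}
+	return c.HGetAll("user:1").Result()
+}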
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BLPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "blpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(timeout)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BRPop(timeout time.Duration, keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "brpop"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+	args[len(args)-1] = formatSec(timeout)
+ cmd := NewStringSliceCmd(args...)
+ cmd.setReadTimeout(timeout)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BRPopLPush(source, destination string, timeout time.Duration) *StringCmd {
+ cmd := NewStringCmd(
+ "brpoplpush",
+ source,
+ destination,
+ formatSec(timeout),
+ )
+ cmd.setReadTimeout(timeout)
+ c.process(cmd)
+ return cmd
+}
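+
+// exampleBLPop is an illustrative sketch, not upstream go-redis code: the
+// blocking list commands above append the timeout as the final argument and
+// also widen the connection's read deadline via setReadTimeout, so a
+// legitimate wait is not misreported as a network timeout.
+func exampleBLPop(c Cmdable) (key, value string, err error) {
+	// The reply is a [key, value] pair; an expired wait surfaces as the Nil error.
+	res, err := c.BLPop(5*time.Second, "queue:a", "queue:b").Result()
+	if err != nil {
+		return "", "", err
+	}
+	return res[0], res[1], nil
+}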
+
+func (c *cmdable) LIndex(key string, index int64) *StringCmd {
+ cmd := NewStringCmd("lindex", key, index)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LInsert(key, op string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, op, pivot, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LInsertBefore(key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, "before", pivot, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LInsertAfter(key string, pivot, value interface{}) *IntCmd {
+ cmd := NewIntCmd("linsert", key, "after", pivot, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LLen(key string) *IntCmd {
+ cmd := NewIntCmd("llen", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LPop(key string) *StringCmd {
+ cmd := NewStringCmd("lpop", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LPush(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "lpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LPushX(key string, value interface{}) *IntCmd {
+ cmd := NewIntCmd("lpushx", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd(
+ "lrange",
+ key,
+ start,
+ stop,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LRem(key string, count int64, value interface{}) *IntCmd {
+ cmd := NewIntCmd("lrem", key, count, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LSet(key string, index int64, value interface{}) *StatusCmd {
+ cmd := NewStatusCmd("lset", key, index, value)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) LTrim(key string, start, stop int64) *StatusCmd {
+ cmd := NewStatusCmd(
+ "ltrim",
+ key,
+ start,
+ stop,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPop(key string) *StringCmd {
+ cmd := NewStringCmd("rpop", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPopLPush(source, destination string) *StringCmd {
+ cmd := NewStringCmd("rpoplpush", source, destination)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPush(key string, values ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(values))
+ args[0] = "rpush"
+ args[1] = key
+ args = appendArgs(args, values)
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) RPushX(key string, value interface{}) *IntCmd {
+ cmd := NewIntCmd("rpushx", key, value)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) SAdd(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "sadd"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SCard(key string) *IntCmd {
+ cmd := NewIntCmd("scard", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SDiff(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sdiff"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SDiffStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sdiffstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SInter(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sinter"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SInterStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sinterstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SIsMember(key string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd("sismember", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a slice.
+func (c *cmdable) SMembers(key string) *StringSliceCmd {
+ cmd := NewStringSliceCmd("smembers", key)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SMEMBERS key` command output as a map.
+func (c *cmdable) SMembersMap(key string) *StringStructMapCmd {
+ cmd := NewStringStructMapCmd("smembers", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SMove(source, destination string, member interface{}) *BoolCmd {
+ cmd := NewBoolCmd("smove", source, destination, member)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SPOP key` command.
+func (c *cmdable) SPop(key string) *StringCmd {
+ cmd := NewStringCmd("spop", key)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SPOP key count` command.
+func (c *cmdable) SPopN(key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("spop", key, count)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key` command.
+func (c *cmdable) SRandMember(key string) *StringCmd {
+ cmd := NewStringCmd("srandmember", key)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `SRANDMEMBER key count` command.
+func (c *cmdable) SRandMemberN(key string, count int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("srandmember", key, count)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SRem(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "srem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SUnion(keys ...string) *StringSliceCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "sunion"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) SUnionStore(destination string, keys ...string) *IntCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "sunionstore"
+ args[1] = destination
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type XAddArgs struct {
+ Stream string
+ MaxLen int64 // MAXLEN N
+ MaxLenApprox int64 // MAXLEN ~ N
+ ID string
+ Values map[string]interface{}
+}
+
+func (c *cmdable) XAdd(a *XAddArgs) *StringCmd {
+ args := make([]interface{}, 0, 6+len(a.Values)*2)
+ args = append(args, "xadd")
+ args = append(args, a.Stream)
+ if a.MaxLen > 0 {
+ args = append(args, "maxlen", a.MaxLen)
+ } else if a.MaxLenApprox > 0 {
+ args = append(args, "maxlen", "~", a.MaxLenApprox)
+ }
+ if a.ID != "" {
+ args = append(args, a.ID)
+ } else {
+ args = append(args, "*")
+ }
+ for k, v := range a.Values {
+ args = append(args, k)
+ args = append(args, v)
+ }
+
+ cmd := NewStringCmd(args...)
+ c.process(cmd)
+ return cmd
+}
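+
+// exampleXAdd is an illustrative sketch, not upstream go-redis code: leaving
+// ID empty sends the "*" placeholder so the server auto-generates the entry
+// ID, and MaxLenApprox trims the stream with "MAXLEN ~ N".
+func exampleXAdd(c Cmdable) (string, error) {
+	return c.XAdd(&XAddArgs{
+		Stream:       "events",
+		MaxLenApprox: 1000,
+		Values:       map[string]interface{}{"type": "click", "page": "/home"},
+	}).Result()
+}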
+
+func (c *cmdable) XDel(stream string, ids ...string) *IntCmd {
+ args := []interface{}{"xdel", stream}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XLen(stream string) *IntCmd {
+ cmd := NewIntCmd("xlen", stream)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRange(stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrange", stream, start, stop)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrange", stream, start, stop, "count", count)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRevRange(stream, start, stop string) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XRevRangeN(stream, start, stop string, count int64) *XMessageSliceCmd {
+ cmd := NewXMessageSliceCmd("xrevrange", stream, start, stop, "count", count)
+ c.process(cmd)
+ return cmd
+}
+
+type XReadArgs struct {
+ Streams []string
+ Count int64
+ Block time.Duration
+}
+
+func (c *cmdable) XRead(a *XReadArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 5+len(a.Streams))
+ args = append(args, "xread")
+ if a.Count > 0 {
+ args = append(args, "count")
+ args = append(args, a.Count)
+ }
+ if a.Block >= 0 {
+ args = append(args, "block")
+ args = append(args, int64(a.Block/time.Millisecond))
+ }
+ args = append(args, "streams")
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XReadStreams(streams ...string) *XStreamSliceCmd {
+ return c.XRead(&XReadArgs{
+ Streams: streams,
+ Block: -1,
+ })
+}
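+
+// exampleXRead is an illustrative sketch, not upstream go-redis code: Streams
+// lists every stream name first and then one ID per stream, so reading two
+// streams takes four entries; a negative Block means "do not block".
+func exampleXRead(c Cmdable) ([]XStream, error) {
+	return c.XRead(&XReadArgs{
+		Streams: []string{"events", "audit", "0", "0"},
+		Count:   10,
+		Block:   -1,
+	}).Result()
+}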
+
+func (c *cmdable) XGroupCreate(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "create", stream, group, start)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupCreateMkStream(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "create", stream, group, start, "mkstream")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupSetID(stream, group, start string) *StatusCmd {
+ cmd := NewStatusCmd("xgroup", "setid", stream, group, start)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupDestroy(stream, group string) *IntCmd {
+ cmd := NewIntCmd("xgroup", "destroy", stream, group)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XGroupDelConsumer(stream, group, consumer string) *IntCmd {
+ cmd := NewIntCmd("xgroup", "delconsumer", stream, group, consumer)
+ c.process(cmd)
+ return cmd
+}
+
+type XReadGroupArgs struct {
+ Group string
+ Consumer string
+	// List of streams and IDs, e.g. stream1 stream2 id1 id2.
+ Streams []string
+ Count int64
+ Block time.Duration
+ NoAck bool
+}
+
+func (c *cmdable) XReadGroup(a *XReadGroupArgs) *XStreamSliceCmd {
+ args := make([]interface{}, 0, 8+len(a.Streams))
+ args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+ if a.Count > 0 {
+ args = append(args, "count", a.Count)
+ }
+ if a.Block >= 0 {
+ args = append(args, "block", int64(a.Block/time.Millisecond))
+ }
+ if a.NoAck {
+ args = append(args, "noack")
+ }
+ args = append(args, "streams")
+ for _, s := range a.Streams {
+ args = append(args, s)
+ }
+
+ cmd := NewXStreamSliceCmd(args...)
+ if a.Block >= 0 {
+ cmd.setReadTimeout(a.Block)
+ }
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XAck(stream, group string, ids ...string) *IntCmd {
+ args := []interface{}{"xack", stream, group}
+ for _, id := range ids {
+ args = append(args, id)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
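+
+// exampleConsumerGroup is an illustrative sketch, not upstream go-redis code:
+// it wires the group commands above together — create the group (MKSTREAM
+// creates the stream if needed), read new messages with the ">" ID, and
+// acknowledge each one.
+func exampleConsumerGroup(c Cmdable) error {
+	if err := c.XGroupCreateMkStream("events", "workers", "$").Err(); err != nil {
+		return err
+	}
+	streams, err := c.XReadGroup(&XReadGroupArgs{
+		Group:    "workers",
+		Consumer: "worker-1",
+		Streams:  []string{"events", ">"},
+		Block:    -1,
+	}).Result()
+	if err != nil {
+		return err
+	}
+	for _, stream := range streams {
+		for _, msg := range stream.Messages {
+			if err := c.XAck("events", "workers", msg.ID).Err(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}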
+
+func (c *cmdable) XPending(stream, group string) *XPendingCmd {
+ cmd := NewXPendingCmd("xpending", stream, group)
+ c.process(cmd)
+ return cmd
+}
+
+type XPendingExtArgs struct {
+ Stream string
+ Group string
+ Start string
+ End string
+ Count int64
+ Consumer string
+}
+
+func (c *cmdable) XPendingExt(a *XPendingExtArgs) *XPendingExtCmd {
+ args := make([]interface{}, 0, 7)
+ args = append(args, "xpending", a.Stream, a.Group, a.Start, a.End, a.Count)
+ if a.Consumer != "" {
+ args = append(args, a.Consumer)
+ }
+ cmd := NewXPendingExtCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+type XClaimArgs struct {
+ Stream string
+ Group string
+ Consumer string
+ MinIdle time.Duration
+ Messages []string
+}
+
+func (c *cmdable) XClaim(a *XClaimArgs) *XMessageSliceCmd {
+ args := xClaimArgs(a)
+ cmd := NewXMessageSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XClaimJustID(a *XClaimArgs) *StringSliceCmd {
+ args := xClaimArgs(a)
+ args = append(args, "justid")
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+ args := make([]interface{}, 0, 4+len(a.Messages))
+ args = append(args,
+ "xclaim",
+ a.Stream,
+ a.Group, a.Consumer,
+ int64(a.MinIdle/time.Millisecond))
+ for _, id := range a.Messages {
+ args = append(args, id)
+ }
+ return args
+}
+
+func (c *cmdable) XTrim(key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd("xtrim", key, "maxlen", maxLen)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) XTrimApprox(key string, maxLen int64) *IntCmd {
+ cmd := NewIntCmd("xtrim", key, "maxlen", "~", maxLen)
+ c.process(cmd)
+ return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents a sorted set member.
+type Z struct {
+ Score float64
+ Member interface{}
+}
+
+// ZWithKey represents a sorted set member, including the name of the key where it was popped.
+type ZWithKey struct {
+ Z
+ Key string
+}
+
+// ZStore is used as an arg to ZInterStore and ZUnionStore.
+type ZStore struct {
+ Weights []float64
+ // Can be SUM, MIN or MAX.
+ Aggregate string
+}
+
+// Redis `BZPOPMAX key [key ...] timeout` command.
+func (c *cmdable) BZPopMax(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmax"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewZWithKeyCmd(args...)
+ cmd.setReadTimeout(timeout)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `BZPOPMIN key [key ...] timeout` command.
+func (c *cmdable) BZPopMin(timeout time.Duration, keys ...string) *ZWithKeyCmd {
+ args := make([]interface{}, 1+len(keys)+1)
+ args[0] = "bzpopmin"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ args[len(args)-1] = formatSec(timeout)
+ cmd := NewZWithKeyCmd(args...)
+ cmd.setReadTimeout(timeout)
+ c.process(cmd)
+ return cmd
+}
+
+// zAdd fills the score/member pairs into a, starting at offset n, and issues
+// the prepared ZADD command.
+func (c *cmdable) zAdd(a []interface{}, n int, members ...Z) *IntCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewIntCmd(a...)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `ZADD key score member [score member ...]` command.
+func (c *cmdable) ZAdd(key string, members ...Z) *IntCmd {
+ const n = 2
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1] = "zadd", key
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX score member [score member ...]` command.
+func (c *cmdable) ZAddNX(key string, members ...Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "nx"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX score member [score member ...]` command.
+func (c *cmdable) ZAddXX(key string, members ...Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "xx"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key CH score member [score member ...]` command.
+func (c *cmdable) ZAddCh(key string, members ...Z) *IntCmd {
+ const n = 3
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2] = "zadd", key, "ch"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key NX CH score member [score member ...]` command.
+func (c *cmdable) ZAddNXCh(key string, members ...Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "nx", "ch"
+ return c.zAdd(a, n, members...)
+}
+
+// Redis `ZADD key XX CH score member [score member ...]` command.
+func (c *cmdable) ZAddXXCh(key string, members ...Z) *IntCmd {
+ const n = 4
+ a := make([]interface{}, n+2*len(members))
+ a[0], a[1], a[2], a[3] = "zadd", key, "xx", "ch"
+ return c.zAdd(a, n, members...)
+}
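+
+// exampleZAdd is an illustrative sketch, not upstream go-redis code: the ZAdd
+// variants above differ only in the flags spliced in ahead of the
+// score/member pairs — NX adds only new members, XX updates only existing
+// ones, and CH makes the reply count changed members instead of added ones.
+func exampleZAdd(c Cmdable) (int64, error) {
+	return c.ZAddNX("leaderboard", Z{Score: 42, Member: "ada"}).Result()
+}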
+
+// zIncr mirrors zAdd for the INCR variants, returning the member's updated score.
+func (c *cmdable) zIncr(a []interface{}, n int, members ...Z) *FloatCmd {
+ for i, m := range members {
+ a[n+2*i] = m.Score
+ a[n+2*i+1] = m.Member
+ }
+ cmd := NewFloatCmd(a...)
+ c.process(cmd)
+ return cmd
+}
+
+// Redis `ZADD key INCR score member` command.
+func (c *cmdable) ZIncr(key string, member Z) *FloatCmd {
+ const n = 3
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2] = "zadd", key, "incr"
+ return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key NX INCR score member` command.
+func (c *cmdable) ZIncrNX(key string, member Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "nx"
+ return c.zIncr(a, n, member)
+}
+
+// Redis `ZADD key XX INCR score member` command.
+func (c *cmdable) ZIncrXX(key string, member Z) *FloatCmd {
+ const n = 4
+ a := make([]interface{}, n+2)
+ a[0], a[1], a[2], a[3] = "zadd", key, "incr", "xx"
+ return c.zIncr(a, n, member)
+}
+
+func (c *cmdable) ZCard(key string) *IntCmd {
+ cmd := NewIntCmd("zcard", key)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zcount", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZLexCount(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zlexcount", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZIncrBy(key string, increment float64, member string) *FloatCmd {
+ cmd := NewFloatCmd("zincrby", key, increment, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZInterStore(destination string, store ZStore, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zinterstore"
+ args[1] = destination
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZPopMax(key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmax",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZPopMin(key string, count ...int64) *ZSliceCmd {
+ args := []interface{}{
+ "zpopmin",
+ key,
+ }
+
+ switch len(count) {
+ case 0:
+ break
+ case 1:
+ args = append(args, count[0])
+ default:
+ panic("too many arguments")
+ }
+
+ cmd := NewZSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) zRange(key string, start, stop int64, withScores bool) *StringSliceCmd {
+ args := []interface{}{
+ "zrange",
+ key,
+ start,
+ stop,
+ }
+ if withScores {
+ args = append(args, "withscores")
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRange(key string, start, stop int64) *StringSliceCmd {
+ return c.zRange(key, start, stop, false)
+}
+
+func (c *cmdable) ZRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd("zrange", key, start, stop, "withscores")
+ c.process(cmd)
+ return cmd
+}
+
+type ZRangeBy struct {
+ Min, Max string
+ Offset, Count int64
+}
+
+func (c *cmdable) zRangeBy(zcmd, key string, opt ZRangeBy, withScores bool) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Min, opt.Max}
+ if withScores {
+ args = append(args, "withscores")
+ }
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy("zrangebyscore", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRangeBy("zrangebylex", key, opt, false)
+}
+
+func (c *cmdable) ZRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
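+
+// exampleZRangeByScore is an illustrative sketch, not upstream go-redis code:
+// ZRangeBy.Min and .Max use Redis score syntax, so "-inf" and "+inf" are open
+// ends and a "(" prefix makes a bound exclusive.
+func exampleZRangeByScore(c Cmdable) ([]string, error) {
+	return c.ZRangeByScore("leaderboard", ZRangeBy{Min: "(10", Max: "+inf"}).Result()
+}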
+
+func (c *cmdable) ZRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("zrank", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRem(key string, members ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(members))
+ args[0] = "zrem"
+ args[1] = key
+ args = appendArgs(args, members)
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRemRangeByRank(key string, start, stop int64) *IntCmd {
+ cmd := NewIntCmd(
+ "zremrangebyrank",
+ key,
+ start,
+ stop,
+ )
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRemRangeByScore(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zremrangebyscore", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRemRangeByLex(key, min, max string) *IntCmd {
+ cmd := NewIntCmd("zremrangebylex", key, min, max)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRange(key string, start, stop int64) *StringSliceCmd {
+ cmd := NewStringSliceCmd("zrevrange", key, start, stop)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRangeWithScores(key string, start, stop int64) *ZSliceCmd {
+ cmd := NewZSliceCmd("zrevrange", key, start, stop, "withscores")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) zRevRangeBy(zcmd, key string, opt ZRangeBy) *StringSliceCmd {
+ args := []interface{}{zcmd, key, opt.Max, opt.Min}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewStringSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRangeByScore(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy("zrevrangebyscore", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByLex(key string, opt ZRangeBy) *StringSliceCmd {
+ return c.zRevRangeBy("zrevrangebylex", key, opt)
+}
+
+func (c *cmdable) ZRevRangeByScoreWithScores(key string, opt ZRangeBy) *ZSliceCmd {
+ args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+ if opt.Offset != 0 || opt.Count != 0 {
+ args = append(
+ args,
+ "limit",
+ opt.Offset,
+ opt.Count,
+ )
+ }
+ cmd := NewZSliceCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZRevRank(key, member string) *IntCmd {
+ cmd := NewIntCmd("zrevrank", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZScore(key, member string) *FloatCmd {
+ cmd := NewFloatCmd("zscore", key, member)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ZUnionStore(dest string, store ZStore, keys ...string) *IntCmd {
+ args := make([]interface{}, 3+len(keys))
+ args[0] = "zunionstore"
+ args[1] = dest
+ args[2] = len(keys)
+ for i, key := range keys {
+ args[3+i] = key
+ }
+ if len(store.Weights) > 0 {
+ args = append(args, "weights")
+ for _, weight := range store.Weights {
+ args = append(args, weight)
+ }
+ }
+ if store.Aggregate != "" {
+ args = append(args, "aggregate", store.Aggregate)
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
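+
+// exampleZUnionStore is an illustrative sketch, not upstream go-redis code:
+// the number of source keys is sent explicitly after the destination, and
+// ZStore supplies the optional WEIGHTS and AGGREGATE clauses.
+func exampleZUnionStore(c Cmdable) (int64, error) {
+	store := ZStore{Weights: []float64{2, 1}, Aggregate: "MAX"}
+	return c.ZUnionStore("combined", store, "scores:a", "scores:b").Result()
+}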
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) PFAdd(key string, els ...interface{}) *IntCmd {
+ args := make([]interface{}, 2, 2+len(els))
+ args[0] = "pfadd"
+ args[1] = key
+ args = appendArgs(args, els)
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PFCount(keys ...string) *IntCmd {
+ args := make([]interface{}, 1+len(keys))
+ args[0] = "pfcount"
+ for i, key := range keys {
+ args[1+i] = key
+ }
+ cmd := NewIntCmd(args...)
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) PFMerge(dest string, keys ...string) *StatusCmd {
+ args := make([]interface{}, 2+len(keys))
+ args[0] = "pfmerge"
+ args[1] = dest
+ for i, key := range keys {
+ args[2+i] = key
+ }
+ cmd := NewStatusCmd(args...)
+ c.process(cmd)
+ return cmd
+}
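+
+// examplePF is an illustrative sketch, not upstream go-redis code: PFAdd feeds
+// elements into a HyperLogLog and PFCount returns the approximate distinct
+// count, using constant memory per key.
+func examplePF(c Cmdable) (int64, error) {
+	if err := c.PFAdd("visitors", "u1", "u2", "u3").Err(); err != nil {
+		return 0, err
+	}
+	return c.PFCount("visitors").Result()
+}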
+
+//------------------------------------------------------------------------------
+
+func (c *cmdable) BgRewriteAOF() *StatusCmd {
+ cmd := NewStatusCmd("bgrewriteaof")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) BgSave() *StatusCmd {
+ cmd := NewStatusCmd("bgsave")
+ c.process(cmd)
+ return cmd
+}
+
+func (c *cmdable) ClientKill(ipPort string) *StatusCmd {
+ cmd := NewStatusCmd("client", "kill", ipPort)
+ c.process(cmd)
+ return cmd
+}
+
+// ClientKillByFilter is the new-style syntax, while ClientKill uses the old one:
+// CLIENT KILL