diff --git a/.azdevops/image-pipeline.yml b/.azdevops/image-pipeline.yml
index d8670703..f64731db 100644
--- a/.azdevops/image-pipeline.yml
+++ b/.azdevops/image-pipeline.yml
@@ -1,147 +1,153 @@
-# This pipeline tests the project and builds an image, which is pushed to a
-# container registry for later use
-
-trigger:
-- master
-pr:
-- master
-variables:
-- group: 'Metrics Adapter'
-jobs:
-- job: buildAndTest
- pool:
- vmImage: 'ubuntu-16.04'
- steps:
-
- # Set up the Go workspace
- # Go workspace setup from https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/go?view=azure-devops
- - script: |
- mkdir -p '$(GOBIN)'
- mkdir -p '$(GOPATH)/pkg'
- mkdir -p '$(modulePath)'
- shopt -s extglob
- shopt -s dotglob
- mv !(go) '$(modulePath)'
- echo '##vso[task.prependpath]$(GOBIN)'
- echo '##vso[task.prependpath]$(GOROOT)/bin'
- cd $(modulePath)
- go get -v -t -d ./...
- if [ -f Gopkg.toml ]; then
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
- fi
- displayName: 'Set up the Go workspace'
-
- # make build
- - script: 'make build'
- workingDirectory: '$(modulePath)'
- displayName: 'make build (Verify autogenerated scripts/Run tests/Build image)'
- env:
- VERSION: $(Build.BuildId)
-
- # docker push
- - script: 'make push'
- workingDirectory: '$(modulePath)'
- displayName: 'make push (docker push)'
- env:
- VERSION: $(Build.BuildId)
- DOCKER_PASS: $(DOCKER_PASS)
-
-- job: e2eTests
- dependsOn: buildAndTest
-
- # This will run this job 4 times with these 4 sets of variables
- strategy:
- matrix:
- K8s10:
- SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-10'
- SERVICEBUS_QUEUE_NAME: 'externalq-10'
- DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
- KUBERNETES_VERSION: '1.10.12'
- K8s11:
- SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-11'
- SERVICEBUS_QUEUE_NAME: 'externalq-11'
- DOCKER_VERSION: '17.03.3~ce-0~ubuntu-xenial'
- KUBERNETES_VERSION: '1.11.6'
- K8s12:
- SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-12'
- SERVICEBUS_QUEUE_NAME: 'externalq-12'
- DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
- KUBERNETES_VERSION: '1.12.4'
- K8s13:
- SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-13'
- SERVICEBUS_QUEUE_NAME: 'externalq-13'
- DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
- KUBERNETES_VERSION: '1.13.1'
-
- pool:
- vmImage: 'ubuntu-16.04'
- steps:
-
- # Print version details
- - script: |
- echo "This build uses the following versions:"
- echo "Kubernetes: $(KUBERNETES_VERSION)"
- echo "Minikube: $(MINIKUBE_VERSION)"
- echo "Helm: $(HELM_VERSION)"
- echo "Docker: $(DOCKER_VERSION)"
- displayName: 'Print version details'
-
- # Set up the Go workspace
- # Go workspace setup from https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/go?view=azure-devops
- - script: |
- mkdir -p '$(GOBIN)'
- mkdir -p '$(GOPATH)/pkg'
- mkdir -p '$(modulePath)'
- shopt -s extglob
- shopt -s dotglob
- mv !(go) '$(modulePath)'
- echo '##vso[task.prependpath]$(GOBIN)'
- echo '##vso[task.prependpath]$(GOROOT)/bin'
- cd $(modulePath)
- go get -v -t -d ./...
- if [ -f Gopkg.toml ]; then
- curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
- fi
- displayName: 'Set up the Go workspace'
-
- # Install docker
- - script: |
- chmod +x *.sh
- ./install-docker.sh
- workingDirectory: '$(modulePath)/.azdevops/0_install'
- displayName: 'Install docker (must continue on error)'
-
- # Install everything else needed
- - script: |
- ./install-misc.sh
- ./install-crictl.sh
- ./install-minikube.sh
- ./install-helm.sh
- workingDirectory: '$(modulePath)/.azdevops/0_install'
- displayName: 'Install everything else'
-
- # Start the cluster, get go packages, install tiller, set up metrics server, gen the local values file
- - script: |
- chmod +x *.sh
- ./start-cluster.sh
- ./go-get.sh
- ./check-cluster.sh
- ./init-tiller.sh
- ./gen-values-file.sh
- workingDirectory: '$(modulePath)/.azdevops/1_setup'
- displayName: 'Cluster set up, Go get, prep for adapter deployment'
- env:
- VERSION: $(Build.BuildId)
-
- # Run hack/test-e2e.sh
- - script: |
- chmod +x *.sh
- ./run-e2e.sh
- workingDirectory: '$(modulePath)/hack'
- displayName: 'Run e2e test script (deploy adapter & test functionality)'
- failOnStderr: true
- env:
- SERVICEBUS_CONNECTION_STRING: $(SERVICEBUS_CONNECTION_STRING)
- SP_CLIENT_ID: $(SP_CLIENT_ID)
- SP_TENANT_ID: $(SP_TENANT_ID)
+# This pipeline tests the project and builds an image, which is pushed to a
+# container registry for later use
+
+trigger:
+- master
+pr:
+- master
+variables:
+- group: 'Metrics Adapter'
+jobs:
+- job: buildAndTest
+ pool:
+ vmImage: 'ubuntu-16.04'
+ steps:
+
+ # Set up the Go workspace
+ # Go workspace setup from https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/go?view=azure-devops
+ - script: |
+ mkdir -p '$(GOBIN)'
+ mkdir -p '$(GOPATH)/pkg'
+ mkdir -p '$(modulePath)'
+ shopt -s extglob
+ shopt -s dotglob
+ mv !(go) '$(modulePath)'
+ echo '##vso[task.prependpath]$(GOBIN)'
+ echo '##vso[task.prependpath]$(GOROOT)/bin'
+ cd $(modulePath)
+ go get -v -t -d ./...
+ if [ -f Gopkg.toml ]; then
+ curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+ fi
+ displayName: 'Set up the Go workspace'
+
+ # make build
+ - script: 'make build'
+ workingDirectory: '$(modulePath)'
+ displayName: 'make build (Verify autogenerated scripts/Run tests/Build image)'
+ env:
+ VERSION: $(Build.BuildId)
+ GO111MODULE: on
+
+ # docker push
+ - script: 'make push'
+ workingDirectory: '$(modulePath)'
+ displayName: 'make push (docker push)'
+ env:
+ VERSION: $(Build.BuildId)
+ DOCKER_PASS: $(DOCKER_PASS)
+
+- job: e2eTests
+ dependsOn: buildAndTest
+
+ # This will run this job 5 times with these 5 sets of variables
+ strategy:
+ matrix:
+ K8s11:
+ SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-11'
+ SERVICEBUS_QUEUE_NAME: 'externalq-11'
+ DOCKER_VERSION: '17.03.3~ce-0~ubuntu-xenial'
+ KUBERNETES_VERSION: '1.11.10'
+ K8s12:
+ SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-12'
+ SERVICEBUS_QUEUE_NAME: 'externalq-12'
+ DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
+ KUBERNETES_VERSION: '1.12.4'
+ K8s13:
+ SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-13'
+ SERVICEBUS_QUEUE_NAME: 'externalq-13'
+ DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
+ KUBERNETES_VERSION: '1.13.1'
+ K8s14:
+ SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-14'
+ SERVICEBUS_QUEUE_NAME: 'externalq-14'
+ DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
+ KUBERNETES_VERSION: '1.14.8'
+ K8s15:
+ SERVICEBUS_SUBSCRIPTION_NAME: 'externalsub-15'
+ SERVICEBUS_QUEUE_NAME: 'externalq-15'
+ DOCKER_VERSION: '18.06.1~ce~3-0~ubuntu'
+ KUBERNETES_VERSION: '1.15.5'
+
+ pool:
+ vmImage: 'ubuntu-16.04'
+ steps:
+
+ # Print version details
+ - script: |
+ echo "This build uses the following versions:"
+ echo "Kubernetes: $(KUBERNETES_VERSION)"
+ echo "Minikube: $(MINIKUBE_VERSION)"
+ echo "Helm: $(HELM_VERSION)"
+ echo "Docker: $(DOCKER_VERSION)"
+ displayName: 'Print version details'
+
+ # Set up the Go workspace
+ # Go workspace setup from https://docs.microsoft.com/en-us/azure/devops/pipelines/languages/go?view=azure-devops
+ - script: |
+ mkdir -p '$(GOBIN)'
+ mkdir -p '$(GOPATH)/pkg'
+ mkdir -p '$(modulePath)'
+ shopt -s extglob
+ shopt -s dotglob
+ mv !(go) '$(modulePath)'
+ echo '##vso[task.prependpath]$(GOBIN)'
+ echo '##vso[task.prependpath]$(GOROOT)/bin'
+ cd $(modulePath)
+ go get -v -t -d ./...
+ if [ -f Gopkg.toml ]; then
+ curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh
+ fi
+ displayName: 'Set up the Go workspace'
+
+ # Install docker
+ - script: |
+ chmod +x *.sh
+ ./install-docker.sh
+ workingDirectory: '$(modulePath)/.azdevops/0_install'
+ displayName: 'Install docker (must continue on error)'
+
+ # Install everything else needed
+ - script: |
+ ./install-misc.sh
+ ./install-crictl.sh
+ ./install-minikube.sh
+ ./install-helm.sh
+ workingDirectory: '$(modulePath)/.azdevops/0_install'
+ displayName: 'Install everything else'
+
+ # Start the cluster, get go packages, install tiller, set up metrics server, gen the local values file
+ - script: |
+ chmod +x *.sh
+ ./start-cluster.sh
+ ./go-get.sh
+ ./check-cluster.sh
+ ./init-tiller.sh
+ ./gen-values-file.sh
+ workingDirectory: '$(modulePath)/.azdevops/1_setup'
+ displayName: 'Cluster set up, Go get, prep for adapter deployment'
+ env:
+ VERSION: $(Build.BuildId)
+
+ # Run hack/test-e2e.sh
+ - script: |
+ chmod +x *.sh
+ ./run-e2e.sh
+ workingDirectory: '$(modulePath)/hack'
+ displayName: 'Run e2e test script (deploy adapter & test functionality)'
+ failOnStderr: true
+ env:
+ SERVICEBUS_CONNECTION_STRING: $(SERVICEBUS_CONNECTION_STRING)
+ SP_CLIENT_ID: $(SP_CLIENT_ID)
+ SP_TENANT_ID: $(SP_TENANT_ID)
SP_CLIENT_SECRET: $(SP_CLIENT_SECRET)
\ No newline at end of file
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 78d95167..b489e841 100755
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -5,7 +5,7 @@ version: 2
jobs:
build:
docker:
- - image: circleci/golang:1.10.3
+ - image: circleci/golang:1.12.12
working_directory: /go/src/github.com/Azure/azure-k8s-metrics-adapter
steps:
- checkout
@@ -13,6 +13,7 @@ jobs:
- run: |
echo 'export VERSION="$CIRCLE_WORKFLOW_ID"' >> $BASH_ENV
echo 'export IMAGE="public/k8s/metrics/adapter"' >> $BASH_ENV
+ echo 'export GO111MODULE=on' >> $BASH_ENV
- run:
name: install helm
command: .circleci/install-helm.sh
@@ -27,18 +28,20 @@ jobs:
- app.tar
build_pr:
docker:
- - image: circleci/golang:1.10.3
+ - image: circleci/golang:1.12.12
working_directory: /go/src/github.com/Azure/azure-k8s-metrics-adapter
steps:
- checkout
- setup_remote_docker
+ - run: |
+ echo 'export GO111MODULE=on' >> $BASH_ENV
- run:
name: install helm
command: .circleci/install-helm.sh
- run: make build
deploy:
docker:
- - image: circleci/golang:1.10.3
+ - image: circleci/golang:1.12.12
working_directory: /go/src/github.com/Azure/azure-k8s-metrics-adapter
steps:
- checkout
@@ -46,6 +49,7 @@ jobs:
- run: |
echo 'export VERSION="$(if [ -z ${CIRCLE_TAG} ]; then echo "beta"; else echo "$CIRCLE_TAG"; fi)-$CIRCLE_BUILD_NUM"' >> $BASH_ENV
echo 'export IMAGE="public/k8s/metrics/adapter"' >> $BASH_ENV
+ echo 'export GO111MODULE=on' >> $BASH_ENV
- restore_cache:
keys:
- v1-{{ .Environment.CIRCLE_WORKFLOW_ID }}
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 6c2969a8..69a9624a 100755
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -16,16 +16,26 @@ or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any addi
## Development
To do development you will need:
-- [Golang](https://golang.org/doc/install) - same as current [Kubernetes version ](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#go)
+- [Golang](https://golang.org/doc/install) - at least 1.12 - but same as current [Kubernetes version](https://github.com/kubernetes/community/blob/master/contributors/devel/development.md#go)
- Kubernetes cluster - [minikube](https://github.com/kubernetes/minikube), [Docker for Mac with Kubernetes support](https://docs.docker.com/docker-for-mac/kubernetes/), [Docker for Windows with Kubernetes support](https://docs.docker.com/docker-for-windows/kubernetes/), [AKS](https://docs.microsoft.com/en-us/azure/aks/kubernetes-walkthrough)
- [git](https://git-scm.com/downloads)
- [mercurial](https://www.mercurial-scm.org/downloads)
### Get the source
+This project uses [go modules](https://blog.golang.org/using-go-modules).
+
+If you add the project to your `GOPATH`, enable go modules before building.
```bash
go get github.com/Azure/azure-k8s-metrics-adapter
cd $GOPATH/src/github.com/Azure/azure-k8s-metrics-adapter
+export GO111MODULE=on
+```
+
+Otherwise you can clone outside of your `GOPATH`; go modules will automatically be used if using go 1.12:
+
+```
+git clone https://github.com/Azure/azure-k8s-metrics-adapter.git
```
### Add your fork
diff --git a/Dockerfile b/Dockerfile
index b51188d0..24bec485 100755
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM golang:1.11.11-alpine3.10 as builder
+FROM golang:1.12.12-alpine3.10 as builder
WORKDIR /go/src/github.com/Azure/azure-k8s-metrics-adapter
COPY . .
diff --git a/Gopkg.lock b/Gopkg.lock
deleted file mode 100644
index 1f6809bb..00000000
--- a/Gopkg.lock
+++ /dev/null
@@ -1,901 +0,0 @@
-# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
-
-
-[[projects]]
- branch = "default"
- name = "bitbucket.org/ww/goautoneg"
- packages = ["."]
- revision = "75cd24fc2f2c2a2088577d12123ddee5f54e0675"
-
-[[projects]]
- name = "github.com/Azure/azure-sdk-for-go"
- packages = [
- "services/appinsights/v1/insights",
- "services/preview/monitor/mgmt/2018-03-01/insights",
- "services/servicebus/mgmt/2017-04-01/servicebus",
- "version"
- ]
- revision = "d3bcaa706ac10b18784811ff31e43be635159013"
- version = "v21.0.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/Azure/go-ansiterm"
- packages = [
- ".",
- "winterm"
- ]
- revision = "d6e3b3328b783f23731bc4d058875b0371ff8109"
-
-[[projects]]
- name = "github.com/Azure/go-autorest"
- packages = [
- "autorest",
- "autorest/adal",
- "autorest/azure",
- "autorest/azure/auth",
- "autorest/date",
- "autorest/to",
- "autorest/validation",
- "logger",
- "version"
- ]
- revision = "a88c19ef2016e095f0b6c3b451074b4663f53bed"
- version = "v10.15.4"
-
-[[projects]]
- name = "github.com/NYTimes/gziphandler"
- packages = ["."]
- revision = "2600fb119af974220d3916a5916d6e31176aac1b"
- version = "v1.0.1"
-
-[[projects]]
- name = "github.com/PuerkitoBio/purell"
- packages = ["."]
- revision = "0bcb03f4b4d0a9428594752bd2a3b9aa0a9d4bd4"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/PuerkitoBio/urlesc"
- packages = ["."]
- revision = "de5bf2ad457846296e2031421a34e2568e304e35"
-
-[[projects]]
- name = "github.com/Sirupsen/logrus"
- packages = ["."]
- revision = "bcd833dfe83d3cebad139e4a29ed79cb2318bf95"
- version = "v1.2.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/beorn7/perks"
- packages = ["quantile"]
- revision = "3a771d992973f24aa725d07868b467d1ddfceafb"
-
-[[projects]]
- name = "github.com/coreos/etcd"
- packages = [
- "auth/authpb",
- "client",
- "clientv3",
- "etcdserver/api/v3rpc/rpctypes",
- "etcdserver/etcdserverpb",
- "mvcc/mvccpb",
- "pkg/pathutil",
- "pkg/srv",
- "pkg/tlsutil",
- "pkg/transport",
- "pkg/types",
- "version"
- ]
- revision = "33245c6b5b49130ca99280408fadfab01aac0e48"
- version = "v3.3.8"
-
-[[projects]]
- name = "github.com/coreos/go-semver"
- packages = ["semver"]
- revision = "8ab6407b697782a06568d4b7f1db25550ec2e4c6"
- version = "v0.2.0"
-
-[[projects]]
- name = "github.com/coreos/go-systemd"
- packages = ["daemon"]
- revision = "39ca1b05acc7ad1220e09f133283b8859a8b71ab"
- version = "v17"
-
-[[projects]]
- name = "github.com/davecgh/go-spew"
- packages = ["spew"]
- revision = "346938d642f2ec3594ed81d874461961cd0faa76"
- version = "v1.1.0"
-
-[[projects]]
- name = "github.com/dgrijalva/jwt-go"
- packages = ["."]
- revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
- version = "v3.2.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/dimchansky/utfbom"
- packages = ["."]
- revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c"
-
-[[projects]]
- name = "github.com/docker/docker"
- packages = [
- "pkg/term",
- "pkg/term/windows"
- ]
- revision = "092cba3727bb9b4a2f0e922cd6c0f93ea270e363"
- version = "v1.13.1"
-
-[[projects]]
- name = "github.com/elazarl/go-bindata-assetfs"
- packages = ["."]
- revision = "30f82fa23fd844bd5bb1e5f216db87fd77b5eb43"
- version = "v1.0.0"
-
-[[projects]]
- name = "github.com/emicklei/go-restful"
- packages = [
- ".",
- "log"
- ]
- revision = "3eb9738c1697594ea6e71a7156a9bb32ed216cf0"
- version = "v2.8.0"
-
-[[projects]]
- name = "github.com/emicklei/go-restful-swagger12"
- packages = ["."]
- revision = "dcef7f55730566d41eae5db10e7d6981829720f6"
- version = "1.0.1"
-
-[[projects]]
- name = "github.com/evanphx/json-patch"
- packages = ["."]
- revision = "afac545df32f2287a079e2dfb7ba2745a643747e"
- version = "v3.0.0"
-
-[[projects]]
- name = "github.com/ghodss/yaml"
- packages = ["."]
- revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
- version = "v1.0.0"
-
-[[projects]]
- name = "github.com/go-openapi/jsonpointer"
- packages = ["."]
- revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2"
- version = "0.15.0"
-
-[[projects]]
- name = "github.com/go-openapi/jsonreference"
- packages = ["."]
- revision = "3fb327e6747da3043567ee86abd02bb6376b6be2"
- version = "0.15.0"
-
-[[projects]]
- name = "github.com/go-openapi/spec"
- packages = ["."]
- revision = "bce47c9386f9ecd6b86f450478a80103c3fe1402"
- version = "0.15.0"
-
-[[projects]]
- name = "github.com/go-openapi/swag"
- packages = ["."]
- revision = "2b0bd4f193d011c203529df626a65d63cb8a79e8"
- version = "0.15.0"
-
-[[projects]]
- name = "github.com/gogo/protobuf"
- packages = [
- "gogoproto",
- "proto",
- "protoc-gen-gogo/descriptor",
- "sortkeys"
- ]
- revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
- version = "v1.1.1"
-
-[[projects]]
- branch = "master"
- name = "github.com/golang/glog"
- packages = ["."]
- revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
-
-[[projects]]
- name = "github.com/golang/protobuf"
- packages = [
- "proto",
- "ptypes",
- "ptypes/any",
- "ptypes/duration",
- "ptypes/timestamp"
- ]
- revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
- version = "v1.1.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/google/btree"
- packages = ["."]
- revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
-
-[[projects]]
- branch = "master"
- name = "github.com/google/gofuzz"
- packages = ["."]
- revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
-
-[[projects]]
- name = "github.com/googleapis/gnostic"
- packages = [
- "OpenAPIv2",
- "compiler",
- "extensions"
- ]
- revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
- version = "v0.2.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/gregjones/httpcache"
- packages = [
- ".",
- "diskcache"
- ]
- revision = "9cad4c3443a7200dd6400aef47183728de563a38"
-
-[[projects]]
- name = "github.com/grpc-ecosystem/go-grpc-prometheus"
- packages = ["."]
- revision = "c225b8c3b01faf2899099b768856a9e916e5087b"
- version = "v1.2.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/hashicorp/golang-lru"
- packages = [
- ".",
- "simplelru"
- ]
- revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
-
-[[projects]]
- name = "github.com/imdario/mergo"
- packages = ["."]
- revision = "9316a62528ac99aaecb4e47eadd6dc8aa6533d58"
- version = "v0.3.5"
-
-[[projects]]
- name = "github.com/json-iterator/go"
- packages = ["."]
- revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
- version = "1.1.4"
-
-[[projects]]
- name = "github.com/konsorten/go-windows-terminal-sequences"
- packages = ["."]
- revision = "5c8c8bd35d3832f5d134ae1e1e375b69a4d25242"
- version = "v1.0.1"
-
-[[projects]]
- name = "github.com/kubernetes-incubator/custom-metrics-apiserver"
- packages = [
- "pkg/apiserver",
- "pkg/apiserver/installer",
- "pkg/cmd",
- "pkg/cmd/server",
- "pkg/dynamicmapper",
- "pkg/provider",
- "pkg/provider/helpers",
- "pkg/registry/custom_metrics",
- "pkg/registry/external_metrics"
- ]
- revision = "bb8bae16c5550f2aeef3151259a1b36078a0e544"
-
-[[projects]]
- branch = "master"
- name = "github.com/mailru/easyjson"
- packages = [
- "buffer",
- "jlexer",
- "jwriter"
- ]
- revision = "efc7eb8984d6655c26b5c9d2e65c024e5767c37c"
-
-[[projects]]
- name = "github.com/matttproud/golang_protobuf_extensions"
- packages = ["pbutil"]
- revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
- version = "v1.0.1"
-
-[[projects]]
- name = "github.com/modern-go/concurrent"
- packages = ["."]
- revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
- version = "1.0.3"
-
-[[projects]]
- name = "github.com/modern-go/reflect2"
- packages = ["."]
- revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
- version = "1.0.1"
-
-[[projects]]
- name = "github.com/pborman/uuid"
- packages = ["."]
- revision = "e790cca94e6cc75c7064b1332e63811d4aae1a53"
- version = "v1.1"
-
-[[projects]]
- branch = "master"
- name = "github.com/petar/GoLLRB"
- packages = ["llrb"]
- revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
-
-[[projects]]
- name = "github.com/peterbourgon/diskv"
- packages = ["."]
- revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
- version = "v2.0.1"
-
-[[projects]]
- name = "github.com/prometheus/client_golang"
- packages = ["prometheus"]
- revision = "c5b7fccd204277076155f10851dad72b76a49317"
- version = "v0.8.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/prometheus/client_model"
- packages = ["go"]
- revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"
-
-[[projects]]
- branch = "master"
- name = "github.com/prometheus/common"
- packages = [
- "expfmt",
- "internal/bitbucket.org/ww/goautoneg",
- "model"
- ]
- revision = "7600349dcfe1abd18d72d3a1770870d9800a7801"
-
-[[projects]]
- branch = "master"
- name = "github.com/prometheus/procfs"
- packages = [
- ".",
- "internal/util",
- "nfs",
- "xfs"
- ]
- revision = "ae68e2d4c00fed4943b5f6698d504a5fe083da8a"
-
-[[projects]]
- name = "github.com/spf13/pflag"
- packages = ["."]
- revision = "583c0c0531f06d5278b7d917446061adc344b5cd"
- version = "v1.0.1"
-
-[[projects]]
- name = "github.com/ugorji/go"
- packages = ["codec"]
- revision = "b4c50a2b199d93b13dc15e78929cfb23bfdf21ab"
- version = "v1.1.1"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/crypto"
- packages = [
- "pkcs12",
- "pkcs12/internal/rc2",
- "ssh/terminal"
- ]
- revision = "a2144134853fc9a27a7b1e3eb4f19f1a76df13c9"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/net"
- packages = [
- "context",
- "context/ctxhttp",
- "http/httpguts",
- "http2",
- "http2/hpack",
- "idna",
- "internal/timeseries",
- "trace",
- "websocket"
- ]
- revision = "a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/oauth2"
- packages = [
- ".",
- "internal"
- ]
- revision = "d668ce993890a79bda886613ee587a69dd5da7a6"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/sys"
- packages = [
- "unix",
- "windows"
- ]
- revision = "ac767d655b305d4e9612f5f6e33120b9176c4ad4"
-
-[[projects]]
- name = "golang.org/x/text"
- packages = [
- "collate",
- "collate/build",
- "internal/colltab",
- "internal/gen",
- "internal/tag",
- "internal/triegen",
- "internal/ucd",
- "language",
- "secure/bidirule",
- "transform",
- "unicode/bidi",
- "unicode/cldr",
- "unicode/norm",
- "unicode/rangetable",
- "width"
- ]
- revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
- version = "v0.3.0"
-
-[[projects]]
- branch = "master"
- name = "golang.org/x/time"
- packages = ["rate"]
- revision = "fbb02b2291d28baffd63558aa44b4b56f178d650"
-
-[[projects]]
- name = "google.golang.org/appengine"
- packages = [
- "internal",
- "internal/base",
- "internal/datastore",
- "internal/log",
- "internal/remote_api",
- "internal/urlfetch",
- "urlfetch"
- ]
- revision = "e9657d882bb81064595ca3b56cbe2546bbabf7b1"
- version = "v1.4.0"
-
-[[projects]]
- branch = "master"
- name = "google.golang.org/genproto"
- packages = ["googleapis/rpc/status"]
- revision = "02b4e95473316948020af0b7a4f0f22c73929b0e"
-
-[[projects]]
- name = "google.golang.org/grpc"
- packages = [
- ".",
- "balancer",
- "balancer/base",
- "balancer/roundrobin",
- "codes",
- "connectivity",
- "credentials",
- "encoding",
- "encoding/proto",
- "grpclog",
- "health/grpc_health_v1",
- "internal",
- "internal/backoff",
- "internal/channelz",
- "internal/grpcrand",
- "keepalive",
- "metadata",
- "naming",
- "peer",
- "resolver",
- "resolver/dns",
- "resolver/passthrough",
- "stats",
- "status",
- "tap",
- "transport"
- ]
- revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
- version = "v1.13.0"
-
-[[projects]]
- name = "gopkg.in/inf.v0"
- packages = ["."]
- revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
- version = "v0.9.1"
-
-[[projects]]
- name = "gopkg.in/natefinch/lumberjack.v2"
- packages = ["."]
- revision = "a96e63847dc3c67d17befa69c303767e2f84e54f"
- version = "v2.1"
-
-[[projects]]
- name = "gopkg.in/yaml.v2"
- packages = ["."]
- revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
- version = "v2.2.1"
-
-[[projects]]
- branch = "master"
- name = "k8s.io/api"
- packages = [
- "admission/v1beta1",
- "admissionregistration/v1alpha1",
- "admissionregistration/v1beta1",
- "apps/v1",
- "apps/v1beta1",
- "apps/v1beta2",
- "authentication/v1",
- "authentication/v1beta1",
- "authorization/v1",
- "authorization/v1beta1",
- "autoscaling/v1",
- "autoscaling/v2beta1",
- "autoscaling/v2beta2",
- "batch/v1",
- "batch/v1beta1",
- "batch/v2alpha1",
- "certificates/v1beta1",
- "coordination/v1beta1",
- "core/v1",
- "events/v1beta1",
- "extensions/v1beta1",
- "networking/v1",
- "policy/v1beta1",
- "rbac/v1",
- "rbac/v1alpha1",
- "rbac/v1beta1",
- "scheduling/v1alpha1",
- "scheduling/v1beta1",
- "settings/v1alpha1",
- "storage/v1",
- "storage/v1alpha1",
- "storage/v1beta1"
- ]
- revision = "173ce66c1e39d1d0f56e0b3347ff2988068aecd0"
-
-[[projects]]
- branch = "release-1.12"
- name = "k8s.io/apimachinery"
- packages = [
- "pkg/api/equality",
- "pkg/api/errors",
- "pkg/api/meta",
- "pkg/api/resource",
- "pkg/api/validation",
- "pkg/api/validation/path",
- "pkg/apis/meta/internalversion",
- "pkg/apis/meta/v1",
- "pkg/apis/meta/v1/unstructured",
- "pkg/apis/meta/v1/validation",
- "pkg/apis/meta/v1beta1",
- "pkg/conversion",
- "pkg/conversion/queryparams",
- "pkg/fields",
- "pkg/labels",
- "pkg/runtime",
- "pkg/runtime/schema",
- "pkg/runtime/serializer",
- "pkg/runtime/serializer/json",
- "pkg/runtime/serializer/protobuf",
- "pkg/runtime/serializer/recognizer",
- "pkg/runtime/serializer/streaming",
- "pkg/runtime/serializer/versioning",
- "pkg/selection",
- "pkg/types",
- "pkg/util/cache",
- "pkg/util/clock",
- "pkg/util/diff",
- "pkg/util/errors",
- "pkg/util/framer",
- "pkg/util/intstr",
- "pkg/util/json",
- "pkg/util/mergepatch",
- "pkg/util/naming",
- "pkg/util/net",
- "pkg/util/rand",
- "pkg/util/runtime",
- "pkg/util/sets",
- "pkg/util/strategicpatch",
- "pkg/util/uuid",
- "pkg/util/validation",
- "pkg/util/validation/field",
- "pkg/util/wait",
- "pkg/util/waitgroup",
- "pkg/util/yaml",
- "pkg/version",
- "pkg/watch",
- "third_party/forked/golang/json",
- "third_party/forked/golang/reflect"
- ]
- revision = "49ce2735e5074ffc3f8190c8406cf51a96302dad"
-
-[[projects]]
- branch = "release-1.12"
- name = "k8s.io/apiserver"
- packages = [
- "pkg/admission",
- "pkg/admission/configuration",
- "pkg/admission/initializer",
- "pkg/admission/metrics",
- "pkg/admission/plugin/initialization",
- "pkg/admission/plugin/namespace/lifecycle",
- "pkg/admission/plugin/webhook/config",
- "pkg/admission/plugin/webhook/config/apis/webhookadmission",
- "pkg/admission/plugin/webhook/config/apis/webhookadmission/v1alpha1",
- "pkg/admission/plugin/webhook/errors",
- "pkg/admission/plugin/webhook/generic",
- "pkg/admission/plugin/webhook/mutating",
- "pkg/admission/plugin/webhook/namespace",
- "pkg/admission/plugin/webhook/request",
- "pkg/admission/plugin/webhook/rules",
- "pkg/admission/plugin/webhook/util",
- "pkg/admission/plugin/webhook/validating",
- "pkg/apis/apiserver",
- "pkg/apis/apiserver/install",
- "pkg/apis/apiserver/v1alpha1",
- "pkg/apis/audit",
- "pkg/apis/audit/install",
- "pkg/apis/audit/v1",
- "pkg/apis/audit/v1alpha1",
- "pkg/apis/audit/v1beta1",
- "pkg/apis/audit/validation",
- "pkg/audit",
- "pkg/audit/policy",
- "pkg/authentication/authenticator",
- "pkg/authentication/authenticatorfactory",
- "pkg/authentication/group",
- "pkg/authentication/request/anonymous",
- "pkg/authentication/request/bearertoken",
- "pkg/authentication/request/headerrequest",
- "pkg/authentication/request/union",
- "pkg/authentication/request/websocket",
- "pkg/authentication/request/x509",
- "pkg/authentication/serviceaccount",
- "pkg/authentication/token/tokenfile",
- "pkg/authentication/user",
- "pkg/authorization/authorizer",
- "pkg/authorization/authorizerfactory",
- "pkg/authorization/path",
- "pkg/authorization/union",
- "pkg/endpoints",
- "pkg/endpoints/discovery",
- "pkg/endpoints/filters",
- "pkg/endpoints/handlers",
- "pkg/endpoints/handlers/negotiation",
- "pkg/endpoints/handlers/responsewriters",
- "pkg/endpoints/metrics",
- "pkg/endpoints/openapi",
- "pkg/endpoints/request",
- "pkg/features",
- "pkg/registry/generic",
- "pkg/registry/generic/registry",
- "pkg/registry/rest",
- "pkg/server",
- "pkg/server/filters",
- "pkg/server/healthz",
- "pkg/server/httplog",
- "pkg/server/mux",
- "pkg/server/options",
- "pkg/server/resourceconfig",
- "pkg/server/routes",
- "pkg/server/routes/data/swagger",
- "pkg/server/storage",
- "pkg/storage",
- "pkg/storage/cacher",
- "pkg/storage/errors",
- "pkg/storage/etcd",
- "pkg/storage/etcd/metrics",
- "pkg/storage/etcd/util",
- "pkg/storage/etcd3",
- "pkg/storage/names",
- "pkg/storage/storagebackend",
- "pkg/storage/storagebackend/factory",
- "pkg/storage/value",
- "pkg/util/dryrun",
- "pkg/util/feature",
- "pkg/util/flag",
- "pkg/util/flushwriter",
- "pkg/util/logs",
- "pkg/util/openapi",
- "pkg/util/trace",
- "pkg/util/webhook",
- "pkg/util/wsstream",
- "plugin/pkg/audit/buffered",
- "plugin/pkg/audit/log",
- "plugin/pkg/audit/truncate",
- "plugin/pkg/audit/webhook",
- "plugin/pkg/authenticator/token/webhook",
- "plugin/pkg/authorizer/webhook"
- ]
- revision = "9601a7bf41efece7e12a8f9f74d2c3b10cdd998e"
-
-[[projects]]
- branch = "release-9.0"
- name = "k8s.io/client-go"
- packages = [
- "discovery",
- "discovery/fake",
- "dynamic",
- "dynamic/fake",
- "informers",
- "informers/admissionregistration",
- "informers/admissionregistration/v1alpha1",
- "informers/admissionregistration/v1beta1",
- "informers/apps",
- "informers/apps/v1",
- "informers/apps/v1beta1",
- "informers/apps/v1beta2",
- "informers/autoscaling",
- "informers/autoscaling/v1",
- "informers/autoscaling/v2beta1",
- "informers/autoscaling/v2beta2",
- "informers/batch",
- "informers/batch/v1",
- "informers/batch/v1beta1",
- "informers/batch/v2alpha1",
- "informers/certificates",
- "informers/certificates/v1beta1",
- "informers/coordination",
- "informers/coordination/v1beta1",
- "informers/core",
- "informers/core/v1",
- "informers/events",
- "informers/events/v1beta1",
- "informers/extensions",
- "informers/extensions/v1beta1",
- "informers/internalinterfaces",
- "informers/networking",
- "informers/networking/v1",
- "informers/policy",
- "informers/policy/v1beta1",
- "informers/rbac",
- "informers/rbac/v1",
- "informers/rbac/v1alpha1",
- "informers/rbac/v1beta1",
- "informers/scheduling",
- "informers/scheduling/v1alpha1",
- "informers/scheduling/v1beta1",
- "informers/settings",
- "informers/settings/v1alpha1",
- "informers/storage",
- "informers/storage/v1",
- "informers/storage/v1alpha1",
- "informers/storage/v1beta1",
- "kubernetes",
- "kubernetes/scheme",
- "kubernetes/typed/admissionregistration/v1alpha1",
- "kubernetes/typed/admissionregistration/v1beta1",
- "kubernetes/typed/apps/v1",
- "kubernetes/typed/apps/v1beta1",
- "kubernetes/typed/apps/v1beta2",
- "kubernetes/typed/authentication/v1",
- "kubernetes/typed/authentication/v1beta1",
- "kubernetes/typed/authorization/v1",
- "kubernetes/typed/authorization/v1beta1",
- "kubernetes/typed/autoscaling/v1",
- "kubernetes/typed/autoscaling/v2beta1",
- "kubernetes/typed/autoscaling/v2beta2",
- "kubernetes/typed/batch/v1",
- "kubernetes/typed/batch/v1beta1",
- "kubernetes/typed/batch/v2alpha1",
- "kubernetes/typed/certificates/v1beta1",
- "kubernetes/typed/coordination/v1beta1",
- "kubernetes/typed/core/v1",
- "kubernetes/typed/events/v1beta1",
- "kubernetes/typed/extensions/v1beta1",
- "kubernetes/typed/networking/v1",
- "kubernetes/typed/policy/v1beta1",
- "kubernetes/typed/rbac/v1",
- "kubernetes/typed/rbac/v1alpha1",
- "kubernetes/typed/rbac/v1beta1",
- "kubernetes/typed/scheduling/v1alpha1",
- "kubernetes/typed/scheduling/v1beta1",
- "kubernetes/typed/settings/v1alpha1",
- "kubernetes/typed/storage/v1",
- "kubernetes/typed/storage/v1alpha1",
- "kubernetes/typed/storage/v1beta1",
- "listers/admissionregistration/v1alpha1",
- "listers/admissionregistration/v1beta1",
- "listers/apps/v1",
- "listers/apps/v1beta1",
- "listers/apps/v1beta2",
- "listers/autoscaling/v1",
- "listers/autoscaling/v2beta1",
- "listers/autoscaling/v2beta2",
- "listers/batch/v1",
- "listers/batch/v1beta1",
- "listers/batch/v2alpha1",
- "listers/certificates/v1beta1",
- "listers/coordination/v1beta1",
- "listers/core/v1",
- "listers/events/v1beta1",
- "listers/extensions/v1beta1",
- "listers/networking/v1",
- "listers/policy/v1beta1",
- "listers/rbac/v1",
- "listers/rbac/v1alpha1",
- "listers/rbac/v1beta1",
- "listers/scheduling/v1alpha1",
- "listers/scheduling/v1beta1",
- "listers/settings/v1alpha1",
- "listers/storage/v1",
- "listers/storage/v1alpha1",
- "listers/storage/v1beta1",
- "pkg/apis/clientauthentication",
- "pkg/apis/clientauthentication/v1alpha1",
- "pkg/apis/clientauthentication/v1beta1",
- "pkg/version",
- "plugin/pkg/client/auth/exec",
- "rest",
- "rest/watch",
- "restmapper",
- "testing",
- "tools/auth",
- "tools/cache",
- "tools/clientcmd",
- "tools/clientcmd/api",
- "tools/clientcmd/api/latest",
- "tools/clientcmd/api/v1",
- "tools/metrics",
- "tools/pager",
- "tools/reference",
- "transport",
- "util/buffer",
- "util/cert",
- "util/connrotation",
- "util/flowcontrol",
- "util/homedir",
- "util/integer",
- "util/retry",
- "util/workqueue"
- ]
- revision = "5e6a3d4e34f694e895b13ae728111e726a5b69df"
-
-[[projects]]
- branch = "master"
- name = "k8s.io/kube-openapi"
- packages = [
- "pkg/builder",
- "pkg/common",
- "pkg/handler",
- "pkg/util",
- "pkg/util/proto"
- ]
- revision = "d8ea2fe547a448256204cfc68dfee7b26c720acb"
-
-[[projects]]
- branch = "release-1.12"
- name = "k8s.io/metrics"
- packages = [
- "pkg/apis/custom_metrics",
- "pkg/apis/custom_metrics/install",
- "pkg/apis/custom_metrics/v1beta1",
- "pkg/apis/custom_metrics/v1beta2",
- "pkg/apis/external_metrics",
- "pkg/apis/external_metrics/install",
- "pkg/apis/external_metrics/v1beta1"
- ]
- revision = "3954d62a524dad50e7c4bcf91a26eeef011040e2"
-
-[solve-meta]
- analyzer-name = "dep"
- analyzer-version = 1
- inputs-digest = "fa7184f3a501cd91f1bcedacc6dafcf9c39fecd0dfb8557f4249ff99c19f11fe"
- solver-name = "gps-cdcl"
- solver-version = 1
diff --git a/Gopkg.toml b/Gopkg.toml
deleted file mode 100755
index 0e8dfaf3..00000000
--- a/Gopkg.toml
+++ /dev/null
@@ -1,35 +0,0 @@
-# Gopkg.toml
-#
-# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
-# for detailed Gopkg.toml documentation.
-
-ignored = ["github.com/Azure/azure-service-bus-go"]
-
-# Kubernetes incubator deps
-[[constraint]]
- name = "github.com/kubernetes-incubator/custom-metrics-apiserver"
- revision = "bb8bae16c5550f2aeef3151259a1b36078a0e544" #revision number for 1.12.0 release
-
-[[constraint]]
- name = "github.com/Azure/azure-sdk-for-go"
- version = "21.0.0"
-
-[[constraint]]
- name = "k8s.io/apimachinery"
- branch = "release-1.12"
-
-[[constraint]]
- name = "k8s.io/apiserver"
- branch = "release-1.12"
-
-[[constraint]]
- name = "k8s.io/client-go"
- branch = "release-9.0"
-
-[[constraint]]
- name = "k8s.io/metrics"
- branch = "release-1.12"
-
-[prune]
- go-tests = true
- unused-packages = true
\ No newline at end of file
diff --git a/Makefile b/Makefile
index 3d88142c..9a99f00f 100755
--- a/Makefile
+++ b/Makefile
@@ -25,8 +25,8 @@ build-local: test
build: vendor verify-deploy verify-apis
docker build -t $(FULL_IMAGE):$(VERSION) .
-vendor:
- dep ensure -v
+vendor:
+ go mod vendor
test: vendor
hack/run-tests.sh
@@ -74,16 +74,12 @@ tag-ci:
docker tag $(FULL_IMAGE):$(CIRCLE_WORKFLOW_ID) $(FULL_IMAGE):$(VERSION)
# Code gen helpers
-gen-apis: codegen-get
+gen-apis: vendor
hack/update-codegen.sh
-verify-apis: codegen-get
+verify-apis: vendor
hack/verify-codegen.sh
-codegen-get:
- go get -d -u k8s.io/code-generator/...
- hack/codegen-repo-fix.sh
-
# Helm deploy generator helpers
verify-deploy:
hack/verify-deploy.sh
diff --git a/go.mod b/go.mod
new file mode 100644
index 00000000..26867ca6
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,26 @@
+module github.com/Azure/azure-k8s-metrics-adapter
+
+go 1.12
+
+require (
+ github.com/Azure/azure-sdk-for-go v30.1.0+incompatible
+ github.com/Azure/azure-service-bus-go v0.9.1
+ github.com/Azure/go-autorest v12.0.0+incompatible
+ github.com/dimchansky/utfbom v1.1.0 // indirect
+ github.com/emicklei/go-restful v2.2.1+incompatible // indirect
+ github.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305 // indirect
+ github.com/evanphx/json-patch v4.2.0+incompatible // indirect
+ github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190918110929-3d9be26a50eb
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
+ k8s.io/api v0.0.0-20190817021128-e14a4b1f5f84
+ k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d
+ k8s.io/apiserver v0.0.0-20190817022445-fd6150da8f40 // indirect
+ k8s.io/client-go v0.0.0-20190817021527-637fc595d17a
+ k8s.io/code-generator v0.0.0-20190612205613-18da4a14b22b
+ k8s.io/component-base v0.0.0-20190817022002-dd0e01d5790f
+ k8s.io/klog v0.3.1
+ k8s.io/metrics v0.0.0-20190817023635-63ee757b2e8b
+
+)
+
+replace github.com/Sirupsen/logrus v1.4.2 => github.com/sirupsen/logrus v1.4.2
diff --git a/go.sum b/go.sum
new file mode 100644
index 00000000..9cae5795
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,537 @@
+bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c h1:t+Ra932MCC0eeyD/vigXqMbZTzgZjd4JOfBJWC6VSMI=
+bitbucket.org/ww/goautoneg v0.0.0-20120707110453-75cd24fc2f2c/go.mod h1:1vhO7Mn/FZMgOgDVGLy5X1mE6rq1HbkBdkF/yj8zkcg=
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
+github.com/Azure/azure-amqp-common-go/v2 v2.1.0 h1:+QbFgmWCnPzdaRMfsI0Yb6GrRdBj5jVL8N3EXuEUcBQ=
+github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v30.1.0+incompatible h1:HyYPft8wXpxMd0kfLtXo6etWcO+XuPbLkcgx9g2cqxU=
+github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-service-bus-go v0.9.1 h1:G1qBLQvHCFDv9pcpgwgFkspzvnGknJRR0PYJ9ytY/JA=
+github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
+github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0=
+github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.0.1 h1:iLrQrdwjDd52kHDA5op2UBJFjmOb9g+7scBan4RN8F0=
+github.com/NYTimes/gziphandler v1.0.1/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.0 h1:rmGxhojJlM0tuKtfdvliR84CFHljx9ag64t2xmVkjK4=
+github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
+github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
+github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0=
+github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
+github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
+github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/coreos/bbolt v1.3.1-coreos.6/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY=
+github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.8+incompatible h1:uDjs0KvLk1mjTf7Ykd42tRsm9EkjCQX37DAmNwb4Kxs=
+github.com/coreos/etcd v3.3.8+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-oidc v0.0.0-20180117170138-065b426bd416/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
+github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY=
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 h1:u9SHYsPQNyt5tgDm3YN7+9dYrpK96E5wFilTFWIDZOM=
+github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg=
+github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
+github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
+github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgrijalva/jwt-go v0.0.0-20160705203006-01aeca54ebda/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dimchansky/utfbom v1.0.0 h1:fGC2kkf4qOoKqZ4q7iIh+Vef4ubC1c38UDsEyZynZPc=
+github.com/dimchansky/utfbom v1.0.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4=
+github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
+github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
+github.com/docker/docker v1.13.1/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
+github.com/elazarl/go-bindata-assetfs v1.0.0 h1:G/bYguwHIzWq9ZoyUQqrjTmJbbYn3j3CKKpKinvZLFk=
+github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
+github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.2.1+incompatible h1:yreWt49MQDL5ac0Dau9EKE22or+LrHikXVhAqUAXnfk=
+github.com/emicklei/go-restful v2.2.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.8.0+incompatible h1:wN8GCRDPGHguIynsnBartv5GUgGUg1LAU7+xnSn1j7Q=
+github.com/emicklei/go-restful v2.8.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.11.1+incompatible h1:CjKsv3uWcCMvySPQYKxO8XX3f9zD4FeZRsW4G0B4ffE=
+github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305 h1:2vAWk0wMCWb/pYiyat2rRZp5I5ZM+efPlagySNZ3JeM=
+github.com/emicklei/go-restful-swagger12 v0.0.0-20170208215640-dcef7f557305/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ=
+github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v3.0.0+incompatible h1:l91aby7TzBXBdmF8heZqjskeH9f3g7ZOL8/sSe+vTlU=
+github.com/evanphx/json-patch v3.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
+github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v0.0.0-20180820084758-c7ce16629ff4/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.0.0-20180322222829-3a0015ad55fa h1:hr8WVDjg4JKtQptZpzyb196TmruCs7PIsdJz8KAOZp8=
+github.com/go-openapi/jsonpointer v0.0.0-20180322222829-3a0015ad55fa/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
+github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.0 h1:FTUMcX77w5rQkClIzDtTxvn6Bsa894CcrzNj2MMfeg8=
+github.com/go-openapi/jsonpointer v0.19.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
+github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0=
+github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
+github.com/go-openapi/jsonpointer v0.19.3 h1:gihV7YNZK1iK6Tgwwsxo2rJbD1GTbdm72325Bq8FI3w=
+github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.0.0-20180322222742-3fb327e6747d h1:k3UQ7Z8yFYq0BNkYykKIheY0HlZBl1Hku+pO9HE9FNU=
+github.com/go-openapi/jsonreference v0.0.0-20180322222742-3fb327e6747d/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
+github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.0 h1:BqWKpV1dFd+AuiKlgtddwVIFQsuMpxfBDBHGfM2yNpk=
+github.com/go-openapi/jsonreference v0.19.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
+github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w=
+github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.0.0-20180710175419-bce47c9386f9 h1:t9ogKTKyAuCXNgecaVQ1moKggJ80VexiBjTNyVNnAAU=
+github.com/go-openapi/spec v0.0.0-20180710175419-bce47c9386f9/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
+github.com/go-openapi/spec v0.17.2 h1:eb2NbuCnoe8cWAxhtK6CfMWUYmiFEZJ9Hx3Z2WRwJ5M=
+github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
+github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE=
+github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
+github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo=
+github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.0.0-20180703152219-2b0bd4f193d0 h1:KOHIkUyLtY/OapQTEisSFx2qbfP5mKIl9OYnBl4Uwd8=
+github.com/go-openapi/swag v0.0.0-20180703152219-2b0bd4f193d0/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
+github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.17.2 h1:K/ycE/XTUDFltNHSO32cGRUhrVGJD64o8WgAIZNyc3k=
+github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
+github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE=
+github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
+github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415 h1:WSBJMqJbLxsn+bTCPyPYZfqHdJmc8MK4wrBjMft6BAM=
+github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo=
+github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I=
+github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8=
+github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9 h1:uHTyIjqVhYRhLbJ8nIiOJHkEZZ+5YoOsAbD3sk82NiE=
+github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20160524151835-7d79101e329e/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck=
+github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
+github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
+github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
+github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
+github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g=
+github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
+github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4=
+github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM=
+github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gregjones/httpcache v0.0.0-20170728041850-787624de3eb7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v0.0.0-20190222133341-cfaf5686ec79/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611 h1:f5vL2EW5pL274ztMNnizZAEa457nKyKPEaN/sm/kdBk=
+github.com/grpc-ecosystem/go-grpc-prometheus v0.0.0-20170330212424-2500245aa611/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho=
+github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.3.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.2 h1:S+ef0492XaIknb8LMjcwgW2i3cNTzDYMmDrOThOJNWc=
+github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
+github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jonboulle/clockwork v0.0.0-20141017032234-72f9bd7c4e0c/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
+github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be h1:AHimNtVIpiBjPUhEF5KNCkrUyqTSA5zWUl8sQ2bfGBE=
+github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
+github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
+github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
+github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk=
+github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
+github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20181126213231-bb8bae16c555 h1:chhJD3Em4rN70IP6KmsXFDuowWO9yFlEXfH791L+EBo=
+github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20181126213231-bb8bae16c555/go.mod h1:KWRxWvzVCNvDtG9ejU5UdpgvxdCZFMUZu0xroKWG8Bo=
+github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190918110929-3d9be26a50eb h1:elCqO9RJIK1kVbs9qU0wLiksRrHKwt9P+KMCHA3iVDw=
+github.com/kubernetes-incubator/custom-metrics-apiserver v0.0.0-20190918110929-3d9be26a50eb/go.mod h1:KWRxWvzVCNvDtG9ejU5UdpgvxdCZFMUZu0xroKWG8Bo=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6 h1:8/+Y8SKf0xCZ8cCTfnrMdY7HNzlEjPAt3bPjalNb6CA=
+github.com/mailru/easyjson v0.0.0-20180717111219-efc7eb8984d6/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329 h1:2gxZ0XQIU/5z3Z3bUBu+FXuk2pFbkN6tcwi/pjyaDic=
+github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4=
+github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8=
+github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
+github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
+github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
+github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
+github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
+github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
+github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs=
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
+github.com/natefinch/lumberjack v2.0.0+incompatible/go.mod h1:Wi9p2TTF5DG5oU+6YfsmYQpsTIOm0B1VNzQg9Mw6nPk=
+github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.10.2 h1:uqH7bpe+ERSiDa34FDOF7RikN6RzXgduUF8yarlZp94=
+github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
+github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c h1:MUyE44mTvnI5A0xrxIxaMqoWFzPfQvtE2IWUollMDMs=
+github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34=
+github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
+github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
+github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
+github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
+github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8=
+github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
+github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
+github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8=
+github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
+github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1 h1:osmNoEW2SCW3L7EX0km2LYM8HKpNWRiouxjE3XHkyGc=
+github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275 h1:PnBWHBf+6L0jOqq0gIVUe6Yk0/QMZ640k6NvkxcBf+8=
+github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
+github.com/prometheus/procfs v0.0.0-20180705121852-ae68e2d4c00f h1:c9M4CCa6g8WURSsbrl3lb/w/G1Z5xZpYvhhjdcVDOkE=
+github.com/prometheus/procfs v0.0.0-20180705121852-ae68e2d4c00f/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE=
+github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
+github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo=
+github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
+github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
+github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
+github.com/soheilhy/cmux v0.1.3/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.4 h1:0HKaf1o97UwFjHH9o5XsHUOF+tqmdA7KEzXLpiyaw0E=
+github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.1 h1:aCvUg6QPl3ibpQUxyLkrEkCHtPqYJL4x9AuhqVqFis4=
+github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
+github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/ugorji/go v1.1.1 h1:gmervu+jDMvXTbcHQ0pd2wee85nEoE0BsVyEuzkfK8w=
+github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
+github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8=
+github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
+go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk=
+go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
+go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
+go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
+golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20181025213731-e84da0312774/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8 h1:1wopBVtVdWnn03fZelqdXTqk7U7zPQCb+T4rbU9ZEoU=
+golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
+golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
+golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225 h1:kNX+jCowfMYzvlSvJu5pQWEmyWFrBXJ3PBy10xKMXK8=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b h1:lkjdUzSyJ5P1+eal9fxXX9Xg2BTfswsonKUse48C0uE=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68=
+golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7 h1:fHDIZ2oxGnUZRN6WgWFCbYBjH9uqVPRCUVUDhs0wnbA=
+golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM=
+golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
+golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190620070143-6f217b454f45 h1:Dl2hc890lrizvUppGbRWhnIh2f8jOTCQpY5IKWRS0oM=
+golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20161028155119-f51c12702a4d/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c h1:97SnQk1GYRXJgvwZ8fadnxDOWfKvkNQHH3CtZntPSrM=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 h1:QjA/9ArTfVTLfEhClDCG7SGrZkZixxWpwNCDiwJfh88=
+golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac h1:MQEvx39qSf8vyrx3XRaOe+j1UDIzKwkYOVObRgGPVqI=
+golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 h1:OB/uP/Puiu5vS5QMRPrXCDWUPb+kt8f1KW8oQzFejQw=
+gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
+gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g=
+google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20170731182057-09f6ed296fc6/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 h1:9VBRTdmgQxbs6HE0sUnMrSWNePppAJU07NYvX5dIB04=
+google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/inf.v0 v0.9.0 h1:3zYtXIO92bvsdS3ggAdA8Gb4Azj0YU+TVY1uGYNFA8o=
+gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7 h1:986b60BAz5vO2Vaf48yQaq+wb2bU4JsXxKu1+itW6x8=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0-20150622162204-20b71e5b60d7/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3 h1:AFxeG48hTWHhDTQDk/m2gorfVHUEa9vo3tp3D7TzwjI=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0-20170531160350-a96e63847dc3/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.0.0-20180411045311-89060dee6a84/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE=
+gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+k8s.io/api v0.0.0-20181221193117-173ce66c1e39 h1:iGq7zEPXFb0IeXAQK5RiYT1SVKX/af9F9Wv0M+yudPY=
+k8s.io/api v0.0.0-20181221193117-173ce66c1e39/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
+k8s.io/api v0.0.0-20190817021128-e14a4b1f5f84 h1:iD/UVfEW1pOJpFWFgo6o6V2ZQczJz1XtNBKUFA1386k=
+k8s.io/api v0.0.0-20190817021128-e14a4b1f5f84/go.mod h1:AOxZTnaXR/xiarlQL0JUfwQPxjmKDvVYoRp58cA7lUo=
+k8s.io/apimachinery v0.0.0-20181128191346-49ce2735e507 h1:DRtb2PO1ps3SPBkAUYz9J+rIPvYDZALFAYhWBeyvmcU=
+k8s.io/apimachinery v0.0.0-20181128191346-49ce2735e507/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0=
+k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d h1:7Kns6qqhMAQWvGkxYOLSLRZ5hJO0/5pcE5lPGP2fxUw=
+k8s.io/apimachinery v0.0.0-20190817020851-f2f3a405f61d/go.mod h1:3jediapYqJ2w1BFw7lAZPCx7scubsTfosqHkhXCWJKw=
+k8s.io/apiserver v0.0.0-20181207191401-9601a7bf41ef h1:MREYcVS7G5mUkXTCeqMuE7xEnCaoQFyV89eLqaJsHVo=
+k8s.io/apiserver v0.0.0-20181207191401-9601a7bf41ef/go.mod h1:6bqaTSOSJavUIXUtfaR9Os9JtTCm8ZqH2SUl2S60C4w=
+k8s.io/apiserver v0.0.0-20190817022445-fd6150da8f40 h1:tZJhS/LN/rTBOkFwPah1UDP3KkeAb1rZ8bHxzM8VKmY=
+k8s.io/apiserver v0.0.0-20190817022445-fd6150da8f40/go.mod h1:y8ZdhfMfrk2yoL/NSdeWl5dAfrXIbRQy8jqP/R7s41k=
+k8s.io/apiserver v0.0.0-20191026070530-d1b1b64dd924 h1:JHCK9+vhv4tGEJLraxgG7DrDeAjt5V0S9wsOfsdWtwU=
+k8s.io/client-go v0.0.0-20181205110444-5e6a3d4e34f6 h1:ECkNH3KZv9LSrnadcdkZCwcUYihlwI/jm6thHYyG8Bs=
+k8s.io/client-go v0.0.0-20181205110444-5e6a3d4e34f6/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
+k8s.io/client-go v0.0.0-20190817021527-637fc595d17a h1:+tJr7bBj6UMXCGnVPVlLrXKW7EKmRuqJGwx6R2vhgXw=
+k8s.io/client-go v0.0.0-20190817021527-637fc595d17a/go.mod h1:+Ns9AwGRd5TqhqhXZkLSCzO/bpUSWwP93/TE0q2OsLQ=
+k8s.io/code-generator v0.0.0-20190311093542-50b561225d70 h1:lgPp615xLHxN84RBd+viA/oHzJfI0miFYFH4T9wpPQ4=
+k8s.io/code-generator v0.0.0-20190311093542-50b561225d70/go.mod h1:MYiN+ZJZ9HkETbgVZdWw2AsuAi9PZ4V80cwfuf2axe8=
+k8s.io/code-generator v0.0.0-20190612205613-18da4a14b22b h1:p+PRuwXWwk5e+UYvicGiavEupapqM5NOxUl3y1GkD6c=
+k8s.io/code-generator v0.0.0-20190612205613-18da4a14b22b/go.mod h1:G8bQwmHm2eafm5bgtX67XDZQ8CWKSGu9DekI+yN4Y5I=
+k8s.io/code-generator v0.0.0-20191029223907-9f431a56fdbc h1:klQ4aWfZ3uk4UiSLkZZt5qQDI+7DwSdvbvyL5QUBHsQ=
+k8s.io/code-generator v0.0.0-20191029223907-9f431a56fdbc/go.mod h1:HtDEU3n5Xo1vbwjXWiJ/lFNb5r6BWBz6aZU1IZTr4eA=
+k8s.io/component-base v0.0.0-20190817022002-dd0e01d5790f h1:h2KEf+5lss2dWYiG6xc6bsJDULTvJHRarS7eJ7MVcxk=
+k8s.io/component-base v0.0.0-20190817022002-dd0e01d5790f/go.mod h1:DFWQCXgXVLiWtzFaS17KxHdlUeUymP7FLxZSkmL9/jU=
+k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af h1:SwjZbO0u5ZuaV6TRMWOGB40iaycX8sbdMQHtjNZ19dk=
+k8s.io/gengo v0.0.0-20190116091435-f8a0810f38af/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk=
+k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM=
+k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68=
+k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
+k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
+k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
+k8s.io/kube-openapi v0.0.0-20180719232738-d8ea2fe547a4 h1:C8xi0mJeE8wOFsLofmG7JVxRV2ZAgjYftRc9m2ypdmo=
+k8s.io/kube-openapi v0.0.0-20180719232738-d8ea2fe547a4/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
+k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30 h1:TRb4wNWoBVrH9plmkp2q86FIDppkbrEXdXlxU3a3BMI=
+k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
+k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf h1:EYm5AW/UUDbnmnI+gK0TJDVK9qPLhM+sRHYanNKw0EQ=
+k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d h1:Xpe6sK+RY4ZgCTyZ3y273UmFmURhjtoJiwOMbQsXitY=
+k8s.io/kube-openapi v0.0.0-20190918143330-0270cf2f1c1d/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
+k8s.io/metrics v0.0.0-20181128195641-3954d62a524d h1:Z8DFFPVMcsLHVRnOoAWw9pZjWTpOuuBKeXUCKuaiJyY=
+k8s.io/metrics v0.0.0-20181128195641-3954d62a524d/go.mod h1:a25VAbm3QT3xiVl1jtoF1ueAKQM149UdZ+L93ePfV3M=
+k8s.io/metrics v0.0.0-20190817023635-63ee757b2e8b h1:bAdfWdzBgUlBSwJmuAc/6bKw8CE9C8qJgQ9sqqNgcfc=
+k8s.io/metrics v0.0.0-20190817023635-63ee757b2e8b/go.mod h1:Bq04mDjH+zC+wFQK5nkGA/JRJvGmIGXG2svEbV3FNi4=
+k8s.io/utils v0.0.0-20190221042446-c2654d5206da h1:ElyM7RPonbKnQqOcw7dG2IK5uvQQn3b/WPHqD5mBvP4=
+k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
+modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
+modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk=
+modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k=
+modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs=
+modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I=
+pack.ag/amqp v0.11.2 h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg=
+pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2 h1:9r5DY45ef9LtcA6BnkhW8MPV7OKAfbf2AUwUhq3LeRk=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
+sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
+sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt
new file mode 100644
index 00000000..e69de29b
diff --git a/hack/run-e2e.sh b/hack/run-e2e.sh
index 62718c4c..0b201620 100755
--- a/hack/run-e2e.sh
+++ b/hack/run-e2e.sh
@@ -55,6 +55,8 @@ if [[ $? = 0 ]]; then
fi
./run-topic-consumer.sh
+else
+ echo $DIVIDER; echo "FAIL"; echo $DIVIDER; TEST_FAILED=1;
fi
echo "Removing adapter deployment"
diff --git a/hack/tools.go b/hack/tools.go
new file mode 100644
index 00000000..af1f099d
--- /dev/null
+++ b/hack/tools.go
@@ -0,0 +1,22 @@
+// +build tools
+
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This package imports things required by build scripts, to force `go mod` to see them as dependencies
+package tools
+
+import _ "k8s.io/code-generator"
diff --git a/hack/update-codegen.sh b/hack/update-codegen.sh
index 5a096453..8c89ed46 100755
--- a/hack/update-codegen.sh
+++ b/hack/update-codegen.sh
@@ -4,7 +4,12 @@ set -o errexit
set -o nounset
set -o pipefail
-$GOPATH/src/k8s.io/code-generator/generate-groups.sh all \
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
+chmod +x ${CODEGEN_PKG}/generate-groups.sh
+
+"${CODEGEN_PKG}"/generate-groups.sh all \
github.com/Azure/azure-k8s-metrics-adapter/pkg/client \
github.com/Azure/azure-k8s-metrics-adapter/pkg/apis \
- metrics:v1alpha2
+ metrics:v1alpha2 \
+ --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt
diff --git a/main.go b/main.go
index 1eaed3f7..6db9f8d9 100755
--- a/main.go
+++ b/main.go
@@ -19,9 +19,9 @@ import (
"github.com/Azure/azure-k8s-metrics-adapter/pkg/controller"
"github.com/Azure/azure-k8s-metrics-adapter/pkg/metriccache"
azureprovider "github.com/Azure/azure-k8s-metrics-adapter/pkg/provider"
- "github.com/golang/glog"
+ "k8s.io/klog"
basecmd "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/cmd"
- "k8s.io/apiserver/pkg/util/logs"
+ "k8s.io/component-base/logs"
)
func main() {
@@ -49,19 +49,19 @@ func main() {
//setup and run metric server
setupAzureProvider(cmd, metriccache)
if err := cmd.Run(stopCh); err != nil {
- glog.Fatalf("Unable to run Azure metrics adapter: %v", err)
+ klog.Fatalf("Unable to run Azure metrics adapter: %v", err)
}
}
func setupAzureProvider(cmd *basecmd.AdapterBase, metricsCache *metriccache.MetricCache) {
mapper, err := cmd.RESTMapper()
if err != nil {
- glog.Fatalf("unable to construct discovery REST mapper: %v", err)
+ klog.Fatalf("unable to construct discovery REST mapper: %v", err)
}
dynamicClient, err := cmd.DynamicClient()
if err != nil {
- glog.Fatalf("unable to construct dynamic k8s client: %v", err)
+ klog.Fatalf("unable to construct dynamic k8s client: %v", err)
}
defaultSubscriptionID := getDefaultSubscriptionID()
@@ -79,11 +79,11 @@ func setupAzureProvider(cmd *basecmd.AdapterBase, metricsCache *metriccache.Metr
func newController(cmd *basecmd.AdapterBase, metricsCache *metriccache.MetricCache) (*controller.Controller, informers.SharedInformerFactory) {
clientConfig, err := cmd.ClientConfig()
if err != nil {
- glog.Fatalf("unable to construct client config: %s", err)
+ klog.Fatalf("unable to construct client config: %s", err)
}
adapterClientSet, err := clientset.NewForConfig(clientConfig)
if err != nil {
- glog.Fatalf("unable to construct lister client to initialize provider: %v", err)
+ klog.Fatalf("unable to construct lister client to initialize provider: %v", err)
}
adapterInformerFactory := informers.NewSharedInformerFactory(adapterClientSet, time.Second*30)
@@ -101,18 +101,18 @@ func getDefaultSubscriptionID() string {
// if the user explicitly sets we should use that
subscriptionID := os.Getenv("SUBSCRIPTION_ID")
if subscriptionID == "" {
- glog.V(2).Info("Looking up subscription ID via instance metadata")
+ klog.V(2).Info("Looking up subscription ID via instance metadata")
//fallback to trying azure instance meta data
azureConfig, err := instancemetadata.GetAzureConfig()
if err != nil {
- glog.Errorf("Unable to get azure config from MSI: %v", err)
+ klog.Errorf("Unable to get azure config from MSI: %v", err)
}
subscriptionID = azureConfig.SubscriptionID
}
if subscriptionID == "" {
- glog.V(0).Info("Default Azure Subscription is not set. You must provide subscription id via HPA lables, set an environment variable, or enable MSI. See docs for more details")
+ klog.V(0).Info("Default Azure Subscription is not set. You must provide subscription id via HPA lables, set an environment variable, or enable MSI. See docs for more details")
}
return subscriptionID
diff --git a/pkg/apis/metrics/v1alpha2/zz_generated.deepcopy.go b/pkg/apis/metrics/v1alpha2/zz_generated.deepcopy.go
index 5e2a862a..0e29b1a1 100644
--- a/pkg/apis/metrics/v1alpha2/zz_generated.deepcopy.go
+++ b/pkg/apis/metrics/v1alpha2/zz_generated.deepcopy.go
@@ -1,21 +1,5 @@
// +build !ignore_autogenerated
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha2
@@ -87,7 +71,7 @@ func (in *CustomMetricConfig) DeepCopy() *CustomMetricConfig {
func (in *CustomMetricList) DeepCopyInto(out *CustomMetricList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]CustomMetric, len(*in))
@@ -180,7 +164,7 @@ func (in *ExternalMetricConfig) DeepCopy() *ExternalMetricConfig {
func (in *ExternalMetricList) DeepCopyInto(out *ExternalMetricList) {
*out = *in
out.TypeMeta = in.TypeMeta
- out.ListMeta = in.ListMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]ExternalMetric, len(*in))
diff --git a/pkg/azure/custommetrics/appinsights.go b/pkg/azure/custommetrics/appinsights.go
index 665e70db..d3d6d614 100755
--- a/pkg/azure/custommetrics/appinsights.go
+++ b/pkg/azure/custommetrics/appinsights.go
@@ -14,7 +14,7 @@ import (
"github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights"
"github.com/Azure/go-autorest/autorest/azure/auth"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
const (
@@ -70,7 +70,7 @@ func (c appinsightsClient) GetCustomMetric(request MetricRequest) (float64, erro
segments := *metricsResult.Value.Segments
if len(segments) <= 0 {
- glog.V(2).Info("segments length = 0")
+ klog.V(2).Info("segments length = 0")
return 0, nil
}
@@ -80,7 +80,7 @@ func (c appinsightsClient) GetCustomMetric(request MetricRequest) (float64, erro
value := metricMap["avg"]
normalizedValue := normalizeValue(value)
- glog.V(2).Infof("found metric value: %f", normalizedValue)
+ klog.V(2).Infof("found metric value: %f", normalizedValue)
return normalizedValue, nil
}
@@ -95,7 +95,7 @@ func normalizeValue(value interface{}) float64 {
case int64:
return float64(value.(int64))
default:
- glog.V(0).Infof("unexpected type: %T", t)
+ klog.V(0).Infof("unexpected type: %T", t)
return 0
}
}
@@ -103,11 +103,11 @@ func normalizeValue(value interface{}) float64 {
// GetMetric calls to API to retrieve a specific metric
func (ai appinsightsClient) getMetric(metricInfo MetricRequest) (*insights.MetricsResult, error) {
if ai.useADAuthorizer {
- glog.V(2).Infoln("No application insights key provided - using Azure GO SDK auth.")
+ klog.V(2).Infoln("No application insights key provided - using Azure GO SDK auth.")
return getMetricUsingADAuthorizer(ai, metricInfo)
}
- glog.V(2).Infoln("Application insights key has been provided - using Application Insights REST API.")
+ klog.V(2).Infoln("Application insights key has been provided - using Application Insights REST API.")
return getMetricUsingAPIKey(ai, metricInfo)
}
@@ -115,7 +115,7 @@ func getMetricUsingADAuthorizer(ai appinsightsClient, metricInfo MetricRequest)
authorizer, err := auth.NewAuthorizerFromEnvironmentWithResource(azureAdResource)
if err != nil {
- glog.Errorf("unable to retrieve an authorizer from environment: %v", err)
+ klog.Errorf("unable to retrieve an authorizer from environment: %v", err)
return nil, err
}
@@ -138,7 +138,7 @@ func getMetricUsingADAuthorizer(ai appinsightsClient, metricInfo MetricRequest)
metricsResultsItem, err := metricsClient.GetMultiple(context.Background(), ai.appID, metricsBody)
if err != nil {
- glog.Errorf("unable to get retrive metric: %v", err)
+ klog.Errorf("unable to get retrive metric: %v", err)
return nil, err
}
@@ -183,10 +183,10 @@ func getMetricUsingAPIKey(ai appinsightsClient, metricInfo MetricRequest) (*insi
q.Add("interval", metricInfo.Interval)
req.URL.RawQuery = q.Encode()
- glog.V(2).Infoln("request to: ", req.URL)
+ klog.V(2).Infoln("request to: ", req.URL)
resp, err := client.Do(req)
if err != nil {
- glog.Errorf("unable to retrive metric: %v", err)
+ klog.Errorf("unable to retrive metric: %v", err)
return nil, err
}
@@ -195,7 +195,7 @@ func getMetricUsingAPIKey(ai appinsightsClient, metricInfo MetricRequest) (*insi
defer resp.Body.Close()
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
- glog.Errorf("unable to retrieve metric: %s", err)
+ klog.Errorf("unable to retrieve metric: %s", err)
return nil, err
}
@@ -213,7 +213,7 @@ func unmarshalResponse(body io.ReadCloser, metricsResult *insights.MetricsResult
respBody, err := ioutil.ReadAll(body)
if err != nil {
- glog.Errorf("unable to get read metric response body: %v", err)
+ klog.Errorf("unable to get read metric response body: %v", err)
return nil, err
}
diff --git a/pkg/azure/externalmetrics/metricrequest.go b/pkg/azure/externalmetrics/metricrequest.go
index 702b3fca..5eae3fb2 100644
--- a/pkg/azure/externalmetrics/metricrequest.go
+++ b/pkg/azure/externalmetrics/metricrequest.go
@@ -6,7 +6,7 @@ import (
"strings"
"time"
- "github.com/golang/glog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
)
@@ -28,8 +28,8 @@ type AzureExternalMetricRequest struct {
}
func ParseAzureMetric(metricSelector labels.Selector, defaultSubscriptionID string) (AzureExternalMetricRequest, error) {
- glog.V(4).Infof("Parsing a received AzureMetric")
- glog.V(6).Infof("%v", metricSelector)
+ klog.V(4).Infof("Parsing a received AzureMetric")
+ klog.V(6).Infof("%v", metricSelector)
if metricSelector == nil {
return AzureExternalMetricRequest{}, fmt.Errorf("metricSelector cannot be nil")
@@ -57,51 +57,51 @@ func ParseAzureMetric(metricSelector labels.Selector, defaultSubscriptionID stri
switch request.Key() {
// Shared
case "metricName":
- glog.V(4).Infof("AzureMetric metricName: %s", value)
+ klog.V(4).Infof("AzureMetric metricName: %s", value)
merticReq.MetricName = value
case "resourceGroup":
- glog.V(4).Infof("AzureMetric resourceGroup: %s", value)
+ klog.V(4).Infof("AzureMetric resourceGroup: %s", value)
merticReq.ResourceGroup = value
case "subscriptionID":
// if sub id is passed via label selectors then it takes precedence
- glog.V(4).Infof("AzureMetric override azure subscription id with : %s", value)
+ klog.V(4).Infof("AzureMetric override azure subscription id with : %s", value)
merticReq.SubscriptionID = value
// Monitor
case "resourceName":
- glog.V(2).Infof("resourceName: %s", value)
+ klog.V(2).Infof("resourceName: %s", value)
merticReq.ResourceName = value
case "resourceProviderNamespace":
- glog.V(2).Infof("resourceProviderNamespace: %s", value)
+ klog.V(2).Infof("resourceProviderNamespace: %s", value)
merticReq.ResourceProviderNamespace = value
case "resourceType":
- glog.V(2).Infof("resourceType: %s", value)
+ klog.V(2).Infof("resourceType: %s", value)
merticReq.ResourceType = value
case "aggregation":
- glog.V(2).Infof("aggregation: %s", value)
+ klog.V(2).Infof("aggregation: %s", value)
merticReq.Aggregation = value
case "filter":
// TODO: Should handle filters by converting equality and setbased label selectors
// to oData syntax: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
- glog.V(2).Infof("filter: %s", value)
+ klog.V(2).Infof("filter: %s", value)
filterStrings := strings.Split(value, "_")
merticReq.Filter = fmt.Sprintf("%s %s '%s'", filterStrings[0], filterStrings[1], filterStrings[2])
- glog.V(2).Infof("filter formatted: %s", merticReq.Filter)
+ klog.V(2).Infof("filter formatted: %s", merticReq.Filter)
// Service Bus
case "namespace":
- glog.V(4).Infof("AzureMetric namespace: %s", value)
+ klog.V(4).Infof("AzureMetric namespace: %s", value)
merticReq.Namespace = value
case "topic":
- glog.V(4).Infof("AzureMetric topic: %s", value)
+ klog.V(4).Infof("AzureMetric topic: %s", value)
merticReq.Topic = value
case "subscription":
- glog.V(4).Infof("AzureMetric subscription: %s", value)
+ klog.V(4).Infof("AzureMetric subscription: %s", value)
merticReq.Subscription = value
default:
return AzureExternalMetricRequest{}, fmt.Errorf("selector label '%s' not supported", request.Key())
}
}
- glog.V(2).Infof("Successfully parsed AzureMetric %s", merticReq.MetricName)
+ klog.V(2).Infof("Successfully parsed AzureMetric %s", merticReq.MetricName)
return merticReq, nil
}
diff --git a/pkg/azure/externalmetrics/monitor_client.go b/pkg/azure/externalmetrics/monitor_client.go
index aade0d40..481a6ec4 100644
--- a/pkg/azure/externalmetrics/monitor_client.go
+++ b/pkg/azure/externalmetrics/monitor_client.go
@@ -5,7 +5,7 @@ import (
"github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
"github.com/Azure/go-autorest/autorest/azure/auth"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
type insightsmonitorClient interface {
@@ -45,7 +45,7 @@ func (c *monitorClient) GetAzureMetric(azMetricRequest AzureExternalMetricReques
}
metricResourceURI := azMetricRequest.MetricResourceURI()
- glog.V(2).Infof("resource uri: %s", metricResourceURI)
+ klog.V(2).Infof("resource uri: %s", metricResourceURI)
metricResult, err := c.client.List(context.Background(), metricResourceURI,
azMetricRequest.Timespan, nil,
@@ -57,7 +57,7 @@ func (c *monitorClient) GetAzureMetric(azMetricRequest AzureExternalMetricReques
total := extractValue(metricResult)
- glog.V(2).Infof("found metric value: %f", total)
+ klog.V(2).Infof("found metric value: %f", total)
// TODO set Value based on aggregations type
return AzureExternalMetricResponse{
diff --git a/pkg/azure/externalmetrics/service_bus_subscription_client.go b/pkg/azure/externalmetrics/service_bus_subscription_client.go
index 8edd54fc..d93b68f7 100644
--- a/pkg/azure/externalmetrics/service_bus_subscription_client.go
+++ b/pkg/azure/externalmetrics/service_bus_subscription_client.go
@@ -5,7 +5,7 @@ import (
"github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus"
"github.com/Azure/go-autorest/autorest/azure/auth"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
type servicebusSubscriptionsClient interface {
@@ -18,7 +18,7 @@ type servicebusClient struct {
}
func NewServiceBusSubscriptionClient(defaultSubscriptionID string) AzureExternalMetricClient {
- glog.V(2).Info("Creating a new Azure Service Bus Subscriptions client")
+ klog.V(2).Info("Creating a new Azure Service Bus Subscriptions client")
client := servicebus.NewSubscriptionsClient(defaultSubscriptionID)
authorizer, err := auth.NewAuthorizerFromEnvironment()
if err == nil {
@@ -39,13 +39,13 @@ func newServiceBusSubscriptionClient(defaultsubscriptionID string, client servic
}
func (c *servicebusClient) GetAzureMetric(azMetricRequest AzureExternalMetricRequest) (AzureExternalMetricResponse, error) {
- glog.V(6).Infof("Received metric request:\n%v", azMetricRequest)
+ klog.V(6).Infof("Received metric request:\n%v", azMetricRequest)
err := azMetricRequest.Validate()
if err != nil {
return AzureExternalMetricResponse{}, err
}
- glog.V(2).Infof("Requesting Service Bus Subscription %s to topic %s in namespace %s from resource group %s", azMetricRequest.Subscription, azMetricRequest.Topic, azMetricRequest.Namespace, azMetricRequest.ResourceGroup)
+ klog.V(2).Infof("Requesting Service Bus Subscription %s to topic %s in namespace %s from resource group %s", azMetricRequest.Subscription, azMetricRequest.Topic, azMetricRequest.Namespace, azMetricRequest.ResourceGroup)
subscriptionResult, err := c.client.Get(
context.Background(),
azMetricRequest.ResourceGroup,
@@ -57,12 +57,12 @@ func (c *servicebusClient) GetAzureMetric(azMetricRequest AzureExternalMetricReq
return AzureExternalMetricResponse{}, err
}
- glog.V(2).Infof("Successfully retrieved Service Bus Subscription %s to topic %s in namespace %s from resource group %s", azMetricRequest.Subscription, azMetricRequest.Topic, azMetricRequest.Namespace, azMetricRequest.ResourceGroup)
- glog.V(6).Infof("%v", subscriptionResult.Response)
+ klog.V(2).Infof("Successfully retrieved Service Bus Subscription %s to topic %s in namespace %s from resource group %s", azMetricRequest.Subscription, azMetricRequest.Topic, azMetricRequest.Namespace, azMetricRequest.ResourceGroup)
+ klog.V(6).Infof("%v", subscriptionResult.Response)
activeMessageCount := float64(*subscriptionResult.SBSubscriptionProperties.CountDetails.ActiveMessageCount)
- glog.V(4).Infof("Service Bus Subscription active message count: %f", activeMessageCount)
+ klog.V(4).Infof("Service Bus Subscription active message count: %f", activeMessageCount)
// TODO set Value based on aggregations type
return AzureExternalMetricResponse{
diff --git a/pkg/azure/instancemetadata/instancemetadata.go b/pkg/azure/instancemetadata/instancemetadata.go
index e9b8f45e..a5f7d59b 100644
--- a/pkg/azure/instancemetadata/instancemetadata.go
+++ b/pkg/azure/instancemetadata/instancemetadata.go
@@ -9,7 +9,7 @@ import (
"io/ioutil"
"net/http"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
type AzureConfig struct {
@@ -29,7 +29,7 @@ func GetAzureConfig() (AzureConfig, error) {
resp, err := client.Do(req)
if err != nil {
- glog.Errorf("unable to get metadata for azure vm: %v", err)
+ klog.Errorf("unable to get metadata for azure vm: %v", err)
return AzureConfig{}, err
}
@@ -37,7 +37,7 @@ func GetAzureConfig() (AzureConfig, error) {
respBody, _ := ioutil.ReadAll(resp.Body)
subID := string(respBody[:])
- glog.V(2).Infoln("connected to sub:", subID)
+ klog.V(2).Infoln("connected to sub:", subID)
config := AzureConfig{
SubscriptionID: subID,
diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go
index ad435db2..9871d7e0 100644
--- a/pkg/client/clientset/versioned/clientset.go
+++ b/pkg/client/clientset/versioned/clientset.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package versioned
diff --git a/pkg/client/clientset/versioned/doc.go b/pkg/client/clientset/versioned/doc.go
index 41721ca5..0e0c2a89 100644
--- a/pkg/client/clientset/versioned/doc.go
+++ b/pkg/client/clientset/versioned/doc.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go
index 01f35aa1..71c77b8c 100644
--- a/pkg/client/clientset/versioned/fake/clientset_generated.go
+++ b/pkg/client/clientset/versioned/fake/clientset_generated.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package fake
@@ -41,7 +25,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
}
}
- cs := &Clientset{}
+ cs := &Clientset{tracker: o}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
@@ -63,12 +47,17 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
+ tracker testing.ObjectTracker
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
+func (c *Clientset) Tracker() testing.ObjectTracker {
+ return c.tracker
+}
+
var _ clientset.Interface = &Clientset{}
// AzureV1alpha2 retrieves the AzureV1alpha2Client
diff --git a/pkg/client/clientset/versioned/fake/doc.go b/pkg/client/clientset/versioned/fake/doc.go
index 9b99e716..3630ed1c 100644
--- a/pkg/client/clientset/versioned/fake/doc.go
+++ b/pkg/client/clientset/versioned/fake/doc.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
diff --git a/pkg/client/clientset/versioned/fake/register.go b/pkg/client/clientset/versioned/fake/register.go
index cfbcc4fa..c2ad99b6 100644
--- a/pkg/client/clientset/versioned/fake/register.go
+++ b/pkg/client/clientset/versioned/fake/register.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package fake
diff --git a/pkg/client/clientset/versioned/scheme/doc.go b/pkg/client/clientset/versioned/scheme/doc.go
index 7dc37561..14db57a5 100644
--- a/pkg/client/clientset/versioned/scheme/doc.go
+++ b/pkg/client/clientset/versioned/scheme/doc.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
diff --git a/pkg/client/clientset/versioned/scheme/register.go b/pkg/client/clientset/versioned/scheme/register.go
index 7bc6f2c3..840a420b 100644
--- a/pkg/client/clientset/versioned/scheme/register.go
+++ b/pkg/client/clientset/versioned/scheme/register.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package scheme
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/custommetric.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/custommetric.go
index 24d2d089..b321a1c2 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/custommetric.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/custommetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/doc.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/doc.go
index baaf2d98..c11da268 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/doc.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/doc.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/externalmetric.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/externalmetric.go
index 2c42089a..379002b2 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/externalmetric.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/externalmetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/doc.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/doc.go
index 16f44399..2b5ba4c8 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/doc.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/doc.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_custommetric.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_custommetric.go
index 7adf7f71..21fdaadf 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_custommetric.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_custommetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package fake
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_externalmetric.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_externalmetric.go
index 4a938448..771a50ef 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_externalmetric.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_externalmetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package fake
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_metrics_client.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_metrics_client.go
index fb407821..f7144eef 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_metrics_client.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/fake/fake_metrics_client.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package fake
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/generated_expansion.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/generated_expansion.go
index 188bbd3d..8ad1eb27 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/generated_expansion.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/generated_expansion.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/metrics_client.go b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/metrics_client.go
index bc69c6c0..1c1c787c 100644
--- a/pkg/client/clientset/versioned/typed/metrics/v1alpha2/metrics_client.go
+++ b/pkg/client/clientset/versioned/typed/metrics/v1alpha2/metrics_client.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by client-gen. DO NOT EDIT.
package v1alpha2
@@ -21,7 +5,6 @@ package v1alpha2
import (
v1alpha2 "github.com/Azure/azure-k8s-metrics-adapter/pkg/apis/metrics/v1alpha2"
"github.com/Azure/azure-k8s-metrics-adapter/pkg/client/clientset/versioned/scheme"
- serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
@@ -76,7 +59,7 @@ func setConfigDefaults(config *rest.Config) error {
gv := v1alpha2.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
- config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
diff --git a/pkg/client/informers/externalversions/factory.go b/pkg/client/informers/externalversions/factory.go
index fa3c9dfa..40fcc2b5 100644
--- a/pkg/client/informers/externalversions/factory.go
+++ b/pkg/client/informers/externalversions/factory.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
diff --git a/pkg/client/informers/externalversions/generic.go b/pkg/client/informers/externalversions/generic.go
index da02727e..c6780814 100644
--- a/pkg/client/informers/externalversions/generic.go
+++ b/pkg/client/informers/externalversions/generic.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
diff --git a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
index 185b3f38..422ab45f 100644
--- a/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
+++ b/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces
diff --git a/pkg/client/informers/externalversions/metrics/interface.go b/pkg/client/informers/externalversions/metrics/interface.go
index ebd59872..2f1a3f3b 100644
--- a/pkg/client/informers/externalversions/metrics/interface.go
+++ b/pkg/client/informers/externalversions/metrics/interface.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package azure
diff --git a/pkg/client/informers/externalversions/metrics/v1alpha2/custommetric.go b/pkg/client/informers/externalversions/metrics/v1alpha2/custommetric.go
index 0f4bc8b6..147f71f4 100644
--- a/pkg/client/informers/externalversions/metrics/v1alpha2/custommetric.go
+++ b/pkg/client/informers/externalversions/metrics/v1alpha2/custommetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/informers/externalversions/metrics/v1alpha2/externalmetric.go b/pkg/client/informers/externalversions/metrics/v1alpha2/externalmetric.go
index ca4778ba..9925af5a 100644
--- a/pkg/client/informers/externalversions/metrics/v1alpha2/externalmetric.go
+++ b/pkg/client/informers/externalversions/metrics/v1alpha2/externalmetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/informers/externalversions/metrics/v1alpha2/interface.go b/pkg/client/informers/externalversions/metrics/v1alpha2/interface.go
index 8db02530..959e8def 100644
--- a/pkg/client/informers/externalversions/metrics/v1alpha2/interface.go
+++ b/pkg/client/informers/externalversions/metrics/v1alpha2/interface.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/listers/metrics/v1alpha2/custommetric.go b/pkg/client/listers/metrics/v1alpha2/custommetric.go
index 2644d61f..d9bbeb1f 100644
--- a/pkg/client/listers/metrics/v1alpha2/custommetric.go
+++ b/pkg/client/listers/metrics/v1alpha2/custommetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/listers/metrics/v1alpha2/expansion_generated.go b/pkg/client/listers/metrics/v1alpha2/expansion_generated.go
index cd321eb3..da580ed3 100644
--- a/pkg/client/listers/metrics/v1alpha2/expansion_generated.go
+++ b/pkg/client/listers/metrics/v1alpha2/expansion_generated.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/client/listers/metrics/v1alpha2/externalmetric.go b/pkg/client/listers/metrics/v1alpha2/externalmetric.go
index 48bb3217..139b9185 100644
--- a/pkg/client/listers/metrics/v1alpha2/externalmetric.go
+++ b/pkg/client/listers/metrics/v1alpha2/externalmetric.go
@@ -1,19 +1,3 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha2
diff --git a/pkg/controller/controller.go b/pkg/controller/controller.go
index 3ba59255..d957f8bd 100644
--- a/pkg/controller/controller.go
+++ b/pkg/controller/controller.go
@@ -6,7 +6,7 @@ import (
"github.com/Azure/azure-k8s-metrics-adapter/pkg/apis/metrics/v1alpha2"
- "github.com/golang/glog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -37,7 +37,7 @@ func NewController(externalMetricInformer informers.ExternalMetricInformer, cust
// wire up enque step. This provides a hook for testing enqueue step
controller.enqueuer = controller.enqueueExternalMetric
- glog.Info("Setting up external metric event handlers")
+ klog.Info("Setting up external metric event handlers")
externalMetricInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueuer,
UpdateFunc: func(old, new interface{}) {
@@ -49,7 +49,7 @@ func NewController(externalMetricInformer informers.ExternalMetricInformer, cust
DeleteFunc: controller.enqueuer,
})
- glog.Info("Setting up custom metric event handlers")
+ klog.Info("Setting up custom metric event handlers")
customMetricInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueuer,
UpdateFunc: func(old, new interface{}) {
@@ -66,7 +66,7 @@ func (c *Controller) Run(numberOfWorkers int, interval time.Duration, stopCh <-c
defer utilruntime.HandleCrash()
defer c.metricQueue.ShutDown()
- glog.V(2).Info("initializing controller")
+ klog.V(2).Info("initializing controller")
// do the initial synchronization (one time) to populate resources
if !cache.WaitForCacheSync(stopCh, c.externalMetricSynced, c.customMetricSynced) {
@@ -74,32 +74,32 @@ func (c *Controller) Run(numberOfWorkers int, interval time.Duration, stopCh <-c
return
}
- glog.V(2).Infof("starting %d workers with %d interval", numberOfWorkers, interval)
+ klog.V(2).Infof("starting %d workers with %d interval", numberOfWorkers, interval)
for i := 0; i < numberOfWorkers; i++ {
go wait.Until(c.runWorker, interval, stopCh)
}
<-stopCh
- glog.Info("Shutting down workers")
+ klog.Info("Shutting down workers")
return
}
func (c *Controller) runWorker() {
- glog.V(2).Info("Worker starting")
+ klog.V(2).Info("Worker starting")
for c.processNextItem() {
- glog.V(2).Info("processing next item")
+ klog.V(2).Info("processing next item")
}
- glog.V(2).Info("worker completed")
+ klog.V(2).Info("worker completed")
}
func (c *Controller) processNextItem() bool {
- glog.V(2).Info("processing item")
+ klog.V(2).Info("processing item")
rawItem, quit := c.metricQueue.Get()
if quit {
- glog.V(2).Info("recieved quit signal")
+ klog.V(2).Info("recieved quit signal")
return false
}
@@ -118,20 +118,20 @@ func (c *Controller) processNextItem() bool {
if err != nil {
retrys := c.metricQueue.NumRequeues(rawItem)
if retrys < 5 {
- glog.Errorf("Transient error with %d retrys for key %s: %s", retrys, rawItem, err)
+ klog.Errorf("Transient error with %d retrys for key %s: %s", retrys, rawItem, err)
c.metricQueue.AddRateLimited(rawItem)
return true
}
// something was wrong with the item on queue
- glog.Errorf("Max retries hit for key %s: %s", rawItem, err)
+ klog.Errorf("Max retries hit for key %s: %s", rawItem, err)
c.metricQueue.Forget(rawItem)
utilruntime.HandleError(err)
return true
}
//if here success for get item
- glog.V(2).Infof("succesfully proccessed item '%s'", queueItem)
+ klog.V(2).Infof("succesfully proccessed item '%s'", queueItem)
c.metricQueue.Forget(rawItem)
return true
}
@@ -146,7 +146,7 @@ func (c *Controller) enqueueExternalMetric(obj interface{}) {
kind := getKind(obj)
- glog.V(2).Infof("adding item to queue for '%s' with kind '%s'", key, kind)
+ klog.V(2).Infof("adding item to queue for '%s' with kind '%s'", key, kind)
c.metricQueue.AddRateLimited(namespacedQueueItem{
namespaceKey: key,
kind: kind,
@@ -182,7 +182,7 @@ func getKind(obj interface{}) string {
case *v1alpha2.CustomMetric:
return "CustomMetric"
default:
- glog.Error("No known type of object")
+ klog.Error("No known type of object")
return ""
}
}
diff --git a/pkg/controller/handler.go b/pkg/controller/handler.go
index 2e65214e..3a980dd7 100644
--- a/pkg/controller/handler.go
+++ b/pkg/controller/handler.go
@@ -7,7 +7,7 @@ import (
"github.com/Azure/azure-k8s-metrics-adapter/pkg/azure/externalmetrics"
listers "github.com/Azure/azure-k8s-metrics-adapter/pkg/client/listers/metrics/v1alpha2"
"github.com/Azure/azure-k8s-metrics-adapter/pkg/metriccache"
- "github.com/golang/glog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
@@ -54,12 +54,12 @@ func (h *Handler) Process(queueItem namespacedQueueItem) error {
func (h *Handler) handleCustomMetric(ns, name string, queueItem namespacedQueueItem) error {
// check if item exists
- glog.V(2).Infof("processing item '%s' in namespace '%s'", name, ns)
+ klog.V(2).Infof("processing item '%s' in namespace '%s'", name, ns)
customMetricInfo, err := h.customMetricLister.CustomMetrics(ns).Get(name)
if err != nil {
if errors.IsNotFound(err) {
// Then this we should remove
- glog.V(2).Infof("removing item from cache '%s' in namespace '%s'", name, ns)
+ klog.V(2).Infof("removing item from cache '%s' in namespace '%s'", name, ns)
h.metriccache.Remove(queueItem.Key())
return nil
}
@@ -71,7 +71,7 @@ func (h *Handler) handleCustomMetric(ns, name string, queueItem namespacedQueueI
MetricName: customMetricInfo.Spec.MetricConfig.MetricName,
}
- glog.V(2).Infof("adding to cache item '%s' in namespace '%s'", name, ns)
+ klog.V(2).Infof("adding to cache item '%s' in namespace '%s'", name, ns)
h.metriccache.Update(queueItem.Key(), metric)
return nil
@@ -79,12 +79,12 @@ func (h *Handler) handleCustomMetric(ns, name string, queueItem namespacedQueueI
func (h *Handler) handleExternalMetric(ns, name string, queueItem namespacedQueueItem) error {
// check if item exists
- glog.V(2).Infof("processing item '%s' in namespace '%s'", name, ns)
+ klog.V(2).Infof("processing item '%s' in namespace '%s'", name, ns)
externalMetricInfo, err := h.externalmetricLister.ExternalMetrics(ns).Get(name)
if err != nil {
if errors.IsNotFound(err) {
// Then this we should remove
- glog.V(2).Infof("removing item from cache '%s' in namespace '%s'", name, ns)
+ klog.V(2).Infof("removing item from cache '%s' in namespace '%s'", name, ns)
h.metriccache.Remove(queueItem.Key())
return nil
}
@@ -108,7 +108,7 @@ func (h *Handler) handleExternalMetric(ns, name string, queueItem namespacedQueu
Subscription: externalMetricInfo.Spec.AzureConfig.ServiceBusSubscription,
}
- glog.V(2).Infof("adding to cache item '%s' in namespace '%s'", name, ns)
+ klog.V(2).Infof("adding to cache item '%s' in namespace '%s'", name, ns)
h.metriccache.Update(queueItem.Key(), azureMetricRequest)
return nil
diff --git a/pkg/metriccache/metric_cache.go b/pkg/metriccache/metric_cache.go
index a91fec1b..5a075b7e 100644
--- a/pkg/metriccache/metric_cache.go
+++ b/pkg/metriccache/metric_cache.go
@@ -6,7 +6,7 @@ import (
"github.com/Azure/azure-k8s-metrics-adapter/pkg/azure/custommetrics"
"github.com/Azure/azure-k8s-metrics-adapter/pkg/azure/externalmetrics"
- "github.com/golang/glog"
+ "k8s.io/klog"
)
// MetricCache holds the loaded metric request info in the system
@@ -38,7 +38,7 @@ func (mc *MetricCache) GetAzureExternalMetricRequest(namepace, name string) (ext
key := externalMetricKey(namepace, name)
metricRequest, exists := mc.metricRequests[key]
if !exists {
- glog.V(2).Infof("metric not found %s", key)
+ klog.V(2).Infof("metric not found %s", key)
return externalmetrics.AzureExternalMetricRequest{}, false
}
@@ -53,7 +53,7 @@ func (mc *MetricCache) GetAppInsightsRequest(namespace, name string) (custommetr
key := customMetricKey(namespace, name)
metricRequest, exists := mc.metricRequests[key]
if !exists {
- glog.V(2).Infof("metric not found %s", key)
+ klog.V(2).Infof("metric not found %s", key)
return custommetrics.MetricRequest{}, false
}
diff --git a/pkg/provider/provider_custom.go b/pkg/provider/provider_custom.go
index 2b7a4acf..4645c60e 100644
--- a/pkg/provider/provider_custom.go
+++ b/pkg/provider/provider_custom.go
@@ -8,7 +8,7 @@ import (
"time"
"github.com/Azure/azure-k8s-metrics-adapter/pkg/azure/custommetrics"
- "github.com/golang/glog"
+ "k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -22,15 +22,15 @@ import (
// GetMetricByName fetches a particular metric for a particular object.
// The namespace will be empty if the metric is root-scoped.
-func (p *AzureProvider) GetMetricByName(name types.NamespacedName, info provider.CustomMetricInfo) (*custom_metrics.MetricValue, error) {
+func (p *AzureProvider) GetMetricByName(name types.NamespacedName, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValue, error) {
// not implemented yet
return nil, errors.NewServiceUnavailable("not implemented yet")
}
// GetMetricBySelector fetches a particular metric for a set of objects matching
// the given label selector. The namespace will be empty if the metric is root-scoped.
-func (p *AzureProvider) GetMetricBySelector(namespace string, selector labels.Selector, info provider.CustomMetricInfo) (*custom_metrics.MetricValueList, error) {
- glog.V(0).Infof("Received request for custom metric: groupresource: %s, namespace: %s, metric name: %s, selectors: %s", info.GroupResource.String(), namespace, info.Metric, selector.String())
+func (p *AzureProvider) GetMetricBySelector(namespace string, selector labels.Selector, info provider.CustomMetricInfo, metricSelector labels.Selector) (*custom_metrics.MetricValueList, error) {
+ klog.V(0).Infof("Received request for custom metric: groupresource: %s, namespace: %s, metric name: %s, selectors: %s", info.GroupResource.String(), namespace, info.Metric, selector.String())
_, selectable := selector.Requirements()
if !selectable {
@@ -42,13 +42,13 @@ func (p *AzureProvider) GetMetricBySelector(namespace string, selector labels.Se
// TODO use selector info to restrict metric query to specific app.
val, err := p.appinsightsClient.GetCustomMetric(metricRequestInfo)
if err != nil {
- glog.Errorf("bad request: %v", err)
+ klog.Errorf("bad request: %v", err)
return nil, errors.NewBadRequest(err.Error())
}
resourceNames, err := helpers.ListObjectNames(p.mapper, p.kubeClient, namespace, selector, info)
if err != nil {
- glog.Errorf("not able to list objects from api server: %v", err)
+ klog.Errorf("not able to list objects from api server: %v", err)
return nil, errors.NewInternalError(fmt.Errorf("not able to list objects from api server for this resource"))
}
@@ -109,7 +109,7 @@ func (p *AzureProvider) getCustomMetricRequest(namespace string, selector labels
// because metrics names are multipart in AI and we can not pass an extra /
// through k8s api we convert - to / to get around that
convertedMetricName := strings.Replace(info.Metric, "-", "/", -1)
- glog.V(2).Infof("New call to GetCustomMetric: %s", convertedMetricName)
+ klog.V(2).Infof("New call to GetCustomMetric: %s", convertedMetricName)
metricRequestInfo := custommetrics.NewMetricRequest(convertedMetricName)
return metricRequestInfo
diff --git a/pkg/provider/provider_custom_test.go b/pkg/provider/provider_custom_test.go
index 3b28ef55..3b56cc1f 100644
--- a/pkg/provider/provider_custom_test.go
+++ b/pkg/provider/provider_custom_test.go
@@ -45,7 +45,7 @@ func TestReturnsCustomMetricConverted(t *testing.T) {
storeObjects = append(storeObjects, pod)
provider, _ := newFakeCustomProvider(fakeClient, storeObjects)
- returnList, err := provider.GetMetricBySelector("default", selector, info)
+ returnList, err := provider.GetMetricBySelector("default", selector, info, selector)
if err != nil {
t.Errorf("error after processing got: %v, want nil", err)
@@ -87,7 +87,7 @@ func TestReturnsCustomMetricConvertedWithMultiplePods(t *testing.T) {
storeObjects = append(storeObjects, pod, pod2, pod3)
provider, _ := newFakeCustomProvider(fakeClient, storeObjects)
- returnList, err := provider.GetMetricBySelector("default", selector, info)
+ returnList, err := provider.GetMetricBySelector("default", selector, info, selector)
if err != nil {
t.Errorf("error after processing got: %v, want nil", err)
@@ -139,7 +139,7 @@ func TestReturnsCustomMetricWhenInCache(t *testing.T) {
cache.Update("CustomMetric/default/MetricName", request)
- returnList, err := provider.GetMetricBySelector("default", selector, info)
+ returnList, err := provider.GetMetricBySelector("default", selector, info, selector)
if err != nil {
t.Errorf("error after processing got: %v, want nil", err)
@@ -178,7 +178,7 @@ func TestReturnsErrorIfAppInsightsFails(t *testing.T) {
storeObjects = append(storeObjects, pod)
provider, _ := newFakeCustomProvider(fakeClient, storeObjects)
- _, err := provider.GetMetricBySelector("default", selector, info)
+ _, err := provider.GetMetricBySelector("default", selector, info, selector)
if !k8serrors.IsBadRequest(err) {
t.Errorf("error after processing got: %v, want an bad request error", err)
diff --git a/pkg/provider/provider_external.go b/pkg/provider/provider_external.go
index 4595bcc8..eb94c8ac 100644
--- a/pkg/provider/provider_external.go
+++ b/pkg/provider/provider_external.go
@@ -4,7 +4,7 @@ package provider
import (
"github.com/Azure/azure-k8s-metrics-adapter/pkg/azure/externalmetrics"
- "github.com/golang/glog"
+ "k8s.io/klog"
"github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/provider"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
@@ -21,7 +21,7 @@ func (p *AzureProvider) GetExternalMetric(namespace string, metricSelector label
// Note:
// metric name and namespace is used to lookup for the CRD which contains configuration to call azure
// if not found then ignored and label selector is parsed for all the metrics
- glog.V(0).Infof("Received request for namespace: %s, metric name: %s, metric selectors: %s", namespace, info.Metric, metricSelector.String())
+ klog.V(0).Infof("Received request for namespace: %s, metric name: %s, metric selectors: %s", namespace, info.Metric, metricSelector.String())
_, selectable := metricSelector.Requirements()
if !selectable {
@@ -40,7 +40,7 @@ func (p *AzureProvider) GetExternalMetric(namespace string, metricSelector label
metricValue, err := externalMetricClient.GetAzureMetric(azMetricRequest)
if err != nil {
- glog.Errorf("bad request: %v", err)
+ klog.Errorf("bad request: %v", err)
return nil, errors.NewBadRequest(err.Error())
}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
new file mode 100644
index 00000000..ee417bbe
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/.travis.yml
@@ -0,0 +1,18 @@
+language: go
+
+go:
+ - 1.11.x
+
+go_import_path: contrib.go.opencensus.io/exporter/ocagent
+
+before_script:
+ - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any
+ - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any
+
+script:
+ - go build ./... # Ensure dependency updates don't break build
+ - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi
+ - go vet ./...
+ - GO111MODULE=on go test -v -race $PKGS # Run all the tests with the race detector enabled
+ - GO111MODULE=off go test -v -race $PKGS # Make sure tests still pass when not using Go modules.
+ - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi'
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
new file mode 100644
index 00000000..0786fdf4
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/CONTRIBUTING.md
@@ -0,0 +1,24 @@
+# How to contribute
+
+We'd love to accept your patches and contributions to this project. There are
+just a few small guidelines you need to follow.
+
+## Contributor License Agreement
+
+Contributions to this project must be accompanied by a Contributor License
+Agreement. You (or your employer) retain the copyright to your contribution,
+this simply gives us permission to use and redistribute your contributions as
+part of the project. Head over to to see
+your current agreements on file or to sign a new one.
+
+You generally only need to submit a CLA once, so if you've already submitted one
+(even if it was for a different project), you probably don't need to do it
+again.
+
+## Code reviews
+
+All submissions, including submissions by project members, require review. We
+use GitHub pull requests for this purpose. Consult [GitHub Help] for more
+information on using pull requests.
+
+[GitHub Help]: https://help.github.com/articles/about-pull-requests/
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
new file mode 100644
index 00000000..3b9e908f
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/README.md
@@ -0,0 +1,61 @@
+# OpenCensus Agent Go Exporter
+
+[![Build Status][travis-image]][travis-url] [![GoDoc][godoc-image]][godoc-url]
+
+
+This repository contains the Go implementation of the OpenCensus Agent (OC-Agent) Exporter.
+OC-Agent is a daemon process running in a VM that can retrieve spans/stats/metrics from
+OpenCensus Library, export them to other backends and possibly push configurations back to
+Library. See more details on [OC-Agent Readme][OCAgentReadme].
+
+Note: This is an experimental repository and is likely to get backwards-incompatible changes.
+Ultimately we may want to move the OC-Agent Go Exporter to [OpenCensus Go core library][OpenCensusGo].
+
+## Installation
+
+```bash
+$ go get -u contrib.go.opencensus.io/exporter/ocagent
+```
+
+## Usage
+
+```go
+import (
+ "context"
+ "fmt"
+ "log"
+ "time"
+
+ "contrib.go.opencensus.io/exporter/ocagent"
+ "go.opencensus.io/trace"
+)
+
+func Example() {
+ exp, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithServiceName("your-service-name"))
+ if err != nil {
+ log.Fatalf("Failed to create the agent exporter: %v", err)
+ }
+ defer exp.Stop()
+
+ // Now register it as a trace exporter.
+ trace.RegisterExporter(exp)
+
+ // Then use the OpenCensus tracing library, like we normally would.
+ ctx, span := trace.StartSpan(context.Background(), "AgentExporter-Example")
+ defer span.End()
+
+ for i := 0; i < 10; i++ {
+ _, iSpan := trace.StartSpan(ctx, fmt.Sprintf("Sample-%d", i))
+ <-time.After(6 * time.Millisecond)
+ iSpan.End()
+ }
+}
+```
+
+[OCAgentReadme]: https://github.com/census-instrumentation/opencensus-proto/tree/master/opencensus/proto/agent#opencensus-agent-proto
+[OpenCensusGo]: https://github.com/census-instrumentation/opencensus-go
+[godoc-image]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent?status.svg
+[godoc-url]: https://godoc.org/contrib.go.opencensus.io/exporter/ocagent
+[travis-image]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent.svg?branch=master
+[travis-url]: https://travis-ci.org/census-ecosystem/opencensus-go-exporter-ocagent
+
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
new file mode 100644
index 00000000..297e44b6
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/common.go
@@ -0,0 +1,38 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "math/rand"
+ "time"
+)
+
+var randSrc = rand.New(rand.NewSource(time.Now().UnixNano()))
+
+// retries function fn upto n times, if fn returns an error lest it returns nil early.
+// It applies exponential backoff in units of (1<<n) + jitter microseconds.
+ if len(ae.headers) > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
+ }
+ traceExporter, err := traceSvcClient.Export(ctx)
+ if err != nil {
+ return fmt.Errorf("Exporter.Start:: TraceServiceClient: %v", err)
+ }
+
+ firstTraceMessage := &agenttracepb.ExportTraceServiceRequest{
+ Node: node,
+ Resource: ae.resource,
+ }
+ if err := traceExporter.Send(firstTraceMessage); err != nil {
+ return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
+ }
+
+ ae.mu.Lock()
+ ae.traceExporter = traceExporter
+ ae.mu.Unlock()
+
+ // Initiate the config service by sending over node identifier info.
+ configStream, err := traceSvcClient.Config(context.Background())
+ if err != nil {
+ return fmt.Errorf("Exporter.Start:: ConfigStream: %v", err)
+ }
+ firstCfgMessage := &agenttracepb.CurrentLibraryConfig{Node: node}
+ if err := configStream.Send(firstCfgMessage); err != nil {
+ return fmt.Errorf("Exporter.Start:: Failed to initiate the Config service: %v", err)
+ }
+
+ // In the background, handle trace configurations that are beamed down
+ // by the agent, but also reply to it with the applied configuration.
+ go ae.handleConfigStreaming(configStream)
+
+ return nil
+}
+
+func (ae *Exporter) createMetricsServiceConnection(cc *grpc.ClientConn, node *commonpb.Node) error {
+ metricsSvcClient := agentmetricspb.NewMetricsServiceClient(cc)
+ metricsExporter, err := metricsSvcClient.Export(context.Background())
+ if err != nil {
+ return fmt.Errorf("MetricsExporter: failed to start the service client: %v", err)
+ }
+ // Initiate the metrics service by sending over the first message just containing the Node and Resource.
+ firstMetricsMessage := &agentmetricspb.ExportMetricsServiceRequest{
+ Node: node,
+ Resource: ae.resource,
+ }
+ if err := metricsExporter.Send(firstMetricsMessage); err != nil {
+ return fmt.Errorf("MetricsExporter:: failed to send the first message: %v", err)
+ }
+
+ ae.mu.Lock()
+ ae.metricsExporter = metricsExporter
+ ae.mu.Unlock()
+
+ // With that we are good to go and can start sending metrics
+ return nil
+}
+
+func (ae *Exporter) dialToAgent() (*grpc.ClientConn, error) {
+ addr := ae.prepareAgentAddress()
+ var dialOpts []grpc.DialOption
+ if ae.clientTransportCredentials != nil {
+ dialOpts = append(dialOpts, grpc.WithTransportCredentials(ae.clientTransportCredentials))
+ } else if ae.canDialInsecure {
+ dialOpts = append(dialOpts, grpc.WithInsecure())
+ }
+ if ae.compressor != "" {
+ dialOpts = append(dialOpts, grpc.WithDefaultCallOptions(grpc.UseCompressor(ae.compressor)))
+ }
+ dialOpts = append(dialOpts, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}))
+ if len(ae.grpcDialOptions) != 0 {
+ dialOpts = append(dialOpts, ae.grpcDialOptions...)
+ }
+
+ ctx := context.Background()
+ if len(ae.headers) > 0 {
+ ctx = metadata.NewOutgoingContext(ctx, metadata.New(ae.headers))
+ }
+ return grpc.DialContext(ctx, addr, dialOpts...)
+}
+
+func (ae *Exporter) handleConfigStreaming(configStream agenttracepb.TraceService_ConfigClient) error {
+ // Note: We haven't yet implemented configuration sending so we
+ // should NOT be changing connection states within this function for now.
+ for {
+ recv, err := configStream.Recv()
+ if err != nil {
+ // TODO: Check if this is a transient error or exponential backoff-able.
+ return err
+ }
+ cfg := recv.Config
+ if cfg == nil {
+ continue
+ }
+
+ // Otherwise now apply the trace configuration sent down from the agent
+ if psamp := cfg.GetProbabilitySampler(); psamp != nil {
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.ProbabilitySampler(psamp.SamplingProbability)})
+ } else if csamp := cfg.GetConstantSampler(); csamp != nil {
+ alwaysSample := csamp.Decision == tracepb.ConstantSampler_ALWAYS_ON
+ if alwaysSample {
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
+ } else {
+ trace.ApplyConfig(trace.Config{DefaultSampler: trace.NeverSample()})
+ }
+ } else { // TODO: Add the rate limiting sampler here
+ }
+
+ // Then finally send back to upstream the newly applied configuration
+ err = configStream.Send(&agenttracepb.CurrentLibraryConfig{Config: &tracepb.TraceConfig{Sampler: cfg.Sampler}})
+ if err != nil {
+ return err
+ }
+ }
+}
+
+// Stop shuts down all the connections and resources
+// related to the exporter.
+func (ae *Exporter) Stop() error {
+ ae.mu.RLock()
+ cc := ae.grpcClientConn
+ started := ae.started
+ stopped := ae.stopped
+ ae.mu.RUnlock()
+
+ if !started {
+ return errNotStarted
+ }
+ if stopped {
+ // TODO: tell the user that we've already stopped, so perhaps a sentinel error?
+ return nil
+ }
+
+ ae.Flush()
+
+ // Now close the underlying gRPC connection.
+ var err error
+ if cc != nil {
+ err = cc.Close()
+ }
+
+ // At this point we can change the state variables: started and stopped
+ ae.mu.Lock()
+ ae.started = false
+ ae.stopped = true
+ ae.mu.Unlock()
+ close(ae.stopCh)
+
+ // Ensure that the backgroundConnector returns
+ <-ae.backgroundConnectionDoneCh
+
+ return err
+}
+
+func (ae *Exporter) ExportSpan(sd *trace.SpanData) {
+ if sd == nil {
+ return
+ }
+ _ = ae.traceBundler.Add(sd, 1)
+}
+
+func (ae *Exporter) ExportTraceServiceRequest(batch *agenttracepb.ExportTraceServiceRequest) error {
+ if batch == nil || len(batch.Spans) == 0 {
+ return nil
+ }
+
+ select {
+ case <-ae.stopCh:
+ return errStopped
+
+ default:
+ if lastConnectErr := ae.lastConnectError(); lastConnectErr != nil {
+ return fmt.Errorf("ExportTraceServiceRequest: no active connection, last connection error: %v", lastConnectErr)
+ }
+
+ ae.senderMu.Lock()
+ err := ae.traceExporter.Send(batch)
+ ae.senderMu.Unlock()
+ if err != nil {
+ if err == io.EOF {
+ ae.recvMu.Lock()
+ // Perform a .Recv to try to find out why the RPC actually ended.
+ // See:
+ // * https://github.com/grpc/grpc-go/blob/d389f9fac68eea0dcc49957d0b4cca5b3a0a7171/stream.go#L98-L100
+ // * https://groups.google.com/forum/#!msg/grpc-io/XcN4hA9HonI/F_UDiejTAwAJ
+ for {
+ _, err = ae.traceExporter.Recv()
+ if err != nil {
+ break
+ }
+ }
+ ae.recvMu.Unlock()
+ }
+
+ ae.setStateDisconnected(err)
+ if err != io.EOF {
+ return err
+ }
+ }
+ return nil
+ }
+}
+
+func (ae *Exporter) ExportView(vd *view.Data) {
+ if vd == nil {
+ return
+ }
+ _ = ae.viewDataBundler.Add(vd, 1)
+}
+
+func ocSpanDataToPbSpans(sdl []*trace.SpanData) []*tracepb.Span {
+ if len(sdl) == 0 {
+ return nil
+ }
+ protoSpans := make([]*tracepb.Span, 0, len(sdl))
+ for _, sd := range sdl {
+ if sd != nil {
+ protoSpans = append(protoSpans, ocSpanToProtoSpan(sd))
+ }
+ }
+ return protoSpans
+}
+
+func (ae *Exporter) uploadTraces(sdl []*trace.SpanData) {
+ select {
+ case <-ae.stopCh:
+ return
+
+ default:
+ if !ae.connected() {
+ return
+ }
+
+ protoSpans := ocSpanDataToPbSpans(sdl)
+ if len(protoSpans) == 0 {
+ return
+ }
+ ae.senderMu.Lock()
+ err := ae.traceExporter.Send(&agenttracepb.ExportTraceServiceRequest{
+ Spans: protoSpans,
+ })
+ ae.senderMu.Unlock()
+ if err != nil {
+ ae.setStateDisconnected(err)
+ }
+ }
+}
+
+func ocViewDataToPbMetrics(vdl []*view.Data) []*metricspb.Metric {
+ if len(vdl) == 0 {
+ return nil
+ }
+ metrics := make([]*metricspb.Metric, 0, len(vdl))
+ for _, vd := range vdl {
+ if vd != nil {
+ vmetric, err := viewDataToMetric(vd)
+ // TODO: (@odeke-em) somehow report this error, if it is non-nil.
+ if err == nil && vmetric != nil {
+ metrics = append(metrics, vmetric)
+ }
+ }
+ }
+ return metrics
+}
+
+func (ae *Exporter) uploadViewData(vdl []*view.Data) {
+ select {
+ case <-ae.stopCh:
+ return
+
+ default:
+ if !ae.connected() {
+ return
+ }
+
+ protoMetrics := ocViewDataToPbMetrics(vdl)
+ if len(protoMetrics) == 0 {
+ return
+ }
+ err := ae.metricsExporter.Send(&agentmetricspb.ExportMetricsServiceRequest{
+ Metrics: protoMetrics,
+ // TODO:(@odeke-em)
+ // a) Figure out how to derive a Node from the environment
+ // b) Figure out how to derive a Resource from the environment
+ // or better letting users of the exporter configure it.
+ })
+ if err != nil {
+ ae.setStateDisconnected(err)
+ }
+ }
+}
+
+func (ae *Exporter) Flush() {
+ ae.traceBundler.Flush()
+ ae.viewDataBundler.Flush()
+}
+
+func resourceProtoFromEnv() *resourcepb.Resource {
+ rs, _ := resource.FromEnv(context.Background())
+ if rs == nil {
+ return nil
+ }
+
+ rprs := &resourcepb.Resource{
+ Type: rs.Type,
+ }
+ if rs.Labels != nil {
+ rprs.Labels = make(map[string]string)
+ for k, v := range rs.Labels {
+ rprs.Labels[k] = v
+ }
+ }
+ return rprs
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
new file mode 100644
index 00000000..edeae65e
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/options.go
@@ -0,0 +1,144 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "time"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+)
+
+const (
+ DefaultAgentPort uint16 = 55678
+ DefaultAgentHost string = "localhost"
+)
+
+type ExporterOption interface {
+ withExporter(e *Exporter)
+}
+
+type insecureGrpcConnection int
+
+var _ ExporterOption = (*insecureGrpcConnection)(nil)
+
+func (igc *insecureGrpcConnection) withExporter(e *Exporter) {
+ e.canDialInsecure = true
+}
+
+// WithInsecure disables client transport security for the exporter's gRPC connection
+// just like grpc.WithInsecure() https://godoc.org/google.golang.org/grpc#WithInsecure
+// does. Note, by default, client security is required unless WithInsecure is used.
+func WithInsecure() ExporterOption { return new(insecureGrpcConnection) }
+
+type addressSetter string
+
+func (as addressSetter) withExporter(e *Exporter) {
+ e.agentAddress = string(as)
+}
+
+var _ ExporterOption = (*addressSetter)(nil)
+
+// WithAddress allows one to set the address that the exporter will
+// connect to the agent on. If unset, it will instead try to
+// connect to DefaultAgentHost:DefaultAgentPort
+func WithAddress(addr string) ExporterOption {
+ return addressSetter(addr)
+}
+
+type serviceNameSetter string
+
+func (sns serviceNameSetter) withExporter(e *Exporter) {
+ e.serviceName = string(sns)
+}
+
+var _ ExporterOption = (*serviceNameSetter)(nil)
+
+// WithServiceName allows one to set/override the service name
+// that the exporter will report to the agent.
+func WithServiceName(serviceName string) ExporterOption {
+ return serviceNameSetter(serviceName)
+}
+
+type reconnectionPeriod time.Duration
+
+func (rp reconnectionPeriod) withExporter(e *Exporter) {
+ e.reconnectionPeriod = time.Duration(rp)
+}
+
+func WithReconnectionPeriod(rp time.Duration) ExporterOption {
+ return reconnectionPeriod(rp)
+}
+
+type compressorSetter string
+
+func (c compressorSetter) withExporter(e *Exporter) {
+ e.compressor = string(c)
+}
+
+// UseCompressor will set the compressor for the gRPC client to use when sending requests.
+// It is the responsibility of the caller to ensure that the compressor set has been registered
+// with google.golang.org/grpc/encoding. This can be done by encoding.RegisterCompressor. Some
+// compressors auto-register on import, such as gzip, which can be registered by calling
+// `import _ "google.golang.org/grpc/encoding/gzip"`
+func UseCompressor(compressorName string) ExporterOption {
+ return compressorSetter(compressorName)
+}
+
+type headerSetter map[string]string
+
+func (h headerSetter) withExporter(e *Exporter) {
+ e.headers = map[string]string(h)
+}
+
+// WithHeaders will send the provided headers when the gRPC stream connection
+// is instantiated
+func WithHeaders(headers map[string]string) ExporterOption {
+ return headerSetter(headers)
+}
+
+type clientCredentials struct {
+ credentials.TransportCredentials
+}
+
+var _ ExporterOption = (*clientCredentials)(nil)
+
+// WithTLSCredentials allows the connection to use TLS credentials
+// when talking to the server. It takes in grpc.TransportCredentials instead
+// of say a Certificate file or a tls.Certificate, because the retrieving
+// these credentials can be done in many ways e.g. plain file, in code tls.Config
+// or by certificate rotation, so it is up to the caller to decide what to use.
+func WithTLSCredentials(creds credentials.TransportCredentials) ExporterOption {
+ return &clientCredentials{TransportCredentials: creds}
+}
+
+func (cc *clientCredentials) withExporter(e *Exporter) {
+ e.clientTransportCredentials = cc.TransportCredentials
+}
+
+type grpcDialOptions []grpc.DialOption
+
+var _ ExporterOption = (*grpcDialOptions)(nil)
+
+// WithGRPCDialOption opens support to any grpc.DialOption to be used. If it conflicts
+// with some other configuration the GRPC specified via the agent the ones here will
+// take preference since they are set last.
+func WithGRPCDialOption(opts ...grpc.DialOption) ExporterOption {
+ return grpcDialOptions(opts)
+}
+
+func (opts grpcDialOptions) withExporter(e *Exporter) {
+ e.grpcDialOptions = opts
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
new file mode 100644
index 00000000..983ebe7b
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_spans.go
@@ -0,0 +1,248 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "math"
+ "time"
+
+ "go.opencensus.io/trace"
+ "go.opencensus.io/trace/tracestate"
+
+ tracepb "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+ "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+const (
+ maxAnnotationEventsPerSpan = 32
+ maxMessageEventsPerSpan = 128
+)
+
+func ocSpanToProtoSpan(sd *trace.SpanData) *tracepb.Span {
+ if sd == nil {
+ return nil
+ }
+ var namePtr *tracepb.TruncatableString
+ if sd.Name != "" {
+ namePtr = &tracepb.TruncatableString{Value: sd.Name}
+ }
+ return &tracepb.Span{
+ TraceId: sd.TraceID[:],
+ SpanId: sd.SpanID[:],
+ ParentSpanId: sd.ParentSpanID[:],
+ Status: ocStatusToProtoStatus(sd.Status),
+ StartTime: timeToTimestamp(sd.StartTime),
+ EndTime: timeToTimestamp(sd.EndTime),
+ Links: ocLinksToProtoLinks(sd.Links),
+ Kind: ocSpanKindToProtoSpanKind(sd.SpanKind),
+ Name: namePtr,
+ Attributes: ocAttributesToProtoAttributes(sd.Attributes),
+ TimeEvents: ocTimeEventsToProtoTimeEvents(sd.Annotations, sd.MessageEvents),
+ Tracestate: ocTracestateToProtoTracestate(sd.Tracestate),
+ }
+}
+
+var blankStatus trace.Status
+
+func ocStatusToProtoStatus(status trace.Status) *tracepb.Status {
+ if status == blankStatus {
+ return nil
+ }
+ return &tracepb.Status{
+ Code: status.Code,
+ Message: status.Message,
+ }
+}
+
+func ocLinksToProtoLinks(links []trace.Link) *tracepb.Span_Links {
+ if len(links) == 0 {
+ return nil
+ }
+
+ sl := make([]*tracepb.Span_Link, 0, len(links))
+ for _, ocLink := range links {
+ // This redefinition is necessary to prevent ocLink.*ID[:] copies
+ // being reused -- in short we need a new ocLink per iteration.
+ ocLink := ocLink
+
+ sl = append(sl, &tracepb.Span_Link{
+ TraceId: ocLink.TraceID[:],
+ SpanId: ocLink.SpanID[:],
+ Type: ocLinkTypeToProtoLinkType(ocLink.Type),
+ })
+ }
+
+ return &tracepb.Span_Links{
+ Link: sl,
+ }
+}
+
+func ocLinkTypeToProtoLinkType(oct trace.LinkType) tracepb.Span_Link_Type {
+ switch oct {
+ case trace.LinkTypeChild:
+ return tracepb.Span_Link_CHILD_LINKED_SPAN
+ case trace.LinkTypeParent:
+ return tracepb.Span_Link_PARENT_LINKED_SPAN
+ default:
+ return tracepb.Span_Link_TYPE_UNSPECIFIED
+ }
+}
+
+func ocAttributesToProtoAttributes(attrs map[string]interface{}) *tracepb.Span_Attributes {
+ if len(attrs) == 0 {
+ return nil
+ }
+ outMap := make(map[string]*tracepb.AttributeValue)
+ for k, v := range attrs {
+ switch v := v.(type) {
+ case bool:
+ outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_BoolValue{BoolValue: v}}
+
+ case int:
+ outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: int64(v)}}
+
+ case int64:
+ outMap[k] = &tracepb.AttributeValue{Value: &tracepb.AttributeValue_IntValue{IntValue: v}}
+
+ case string:
+ outMap[k] = &tracepb.AttributeValue{
+ Value: &tracepb.AttributeValue_StringValue{
+ StringValue: &tracepb.TruncatableString{Value: v},
+ },
+ }
+ }
+ }
+ return &tracepb.Span_Attributes{
+ AttributeMap: outMap,
+ }
+}
+
+// This code is mostly copied from
+// https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver/blob/master/trace_proto.go#L46
+func ocTimeEventsToProtoTimeEvents(as []trace.Annotation, es []trace.MessageEvent) *tracepb.Span_TimeEvents {
+ if len(as) == 0 && len(es) == 0 {
+ return nil
+ }
+
+ timeEvents := &tracepb.Span_TimeEvents{}
+ var annotations, droppedAnnotationsCount int
+ var messageEvents, droppedMessageEventsCount int
+
+ // Transform annotations
+ for i, a := range as {
+ if annotations >= maxAnnotationEventsPerSpan {
+ droppedAnnotationsCount = len(as) - i
+ break
+ }
+ annotations++
+ timeEvents.TimeEvent = append(timeEvents.TimeEvent,
+ &tracepb.Span_TimeEvent{
+ Time: timeToTimestamp(a.Time),
+ Value: transformAnnotationToTimeEvent(&a),
+ },
+ )
+ }
+
+ // Transform message events
+ for i, e := range es {
+ if messageEvents >= maxMessageEventsPerSpan {
+ droppedMessageEventsCount = len(es) - i
+ break
+ }
+ messageEvents++
+ timeEvents.TimeEvent = append(timeEvents.TimeEvent,
+ &tracepb.Span_TimeEvent{
+ Time: timeToTimestamp(e.Time),
+ Value: transformMessageEventToTimeEvent(&e),
+ },
+ )
+ }
+
+ // Process dropped counter
+ timeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
+ timeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
+
+ return timeEvents
+}
+
+func transformAnnotationToTimeEvent(a *trace.Annotation) *tracepb.Span_TimeEvent_Annotation_ {
+ return &tracepb.Span_TimeEvent_Annotation_{
+ Annotation: &tracepb.Span_TimeEvent_Annotation{
+ Description: &tracepb.TruncatableString{Value: a.Message},
+ Attributes: ocAttributesToProtoAttributes(a.Attributes),
+ },
+ }
+}
+
+func transformMessageEventToTimeEvent(e *trace.MessageEvent) *tracepb.Span_TimeEvent_MessageEvent_ {
+ return &tracepb.Span_TimeEvent_MessageEvent_{
+ MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
+ Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
+ Id: uint64(e.MessageID),
+ UncompressedSize: uint64(e.UncompressedByteSize),
+ CompressedSize: uint64(e.CompressedByteSize),
+ },
+ }
+}
+
+// clip32 clips an int to the range of an int32.
+func clip32(x int) int32 {
+ if x < math.MinInt32 {
+ return math.MinInt32
+ }
+ if x > math.MaxInt32 {
+ return math.MaxInt32
+ }
+ return int32(x)
+}
+
+func timeToTimestamp(t time.Time) *timestamp.Timestamp {
+ nanoTime := t.UnixNano()
+ return &timestamp.Timestamp{
+ Seconds: nanoTime / 1e9,
+ Nanos: int32(nanoTime % 1e9),
+ }
+}
+
+func ocSpanKindToProtoSpanKind(kind int) tracepb.Span_SpanKind {
+ switch kind {
+ case trace.SpanKindClient:
+ return tracepb.Span_CLIENT
+ case trace.SpanKindServer:
+ return tracepb.Span_SERVER
+ default:
+ return tracepb.Span_SPAN_KIND_UNSPECIFIED
+ }
+}
+
+func ocTracestateToProtoTracestate(ts *tracestate.Tracestate) *tracepb.Span_Tracestate {
+ if ts == nil {
+ return nil
+ }
+ return &tracepb.Span_Tracestate{
+ Entries: ocTracestateEntriesToProtoTracestateEntries(ts.Entries()),
+ }
+}
+
+func ocTracestateEntriesToProtoTracestateEntries(entries []tracestate.Entry) []*tracepb.Span_Tracestate_Entry {
+ protoEntries := make([]*tracepb.Span_Tracestate_Entry, 0, len(entries))
+ for _, entry := range entries {
+ protoEntries = append(protoEntries, &tracepb.Span_Tracestate_Entry{
+ Key: entry.Key,
+ Value: entry.Value,
+ })
+ }
+ return protoEntries
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
new file mode 100644
index 00000000..43f18dec
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/transform_stats_to_metrics.go
@@ -0,0 +1,274 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+import (
+ "errors"
+ "time"
+
+ "go.opencensus.io/stats"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/tag"
+
+ "github.com/golang/protobuf/ptypes/timestamp"
+
+ metricspb "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+)
+
+var (
+ errNilMeasure = errors.New("expecting a non-nil stats.Measure")
+ errNilView = errors.New("expecting a non-nil view.View")
+ errNilViewData = errors.New("expecting a non-nil view.Data")
+)
+
+func viewDataToMetric(vd *view.Data) (*metricspb.Metric, error) {
+ if vd == nil {
+ return nil, errNilViewData
+ }
+
+ descriptor, err := viewToMetricDescriptor(vd.View)
+ if err != nil {
+ return nil, err
+ }
+
+ timeseries, err := viewDataToTimeseries(vd)
+ if err != nil {
+ return nil, err
+ }
+
+ metric := &metricspb.Metric{
+ MetricDescriptor: descriptor,
+ Timeseries: timeseries,
+ }
+ return metric, nil
+}
+
+func viewToMetricDescriptor(v *view.View) (*metricspb.MetricDescriptor, error) {
+ if v == nil {
+ return nil, errNilView
+ }
+ if v.Measure == nil {
+ return nil, errNilMeasure
+ }
+
+ desc := &metricspb.MetricDescriptor{
+ Name: stringOrCall(v.Name, v.Measure.Name),
+ Description: stringOrCall(v.Description, v.Measure.Description),
+ Unit: v.Measure.Unit(),
+ Type: aggregationToMetricDescriptorType(v),
+ LabelKeys: tagKeysToLabelKeys(v.TagKeys),
+ }
+ return desc, nil
+}
+
+func stringOrCall(first string, call func() string) string {
+ if first != "" {
+ return first
+ }
+ return call()
+}
+
+type measureType uint
+
+const (
+ measureUnknown measureType = iota
+ measureInt64
+ measureFloat64
+)
+
+func measureTypeFromMeasure(m stats.Measure) measureType {
+ switch m.(type) {
+ default:
+ return measureUnknown
+ case *stats.Float64Measure:
+ return measureFloat64
+ case *stats.Int64Measure:
+ return measureInt64
+ }
+}
+
+func aggregationToMetricDescriptorType(v *view.View) metricspb.MetricDescriptor_Type {
+ if v == nil || v.Aggregation == nil {
+ return metricspb.MetricDescriptor_UNSPECIFIED
+ }
+ if v.Measure == nil {
+ return metricspb.MetricDescriptor_UNSPECIFIED
+ }
+
+ switch v.Aggregation.Type {
+ case view.AggTypeCount:
+ // Cumulative on int64
+ return metricspb.MetricDescriptor_CUMULATIVE_INT64
+
+ case view.AggTypeDistribution:
+ // Cumulative types
+ return metricspb.MetricDescriptor_CUMULATIVE_DISTRIBUTION
+
+ case view.AggTypeLastValue:
+ // Gauge types
+ switch measureTypeFromMeasure(v.Measure) {
+ case measureFloat64:
+ return metricspb.MetricDescriptor_GAUGE_DOUBLE
+ case measureInt64:
+ return metricspb.MetricDescriptor_GAUGE_INT64
+ }
+
+ case view.AggTypeSum:
+ // Cumulative types
+ switch measureTypeFromMeasure(v.Measure) {
+ case measureFloat64:
+ return metricspb.MetricDescriptor_CUMULATIVE_DOUBLE
+ case measureInt64:
+ return metricspb.MetricDescriptor_CUMULATIVE_INT64
+ }
+ }
+
+ // For all other cases, return unspecified.
+ return metricspb.MetricDescriptor_UNSPECIFIED
+}
+
+func tagKeysToLabelKeys(tagKeys []tag.Key) []*metricspb.LabelKey {
+ labelKeys := make([]*metricspb.LabelKey, 0, len(tagKeys))
+ for _, tagKey := range tagKeys {
+ labelKeys = append(labelKeys, &metricspb.LabelKey{
+ Key: tagKey.Name(),
+ })
+ }
+ return labelKeys
+}
+
+func viewDataToTimeseries(vd *view.Data) ([]*metricspb.TimeSeries, error) {
+ if vd == nil || len(vd.Rows) == 0 {
+ return nil, nil
+ }
+
+ // Given that view.Data only contains Start, End
+ // the timestamps for all the row data will be the exact same
+ // per aggregation. However, the values will differ.
+ // Each row has its own tags.
+ startTimestamp := timeToProtoTimestamp(vd.Start)
+ endTimestamp := timeToProtoTimestamp(vd.End)
+
+ mType := measureTypeFromMeasure(vd.View.Measure)
+ timeseries := make([]*metricspb.TimeSeries, 0, len(vd.Rows))
+ // It is imperative that the ordering of "LabelValues" matches those
+ // of the Label keys in the metric descriptor.
+ for _, row := range vd.Rows {
+ labelValues := labelValuesFromTags(row.Tags)
+ point := rowToPoint(vd.View, row, endTimestamp, mType)
+ timeseries = append(timeseries, &metricspb.TimeSeries{
+ StartTimestamp: startTimestamp,
+ LabelValues: labelValues,
+ Points: []*metricspb.Point{point},
+ })
+ }
+
+ if len(timeseries) == 0 {
+ return nil, nil
+ }
+
+ return timeseries, nil
+}
+
+func timeToProtoTimestamp(t time.Time) *timestamp.Timestamp {
+ unixNano := t.UnixNano()
+ return ×tamp.Timestamp{
+ Seconds: int64(unixNano / 1e9),
+ Nanos: int32(unixNano % 1e9),
+ }
+}
+
+func rowToPoint(v *view.View, row *view.Row, endTimestamp *timestamp.Timestamp, mType measureType) *metricspb.Point {
+ pt := &metricspb.Point{
+ Timestamp: endTimestamp,
+ }
+
+ switch data := row.Data.(type) {
+ case *view.CountData:
+ pt.Value = &metricspb.Point_Int64Value{Int64Value: data.Value}
+
+ case *view.DistributionData:
+ pt.Value = &metricspb.Point_DistributionValue{
+ DistributionValue: &metricspb.DistributionValue{
+ Count: data.Count,
+ Sum: float64(data.Count) * data.Mean, // because Mean := Sum/Count
+ // TODO: Add Exemplar
+ Buckets: bucketsToProtoBuckets(data.CountPerBucket),
+ BucketOptions: &metricspb.DistributionValue_BucketOptions{
+ Type: &metricspb.DistributionValue_BucketOptions_Explicit_{
+ Explicit: &metricspb.DistributionValue_BucketOptions_Explicit{
+ Bounds: v.Aggregation.Buckets,
+ },
+ },
+ },
+ SumOfSquaredDeviation: data.SumOfSquaredDev,
+ }}
+
+ case *view.LastValueData:
+ setPointValue(pt, data.Value, mType)
+
+ case *view.SumData:
+ setPointValue(pt, data.Value, mType)
+ }
+
+ return pt
+}
+
+// Not returning anything from this function because metricspb.Point.is_Value is an unexported
+// interface hence we just have to set its value by pointer.
+func setPointValue(pt *metricspb.Point, value float64, mType measureType) {
+ if mType == measureInt64 {
+ pt.Value = &metricspb.Point_Int64Value{Int64Value: int64(value)}
+ } else {
+ pt.Value = &metricspb.Point_DoubleValue{DoubleValue: value}
+ }
+}
+
+func bucketsToProtoBuckets(countPerBucket []int64) []*metricspb.DistributionValue_Bucket {
+ distBuckets := make([]*metricspb.DistributionValue_Bucket, len(countPerBucket))
+ for i := 0; i < len(countPerBucket); i++ {
+ count := countPerBucket[i]
+
+ distBuckets[i] = &metricspb.DistributionValue_Bucket{
+ Count: count,
+ }
+ }
+
+ return distBuckets
+}
+
+func labelValuesFromTags(tags []tag.Tag) []*metricspb.LabelValue {
+ if len(tags) == 0 {
+ return nil
+ }
+
+ labelValues := make([]*metricspb.LabelValue, 0, len(tags))
+ for _, tag_ := range tags {
+ labelValues = append(labelValues, &metricspb.LabelValue{
+ Value: tag_.Value,
+
+ // It is imperative that we set the "HasValue" attribute,
+ // in order to distinguish missing a label from the empty string.
+ // https://godoc.org/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1#LabelValue.HasValue
+ //
+ // OpenCensus-Go uses non-pointers for tags as seen by this function's arguments,
+ // so the best case that we can use to distinguish missing labels/tags from the
+ // empty string is by checking if the Tag.Key.Name() != "" to indicate that we have
+ // a value.
+ HasValue: tag_.Key.Name() != "",
+ })
+ }
+ return labelValues
+}
diff --git a/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
new file mode 100644
index 00000000..68be4c75
--- /dev/null
+++ b/vendor/contrib.go.opencensus.io/exporter/ocagent/version.go
@@ -0,0 +1,17 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package ocagent
+
+const Version = "0.0.1"
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/.gitignore b/vendor/github.com/Azure/azure-amqp-common-go/v2/.gitignore
new file mode 100644
index 00000000..c805fdab
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/.gitignore
@@ -0,0 +1,19 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+vendor
+.idea
+
+.DS_Store
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/.travis.yml b/vendor/github.com/Azure/azure-amqp-common-go/v2/.travis.yml
new file mode 100644
index 00000000..bb9bfb18
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+sudo: false
+go:
+ - 1.x
+ - 1.11.x
+
+matrix:
+ fast_finish: true
+
+before_install:
+ - cd ${TRAVIS_HOME}
+ - go get github.com/mattn/goveralls
+ - go get golang.org/x/tools/cmd/cover
+ - go get github.com/fzipp/gocyclo
+ - go get golang.org/x/lint/golint
+ - cd ${TRAVIS_BUILD_DIR}
+
+script:
+ - export GO111MODULE=on
+ - make test
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/LICENSE b/vendor/github.com/Azure/azure-amqp-common-go/v2/LICENSE
new file mode 100644
index 00000000..21071075
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/Makefile b/vendor/github.com/Azure/azure-amqp-common-go/v2/Makefile
new file mode 100644
index 00000000..371bf356
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/Makefile
@@ -0,0 +1,97 @@
+PACKAGE = github.com/Azure/azure-amqp-common-go
+DATE ?= $(shell date +%FT%T%z)
+VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \
+ cat $(CURDIR)/.version 2> /dev/null || echo v0)
+BIN = $(GOPATH)/bin
+BASE = $(CURDIR)
+PKGS = $(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... | grep -vE "^$(PACKAGE)/_examples|templates/"))
+TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS))
+GO_FILES = find . -iname '*.go' -type f
+
+GO = go
+GODOC = godoc
+GOFMT = gofmt
+GOCYCLO = gocyclo
+
+V = 0
+Q = $(if $(filter 1,$V),,@)
+M = $(shell printf "\033[34;1m▶\033[0m")
+TIMEOUT = 360
+
+.PHONY: all
+all: fmt go.sum lint vet tidy | $(BASE) ; $(info $(M) building library…) @ ## Build program
+ $Q cd $(BASE) && $(GO) build \
+ -tags release \
+ -ldflags '-X $(PACKAGE)/cmd.Version=$(VERSION) -X $(PACKAGE)/cmd.BuildDate=$(DATE)' \
+ ./...
+
+$(BASE): ; $(info $(M) setting GOPATH…)
+ @mkdir -p $(dir $@)
+ @ln -sf $(CURDIR) $@
+
+# Tools
+
+GOLINT = $(BIN)/golint
+$(BIN)/golint: | $(BASE) ; $(info $(M) building golint…)
+ $Q go get -u golang.org/x/lint/golint
+
+.PHONY: tidy
+tidy: ; $(info $(M) running tidy…) @ ## Run tidy
+ $Q $(GO) mod tidy
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-debug
+.PHONY: $(TEST_TARGETS) test-xml check test tests
+test-bench: ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks
+test-short: ARGS=-short ## Run only short tests
+test-verbose: ARGS=-v ## Run tests in verbose mode
+test-debug: ARGS=-v -debug ## Run tests in verbose mode with debug output
+test-race: ARGS=-race ## Run tests with race detector
+test-cover: ARGS=-cover ## Run tests in verbose mode with coverage
+$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%)
+$(TEST_TARGETS): test
+check test tests: cyclo lint vet go.sum | $(BASE) ; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests
+ $Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) $(TESTPKGS)
+
+.PHONY: vet
+vet: go.sum | $(BASE) $(GOLINT) ; $(info $(M) running vet…) @ ## Run vet
+ $Q cd $(BASE) && $(GO) vet ./...
+
+.PHONY: lint
+lint: go.sum | $(BASE) $(GOLINT) ; $(info $(M) running golint…) @ ## Run golint
+ $Q cd $(BASE) && ret=0 && for pkg in $(PKGS); do \
+ test -z "$$($(GOLINT) $$pkg | tee /dev/stderr)" || ret=1 ; \
+ done ; exit $$ret
+
+.PHONY: fmt
+fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files
+ @ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./...); do \
+ $(GOFMT) -l -w $$d/*.go || ret=$$? ; \
+ done ; exit $$ret
+
+.PHONY: cyclo
+cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files
+ $Q cd $(BASE) && $(GOCYCLO) -over 19 $$($(GO_FILES))
+# Dependency management
+
+go.sum: go.mod ; $(info $(M) verifying modules...) @ ## Run go mod verify
+ $Q cd $(BASE) && $(GO) mod verify
+
+go.mod:
+ $Q cd $(BASE) && $(GO) mod tidy
+
+# Misc
+
+.PHONY: clean
+clean: ; $(info $(M) cleaning…) @ ## Cleanup everything
+ @rm -rf test/tests.* test/coverage.*
+
+.PHONY: help
+help:
+ @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
+ awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: version
+version:
+ @echo $(VERSION)
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/README.md b/vendor/github.com/Azure/azure-amqp-common-go/v2/README.md
new file mode 100644
index 00000000..3f4ec80e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/README.md
@@ -0,0 +1,46 @@
+# Azure AMQP Common
+[](https://goreportcard.com/report/github.com/Azure/azure-amqp-common-go)
+[](https://godoc.org/github.com/Azure/azure-amqp-common-go)
+[](https://travis-ci.org/Azure/azure-amqp-common-go)
+
+This project contains reusable components for AMQP based services like Event Hub and Service Bus. You will find
+abstractions over authentication, claims-based security, connection string parsing and RPC for AMQP.
+
+If you are looking for the Azure Event Hub library for go, you can find it [here](https://aka.ms/azure-event-hubs-go).
+
+If you are looking for the Azure Service Bus library for go, you can find it [here](https://aka.ms/azure-service-bus-go).
+
+## Install with Go modules
+If you want to use stable versions of the library, please use Go modules.
+
+### Using go get targeting version 2.x.x
+``` bash
+go get -u github.com/Azure/azure-amqp-common-go/v2
+```
+
+### Using go get targeting version 1.x.x
+``` bash
+go get -u github.com/Azure/azure-amqp-common-go
+```
+
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a
+Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us
+the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide
+a CLA and decorate the PR appropriately (e.g., label, comment). Simply follow the instructions
+provided by the bot. You will only need to do this once across all repos using our CLA.
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
+## License
+
+MIT, see [LICENSE](./LICENSE).
+
+## Contribute
+
+See [CONTRIBUTING.md](.github/CONTRIBUTING.md).
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/auth/token.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/auth/token.go
new file mode 100644
index 00000000..ea7e5da1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/auth/token.go
@@ -0,0 +1,58 @@
+// Package auth provides an abstraction over claims-based security for Azure Event Hub and Service Bus.
+package auth
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+const (
+ // CBSTokenTypeJWT is the type of token to be used for JWTs. For example Azure Active Directory tokens.
+ CBSTokenTypeJWT TokenType = "jwt"
+ // CBSTokenTypeSAS is the type of token to be used for SAS tokens.
+ CBSTokenTypeSAS TokenType = "servicebus.windows.net:sastoken"
+)
+
+type (
+ // TokenType represents types of tokens known for claims-based auth
+ TokenType string
+
+ // Token contains all of the information to negotiate authentication
+ Token struct {
+ // TokenType is the type of CBS token
+ TokenType TokenType
+ Token string
+ Expiry string
+ }
+
+ // TokenProvider abstracts the fetching of authentication tokens
+ TokenProvider interface {
+ GetToken(uri string) (*Token, error)
+ }
+)
+
+// NewToken constructs a new auth token
+func NewToken(tokenType TokenType, token, expiry string) *Token {
+ return &Token{
+ TokenType: tokenType,
+ Token: token,
+ Expiry: expiry,
+ }
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/cbs/cbs.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/cbs/cbs.go
new file mode 100644
index 00000000..699f2452
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/cbs/cbs.go
@@ -0,0 +1,90 @@
+// Package cbs provides the functionality for negotiating claims-based security over AMQP for use in Azure Service Bus
+// and Event Hubs.
+package cbs
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+
+ "github.com/Azure/azure-amqp-common-go/v2/auth"
+ "github.com/Azure/azure-amqp-common-go/v2/internal/tracing"
+ "github.com/Azure/azure-amqp-common-go/v2/rpc"
+)
+
+const (
+ cbsAddress = "$cbs"
+ cbsOperationKey = "operation"
+ cbsOperationPutToken = "put-token"
+ cbsTokenTypeKey = "type"
+ cbsAudienceKey = "name"
+ cbsExpirationKey = "expiration"
+)
+
+// NegotiateClaim attempts to put a token to the $cbs management endpoint to negotiate auth for the given audience
+func NegotiateClaim(ctx context.Context, audience string, conn *amqp.Client, provider auth.TokenProvider) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.cbs.NegotiateClaim")
+ defer span.End()
+
+ link, err := rpc.NewLink(conn, cbsAddress)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ defer func() {
+ if err := link.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ }
+ }()
+
+ token, err := provider.GetToken(audience)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ tab.For(ctx).Debug(fmt.Sprintf("negotiating claim for audience %s with token type %s and expiry of %s", audience, token.TokenType, token.Expiry))
+ msg := &amqp.Message{
+ Value: token.Token,
+ ApplicationProperties: map[string]interface{}{
+ cbsOperationKey: cbsOperationPutToken,
+ cbsTokenTypeKey: string(token.TokenType),
+ cbsAudienceKey: audience,
+ cbsExpirationKey: token.Expiry,
+ },
+ }
+
+ res, err := link.RetryableRPC(ctx, 3, 1*time.Second, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ tab.For(ctx).Debug(fmt.Sprintf("negotiated with response code %d and message: %s", res.Code, res.Description))
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/changelog.md b/vendor/github.com/Azure/azure-amqp-common-go/v2/changelog.md
new file mode 100644
index 00000000..8e623314
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/changelog.md
@@ -0,0 +1,83 @@
+# Change Log
+
+## `head`
+
+## `v2.1.0`
+- add session filters for RPC links
+- bump amqp to v0.11.2
+- add more logging in RPC operations
+
+## `v2.0.0`
+- [**breaking change** remove persist and move into the Event Hubs package](https://github.com/Azure/azure-event-hubs-go/pull/112)
+- **breaking change** remove log package in favor of https://github.com/devigned/tab
+
+## `v1.1.4`
+- allow status description on RPC calls to be empty without returning an error https://github.com/Azure/azure-event-hubs-go/issues/88
+
+## `v1.1.3`
+- adding automatic server-timeout field for `rpc` package. It gleans the appropriate value from the context passed to it
+
+## `v1.1.2`
+- adopting go modules
+
+## `v1.1.1`
+- broadening accepted versions of pack.ag/amqp
+
+## `v1.1.0`
+
+- adding the ability to reuse an AMQP session while making RPCs
+- bug fixes
+
+## `v1.0.3`
+- updating dependencies, adding new 'go-autorest' constraint
+
+## `v1.0.2`
+- adding resiliency against malformed "status-code" and "status-description" properties in rpc responses
+
+## `v1.0.1`
+- bump version constant
+
+## `v1.0.0`
+- moved to opencensus from opentracing
+- committing to backward compatibility
+
+## `v0.7.0`
+- update AMQP dependency to 0.7.0
+
+## `v0.6.0`
+- **Breaking Change** change the parse connection signature and make it more strict
+- fix errors imports
+
+## `v0.5.0`
+- **Breaking Change** lock dependency to AMQP
+
+## `v0.4.0`
+- **Breaking Change** remove namespace from SAS provider and return struct rather than interface
+
+## `v0.3.2`
+- Return error on retry. Was returning nil if not retryable.
+
+## `v0.3.1`
+- Fix missing defer on spans
+
+## `v0.3.0`
+- add opentracing support
+- upgrade amqp to pull in the changes where close accepts context (breaking change)
+
+## `v0.2.4`
+- connection string keys are case insensitive
+
+## `v0.2.3`
+- handle remove trailing slash from host
+
+## `v0.2.2`
+- handle connection string values which contain `=`
+
+## `v0.2.1`
+- parse connection strings using key / values rather than regex
+
+## `v0.2.0`
+- add file checkpoint persister
+
+## `v0.1.0`
+- initial release
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/conn/conn.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/conn/conn.go
new file mode 100644
index 00000000..4d539cf9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/conn/conn.go
@@ -0,0 +1,112 @@
+package conn
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strings"
+)
+
+const (
+ endpointKey = "Endpoint"
+ sharedAccessKeyNameKey = "SharedAccessKeyName"
+ sharedAccessKeyKey = "SharedAccessKey"
+ entityPathKey = "EntityPath"
+)
+
+type (
+ // ParsedConn is the structure of a parsed Service Bus or Event Hub connection string.
+ ParsedConn struct {
+ Host string
+ Suffix string
+ Namespace string
+ HubName string
+ KeyName string
+ Key string
+ }
+)
+
+// newParsedConnection is a constructor for a parsedConn and verifies each of the inputs is non-null.
+func newParsedConnection(namespace, suffix, hubName, keyName, key string) *ParsedConn {
+ return &ParsedConn{
+ Host: "amqps://" + namespace + "." + suffix,
+ Suffix: suffix,
+ Namespace: namespace,
+ KeyName: keyName,
+ Key: key,
+ HubName: hubName,
+ }
+}
+
+// ParsedConnectionFromStr takes a string connection string from the Azure portal and returns the parsed representation.
+// The method will return an error if the Endpoint, SharedAccessKeyName or SharedAccessKey is empty.
+func ParsedConnectionFromStr(connStr string) (*ParsedConn, error) {
+ var namespace, suffix, hubName, keyName, secret string
+ splits := strings.Split(connStr, ";")
+ for _, split := range splits {
+ keyAndValue := strings.Split(split, "=")
+ if len(keyAndValue) < 2 {
+ return nil, errors.New("failed parsing connection string due to unmatched key value separated by '='")
+ }
+
+ // if a key value pair has `=` in the value, recombine them
+ key := keyAndValue[0]
+ value := strings.Join(keyAndValue[1:], "=")
+ switch {
+ case strings.EqualFold(endpointKey, key):
+ u, err := url.Parse(value)
+ if err != nil {
+ return nil, errors.New("failed parsing connection string due to an incorrectly formatted Endpoint value")
+ }
+ hostSplits := strings.Split(u.Host, ".")
+ if len(hostSplits) < 2 {
+ return nil, errors.New("failed parsing connection string due to Endpoint value not containing a URL with a namespace and a suffix")
+ }
+ namespace = hostSplits[0]
+ suffix = strings.Join(hostSplits[1:], ".")
+ case strings.EqualFold(sharedAccessKeyNameKey, key):
+ keyName = value
+ case strings.EqualFold(sharedAccessKeyKey, key):
+ secret = value
+ case strings.EqualFold(entityPathKey, key):
+ hubName = value
+ }
+ }
+
+ parsed := newParsedConnection(namespace, suffix, hubName, keyName, secret)
+ if namespace == "" {
+ return parsed, fmt.Errorf("key %q must not be empty", endpointKey)
+ }
+
+ if keyName == "" {
+ return parsed, fmt.Errorf("key %q must not be empty", sharedAccessKeyNameKey)
+ }
+
+ if secret == "" {
+ return parsed, fmt.Errorf("key %q must not be empty", sharedAccessKeyKey)
+ }
+
+ return parsed, nil
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/go.mod b/vendor/github.com/Azure/azure-amqp-common-go/v2/go.mod
new file mode 100644
index 00000000..273f4af3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/go.mod
@@ -0,0 +1,18 @@
+module github.com/Azure/azure-amqp-common-go/v2
+
+go 1.12
+
+require (
+ contrib.go.opencensus.io/exporter/ocagent v0.5.0 // indirect
+ github.com/Azure/azure-sdk-for-go v29.0.0+incompatible // indirect
+ github.com/Azure/go-autorest v12.0.0+incompatible
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/devigned/tab v0.1.1
+ github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
+ github.com/fortytw2/leaktest v1.2.0 // indirect
+ github.com/pkg/errors v0.8.0 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/stretchr/testify v1.2.2
+ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2
+ pack.ag/amqp v0.11.2
+)
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/go.sum b/vendor/github.com/Azure/azure-amqp-common-go/v2/go.sum
new file mode 100644
index 00000000..c33e6c34
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/go.sum
@@ -0,0 +1,92 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
+github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
+github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
+github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+pack.ag/amqp v0.11.2 h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg=
+pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/internal/tracing/tracing.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/internal/tracing/tracing.go
new file mode 100644
index 00000000..0ede73a3
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/internal/tracing/tracing.go
@@ -0,0 +1,32 @@
+package tracing
+
+import (
+ "context"
+ "os"
+
+ "github.com/devigned/tab"
+
+ "github.com/Azure/azure-amqp-common-go/v2/internal"
+)
+
+// StartSpanFromContext starts a span given a context and applies common library information
+func StartSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ ApplyComponentInfo(span)
+ return ctx, span
+}
+
+// ApplyComponentInfo applies eventhub library and network info to the span
+func ApplyComponentInfo(span tab.Spanner) {
+ span.AddAttributes(
+ tab.StringAttribute("component", "github.com/Azure/azure-amqp-common-go"),
+ tab.StringAttribute("version", common.Version))
+ applyNetworkInfo(span)
+}
+
+func applyNetworkInfo(span tab.Spanner) {
+ hostname, err := os.Hostname()
+ if err == nil {
+ span.AddAttributes(tab.StringAttribute("peer.hostname", hostname))
+ }
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/internal/version.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/internal/version.go
new file mode 100644
index 00000000..624e23bc
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/internal/version.go
@@ -0,0 +1,6 @@
+package common
+
+const (
+ // Version is the semantic version of the library
+ Version = "2.1.0"
+)
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/ptrs.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/ptrs.go
new file mode 100644
index 00000000..17327900
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/ptrs.go
@@ -0,0 +1,44 @@
+package common
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+// PtrBool takes a boolean and returns a pointer to that bool. For use in literal pointers, ptrBool(true) -> *bool
+func PtrBool(toPtr bool) *bool {
+ return &toPtr
+}
+
+// PtrString takes a string and returns a pointer to that string. For use in literal pointers,
+// PtrString(fmt.Sprintf("..", foo)) -> *string
+func PtrString(toPtr string) *string {
+ return &toPtr
+}
+
+// PtrInt32 takes a int32 and returns a pointer to that int32. For use in literal pointers, ptrInt32(1) -> *int32
+func PtrInt32(number int32) *int32 {
+ return &number
+}
+
+// PtrInt64 takes a int64 and returns a pointer to that int64. For use in literal pointers, ptrInt64(1) -> *int64
+func PtrInt64(number int64) *int64 {
+ return &number
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/retry.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/retry.go
new file mode 100644
index 00000000..3d9edc1a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/retry.go
@@ -0,0 +1,54 @@
+package common
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "time"
+)
+
+// Retryable represents an error which should be able to be retried
+type Retryable string
+
+// Error implementation for Retryable
+func (r Retryable) Error() string {
+ return string(r)
+}
+
+// Retry will attempt to retry an action a number of times if the action returns a retryable error
+func Retry(times int, delay time.Duration, action func() (interface{}, error)) (interface{}, error) {
+ var lastErr error
+ for i := 0; i < times; i++ {
+ item, err := action()
+ if err != nil {
+ if retryable, ok := err.(Retryable); ok {
+ lastErr = retryable
+ time.Sleep(delay)
+ continue
+ } else {
+ return nil, err
+ }
+ }
+ return item, nil
+ }
+ return nil, lastErr
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/rpc/rpc.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/rpc/rpc.go
new file mode 100644
index 00000000..552a754d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/rpc/rpc.go
@@ -0,0 +1,318 @@
+// Package rpc provides functionality for request / reply messaging. It is used by package mgmt and cbs.
+package rpc
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+
+ "github.com/Azure/azure-amqp-common-go/v2"
+ "github.com/Azure/azure-amqp-common-go/v2/internal/tracing"
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+)
+
+const (
+ replyPostfix = "-reply-to-"
+ statusCodeKey = "status-code"
+ descriptionKey = "status-description"
+)
+
+type (
+ // Link is the bidirectional communication structure used for CBS negotiation
+ Link struct {
+ session *amqp.Session
+ receiver *amqp.Receiver
+ sender *amqp.Sender
+ clientAddress string
+ rpcMu sync.Mutex
+ sessionID *string
+ useSessionID bool
+ id string
+ }
+
+ // Response is the simplified response structure from an RPC like call
+ Response struct {
+ Code int
+ Description string
+ Message *amqp.Message
+ }
+
+ // LinkOption provides a way to customize the construction of a Link
+ LinkOption func(link *Link) error
+)
+
+// LinkWithSessionFilter configures a Link to use a session filter
+func LinkWithSessionFilter(sessionID *string) LinkOption {
+ return func(l *Link) error {
+ l.sessionID = sessionID
+ l.useSessionID = true
+ return nil
+ }
+}
+
+// NewLink will build a new request response link
+func NewLink(conn *amqp.Client, address string, opts ...LinkOption) (*Link, error) {
+ authSession, err := conn.NewSession()
+ if err != nil {
+ return nil, err
+ }
+
+ return NewLinkWithSession(authSession, address, opts...)
+}
+
+// NewLinkWithSession will build a new request response link, but will reuse an existing AMQP session
+func NewLinkWithSession(session *amqp.Session, address string, opts ...LinkOption) (*Link, error) {
+ linkID, err := uuid.NewV4()
+ if err != nil {
+ return nil, err
+ }
+
+ id := linkID.String()
+ link := &Link{
+ session: session,
+ clientAddress: strings.Replace("$", "", address, -1) + replyPostfix + id,
+ id: id,
+ }
+
+ for _, opt := range opts {
+ if err := opt(link); err != nil {
+ return nil, err
+ }
+ }
+
+ sender, err := session.NewSender(
+ amqp.LinkTargetAddress(address),
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ receiverOpts := []amqp.LinkOption{
+ amqp.LinkSourceAddress(address),
+ amqp.LinkTargetAddress(link.clientAddress),
+ }
+
+ if link.sessionID != nil {
+ const name = "com.microsoft:session-filter"
+ const code = uint64(0x00000137000000C)
+ if link.sessionID == nil {
+ receiverOpts = append(receiverOpts, amqp.LinkSourceFilter(name, code, nil))
+ } else {
+ receiverOpts = append(receiverOpts, amqp.LinkSourceFilter(name, code, link.sessionID))
+ }
+ receiverOpts = append(receiverOpts)
+ }
+
+ receiver, err := session.NewReceiver(receiverOpts...)
+ if err != nil {
+ // make sure we close the sender
+ clsCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+
+ _ = sender.Close(clsCtx)
+ return nil, err
+ }
+
+ link.sender = sender
+ link.receiver = receiver
+
+ return link, nil
+}
+
+// RetryableRPC attempts to retry a request a number of times with delay
+func (l *Link) RetryableRPC(ctx context.Context, times int, delay time.Duration, msg *amqp.Message) (*Response, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.RetryableRPC")
+ defer span.End()
+
+ res, err := common.Retry(times, delay, func() (interface{}, error) {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.RetryableRPC.retry")
+ defer span.End()
+
+ res, err := l.RPC(ctx, msg)
+ if err != nil {
+ tab.For(ctx).Error(fmt.Errorf("error in RPC via link %s: %v", l.id, err))
+ return nil, err
+ }
+
+ switch {
+ case res.Code >= 200 && res.Code < 300:
+ tab.For(ctx).Debug(fmt.Sprintf("successful rpc on link %s: status code %d and description: %s", l.id, res.Code, res.Description))
+ return res, nil
+ case res.Code >= 500:
+ errMessage := fmt.Sprintf("server error link %s: status code %d and description: %s", l.id, res.Code, res.Description)
+ tab.For(ctx).Error(errors.New(errMessage))
+ return nil, common.Retryable(errMessage)
+ default:
+ errMessage := fmt.Sprintf("unhandled error link %s: status code %d and description: %s", l.id, res.Code, res.Description)
+ tab.For(ctx).Error(errors.New(errMessage))
+ return nil, common.Retryable(errMessage)
+ }
+ })
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+ return res.(*Response), nil
+}
+
+// RPC sends a request and waits on a response for that request
+func (l *Link) RPC(ctx context.Context, msg *amqp.Message) (*Response, error) {
+ const altStatusCodeKey, altDescriptionKey = "statusCode", "statusDescription"
+
+ l.rpcMu.Lock()
+ defer l.rpcMu.Unlock()
+
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.RPC")
+ defer span.End()
+
+ if msg.Properties == nil {
+ msg.Properties = &amqp.MessageProperties{}
+ }
+ msg.Properties.ReplyTo = l.clientAddress
+
+ if msg.ApplicationProperties == nil {
+ msg.ApplicationProperties = make(map[string]interface{})
+ }
+
+ if _, ok := msg.ApplicationProperties["server-timeout"]; !ok {
+ if deadline, ok := ctx.Deadline(); ok {
+ msg.ApplicationProperties["server-timeout"] = uint(time.Until(deadline) / time.Millisecond)
+ }
+ }
+
+ err := l.sender.Send(ctx, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ res, err := l.receiver.Receive(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var statusCode int
+ statusCodeCandidates := []string{statusCodeKey, altStatusCodeKey}
+ for i := range statusCodeCandidates {
+ if rawStatusCode, ok := res.ApplicationProperties[statusCodeCandidates[i]]; ok {
+ if cast, ok := rawStatusCode.(int32); ok {
+ statusCode = int(cast)
+ break
+ } else {
+ err := errors.New("status code was not of expected type int32")
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+ }
+ }
+ if statusCode == 0 {
+ err := errors.New("status codes was not found on rpc message")
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var description string
+ descriptionCandidates := []string{descriptionKey, altDescriptionKey}
+ for i := range descriptionCandidates {
+ if rawDescription, ok := res.ApplicationProperties[descriptionCandidates[i]]; ok {
+ if description, ok = rawDescription.(string); ok || rawDescription == nil {
+ break
+ } else {
+ return nil, errors.New("status description was not of expected type string")
+ }
+ }
+ }
+
+ span.AddAttributes(tab.StringAttribute("http.status_code", fmt.Sprintf("%d", statusCode)))
+
+ response := &Response{
+ Code: int(statusCode),
+ Description: description,
+ Message: res,
+ }
+
+ if err := res.Accept(); err != nil {
+ tab.For(ctx).Error(err)
+ return response, err
+ }
+
+ return response, err
+}
+
+// Close the link receiver, sender and session
+func (l *Link) Close(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.Close")
+ defer span.End()
+
+ if err := l.closeReceiver(ctx); err != nil {
+ _ = l.closeSender(ctx)
+ _ = l.closeSession(ctx)
+ return err
+ }
+
+ if err := l.closeSender(ctx); err != nil {
+ _ = l.closeSession(ctx)
+ return err
+ }
+
+ return l.closeSession(ctx)
+}
+
+func (l *Link) closeReceiver(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.closeReceiver")
+ defer span.End()
+
+ if l.receiver != nil {
+ return l.receiver.Close(ctx)
+ }
+ return nil
+}
+
+func (l *Link) closeSender(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.closeSender")
+ defer span.End()
+
+ if l.sender != nil {
+ return l.sender.Close(ctx)
+ }
+ return nil
+}
+
+func (l *Link) closeSession(ctx context.Context) error {
+ ctx, span := tracing.StartSpanFromContext(ctx, "az-amqp-common.rpc.closeSession")
+ defer span.End()
+
+ if l.session != nil {
+ return l.session.Close(ctx)
+ }
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/sas/sas.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/sas/sas.go
new file mode 100644
index 00000000..101fd640
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/sas/sas.go
@@ -0,0 +1,158 @@
+// Package sas provides SAS token functionality which implements TokenProvider from package auth for use with Azure
+// Event Hubs and Service Bus.
+package sas
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "crypto/hmac"
+ "crypto/sha256"
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "net/url"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2/auth"
+ "github.com/Azure/azure-amqp-common-go/v2/conn"
+)
+
+type (
+ // Signer provides SAS token generation for use in Service Bus and Event Hub
+ Signer struct {
+ KeyName string
+ Key string
+ }
+
+ // TokenProvider is a SAS claims-based security token provider
+ TokenProvider struct {
+ signer *Signer
+ }
+
+ // TokenProviderOption provides configuration options for SAS Token Providers
+ TokenProviderOption func(*TokenProvider) error
+)
+
+// TokenProviderWithEnvironmentVars creates a new SAS TokenProvider from environment variables
+//
+// There are two sets of environment variables which can produce a SAS TokenProvider
+//
+// 1) Expected Environment Variables:
+// - "EVENTHUB_KEY_NAME" the name of the Event Hub key
+// - "EVENTHUB_KEY_VALUE" the secret for the Event Hub key named in "EVENTHUB_KEY_NAME"
+//
+// 2) Expected Environment Variable:
+// - "EVENTHUB_CONNECTION_STRING" connection string from the Azure portal
+//
+// looks like: Endpoint=sb://namespace.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=superSecret1234=
+func TokenProviderWithEnvironmentVars() TokenProviderOption {
+ return func(provider *TokenProvider) error {
+ connStr := os.Getenv("EVENTHUB_CONNECTION_STRING")
+ if connStr != "" {
+ parsed, err := conn.ParsedConnectionFromStr(connStr)
+ if err != nil {
+ return err
+ }
+ provider.signer = NewSigner(parsed.KeyName, parsed.Key)
+ return nil
+ }
+
+ var (
+ keyName = os.Getenv("EVENTHUB_KEY_NAME")
+ keyValue = os.Getenv("EVENTHUB_KEY_VALUE")
+ )
+
+ if keyName == "" || keyValue == "" {
+ return errors.New("unable to build SAS token provider because (EVENTHUB_KEY_NAME and EVENTHUB_KEY_VALUE) were empty, and EVENTHUB_CONNECTION_STRING was empty")
+ }
+ provider.signer = NewSigner(keyName, keyValue)
+ return nil
+ }
+}
+
+// TokenProviderWithKey configures a SAS TokenProvider to use the given key name and key (secret) for signing
+func TokenProviderWithKey(keyName, key string) TokenProviderOption {
+ return func(provider *TokenProvider) error {
+ provider.signer = NewSigner(keyName, key)
+ return nil
+ }
+}
+
+// NewTokenProvider builds a SAS claims-based security token provider
+func NewTokenProvider(opts ...TokenProviderOption) (*TokenProvider, error) {
+ provider := new(TokenProvider)
+ for _, opt := range opts {
+ err := opt(provider)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return provider, nil
+}
+
+// GetToken gets a CBS SAS token
+func (t *TokenProvider) GetToken(audience string) (*auth.Token, error) {
+ signature, expiry := t.signer.SignWithDuration(audience, 2*time.Hour)
+ return auth.NewToken(auth.CBSTokenTypeSAS, signature, expiry), nil
+}
+
+// NewSigner builds a new SAS signer for use in generation Service Bus and Event Hub SAS tokens
+func NewSigner(keyName, key string) *Signer {
+ return &Signer{
+ KeyName: keyName,
+ Key: key,
+ }
+}
+
+// SignWithDuration signs a given for a period of time from now
+func (s *Signer) SignWithDuration(uri string, interval time.Duration) (signature, expiry string) {
+ expiry = signatureExpiry(time.Now().UTC(), interval)
+ return s.SignWithExpiry(uri, expiry), expiry
+}
+
+// SignWithExpiry signs a given uri with a given expiry string
+func (s *Signer) SignWithExpiry(uri, expiry string) string {
+ audience := strings.ToLower(url.QueryEscape(uri))
+ sts := stringToSign(audience, expiry)
+ sig := s.signString(sts)
+ return fmt.Sprintf("SharedAccessSignature sr=%s&sig=%s&se=%s&skn=%s", audience, sig, expiry, s.KeyName)
+}
+
+func signatureExpiry(from time.Time, interval time.Duration) string {
+ t := from.Add(interval).Round(time.Second).Unix()
+ return strconv.FormatInt(t, 10)
+}
+
+func stringToSign(uri, expiry string) string {
+ return uri + "\n" + expiry
+}
+
+func (s *Signer) signString(str string) string {
+ h := hmac.New(sha256.New, []byte(s.Key))
+ h.Write([]byte(str))
+ encodedSig := base64.StdEncoding.EncodeToString(h.Sum(nil))
+ return url.QueryEscape(encodedSig)
+}
diff --git a/vendor/github.com/Azure/azure-amqp-common-go/v2/uuid/uuid.go b/vendor/github.com/Azure/azure-amqp-common-go/v2/uuid/uuid.go
new file mode 100644
index 00000000..8d1f5f74
--- /dev/null
+++ b/vendor/github.com/Azure/azure-amqp-common-go/v2/uuid/uuid.go
@@ -0,0 +1,72 @@
+package uuid
+
+import (
+ "crypto/rand"
+ "encoding/hex"
+)
+
+// Size of a UUID in bytes.
+const Size = 16
+
+// UUID versions
+const (
+ _ byte = iota
+ _
+ _
+ _
+ V4
+ _
+
+ _ byte = iota
+ VariantRFC4122
+)
+
+type (
+ // UUID representation compliant with specification
+ // described in RFC 4122.
+ UUID [Size]byte
+)
+
+var (
+ randomReader = rand.Reader
+
+ // Nil is special form of UUID that is specified to have all
+ // 128 bits set to zero.
+ Nil = UUID{}
+)
+
+// NewV4 returns random generated UUID.
+func NewV4() (UUID, error) {
+ u := UUID{}
+ if _, err := randomReader.Read(u[:]); err != nil {
+ return Nil, err
+ }
+ u.setVersion(V4)
+ u.setVariant(VariantRFC4122)
+
+ return u, nil
+}
+
+func (u *UUID) setVersion(v byte) {
+ u[6] = (u[6] & 0x0f) | (v << 4)
+}
+
+func (u *UUID) setVariant(v byte) {
+ u[8] = u[8]&(0xff>>2) | (0x02 << 6)
+}
+
+func (u UUID) String() string {
+ buf := make([]byte, 36)
+
+ hex.Encode(buf[0:8], u[0:4])
+ buf[8] = '-'
+ hex.Encode(buf[9:13], u[4:6])
+ buf[13] = '-'
+ hex.Encode(buf[14:18], u[6:8])
+ buf[18] = '-'
+ hex.Encode(buf[19:23], u[8:10])
+ buf[23] = '-'
+ hex.Encode(buf[24:], u[10:])
+
+ return string(buf)
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/events.go b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/events.go
index 5deba043..64825cbc 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/events.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/events.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -50,6 +51,16 @@ func NewEventsClientWithBaseURI(baseURI string) EventsClient {
// timespan - optional. The timespan over which to retrieve events. This is an ISO8601 time period value. This
// timespan is applied in addition to any that are specified in the Odata expression.
func (client EventsClient) Get(ctx context.Context, appID string, eventType EventType, eventID string, timespan string) (result EventsResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, appID, eventType, eventID, timespan)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.EventsClient", "Get", nil, "Failure preparing request")
@@ -132,6 +143,16 @@ func (client EventsClient) GetResponder(resp *http.Response) (result EventsResul
// count - request a count of matching items included with the returned events
// apply - an expression used for aggregation over returned events
func (client EventsClient) GetByType(ctx context.Context, appID string, eventType EventType, timespan string, filter string, search string, orderby string, selectParameter string, skip *int32, top *int32, formatParameter string, count *bool, apply string) (result EventsResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventsClient.GetByType")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetByTypePreparer(ctx, appID, eventType, timespan, filter, search, orderby, selectParameter, skip, top, formatParameter, count, apply)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.EventsClient", "GetByType", nil, "Failure preparing request")
@@ -225,6 +246,16 @@ func (client EventsClient) GetByTypeResponder(resp *http.Response) (result Event
// appID - ID of the application. This is Application ID from the API Access settings blade in the Azure
// portal.
func (client EventsClient) GetOdataMetadata(ctx context.Context, appID string) (result SetObject, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventsClient.GetOdataMetadata")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetOdataMetadataPreparer(ctx, appID)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.EventsClient", "GetOdataMetadata", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/metrics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/metrics.go
index f54e8677..70f525fd 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/metrics.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/metrics.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -66,6 +67,16 @@ func NewMetricsClientWithBaseURI(baseURI string) MetricsClient {
// filter - an expression used to filter the results. This value should be a valid OData filter expression
// where the keys of each clause should be applicable dimensions for the metric you are retrieving.
func (client MetricsClient) Get(ctx context.Context, appID string, metricID MetricID, timespan string, interval *string, aggregation []MetricsAggregation, segment []MetricsSegment, top *int32, orderby string, filter string) (result MetricsResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: aggregation,
Constraints: []validation.Constraint{{Target: "aggregation", Name: validation.Null, Rule: false,
@@ -160,6 +171,16 @@ func (client MetricsClient) GetResponder(resp *http.Response) (result MetricsRes
// appID - ID of the application. This is Application ID from the API Access settings blade in the Azure
// portal.
func (client MetricsClient) GetMetadata(ctx context.Context, appID string) (result SetObject, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricsClient.GetMetadata")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetMetadataPreparer(ctx, appID)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricsClient", "GetMetadata", nil, "Failure preparing request")
@@ -220,6 +241,16 @@ func (client MetricsClient) GetMetadataResponder(resp *http.Response) (result Se
// portal.
// body - the batched metrics query.
func (client MetricsClient) GetMultiple(ctx context.Context, appID string, body []MetricsPostBodySchema) (result ListMetricsResultsItem, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricsClient.GetMultiple")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/models.go
index b4b3d6fb..5165da8b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/models.go
@@ -23,6 +23,9 @@ import (
"github.com/Azure/go-autorest/autorest/date"
)
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights"
+
// EventType enumerates the values for event type.
type EventType string
@@ -2547,5 +2550,5 @@ type Table struct {
// Columns - The list of columns in this table.
Columns *[]Column `json:"columns,omitempty"`
// Rows - The resulting rows from this query.
- Rows *[][]string `json:"rows,omitempty"`
+ Rows *[][]interface{} `json:"rows,omitempty"`
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/query.go b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/query.go
index d306b6bf..ae9cd261 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/query.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/appinsights/v1/insights/query.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -49,6 +50,16 @@ func NewQueryClientWithBaseURI(baseURI string) QueryClient {
// body - the Analytics query. Learn more about the [Analytics query
// syntax](https://azure.microsoft.com/documentation/articles/app-insights-analytics-reference/)
func (client QueryClient) Execute(ctx context.Context, appID string, body QueryBody) (result QueryResults, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueryClient.Execute")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: body,
Constraints: []validation.Constraint{{Target: "body.Query", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go
index 36c28c60..b7a2c3b3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/actiongroups.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -46,6 +47,16 @@ func NewActionGroupsClientWithBaseURI(baseURI string, subscriptionID string) Act
// actionGroupName - the name of the action group.
// actionGroup - the action group to create or use for the update.
func (client ActionGroupsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroup ActionGroupResource) (result ActionGroupResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: actionGroup,
Constraints: []validation.Constraint{{Target: "actionGroup.ActionGroup", Name: validation.Null, Rule: false,
@@ -125,6 +136,16 @@ func (client ActionGroupsClient) CreateOrUpdateResponder(resp *http.Response) (r
// resourceGroupName - the name of the resource group.
// actionGroupName - the name of the action group.
func (client ActionGroupsClient) Delete(ctx context.Context, resourceGroupName string, actionGroupName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceGroupName, actionGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Delete", nil, "Failure preparing request")
@@ -193,6 +214,16 @@ func (client ActionGroupsClient) DeleteResponder(resp *http.Response) (result au
// actionGroupName - the name of the action group.
// enableRequest - the receiver to re-enable.
func (client ActionGroupsClient) EnableReceiver(ctx context.Context, resourceGroupName string, actionGroupName string, enableRequest EnableRequest) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.EnableReceiver")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: enableRequest,
Constraints: []validation.Constraint{{Target: "enableRequest.ReceiverName", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
@@ -267,6 +298,16 @@ func (client ActionGroupsClient) EnableReceiverResponder(resp *http.Response) (r
// resourceGroupName - the name of the resource group.
// actionGroupName - the name of the action group.
func (client ActionGroupsClient) Get(ctx context.Context, resourceGroupName string, actionGroupName string) (result ActionGroupResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, actionGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Get", nil, "Failure preparing request")
@@ -333,6 +374,16 @@ func (client ActionGroupsClient) GetResponder(resp *http.Response) (result Actio
// Parameters:
// resourceGroupName - the name of the resource group.
func (client ActionGroupsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ActionGroupList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListByResourceGroup", nil, "Failure preparing request")
@@ -396,6 +447,16 @@ func (client ActionGroupsClient) ListByResourceGroupResponder(resp *http.Respons
// ListBySubscriptionID get a list of all action groups in a subscription.
func (client ActionGroupsClient) ListBySubscriptionID(ctx context.Context) (result ActionGroupList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.ListBySubscriptionID")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListBySubscriptionIDPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "ListBySubscriptionID", nil, "Failure preparing request")
@@ -462,6 +523,16 @@ func (client ActionGroupsClient) ListBySubscriptionIDResponder(resp *http.Respon
// actionGroupName - the name of the action group.
// actionGroupPatch - parameters supplied to the operation.
func (client ActionGroupsClient) Update(ctx context.Context, resourceGroupName string, actionGroupName string, actionGroupPatch ActionGroupPatchBody) (result ActionGroupResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActionGroupsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, resourceGroupName, actionGroupName, actionGroupPatch)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActionGroupsClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go
index 9b86523b..1e0b071b 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogalerts.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -46,6 +47,16 @@ func NewActivityLogAlertsClientWithBaseURI(baseURI string, subscriptionID string
// activityLogAlertName - the name of the activity log alert.
// activityLogAlert - the activity log alert to create or use for the update.
func (client ActivityLogAlertsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlert ActivityLogAlertResource) (result ActivityLogAlertResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: activityLogAlert,
Constraints: []validation.Constraint{{Target: "activityLogAlert.ActivityLogAlert", Name: validation.Null, Rule: false,
@@ -126,6 +137,16 @@ func (client ActivityLogAlertsClient) CreateOrUpdateResponder(resp *http.Respons
// resourceGroupName - the name of the resource group.
// activityLogAlertName - the name of the activity log alert.
func (client ActivityLogAlertsClient) Delete(ctx context.Context, resourceGroupName string, activityLogAlertName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceGroupName, activityLogAlertName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Delete", nil, "Failure preparing request")
@@ -192,6 +213,16 @@ func (client ActivityLogAlertsClient) DeleteResponder(resp *http.Response) (resu
// resourceGroupName - the name of the resource group.
// activityLogAlertName - the name of the activity log alert.
func (client ActivityLogAlertsClient) Get(ctx context.Context, resourceGroupName string, activityLogAlertName string) (result ActivityLogAlertResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, activityLogAlertName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Get", nil, "Failure preparing request")
@@ -258,6 +289,16 @@ func (client ActivityLogAlertsClient) GetResponder(resp *http.Response) (result
// Parameters:
// resourceGroupName - the name of the resource group.
func (client ActivityLogAlertsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result ActivityLogAlertList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListByResourceGroup", nil, "Failure preparing request")
@@ -321,6 +362,16 @@ func (client ActivityLogAlertsClient) ListByResourceGroupResponder(resp *http.Re
// ListBySubscriptionID get a list of all activity log alerts in a subscription.
func (client ActivityLogAlertsClient) ListBySubscriptionID(ctx context.Context) (result ActivityLogAlertList, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.ListBySubscriptionID")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListBySubscriptionIDPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "ListBySubscriptionID", nil, "Failure preparing request")
@@ -387,6 +438,16 @@ func (client ActivityLogAlertsClient) ListBySubscriptionIDResponder(resp *http.R
// activityLogAlertName - the name of the activity log alert.
// activityLogAlertPatch - parameters supplied to the operation.
func (client ActivityLogAlertsClient) Update(ctx context.Context, resourceGroupName string, activityLogAlertName string, activityLogAlertPatch ActivityLogAlertPatchBody) (result ActivityLogAlertResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogAlertsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, resourceGroupName, activityLogAlertName, activityLogAlertPatch)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ActivityLogAlertsClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go
index 0a3c4e11..92120d84 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/activitylogs.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -59,6 +60,16 @@ func NewActivityLogsClientWithBaseURI(baseURI string, subscriptionID string) Act
// *operationId*, *operationName*, *properties*, *resourceGroupName*, *resourceProviderName*, *resourceId*,
// *status*, *submissionTimestamp*, *subStatus*, *subscriptionId*
func (client ActivityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogsClient.List")
+ defer func() {
+ sc := -1
+ if result.edc.Response.Response != nil {
+ sc = result.edc.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, filter, selectParameter)
if err != nil {
@@ -127,8 +138,8 @@ func (client ActivityLogsClient) ListResponder(resp *http.Response) (result Even
}
// listNextResults retrieves the next set of results, if any.
-func (client ActivityLogsClient) listNextResults(lastResults EventDataCollection) (result EventDataCollection, err error) {
- req, err := lastResults.eventDataCollectionPreparer()
+func (client ActivityLogsClient) listNextResults(ctx context.Context, lastResults EventDataCollection) (result EventDataCollection, err error) {
+ req, err := lastResults.eventDataCollectionPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "insights.ActivityLogsClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -149,6 +160,16 @@ func (client ActivityLogsClient) listNextResults(lastResults EventDataCollection
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client ActivityLogsClient) ListComplete(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ActivityLogsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx, filter, selectParameter)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go
index aa0f1f5b..71ddce0d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertruleincidents.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -45,6 +46,16 @@ func NewAlertRuleIncidentsClientWithBaseURI(baseURI string, subscriptionID strin
// ruleName - the name of the rule.
// incidentName - the name of the incident to retrieve.
func (client AlertRuleIncidentsClient) Get(ctx context.Context, resourceGroupName string, ruleName string, incidentName string) (result Incident, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleIncidentsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, ruleName, incidentName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "Get", nil, "Failure preparing request")
@@ -113,6 +124,16 @@ func (client AlertRuleIncidentsClient) GetResponder(resp *http.Response) (result
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client AlertRuleIncidentsClient) ListByAlertRule(ctx context.Context, resourceGroupName string, ruleName string) (result IncidentListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRuleIncidentsClient.ListByAlertRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByAlertRulePreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRuleIncidentsClient", "ListByAlertRule", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go
index 10956eed..4de0da4d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/alertrules.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -46,6 +47,16 @@ func NewAlertRulesClientWithBaseURI(baseURI string, subscriptionID string) Alert
// ruleName - the name of the rule.
// parameters - the parameters of the rule to create or update.
func (client AlertRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters AlertRuleResource) (result AlertRuleResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.AlertRule", Name: validation.Null, Rule: true,
@@ -125,6 +136,16 @@ func (client AlertRulesClient) CreateOrUpdateResponder(resp *http.Response) (res
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client AlertRulesClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Delete", nil, "Failure preparing request")
@@ -191,6 +212,16 @@ func (client AlertRulesClient) DeleteResponder(resp *http.Response) (result auto
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client AlertRulesClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result AlertRuleResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Get", nil, "Failure preparing request")
@@ -257,6 +288,16 @@ func (client AlertRulesClient) GetResponder(resp *http.Response) (result AlertRu
// Parameters:
// resourceGroupName - the name of the resource group.
func (client AlertRulesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AlertRuleResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListByResourceGroup", nil, "Failure preparing request")
@@ -320,6 +361,16 @@ func (client AlertRulesClient) ListByResourceGroupResponder(resp *http.Response)
// ListBySubscription list the alert rules within a subscription.
func (client AlertRulesClient) ListBySubscription(ctx context.Context) (result AlertRuleResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListBySubscriptionPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "ListBySubscription", nil, "Failure preparing request")
@@ -386,6 +437,16 @@ func (client AlertRulesClient) ListBySubscriptionResponder(resp *http.Response)
// ruleName - the name of the rule.
// alertRulesResource - parameters supplied to the operation.
func (client AlertRulesClient) Update(ctx context.Context, resourceGroupName string, ruleName string, alertRulesResource AlertRuleResourcePatch) (result AlertRuleResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AlertRulesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, alertRulesResource)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AlertRulesClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go
index 72f97584..dd6a3b0e 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/autoscalesettings.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -46,6 +47,16 @@ func NewAutoscaleSettingsClientWithBaseURI(baseURI string, subscriptionID string
// autoscaleSettingName - the autoscale setting name.
// parameters - parameters supplied to the operation.
func (client AutoscaleSettingsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, autoscaleSettingName string, parameters AutoscaleSettingResource) (result AutoscaleSettingResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.AutoscaleSetting", Name: validation.Null, Rule: true,
@@ -124,6 +135,16 @@ func (client AutoscaleSettingsClient) CreateOrUpdateResponder(resp *http.Respons
// resourceGroupName - the name of the resource group.
// autoscaleSettingName - the autoscale setting name.
func (client AutoscaleSettingsClient) Delete(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceGroupName, autoscaleSettingName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Delete", nil, "Failure preparing request")
@@ -190,6 +211,16 @@ func (client AutoscaleSettingsClient) DeleteResponder(resp *http.Response) (resu
// resourceGroupName - the name of the resource group.
// autoscaleSettingName - the autoscale setting name.
func (client AutoscaleSettingsClient) Get(ctx context.Context, resourceGroupName string, autoscaleSettingName string) (result AutoscaleSettingResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, autoscaleSettingName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Get", nil, "Failure preparing request")
@@ -256,6 +287,16 @@ func (client AutoscaleSettingsClient) GetResponder(resp *http.Response) (result
// Parameters:
// resourceGroupName - the name of the resource group.
func (client AutoscaleSettingsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result AutoscaleSettingResourceCollectionPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.asrc.Response.Response != nil {
+ sc = result.asrc.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listByResourceGroupNextResults
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
@@ -319,8 +360,8 @@ func (client AutoscaleSettingsClient) ListByResourceGroupResponder(resp *http.Re
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client AutoscaleSettingsClient) listByResourceGroupNextResults(lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) {
- req, err := lastResults.autoscaleSettingResourceCollectionPreparer()
+func (client AutoscaleSettingsClient) listByResourceGroupNextResults(ctx context.Context, lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) {
+ req, err := lastResults.autoscaleSettingResourceCollectionPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
@@ -341,12 +382,32 @@ func (client AutoscaleSettingsClient) listByResourceGroupNextResults(lastResults
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client AutoscaleSettingsClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result AutoscaleSettingResourceCollectionIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
// ListBySubscription lists the autoscale settings for a subscription
func (client AutoscaleSettingsClient) ListBySubscription(ctx context.Context) (result AutoscaleSettingResourceCollectionPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.asrc.Response.Response != nil {
+ sc = result.asrc.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listBySubscriptionNextResults
req, err := client.ListBySubscriptionPreparer(ctx)
if err != nil {
@@ -409,8 +470,8 @@ func (client AutoscaleSettingsClient) ListBySubscriptionResponder(resp *http.Res
}
// listBySubscriptionNextResults retrieves the next set of results, if any.
-func (client AutoscaleSettingsClient) listBySubscriptionNextResults(lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) {
- req, err := lastResults.autoscaleSettingResourceCollectionPreparer()
+func (client AutoscaleSettingsClient) listBySubscriptionNextResults(ctx context.Context, lastResults AutoscaleSettingResourceCollection) (result AutoscaleSettingResourceCollection, err error) {
+ req, err := lastResults.autoscaleSettingResourceCollectionPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "listBySubscriptionNextResults", nil, "Failure preparing next results request")
}
@@ -431,6 +492,16 @@ func (client AutoscaleSettingsClient) listBySubscriptionNextResults(lastResults
// ListBySubscriptionComplete enumerates all values, automatically crossing page boundaries as required.
func (client AutoscaleSettingsClient) ListBySubscriptionComplete(ctx context.Context) (result AutoscaleSettingResourceCollectionIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListBySubscription(ctx)
return
}
@@ -441,6 +512,16 @@ func (client AutoscaleSettingsClient) ListBySubscriptionComplete(ctx context.Con
// autoscaleSettingName - the autoscale setting name.
// autoscaleSettingResource - parameters supplied to the operation.
func (client AutoscaleSettingsClient) Update(ctx context.Context, resourceGroupName string, autoscaleSettingName string, autoscaleSettingResource AutoscaleSettingResourcePatch) (result AutoscaleSettingResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, resourceGroupName, autoscaleSettingName, autoscaleSettingResource)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.AutoscaleSettingsClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go
index 1010aad7..441f65a3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettings.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -45,6 +46,16 @@ func NewDiagnosticSettingsClientWithBaseURI(baseURI string, subscriptionID strin
// parameters - parameters supplied to the operation.
// name - the name of the diagnostic setting.
func (client DiagnosticSettingsClient) CreateOrUpdate(ctx context.Context, resourceURI string, parameters DiagnosticSettingsResource, name string) (result DiagnosticSettingsResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.CreateOrUpdatePreparer(ctx, resourceURI, parameters, name)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "CreateOrUpdate", nil, "Failure preparing request")
@@ -113,6 +124,16 @@ func (client DiagnosticSettingsClient) CreateOrUpdateResponder(resp *http.Respon
// resourceURI - the identifier of the resource.
// name - the name of the diagnostic setting.
func (client DiagnosticSettingsClient) Delete(ctx context.Context, resourceURI string, name string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceURI, name)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Delete", nil, "Failure preparing request")
@@ -178,6 +199,16 @@ func (client DiagnosticSettingsClient) DeleteResponder(resp *http.Response) (res
// resourceURI - the identifier of the resource.
// name - the name of the diagnostic setting.
func (client DiagnosticSettingsClient) Get(ctx context.Context, resourceURI string, name string) (result DiagnosticSettingsResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceURI, name)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "Get", nil, "Failure preparing request")
@@ -243,6 +274,16 @@ func (client DiagnosticSettingsClient) GetResponder(resp *http.Response) (result
// Parameters:
// resourceURI - the identifier of the resource.
func (client DiagnosticSettingsClient) List(ctx context.Context, resourceURI string) (result DiagnosticSettingsResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx, resourceURI)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsClient", "List", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go
index ec64a3a8..724253f3 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/diagnosticsettingscategory.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -44,6 +45,16 @@ func NewDiagnosticSettingsCategoryClientWithBaseURI(baseURI string, subscription
// resourceURI - the identifier of the resource.
// name - the name of the diagnostic setting.
func (client DiagnosticSettingsCategoryClient) Get(ctx context.Context, resourceURI string, name string) (result DiagnosticSettingsCategoryResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsCategoryClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceURI, name)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "Get", nil, "Failure preparing request")
@@ -109,6 +120,16 @@ func (client DiagnosticSettingsCategoryClient) GetResponder(resp *http.Response)
// Parameters:
// resourceURI - the identifier of the resource.
func (client DiagnosticSettingsCategoryClient) List(ctx context.Context, resourceURI string) (result DiagnosticSettingsCategoryResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DiagnosticSettingsCategoryClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx, resourceURI)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.DiagnosticSettingsCategoryClient", "List", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go
index fe2200db..ed5854d8 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/eventcategories.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -42,6 +43,16 @@ func NewEventCategoriesClientWithBaseURI(baseURI string, subscriptionID string)
// List get the list of available event categories supported in the Activity Logs Service.
The current list includes
// the following: Administrative, Security, ServiceHealth, Alert, Recommendation, Policy.
func (client EventCategoriesClient) List(ctx context.Context) (result EventCategoryCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventCategoriesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.EventCategoriesClient", "List", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go
index 70f4ec53..442bc2f2 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/logprofiles.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -45,6 +46,16 @@ func NewLogProfilesClientWithBaseURI(baseURI string, subscriptionID string) LogP
// logProfileName - the name of the log profile.
// parameters - parameters supplied to the operation.
func (client LogProfilesClient) CreateOrUpdate(ctx context.Context, logProfileName string, parameters LogProfileResource) (result LogProfileResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.LogProfileProperties", Name: validation.Null, Rule: true,
@@ -126,6 +137,16 @@ func (client LogProfilesClient) CreateOrUpdateResponder(resp *http.Response) (re
// Parameters:
// logProfileName - the name of the log profile.
func (client LogProfilesClient) Delete(ctx context.Context, logProfileName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, logProfileName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Delete", nil, "Failure preparing request")
@@ -190,6 +211,16 @@ func (client LogProfilesClient) DeleteResponder(resp *http.Response) (result aut
// Parameters:
// logProfileName - the name of the log profile.
func (client LogProfilesClient) Get(ctx context.Context, logProfileName string) (result LogProfileResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, logProfileName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Get", nil, "Failure preparing request")
@@ -253,6 +284,16 @@ func (client LogProfilesClient) GetResponder(resp *http.Response) (result LogPro
// List list the log profiles.
func (client LogProfilesClient) List(ctx context.Context) (result LogProfileCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "List", nil, "Failure preparing request")
@@ -318,6 +359,16 @@ func (client LogProfilesClient) ListResponder(resp *http.Response) (result LogPr
// logProfileName - the name of the log profile.
// logProfilesResource - parameters supplied to the operation.
func (client LogProfilesClient) Update(ctx context.Context, logProfileName string, logProfilesResource LogProfileResourcePatch) (result LogProfileResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/LogProfilesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, logProfileName, logProfilesResource)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.LogProfilesClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go
index 60395cc2..240ac032 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalerts.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -46,6 +47,16 @@ func NewMetricAlertsClientWithBaseURI(baseURI string, subscriptionID string) Met
// ruleName - the name of the rule.
// parameters - the parameters of the rule to create or update.
func (client MetricAlertsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResource) (result MetricAlertResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.MetricAlertProperties", Name: validation.Null, Rule: true,
@@ -123,11 +134,21 @@ func (client MetricAlertsClient) CreateOrUpdateResponder(resp *http.Response) (r
return
}
-// Delete delete an alert rule defitiniton.
+// Delete delete an alert rule definition.
// Parameters:
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client MetricAlertsClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Delete", nil, "Failure preparing request")
@@ -189,11 +210,21 @@ func (client MetricAlertsClient) DeleteResponder(resp *http.Response) (result au
return
}
-// Get retrieve an alert rule definiton.
+// Get retrieve an alert rule definition.
// Parameters:
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client MetricAlertsClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result MetricAlertResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Get", nil, "Failure preparing request")
@@ -256,10 +287,20 @@ func (client MetricAlertsClient) GetResponder(resp *http.Response) (result Metri
return
}
-// ListByResourceGroup retrieve alert rule defintions in a resource group.
+// ListByResourceGroup retrieve alert rule definitions in a resource group.
// Parameters:
// resourceGroupName - the name of the resource group.
func (client MetricAlertsClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result MetricAlertResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListByResourceGroup", nil, "Failure preparing request")
@@ -323,6 +364,16 @@ func (client MetricAlertsClient) ListByResourceGroupResponder(resp *http.Respons
// ListBySubscription retrieve alert rule definitions in a subscription.
func (client MetricAlertsClient) ListBySubscription(ctx context.Context) (result MetricAlertResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListBySubscriptionPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "ListBySubscription", nil, "Failure preparing request")
@@ -389,6 +440,16 @@ func (client MetricAlertsClient) ListBySubscriptionResponder(resp *http.Response
// ruleName - the name of the rule.
// parameters - the parameters of the rule to update.
func (client MetricAlertsClient) Update(ctx context.Context, resourceGroupName string, ruleName string, parameters MetricAlertResourcePatch) (result MetricAlertResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go
index 7ee078c4..389ce69d 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricalertsstatus.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -44,6 +45,16 @@ func NewMetricAlertsStatusClientWithBaseURI(baseURI string, subscriptionID strin
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client MetricAlertsStatusClient) List(ctx context.Context, resourceGroupName string, ruleName string) (result MetricAlertStatusCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsStatusClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "List", nil, "Failure preparing request")
@@ -112,6 +123,16 @@ func (client MetricAlertsStatusClient) ListResponder(resp *http.Response) (resul
// ruleName - the name of the rule.
// statusName - the name of the status.
func (client MetricAlertsStatusClient) ListByName(ctx context.Context, resourceGroupName string, ruleName string, statusName string) (result MetricAlertStatusCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricAlertsStatusClient.ListByName")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByNamePreparer(ctx, resourceGroupName, ruleName, statusName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricAlertsStatusClient", "ListByName", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go
index 02cb8cba..43042eac 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricbaseline.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -48,6 +49,16 @@ func NewMetricBaselineClientWithBaseURI(baseURI string, subscriptionID string) M
// subscriptions/b368ca2f-e298-46b7-b0ab-012281956afa/resourceGroups/vms/providers/Microsoft.Compute/virtualMachines/vm1
// timeSeriesInformation - information that need to be specified to calculate a baseline on a time series.
func (client MetricBaselineClient) CalculateBaseline(ctx context.Context, resourceURI string, timeSeriesInformation TimeSeriesInformation) (result CalculateBaselineResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricBaselineClient.CalculateBaseline")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: timeSeriesInformation,
Constraints: []validation.Constraint{{Target: "timeSeriesInformation.Sensitivities", Name: validation.Null, Rule: true, Chain: nil},
@@ -131,6 +142,16 @@ func (client MetricBaselineClient) CalculateBaselineResponder(resp *http.Respons
// sensitivities - the list of sensitivities (comma separated) to retrieve.
// resultType - allows retrieving only metadata of the baseline. On data request all information is retrieved.
func (client MetricBaselineClient) Get(ctx context.Context, resourceURI string, metricName string, timespan string, interval *string, aggregation string, sensitivities string, resultType ResultType) (result BaselineResponse, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricBaselineClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceURI, metricName, timespan, interval, aggregation, sensitivities, resultType)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricBaselineClient", "Get", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go
index dc73c330..289abe93 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metricdefinitions.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -44,6 +45,16 @@ func NewMetricDefinitionsClientWithBaseURI(baseURI string, subscriptionID string
// resourceURI - the identifier of the resource.
// metricnamespace - metric namespace to query metric definitions for.
func (client MetricDefinitionsClient) List(ctx context.Context, resourceURI string, metricnamespace string) (result MetricDefinitionCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricDefinitionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx, resourceURI, metricnamespace)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricDefinitionsClient", "List", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go
index a381c665..4c48dcb9 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/metrics.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -64,6 +65,16 @@ func NewMetricsClientWithBaseURI(baseURI string, subscriptionID string) MetricsC
// operation's description for details.
// metricnamespace - metric namespace to query metric definitions for.
func (client MetricsClient) List(ctx context.Context, resourceURI string, timespan string, interval *string, metricnames string, aggregation string, top *int32, orderby string, filter string, resultType ResultType, metricnamespace string) (result Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MetricsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx, resourceURI, timespan, interval, metricnames, aggregation, top, orderby, filter, resultType, metricnamespace)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.MetricsClient", "List", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go
index 5cf5d117..d8a54921 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/models.go
@@ -18,13 +18,18 @@ package insights
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
+ "context"
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights"
+
// AggregationType enumerates the values for aggregation type.
type AggregationType string
@@ -143,6 +148,23 @@ func PossibleConditionOperatorValues() []ConditionOperator {
return []ConditionOperator{ConditionOperatorGreaterThan, ConditionOperatorGreaterThanOrEqual, ConditionOperatorLessThan, ConditionOperatorLessThanOrEqual}
}
+// CriterionType enumerates the values for criterion type.
+type CriterionType string
+
+const (
+ // CriterionTypeDynamicThresholdCriterion ...
+ CriterionTypeDynamicThresholdCriterion CriterionType = "DynamicThresholdCriterion"
+ // CriterionTypeMultiMetricCriteria ...
+ CriterionTypeMultiMetricCriteria CriterionType = "MultiMetricCriteria"
+ // CriterionTypeStaticThresholdCriterion ...
+ CriterionTypeStaticThresholdCriterion CriterionType = "StaticThresholdCriterion"
+)
+
+// PossibleCriterionTypeValues returns an array of possible values for the CriterionType const type.
+func PossibleCriterionTypeValues() []CriterionType {
+ return []CriterionType{CriterionTypeDynamicThresholdCriterion, CriterionTypeMultiMetricCriteria, CriterionTypeStaticThresholdCriterion}
+}
+
// Enabled enumerates the values for enabled.
type Enabled string
@@ -238,11 +260,13 @@ const (
OdataTypeAction OdataTypeBasicAction = "Action"
// OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction ...
OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction OdataTypeBasicAction = "Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.AlertingAction"
+ // OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction ...
+ OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction OdataTypeBasicAction = "Microsoft.WindowsAzure.Management.Monitoring.Alerts.Models.Microsoft.AppInsights.Nexus.DataContracts.Resources.ScheduledQueryRules.LogToMetricAction"
)
// PossibleOdataTypeBasicActionValues returns an array of possible values for the OdataTypeBasicAction const type.
func PossibleOdataTypeBasicActionValues() []OdataTypeBasicAction {
- return []OdataTypeBasicAction{OdataTypeAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction}
+ return []OdataTypeBasicAction{OdataTypeAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction, OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction}
}
// OdataTypeBasicMetricAlertCriteria enumerates the values for odata type basic metric alert criteria.
@@ -251,13 +275,15 @@ type OdataTypeBasicMetricAlertCriteria string
const (
// OdataTypeMetricAlertCriteria ...
OdataTypeMetricAlertCriteria OdataTypeBasicMetricAlertCriteria = "MetricAlertCriteria"
+ // OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria ...
+ OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.MultipleResourceMultipleMetricCriteria"
// OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria ...
OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria OdataTypeBasicMetricAlertCriteria = "Microsoft.Azure.Monitor.SingleResourceMultipleMetricCriteria"
)
// PossibleOdataTypeBasicMetricAlertCriteriaValues returns an array of possible values for the OdataTypeBasicMetricAlertCriteria const type.
func PossibleOdataTypeBasicMetricAlertCriteriaValues() []OdataTypeBasicMetricAlertCriteria {
- return []OdataTypeBasicMetricAlertCriteria{OdataTypeMetricAlertCriteria, OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria}
+ return []OdataTypeBasicMetricAlertCriteria{OdataTypeMetricAlertCriteria, OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria, OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria}
}
// OdataTypeBasicRuleAction enumerates the values for odata type basic rule action.
@@ -514,12 +540,13 @@ func PossibleUnitValues() []Unit {
// BasicAction action descriptor.
type BasicAction interface {
AsAlertingAction() (*AlertingAction, bool)
+ AsLogToMetricAction() (*LogToMetricAction, bool)
AsAction() (*Action, bool)
}
// Action action descriptor.
type Action struct {
- // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction'
+ // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction'
OdataType OdataTypeBasicAction `json:"odata.type,omitempty"`
}
@@ -535,6 +562,10 @@ func unmarshalBasicAction(body []byte) (BasicAction, error) {
var aa AlertingAction
err := json.Unmarshal(body, &aa)
return aa, err
+ case string(OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction):
+ var ltma LogToMetricAction
+ err := json.Unmarshal(body, &ltma)
+ return ltma, err
default:
var a Action
err := json.Unmarshal(body, &a)
@@ -575,6 +606,11 @@ func (a Action) AsAlertingAction() (*AlertingAction, bool) {
return nil, false
}
+// AsLogToMetricAction is the BasicAction implementation for Action.
+func (a Action) AsLogToMetricAction() (*LogToMetricAction, bool) {
+ return nil, false
+}
+
// AsAction is the BasicAction implementation for Action.
func (a Action) AsAction() (*Action, bool) {
return &a, true
@@ -684,11 +720,11 @@ type ActionGroupResource struct {
autorest.Response `json:"-"`
// ActionGroup - The action groups properties of the resource.
*ActionGroup `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -702,15 +738,6 @@ func (agr ActionGroupResource) MarshalJSON() ([]byte, error) {
if agr.ActionGroup != nil {
objectMap["properties"] = agr.ActionGroup
}
- if agr.ID != nil {
- objectMap["id"] = agr.ID
- }
- if agr.Name != nil {
- objectMap["name"] = agr.Name
- }
- if agr.Type != nil {
- objectMap["type"] = agr.Type
- }
if agr.Location != nil {
objectMap["location"] = agr.Location
}
@@ -829,15 +856,15 @@ type ActivityLogAlertActionList struct {
ActionGroups *[]ActivityLogAlertActionGroup `json:"actionGroups,omitempty"`
}
-// ActivityLogAlertAllOfCondition an Activity Log alert condition that is met when all its member conditions are
-// met.
+// ActivityLogAlertAllOfCondition an Activity Log alert condition that is met when all its member
+// conditions are met.
type ActivityLogAlertAllOfCondition struct {
// AllOf - The list of activity log alert conditions.
AllOf *[]ActivityLogAlertLeafCondition `json:"allOf,omitempty"`
}
-// ActivityLogAlertLeafCondition an Activity Log alert condition that is met by comparing an activity log field and
-// value.
+// ActivityLogAlertLeafCondition an Activity Log alert condition that is met by comparing an activity log
+// field and value.
type ActivityLogAlertLeafCondition struct {
// Field - The name of the field that this condition will examine. The possible values for this field are (case-insensitive): 'resourceId', 'category', 'caller', 'level', 'operationName', 'resourceGroup', 'resourceProvider', 'status', 'subStatus', 'resourceType', or anything beginning with 'properties.'.
Field *string `json:"field,omitempty"`
@@ -918,11 +945,11 @@ type ActivityLogAlertResource struct {
autorest.Response `json:"-"`
// ActivityLogAlert - The activity log alert properties of the resource.
*ActivityLogAlert `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -936,15 +963,6 @@ func (alar ActivityLogAlertResource) MarshalJSON() ([]byte, error) {
if alar.ActivityLogAlert != nil {
objectMap["properties"] = alar.ActivityLogAlert
}
- if alar.ID != nil {
- objectMap["id"] = alar.ID
- }
- if alar.Name != nil {
- objectMap["name"] = alar.Name
- }
- if alar.Type != nil {
- objectMap["type"] = alar.Type
- }
if alar.Location != nil {
objectMap["location"] = alar.Location
}
@@ -1023,7 +1041,7 @@ func (alar *ActivityLogAlertResource) UnmarshalJSON(body []byte) error {
return nil
}
-// AlertingAction specifiy action need to be taken when rule type is Alert
+// AlertingAction specify action need to be taken when rule type is Alert
type AlertingAction struct {
// Severity - Severity of the alert. Possible values include: 'Zero', 'One', 'Two', 'Three', 'Four'
Severity AlertSeverity `json:"severity,omitempty"`
@@ -1033,7 +1051,7 @@ type AlertingAction struct {
ThrottlingInMin *int32 `json:"throttlingInMin,omitempty"`
// Trigger - The trigger condition that results in the alert rule being.
Trigger *TriggerCondition `json:"trigger,omitempty"`
- // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction'
+ // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction'
OdataType OdataTypeBasicAction `json:"odata.type,omitempty"`
}
@@ -1064,6 +1082,11 @@ func (aa AlertingAction) AsAlertingAction() (*AlertingAction, bool) {
return &aa, true
}
+// AsLogToMetricAction is the BasicAction implementation for AlertingAction.
+func (aa AlertingAction) AsLogToMetricAction() (*LogToMetricAction, bool) {
+ return nil, false
+}
+
// AsAction is the BasicAction implementation for AlertingAction.
func (aa AlertingAction) AsAction() (*Action, bool) {
return nil, false
@@ -1086,7 +1109,7 @@ type AlertRule struct {
Condition BasicRuleCondition `json:"condition,omitempty"`
// Actions - the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
Actions *[]BasicRuleAction `json:"actions,omitempty"`
- // LastUpdatedTime - Last time the rule was updated in ISO8601 format.
+ // LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format.
LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
}
@@ -1162,11 +1185,11 @@ type AlertRuleResource struct {
autorest.Response `json:"-"`
// AlertRule - The alert rule properties of the resource.
*AlertRule `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -1180,15 +1203,6 @@ func (arr AlertRuleResource) MarshalJSON() ([]byte, error) {
if arr.AlertRule != nil {
objectMap["properties"] = arr.AlertRule
}
- if arr.ID != nil {
- objectMap["id"] = arr.ID
- }
- if arr.Name != nil {
- objectMap["name"] = arr.Name
- }
- if arr.Type != nil {
- objectMap["type"] = arr.Type
- }
if arr.Location != nil {
objectMap["location"] = arr.Location
}
@@ -1367,7 +1381,8 @@ type AutoscaleProfile struct {
Recurrence *Recurrence `json:"recurrence,omitempty"`
}
-// AutoscaleSetting a setting that contains all of the configuration for the automatic scaling of a resource.
+// AutoscaleSetting a setting that contains all of the configuration for the automatic scaling of a
+// resource.
type AutoscaleSetting struct {
// Profiles - the collection of automatic scaling profiles that specify different scaling parameters for different time periods. A maximum of 20 profiles can be specified.
Profiles *[]AutoscaleProfile `json:"profiles,omitempty"`
@@ -1386,11 +1401,11 @@ type AutoscaleSettingResource struct {
autorest.Response `json:"-"`
// AutoscaleSetting - The autoscale setting of the resource.
*AutoscaleSetting `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -1404,15 +1419,6 @@ func (asr AutoscaleSettingResource) MarshalJSON() ([]byte, error) {
if asr.AutoscaleSetting != nil {
objectMap["properties"] = asr.AutoscaleSetting
}
- if asr.ID != nil {
- objectMap["id"] = asr.ID
- }
- if asr.Name != nil {
- objectMap["name"] = asr.Name
- }
- if asr.Type != nil {
- objectMap["type"] = asr.Type
- }
if asr.Location != nil {
objectMap["location"] = asr.Location
}
@@ -1500,21 +1506,31 @@ type AutoscaleSettingResourceCollection struct {
NextLink *string `json:"nextLink,omitempty"`
}
-// AutoscaleSettingResourceCollectionIterator provides access to a complete listing of AutoscaleSettingResource
-// values.
+// AutoscaleSettingResourceCollectionIterator provides access to a complete listing of
+// AutoscaleSettingResource values.
type AutoscaleSettingResourceCollectionIterator struct {
i int
page AutoscaleSettingResourceCollectionPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *AutoscaleSettingResourceCollectionIterator) Next() error {
+func (iter *AutoscaleSettingResourceCollectionIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingResourceCollectionIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -1523,6 +1539,13 @@ func (iter *AutoscaleSettingResourceCollectionIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *AutoscaleSettingResourceCollectionIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter AutoscaleSettingResourceCollectionIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -1542,6 +1565,11 @@ func (iter AutoscaleSettingResourceCollectionIterator) Value() AutoscaleSettingR
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the AutoscaleSettingResourceCollectionIterator type.
+func NewAutoscaleSettingResourceCollectionIterator(page AutoscaleSettingResourceCollectionPage) AutoscaleSettingResourceCollectionIterator {
+ return AutoscaleSettingResourceCollectionIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (asrc AutoscaleSettingResourceCollection) IsEmpty() bool {
return asrc.Value == nil || len(*asrc.Value) == 0
@@ -1549,11 +1577,11 @@ func (asrc AutoscaleSettingResourceCollection) IsEmpty() bool {
// autoscaleSettingResourceCollectionPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (asrc AutoscaleSettingResourceCollection) autoscaleSettingResourceCollectionPreparer() (*http.Request, error) {
+func (asrc AutoscaleSettingResourceCollection) autoscaleSettingResourceCollectionPreparer(ctx context.Context) (*http.Request, error) {
if asrc.NextLink == nil || len(to.String(asrc.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(asrc.NextLink)))
@@ -1561,14 +1589,24 @@ func (asrc AutoscaleSettingResourceCollection) autoscaleSettingResourceCollectio
// AutoscaleSettingResourceCollectionPage contains a page of AutoscaleSettingResource values.
type AutoscaleSettingResourceCollectionPage struct {
- fn func(AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error)
+ fn func(context.Context, AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error)
asrc AutoscaleSettingResourceCollection
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *AutoscaleSettingResourceCollectionPage) Next() error {
- next, err := page.fn(page.asrc)
+func (page *AutoscaleSettingResourceCollectionPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/AutoscaleSettingResourceCollectionPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.asrc)
if err != nil {
return err
}
@@ -1576,6 +1614,13 @@ func (page *AutoscaleSettingResourceCollectionPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *AutoscaleSettingResourceCollectionPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page AutoscaleSettingResourceCollectionPage) NotDone() bool {
return !page.asrc.IsEmpty()
@@ -1594,6 +1639,11 @@ func (page AutoscaleSettingResourceCollectionPage) Values() []AutoscaleSettingRe
return *page.asrc.Value
}
+// Creates a new instance of the AutoscaleSettingResourceCollectionPage type.
+func NewAutoscaleSettingResourceCollectionPage(getNextPage func(context.Context, AutoscaleSettingResourceCollection) (AutoscaleSettingResourceCollection, error)) AutoscaleSettingResourceCollectionPage {
+ return AutoscaleSettingResourceCollectionPage{fn: getNextPage}
+}
+
// AutoscaleSettingResourcePatch the autoscale setting object for patch operations.
type AutoscaleSettingResourcePatch struct {
// Tags - Resource tags
@@ -1653,7 +1703,7 @@ type AzNsActionGroup struct {
ActionGroup *[]string `json:"actionGroup,omitempty"`
// EmailSubject - Custom subject override for all email ids in Azure action group
EmailSubject *string `json:"emailSubject,omitempty"`
- // CustomWebhookPayload - Custom payload to be sent for all webook URI in Azure action group
+ // CustomWebhookPayload - Custom payload to be sent for all webhook URI in Azure action group
CustomWebhookPayload *string `json:"customWebhookPayload,omitempty"`
}
@@ -1697,7 +1747,7 @@ type BaselineMetadataValue struct {
// BaselineProperties the baseline properties class.
type BaselineProperties struct {
- // Timespan - The timespan for which the data was retrieved. Its value consists of two datatimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested.
+ // Timespan - The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested.
Timespan *string `json:"timespan,omitempty"`
// Interval - The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made.
Interval *string `json:"interval,omitempty"`
@@ -1714,11 +1764,11 @@ type BaselineProperties struct {
// BaselineResponse the response to a baseline query.
type BaselineResponse struct {
autorest.Response `json:"-"`
- // ID - the metric baseline Id.
+ // ID - READ-ONLY; the metric baseline Id.
ID *string `json:"id,omitempty"`
- // Type - the resource type of the baseline resource.
+ // Type - READ-ONLY; the resource type of the baseline resource.
Type *string `json:"type,omitempty"`
- // Name - the name and the display name of the metric, i.e. it is localizable string.
+ // Name - READ-ONLY; the name and the display name of the metric, i.e. it is localizable string.
Name *LocalizableString `json:"name,omitempty"`
// BaselineProperties - the properties of the baseline.
*BaselineProperties `json:"properties,omitempty"`
@@ -1727,15 +1777,6 @@ type BaselineResponse struct {
// MarshalJSON is the custom marshaler for BaselineResponse.
func (br BaselineResponse) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if br.ID != nil {
- objectMap["id"] = br.ID
- }
- if br.Type != nil {
- objectMap["type"] = br.Type
- }
- if br.Name != nil {
- objectMap["name"] = br.Name
- }
if br.BaselineProperties != nil {
objectMap["properties"] = br.BaselineProperties
}
@@ -1793,7 +1834,7 @@ func (br *BaselineResponse) UnmarshalJSON(body []byte) error {
return nil
}
-// CalculateBaselineResponse the response to a calcualte baseline call.
+// CalculateBaselineResponse the response to a calculate baseline call.
type CalculateBaselineResponse struct {
autorest.Response `json:"-"`
// Type - the resource type of the baseline resource.
@@ -1804,6 +1845,14 @@ type CalculateBaselineResponse struct {
Baseline *[]Baseline `json:"baseline,omitempty"`
}
+// Criteria specifies the criteria for converting log to metric.
+type Criteria struct {
+ // MetricName - Name of the metric
+ MetricName *string `json:"metricName,omitempty"`
+ // Dimensions - List of Dimensions for creating metric
+ Dimensions *[]Dimension `json:"dimensions,omitempty"`
+}
+
// DiagnosticSettings the diagnostic settings.
type DiagnosticSettings struct {
// StorageAccountID - The resource ID of the storage account to which you would like to send Diagnostic Logs.
@@ -1833,11 +1882,11 @@ type DiagnosticSettingsCategoryResource struct {
autorest.Response `json:"-"`
// DiagnosticSettingsCategory - The properties of a Diagnostic Settings Category.
*DiagnosticSettingsCategory `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
}
@@ -1847,15 +1896,6 @@ func (dscr DiagnosticSettingsCategoryResource) MarshalJSON() ([]byte, error) {
if dscr.DiagnosticSettingsCategory != nil {
objectMap["properties"] = dscr.DiagnosticSettingsCategory
}
- if dscr.ID != nil {
- objectMap["id"] = dscr.ID
- }
- if dscr.Name != nil {
- objectMap["name"] = dscr.Name
- }
- if dscr.Type != nil {
- objectMap["type"] = dscr.Type
- }
return json.Marshal(objectMap)
}
@@ -1910,7 +1950,8 @@ func (dscr *DiagnosticSettingsCategoryResource) UnmarshalJSON(body []byte) error
return nil
}
-// DiagnosticSettingsCategoryResourceCollection represents a collection of diagnostic setting category resources.
+// DiagnosticSettingsCategoryResourceCollection represents a collection of diagnostic setting category
+// resources.
type DiagnosticSettingsCategoryResourceCollection struct {
autorest.Response `json:"-"`
// Value - The collection of diagnostic settings category resources.
@@ -1922,11 +1963,11 @@ type DiagnosticSettingsResource struct {
autorest.Response `json:"-"`
// DiagnosticSettings - Properties of a Diagnostic Settings Resource.
*DiagnosticSettings `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
}
@@ -1936,15 +1977,6 @@ func (dsr DiagnosticSettingsResource) MarshalJSON() ([]byte, error) {
if dsr.DiagnosticSettings != nil {
objectMap["properties"] = dsr.DiagnosticSettings
}
- if dsr.ID != nil {
- objectMap["id"] = dsr.ID
- }
- if dsr.Name != nil {
- objectMap["name"] = dsr.Name
- }
- if dsr.Type != nil {
- objectMap["type"] = dsr.Type
- }
return json.Marshal(objectMap)
}
@@ -2006,6 +2038,228 @@ type DiagnosticSettingsResourceCollection struct {
Value *[]DiagnosticSettingsResource `json:"value,omitempty"`
}
+// Dimension specifies the criteria for converting log to metric.
+type Dimension struct {
+ // Name - Name of the dimension
+ Name *string `json:"name,omitempty"`
+ // Operator - Operator for dimension values
+ Operator *string `json:"operator,omitempty"`
+ // Values - List of dimension values
+ Values *[]string `json:"values,omitempty"`
+}
+
+// DynamicMetricCriteria criterion for dynamic threshold.
+type DynamicMetricCriteria struct {
+ // Operator - The operator used to compare the metric value against the threshold.
+ Operator interface{} `json:"operator,omitempty"`
+ // AlertSensitivity - The extent of deviation required to trigger an alert. This will affect how tight the threshold is to the metric series pattern.
+ AlertSensitivity interface{} `json:"alertSensitivity,omitempty"`
+ // FailingPeriods - The minimum number of violations required within the selected lookback time window required to raise an alert.
+ FailingPeriods *DynamicThresholdFailingPeriods `json:"failingPeriods,omitempty"`
+ // IgnoreDataBefore - Use this option to set the date from which to start learning the metric historical data and calculate the dynamic thresholds (in ISO8601 format)
+ IgnoreDataBefore *date.Time `json:"ignoreDataBefore,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Name - Name of the criteria.
+ Name *string `json:"name,omitempty"`
+ // MetricName - Name of the metric.
+ MetricName *string `json:"metricName,omitempty"`
+ // MetricNamespace - Namespace of the metric.
+ MetricNamespace *string `json:"metricNamespace,omitempty"`
+ // TimeAggregation - the criteria time aggregation types.
+ TimeAggregation interface{} `json:"timeAggregation,omitempty"`
+ // Dimensions - List of dimension conditions.
+ Dimensions *[]MetricDimension `json:"dimensions,omitempty"`
+ // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion'
+ CriterionType CriterionType `json:"criterionType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for DynamicMetricCriteria.
+func (dmc DynamicMetricCriteria) MarshalJSON() ([]byte, error) {
+ dmc.CriterionType = CriterionTypeDynamicThresholdCriterion
+ objectMap := make(map[string]interface{})
+ if dmc.Operator != nil {
+ objectMap["operator"] = dmc.Operator
+ }
+ if dmc.AlertSensitivity != nil {
+ objectMap["alertSensitivity"] = dmc.AlertSensitivity
+ }
+ if dmc.FailingPeriods != nil {
+ objectMap["failingPeriods"] = dmc.FailingPeriods
+ }
+ if dmc.IgnoreDataBefore != nil {
+ objectMap["ignoreDataBefore"] = dmc.IgnoreDataBefore
+ }
+ if dmc.Name != nil {
+ objectMap["name"] = dmc.Name
+ }
+ if dmc.MetricName != nil {
+ objectMap["metricName"] = dmc.MetricName
+ }
+ if dmc.MetricNamespace != nil {
+ objectMap["metricNamespace"] = dmc.MetricNamespace
+ }
+ if dmc.TimeAggregation != nil {
+ objectMap["timeAggregation"] = dmc.TimeAggregation
+ }
+ if dmc.Dimensions != nil {
+ objectMap["dimensions"] = dmc.Dimensions
+ }
+ if dmc.CriterionType != "" {
+ objectMap["criterionType"] = dmc.CriterionType
+ }
+ for k, v := range dmc.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria.
+func (dmc DynamicMetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) {
+ return nil, false
+}
+
+// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria.
+func (dmc DynamicMetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) {
+ return &dmc, true
+}
+
+// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria.
+func (dmc DynamicMetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) {
+ return nil, false
+}
+
+// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for DynamicMetricCriteria.
+func (dmc DynamicMetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) {
+ return &dmc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for DynamicMetricCriteria struct.
+func (dmc *DynamicMetricCriteria) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "operator":
+ if v != nil {
+ var operator interface{}
+ err = json.Unmarshal(*v, &operator)
+ if err != nil {
+ return err
+ }
+ dmc.Operator = operator
+ }
+ case "alertSensitivity":
+ if v != nil {
+ var alertSensitivity interface{}
+ err = json.Unmarshal(*v, &alertSensitivity)
+ if err != nil {
+ return err
+ }
+ dmc.AlertSensitivity = alertSensitivity
+ }
+ case "failingPeriods":
+ if v != nil {
+ var failingPeriods DynamicThresholdFailingPeriods
+ err = json.Unmarshal(*v, &failingPeriods)
+ if err != nil {
+ return err
+ }
+ dmc.FailingPeriods = &failingPeriods
+ }
+ case "ignoreDataBefore":
+ if v != nil {
+ var ignoreDataBefore date.Time
+ err = json.Unmarshal(*v, &ignoreDataBefore)
+ if err != nil {
+ return err
+ }
+ dmc.IgnoreDataBefore = &ignoreDataBefore
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if dmc.AdditionalProperties == nil {
+ dmc.AdditionalProperties = make(map[string]interface{})
+ }
+ dmc.AdditionalProperties[k] = additionalProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ dmc.Name = &name
+ }
+ case "metricName":
+ if v != nil {
+ var metricName string
+ err = json.Unmarshal(*v, &metricName)
+ if err != nil {
+ return err
+ }
+ dmc.MetricName = &metricName
+ }
+ case "metricNamespace":
+ if v != nil {
+ var metricNamespace string
+ err = json.Unmarshal(*v, &metricNamespace)
+ if err != nil {
+ return err
+ }
+ dmc.MetricNamespace = &metricNamespace
+ }
+ case "timeAggregation":
+ if v != nil {
+ var timeAggregation interface{}
+ err = json.Unmarshal(*v, &timeAggregation)
+ if err != nil {
+ return err
+ }
+ dmc.TimeAggregation = timeAggregation
+ }
+ case "dimensions":
+ if v != nil {
+ var dimensions []MetricDimension
+ err = json.Unmarshal(*v, &dimensions)
+ if err != nil {
+ return err
+ }
+ dmc.Dimensions = &dimensions
+ }
+ case "criterionType":
+ if v != nil {
+ var criterionType CriterionType
+ err = json.Unmarshal(*v, &criterionType)
+ if err != nil {
+ return err
+ }
+ dmc.CriterionType = criterionType
+ }
+ }
+ }
+
+ return nil
+}
+
+// DynamicThresholdFailingPeriods the minimum number of violations required within the selected lookback
+// time window required to raise an alert.
+type DynamicThresholdFailingPeriods struct {
+ // NumberOfEvaluationPeriods - The number of aggregated lookback points. The lookback time window is calculated based on the aggregation granularity (windowSize) and the selected number of aggregated points.
+ NumberOfEvaluationPeriods *float64 `json:"numberOfEvaluationPeriods,omitempty"`
+ // MinFailingPeriodsToAlert - The number of violations to trigger an alert. Should be smaller or equal to numberOfEvaluationPeriods.
+ MinFailingPeriodsToAlert *float64 `json:"minFailingPeriodsToAlert,omitempty"`
+}
+
// EmailNotification email notification of an autoscale event.
type EmailNotification struct {
// SendToSubscriptionAdministrator - a value indicating whether to send email to subscription administrator.
@@ -2022,7 +2276,7 @@ type EmailReceiver struct {
Name *string `json:"name,omitempty"`
// EmailAddress - The email address of this receiver.
EmailAddress *string `json:"emailAddress,omitempty"`
- // Status - The receiver status of the e-mail. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled'
+ // Status - READ-ONLY; The receiver status of the e-mail. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled'
Status ReceiverStatus `json:"status,omitempty"`
}
@@ -2050,131 +2304,59 @@ type EventCategoryCollection struct {
// EventData the Azure event log entries are of type EventData
type EventData struct {
- // Authorization - The sender authorization information.
+ // Authorization - READ-ONLY; The sender authorization information.
Authorization *SenderAuthorization `json:"authorization,omitempty"`
- // Claims - key value pairs to identify ARM permissions.
+ // Claims - READ-ONLY; key value pairs to identify ARM permissions.
Claims map[string]*string `json:"claims"`
- // Caller - the email address of the user who has performed the operation, the UPN claim or SPN claim based on availability.
+ // Caller - READ-ONLY; the email address of the user who has performed the operation, the UPN claim or SPN claim based on availability.
Caller *string `json:"caller,omitempty"`
- // Description - the description of the event.
+ // Description - READ-ONLY; the description of the event.
Description *string `json:"description,omitempty"`
- // ID - the Id of this event as required by ARM for RBAC. It contains the EventDataID and a timestamp information.
+ // ID - READ-ONLY; the Id of this event as required by ARM for RBAC. It contains the EventDataID and a timestamp information.
ID *string `json:"id,omitempty"`
- // EventDataID - the event data Id. This is a unique identifier for an event.
+ // EventDataID - READ-ONLY; the event data Id. This is a unique identifier for an event.
EventDataID *string `json:"eventDataId,omitempty"`
- // CorrelationID - the correlation Id, usually a GUID in the string format. The correlation Id is shared among the events that belong to the same uber operation.
+ // CorrelationID - READ-ONLY; the correlation Id, usually a GUID in the string format. The correlation Id is shared among the events that belong to the same uber operation.
CorrelationID *string `json:"correlationId,omitempty"`
- // EventName - the event name. This value should not be confused with OperationName. For practical purposes, OperationName might be more appealing to end users.
+ // EventName - READ-ONLY; the event name. This value should not be confused with OperationName. For practical purposes, OperationName might be more appealing to end users.
EventName *LocalizableString `json:"eventName,omitempty"`
- // Category - the event category.
+ // Category - READ-ONLY; the event category.
Category *LocalizableString `json:"category,omitempty"`
- // HTTPRequest - the HTTP request info. Usually includes the 'clientRequestId', 'clientIpAddress' (IP address of the user who initiated the event) and 'method' (HTTP method e.g. PUT).
+ // HTTPRequest - READ-ONLY; the HTTP request info. Usually includes the 'clientRequestId', 'clientIpAddress' (IP address of the user who initiated the event) and 'method' (HTTP method e.g. PUT).
HTTPRequest *HTTPRequestInfo `json:"httpRequest,omitempty"`
- // Level - the event level. Possible values include: 'Critical', 'Error', 'Warning', 'Informational', 'Verbose'
+ // Level - READ-ONLY; the event level. Possible values include: 'Critical', 'Error', 'Warning', 'Informational', 'Verbose'
Level EventLevel `json:"level,omitempty"`
- // ResourceGroupName - the resource group name of the impacted resource.
+ // ResourceGroupName - READ-ONLY; the resource group name of the impacted resource.
ResourceGroupName *string `json:"resourceGroupName,omitempty"`
- // ResourceProviderName - the resource provider name of the impacted resource.
+ // ResourceProviderName - READ-ONLY; the resource provider name of the impacted resource.
ResourceProviderName *LocalizableString `json:"resourceProviderName,omitempty"`
- // ResourceID - the resource uri that uniquely identifies the resource that caused this event.
+ // ResourceID - READ-ONLY; the resource uri that uniquely identifies the resource that caused this event.
ResourceID *string `json:"resourceId,omitempty"`
- // ResourceType - the resource type
+ // ResourceType - READ-ONLY; the resource type
ResourceType *LocalizableString `json:"resourceType,omitempty"`
- // OperationID - It is usually a GUID shared among the events corresponding to single operation. This value should not be confused with EventName.
+ // OperationID - READ-ONLY; It is usually a GUID shared among the events corresponding to single operation. This value should not be confused with EventName.
OperationID *string `json:"operationId,omitempty"`
- // OperationName - the operation name.
+ // OperationName - READ-ONLY; the operation name.
OperationName *LocalizableString `json:"operationName,omitempty"`
- // Properties - the set of pairs (usually a Dictionary) that includes details about the event.
+ // Properties - READ-ONLY; the set of pairs (usually a Dictionary) that includes details about the event.
Properties map[string]*string `json:"properties"`
- // Status - a string describing the status of the operation. Some typical values are: Started, In progress, Succeeded, Failed, Resolved.
+ // Status - READ-ONLY; a string describing the status of the operation. Some typical values are: Started, In progress, Succeeded, Failed, Resolved.
Status *LocalizableString `json:"status,omitempty"`
- // SubStatus - the event sub status. Most of the time, when included, this captures the HTTP status code of the REST call. Common values are: OK (HTTP Status Code: 200), Created (HTTP Status Code: 201), Accepted (HTTP Status Code: 202), No Content (HTTP Status Code: 204), Bad Request(HTTP Status Code: 400), Not Found (HTTP Status Code: 404), Conflict (HTTP Status Code: 409), Internal Server Error (HTTP Status Code: 500), Service Unavailable (HTTP Status Code:503), Gateway Timeout (HTTP Status Code: 504)
+ // SubStatus - READ-ONLY; the event sub status. Most of the time, when included, this captures the HTTP status code of the REST call. Common values are: OK (HTTP Status Code: 200), Created (HTTP Status Code: 201), Accepted (HTTP Status Code: 202), No Content (HTTP Status Code: 204), Bad Request(HTTP Status Code: 400), Not Found (HTTP Status Code: 404), Conflict (HTTP Status Code: 409), Internal Server Error (HTTP Status Code: 500), Service Unavailable (HTTP Status Code:503), Gateway Timeout (HTTP Status Code: 504)
SubStatus *LocalizableString `json:"subStatus,omitempty"`
- // EventTimestamp - the timestamp of when the event was generated by the Azure service processing the request corresponding the event. It in ISO 8601 format.
+ // EventTimestamp - READ-ONLY; the timestamp of when the event was generated by the Azure service processing the request corresponding the event. It in ISO 8601 format.
EventTimestamp *date.Time `json:"eventTimestamp,omitempty"`
- // SubmissionTimestamp - the timestamp of when the event became available for querying via this API. It is in ISO 8601 format. This value should not be confused eventTimestamp. As there might be a delay between the occurrence time of the event, and the time that the event is submitted to the Azure logging infrastructure.
+ // SubmissionTimestamp - READ-ONLY; the timestamp of when the event became available for querying via this API. It is in ISO 8601 format. This value should not be confused eventTimestamp. As there might be a delay between the occurrence time of the event, and the time that the event is submitted to the Azure logging infrastructure.
SubmissionTimestamp *date.Time `json:"submissionTimestamp,omitempty"`
- // SubscriptionID - the Azure subscription Id usually a GUID.
+ // SubscriptionID - READ-ONLY; the Azure subscription Id usually a GUID.
SubscriptionID *string `json:"subscriptionId,omitempty"`
- // TenantID - the Azure tenant Id
+ // TenantID - READ-ONLY; the Azure tenant Id
TenantID *string `json:"tenantId,omitempty"`
}
// MarshalJSON is the custom marshaler for EventData.
func (ed EventData) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if ed.Authorization != nil {
- objectMap["authorization"] = ed.Authorization
- }
- if ed.Claims != nil {
- objectMap["claims"] = ed.Claims
- }
- if ed.Caller != nil {
- objectMap["caller"] = ed.Caller
- }
- if ed.Description != nil {
- objectMap["description"] = ed.Description
- }
- if ed.ID != nil {
- objectMap["id"] = ed.ID
- }
- if ed.EventDataID != nil {
- objectMap["eventDataId"] = ed.EventDataID
- }
- if ed.CorrelationID != nil {
- objectMap["correlationId"] = ed.CorrelationID
- }
- if ed.EventName != nil {
- objectMap["eventName"] = ed.EventName
- }
- if ed.Category != nil {
- objectMap["category"] = ed.Category
- }
- if ed.HTTPRequest != nil {
- objectMap["httpRequest"] = ed.HTTPRequest
- }
- if ed.Level != "" {
- objectMap["level"] = ed.Level
- }
- if ed.ResourceGroupName != nil {
- objectMap["resourceGroupName"] = ed.ResourceGroupName
- }
- if ed.ResourceProviderName != nil {
- objectMap["resourceProviderName"] = ed.ResourceProviderName
- }
- if ed.ResourceID != nil {
- objectMap["resourceId"] = ed.ResourceID
- }
- if ed.ResourceType != nil {
- objectMap["resourceType"] = ed.ResourceType
- }
- if ed.OperationID != nil {
- objectMap["operationId"] = ed.OperationID
- }
- if ed.OperationName != nil {
- objectMap["operationName"] = ed.OperationName
- }
- if ed.Properties != nil {
- objectMap["properties"] = ed.Properties
- }
- if ed.Status != nil {
- objectMap["status"] = ed.Status
- }
- if ed.SubStatus != nil {
- objectMap["subStatus"] = ed.SubStatus
- }
- if ed.EventTimestamp != nil {
- objectMap["eventTimestamp"] = ed.EventTimestamp
- }
- if ed.SubmissionTimestamp != nil {
- objectMap["submissionTimestamp"] = ed.SubmissionTimestamp
- }
- if ed.SubscriptionID != nil {
- objectMap["subscriptionId"] = ed.SubscriptionID
- }
- if ed.TenantID != nil {
- objectMap["tenantId"] = ed.TenantID
- }
return json.Marshal(objectMap)
}
@@ -2193,14 +2375,24 @@ type EventDataCollectionIterator struct {
page EventDataCollectionPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *EventDataCollectionIterator) Next() error {
+func (iter *EventDataCollectionIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventDataCollectionIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -2209,6 +2401,13 @@ func (iter *EventDataCollectionIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *EventDataCollectionIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter EventDataCollectionIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -2228,6 +2427,11 @@ func (iter EventDataCollectionIterator) Value() EventData {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the EventDataCollectionIterator type.
+func NewEventDataCollectionIterator(page EventDataCollectionPage) EventDataCollectionIterator {
+ return EventDataCollectionIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (edc EventDataCollection) IsEmpty() bool {
return edc.Value == nil || len(*edc.Value) == 0
@@ -2235,11 +2439,11 @@ func (edc EventDataCollection) IsEmpty() bool {
// eventDataCollectionPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (edc EventDataCollection) eventDataCollectionPreparer() (*http.Request, error) {
+func (edc EventDataCollection) eventDataCollectionPreparer(ctx context.Context) (*http.Request, error) {
if edc.NextLink == nil || len(to.String(edc.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(edc.NextLink)))
@@ -2247,14 +2451,24 @@ func (edc EventDataCollection) eventDataCollectionPreparer() (*http.Request, err
// EventDataCollectionPage contains a page of EventData values.
type EventDataCollectionPage struct {
- fn func(EventDataCollection) (EventDataCollection, error)
+ fn func(context.Context, EventDataCollection) (EventDataCollection, error)
edc EventDataCollection
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *EventDataCollectionPage) Next() error {
- next, err := page.fn(page.edc)
+func (page *EventDataCollectionPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventDataCollectionPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.edc)
if err != nil {
return err
}
@@ -2262,6 +2476,13 @@ func (page *EventDataCollectionPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *EventDataCollectionPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page EventDataCollectionPage) NotDone() bool {
return !page.edc.IsEmpty()
@@ -2280,6 +2501,11 @@ func (page EventDataCollectionPage) Values() []EventData {
return *page.edc.Value
}
+// Creates a new instance of the EventDataCollectionPage type.
+func NewEventDataCollectionPage(getNextPage func(context.Context, EventDataCollection) (EventDataCollection, error)) EventDataCollectionPage {
+ return EventDataCollectionPage{fn: getNextPage}
+}
+
// HTTPRequestInfo the Http request info.
type HTTPRequestInfo struct {
// ClientRequestID - the client request id.
@@ -2295,15 +2521,15 @@ type HTTPRequestInfo struct {
// Incident an alert incident indicates the activation status of an alert rule.
type Incident struct {
autorest.Response `json:"-"`
- // Name - Incident name.
+ // Name - READ-ONLY; Incident name.
Name *string `json:"name,omitempty"`
- // RuleName - Rule name that is associated with the incident.
+ // RuleName - READ-ONLY; Rule name that is associated with the incident.
RuleName *string `json:"ruleName,omitempty"`
- // IsActive - A boolean to indicate whether the incident is active or resolved.
+ // IsActive - READ-ONLY; A boolean to indicate whether the incident is active or resolved.
IsActive *bool `json:"isActive,omitempty"`
- // ActivatedTime - The time at which the incident was activated in ISO8601 format.
+ // ActivatedTime - READ-ONLY; The time at which the incident was activated in ISO8601 format.
ActivatedTime *date.Time `json:"activatedTime,omitempty"`
- // ResolvedTime - The time at which the incident was resolved in ISO8601 format. If null, it means the incident is still active.
+ // ResolvedTime - READ-ONLY; The time at which the incident was resolved in ISO8601 format. If null, it means the incident is still active.
ResolvedTime *date.Time `json:"resolvedTime,omitempty"`
}
@@ -2488,11 +2714,11 @@ type LogProfileResource struct {
autorest.Response `json:"-"`
// LogProfileProperties - The log profile properties of the resource.
*LogProfileProperties `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -2506,15 +2732,6 @@ func (lpr LogProfileResource) MarshalJSON() ([]byte, error) {
if lpr.LogProfileProperties != nil {
objectMap["properties"] = lpr.LogProfileProperties
}
- if lpr.ID != nil {
- objectMap["id"] = lpr.ID
- }
- if lpr.Name != nil {
- objectMap["name"] = lpr.Name
- }
- if lpr.Type != nil {
- objectMap["type"] = lpr.Type
- }
if lpr.Location != nil {
objectMap["location"] = lpr.Location
}
@@ -2652,13 +2869,13 @@ type LogSearchRule struct {
Description *string `json:"description,omitempty"`
// Enabled - The flag which indicates whether the Log Search rule is enabled. Value should be true or false. Possible values include: 'True', 'False'
Enabled Enabled `json:"enabled,omitempty"`
- // LastUpdatedTime - Last time the rule was updated in IS08601 format.
+ // LastUpdatedTime - READ-ONLY; Last time the rule was updated in IS08601 format.
LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
- // ProvisioningState - Provisioning state of the scheduledquery rule. Possible values include: 'Succeeded', 'Deploying', 'Canceled', 'Failed'
+ // ProvisioningState - READ-ONLY; Provisioning state of the scheduled query rule. Possible values include: 'Succeeded', 'Deploying', 'Canceled', 'Failed'
ProvisioningState ProvisioningState `json:"provisioningState,omitempty"`
// Source - Data Source against which rule will Query Data
Source *Source `json:"source,omitempty"`
- // Schedule - Schedule (Frequnecy, Time Window) for rule.
+ // Schedule - Schedule (Frequency, Time Window) for rule. Required for action type - AlertingAction
Schedule *Schedule `json:"schedule,omitempty"`
// Action - Action needs to be taken on rule execution.
Action BasicAction `json:"action,omitempty"`
@@ -2752,11 +2969,11 @@ type LogSearchRuleResource struct {
autorest.Response `json:"-"`
// LogSearchRule - The rule properties of the resource.
*LogSearchRule `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -2770,15 +2987,6 @@ func (lsrr LogSearchRuleResource) MarshalJSON() ([]byte, error) {
if lsrr.LogSearchRule != nil {
objectMap["properties"] = lsrr.LogSearchRule
}
- if lsrr.ID != nil {
- objectMap["id"] = lsrr.ID
- }
- if lsrr.Name != nil {
- objectMap["name"] = lsrr.Name
- }
- if lsrr.Type != nil {
- objectMap["type"] = lsrr.Type
- }
if lsrr.Location != nil {
objectMap["location"] = lsrr.Location
}
@@ -2927,23 +3135,64 @@ type LogSettings struct {
RetentionPolicy *RetentionPolicy `json:"retentionPolicy,omitempty"`
}
-// ManagementEventAggregationCondition how the data that is collected should be combined over time.
-type ManagementEventAggregationCondition struct {
- // Operator - the condition operator. Possible values include: 'ConditionOperatorGreaterThan', 'ConditionOperatorGreaterThanOrEqual', 'ConditionOperatorLessThan', 'ConditionOperatorLessThanOrEqual'
- Operator ConditionOperator `json:"operator,omitempty"`
- // Threshold - The threshold value that activates the alert.
- Threshold *float64 `json:"threshold,omitempty"`
- // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
- WindowSize *string `json:"windowSize,omitempty"`
+// LogToMetricAction specify action need to be taken when rule type is converting log to metric
+type LogToMetricAction struct {
+ // Criteria - Criteria of Metric
+ Criteria *[]Criteria `json:"criteria,omitempty"`
+ // OdataType - Possible values include: 'OdataTypeAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesAlertingAction', 'OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction'
+ OdataType OdataTypeBasicAction `json:"odata.type,omitempty"`
}
-// ManagementEventRuleCondition a management event rule condition.
-type ManagementEventRuleCondition struct {
- // Aggregation - How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
- Aggregation *ManagementEventAggregationCondition `json:"aggregation,omitempty"`
- // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
- DataSource BasicRuleDataSource `json:"dataSource,omitempty"`
- // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition'
+// MarshalJSON is the custom marshaler for LogToMetricAction.
+func (ltma LogToMetricAction) MarshalJSON() ([]byte, error) {
+ ltma.OdataType = OdataTypeMicrosoftWindowsAzureManagementMonitoringAlertsModelsMicrosoftAppInsightsNexusDataContractsResourcesScheduledQueryRulesLogToMetricAction
+ objectMap := make(map[string]interface{})
+ if ltma.Criteria != nil {
+ objectMap["criteria"] = ltma.Criteria
+ }
+ if ltma.OdataType != "" {
+ objectMap["odata.type"] = ltma.OdataType
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsAlertingAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsAlertingAction() (*AlertingAction, bool) {
+ return nil, false
+}
+
+// AsLogToMetricAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsLogToMetricAction() (*LogToMetricAction, bool) {
+ return &ltma, true
+}
+
+// AsAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsAction() (*Action, bool) {
+ return nil, false
+}
+
+// AsBasicAction is the BasicAction implementation for LogToMetricAction.
+func (ltma LogToMetricAction) AsBasicAction() (BasicAction, bool) {
+ return &ltma, true
+}
+
+// ManagementEventAggregationCondition how the data that is collected should be combined over time.
+type ManagementEventAggregationCondition struct {
+ // Operator - the condition operator. Possible values include: 'ConditionOperatorGreaterThan', 'ConditionOperatorGreaterThanOrEqual', 'ConditionOperatorLessThan', 'ConditionOperatorLessThanOrEqual'
+ Operator ConditionOperator `json:"operator,omitempty"`
+ // Threshold - The threshold value that activates the alert.
+ Threshold *float64 `json:"threshold,omitempty"`
+ // WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold. If specified then it must be between 5 minutes and 1 day.
+ WindowSize *string `json:"windowSize,omitempty"`
+}
+
+// ManagementEventRuleCondition a management event rule condition.
+type ManagementEventRuleCondition struct {
+ // Aggregation - How the data that is collected should be combined over time and when the alert is activated. Note that for management event alerts aggregation is optional – if it is not provided then any event will cause the alert to activate.
+ Aggregation *ManagementEventAggregationCondition `json:"aggregation,omitempty"`
+ // DataSource - the resource from which the rule collects its data. For this type dataSource will always be of type RuleMetricDataSource.
+ DataSource BasicRuleDataSource `json:"dataSource,omitempty"`
+ // OdataType - Possible values include: 'OdataTypeRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsLocationThresholdRuleCondition', 'OdataTypeMicrosoftAzureManagementInsightsModelsManagementEventRuleCondition'
OdataType OdataTypeBasicRuleCondition `json:"odata.type,omitempty"`
}
@@ -3072,6 +3321,7 @@ func (maa MetricAlertAction) MarshalJSON() ([]byte, error) {
// BasicMetricAlertCriteria the rule criteria that defines the conditions of the alert rule.
type BasicMetricAlertCriteria interface {
AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool)
+ AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool)
AsMetricAlertCriteria() (*MetricAlertCriteria, bool)
}
@@ -3079,7 +3329,7 @@ type BasicMetricAlertCriteria interface {
type MetricAlertCriteria struct {
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
AdditionalProperties map[string]interface{} `json:""`
- // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria'
+ // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria'
OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"`
}
@@ -3095,6 +3345,10 @@ func unmarshalBasicMetricAlertCriteria(body []byte) (BasicMetricAlertCriteria, e
var masrmmc MetricAlertSingleResourceMultipleMetricCriteria
err := json.Unmarshal(body, &masrmmc)
return masrmmc, err
+ case string(OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria):
+ var mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria
+ err := json.Unmarshal(body, &mamrmmc)
+ return mamrmmc, err
default:
var mac MetricAlertCriteria
err := json.Unmarshal(body, &mac)
@@ -3138,6 +3392,11 @@ func (mac MetricAlertCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria
return nil, false
}
+// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria.
+func (mac MetricAlertCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) {
+ return nil, false
+}
+
// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertCriteria.
func (mac MetricAlertCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) {
return &mac, true
@@ -3184,6 +3443,97 @@ func (mac *MetricAlertCriteria) UnmarshalJSON(body []byte) error {
return nil
}
+// MetricAlertMultipleResourceMultipleMetricCriteria specifies the metric alert criteria for multiple
+// resource that has multiple metric criteria.
+type MetricAlertMultipleResourceMultipleMetricCriteria struct {
+ // AllOf - the list of multiple metric criteria for this 'all of' operation.
+ AllOf *[]BasicMultiMetricCriteria `json:"allOf,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria'
+ OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for MetricAlertMultipleResourceMultipleMetricCriteria.
+func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) MarshalJSON() ([]byte, error) {
+ mamrmmc.OdataType = OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria
+ objectMap := make(map[string]interface{})
+ if mamrmmc.AllOf != nil {
+ objectMap["allOf"] = mamrmmc.AllOf
+ }
+ if mamrmmc.OdataType != "" {
+ objectMap["odata.type"] = mamrmmc.OdataType
+ }
+ for k, v := range mamrmmc.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsMetricAlertSingleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria.
+func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertSingleResourceMultipleMetricCriteria() (*MetricAlertSingleResourceMultipleMetricCriteria, bool) {
+ return nil, false
+}
+
+// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria.
+func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) {
+ return &mamrmmc, true
+}
+
+// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria.
+func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) {
+ return nil, false
+}
+
+// AsBasicMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertMultipleResourceMultipleMetricCriteria.
+func (mamrmmc MetricAlertMultipleResourceMultipleMetricCriteria) AsBasicMetricAlertCriteria() (BasicMetricAlertCriteria, bool) {
+ return &mamrmmc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for MetricAlertMultipleResourceMultipleMetricCriteria struct.
+func (mamrmmc *MetricAlertMultipleResourceMultipleMetricCriteria) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "allOf":
+ if v != nil {
+ allOf, err := unmarshalBasicMultiMetricCriteriaArray(*v)
+ if err != nil {
+ return err
+ }
+ mamrmmc.AllOf = &allOf
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if mamrmmc.AdditionalProperties == nil {
+ mamrmmc.AdditionalProperties = make(map[string]interface{})
+ }
+ mamrmmc.AdditionalProperties[k] = additionalProperties
+ }
+ case "odata.type":
+ if v != nil {
+ var odataType OdataTypeBasicMetricAlertCriteria
+ err = json.Unmarshal(*v, &odataType)
+ if err != nil {
+ return err
+ }
+ mamrmmc.OdataType = odataType
+ }
+ }
+ }
+
+ return nil
+}
+
// MetricAlertProperties an alert rule.
type MetricAlertProperties struct {
// Description - the description of the metric alert that will be included in the alert email.
@@ -3198,13 +3548,17 @@ type MetricAlertProperties struct {
EvaluationFrequency *string `json:"evaluationFrequency,omitempty"`
// WindowSize - the period of time (in ISO 8601 duration format) that is used to monitor alert activity based on the threshold.
WindowSize *string `json:"windowSize,omitempty"`
+ // TargetResourceType - the resource type of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria.
+ TargetResourceType *string `json:"targetResourceType,omitempty"`
+ // TargetResourceRegion - the region of the target resource(s) on which the alert is created/updated. Mandatory for MultipleResourceMultipleMetricCriteria.
+ TargetResourceRegion *string `json:"targetResourceRegion,omitempty"`
// Criteria - defines the specific alert criteria information.
Criteria BasicMetricAlertCriteria `json:"criteria,omitempty"`
// AutoMitigate - the flag that indicates whether the alert should be auto resolved or not.
AutoMitigate *bool `json:"autoMitigate,omitempty"`
// Actions - the array of actions that are performed when the alert rule becomes active, and when an alert condition is resolved.
Actions *[]MetricAlertAction `json:"actions,omitempty"`
- // LastUpdatedTime - Last time the rule was updated in ISO8601 format.
+ // LastUpdatedTime - READ-ONLY; Last time the rule was updated in ISO8601 format.
LastUpdatedTime *date.Time `json:"lastUpdatedTime,omitempty"`
}
@@ -3271,6 +3625,24 @@ func (mapVar *MetricAlertProperties) UnmarshalJSON(body []byte) error {
}
mapVar.WindowSize = &windowSize
}
+ case "targetResourceType":
+ if v != nil {
+ var targetResourceType string
+ err = json.Unmarshal(*v, &targetResourceType)
+ if err != nil {
+ return err
+ }
+ mapVar.TargetResourceType = &targetResourceType
+ }
+ case "targetResourceRegion":
+ if v != nil {
+ var targetResourceRegion string
+ err = json.Unmarshal(*v, &targetResourceRegion)
+ if err != nil {
+ return err
+ }
+ mapVar.TargetResourceRegion = &targetResourceRegion
+ }
case "criteria":
if v != nil {
criteria, err := unmarshalBasicMetricAlertCriteria(*v)
@@ -3317,11 +3689,11 @@ type MetricAlertResource struct {
autorest.Response `json:"-"`
// MetricAlertProperties - The alert rule properties of the resource.
*MetricAlertProperties `json:"properties,omitempty"`
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -3335,15 +3707,6 @@ func (mar MetricAlertResource) MarshalJSON() ([]byte, error) {
if mar.MetricAlertProperties != nil {
objectMap["properties"] = mar.MetricAlertProperties
}
- if mar.ID != nil {
- objectMap["id"] = mar.ID
- }
- if mar.Name != nil {
- objectMap["name"] = mar.Name
- }
- if mar.Type != nil {
- objectMap["type"] = mar.Type
- }
if mar.Location != nil {
objectMap["location"] = mar.Location
}
@@ -3482,14 +3845,14 @@ func (marp *MetricAlertResourcePatch) UnmarshalJSON(body []byte) error {
return nil
}
-// MetricAlertSingleResourceMultipleMetricCriteria specifies the metric alert criteria for a single resource that
-// has multiple metric criteria.
+// MetricAlertSingleResourceMultipleMetricCriteria specifies the metric alert criteria for a single
+// resource that has multiple metric criteria.
type MetricAlertSingleResourceMultipleMetricCriteria struct {
// AllOf - The list of metric criteria for this 'all of' operation.
AllOf *[]MetricCriteria `json:"allOf,omitempty"`
// AdditionalProperties - Unmatched properties from the message are deserialized this collection
AdditionalProperties map[string]interface{} `json:""`
- // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria'
+ // OdataType - Possible values include: 'OdataTypeMetricAlertCriteria', 'OdataTypeMicrosoftAzureMonitorSingleResourceMultipleMetricCriteria', 'OdataTypeMicrosoftAzureMonitorMultipleResourceMultipleMetricCriteria'
OdataType OdataTypeBasicMetricAlertCriteria `json:"odata.type,omitempty"`
}
@@ -3514,6 +3877,11 @@ func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertSing
return &masrmmc, true
}
+// AsMetricAlertMultipleResourceMultipleMetricCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria.
+func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertMultipleResourceMultipleMetricCriteria() (*MetricAlertMultipleResourceMultipleMetricCriteria, bool) {
+ return nil, false
+}
+
// AsMetricAlertCriteria is the BasicMetricAlertCriteria implementation for MetricAlertSingleResourceMultipleMetricCriteria.
func (masrmmc MetricAlertSingleResourceMultipleMetricCriteria) AsMetricAlertCriteria() (*MetricAlertCriteria, bool) {
return nil, false
@@ -3613,8 +3981,8 @@ func (masp MetricAlertStatusProperties) MarshalJSON() ([]byte, error) {
return json.Marshal(objectMap)
}
-// MetricAvailability metric availability specifies the time grain (aggregation interval or frequency) and the
-// retention period for that time grain.
+// MetricAvailability metric availability specifies the time grain (aggregation interval or frequency) and
+// the retention period for that time grain.
type MetricAvailability struct {
// TimeGrain - the time grain specifies the aggregation interval for the metric. Expressed as a duration 'PT1M', 'P1D', etc.
TimeGrain *string `json:"timeGrain,omitempty"`
@@ -3624,20 +3992,177 @@ type MetricAvailability struct {
// MetricCriteria criterion to filter metrics.
type MetricCriteria struct {
+ // Operator - the criteria operator.
+ Operator interface{} `json:"operator,omitempty"`
+ // Threshold - the criteria threshold value that activates the alert.
+ Threshold *float64 `json:"threshold,omitempty"`
+ // AdditionalProperties - Unmatched properties from the message are deserialized this collection
+ AdditionalProperties map[string]interface{} `json:""`
// Name - Name of the criteria.
Name *string `json:"name,omitempty"`
// MetricName - Name of the metric.
MetricName *string `json:"metricName,omitempty"`
// MetricNamespace - Namespace of the metric.
MetricNamespace *string `json:"metricNamespace,omitempty"`
- // Operator - the criteria operator.
- Operator interface{} `json:"operator,omitempty"`
// TimeAggregation - the criteria time aggregation types.
TimeAggregation interface{} `json:"timeAggregation,omitempty"`
- // Threshold - the criteria threshold value that activates the alert.
- Threshold *float64 `json:"threshold,omitempty"`
// Dimensions - List of dimension conditions.
Dimensions *[]MetricDimension `json:"dimensions,omitempty"`
+ // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion'
+ CriterionType CriterionType `json:"criterionType,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for MetricCriteria.
+func (mc MetricCriteria) MarshalJSON() ([]byte, error) {
+ mc.CriterionType = CriterionTypeStaticThresholdCriterion
+ objectMap := make(map[string]interface{})
+ if mc.Operator != nil {
+ objectMap["operator"] = mc.Operator
+ }
+ if mc.Threshold != nil {
+ objectMap["threshold"] = mc.Threshold
+ }
+ if mc.Name != nil {
+ objectMap["name"] = mc.Name
+ }
+ if mc.MetricName != nil {
+ objectMap["metricName"] = mc.MetricName
+ }
+ if mc.MetricNamespace != nil {
+ objectMap["metricNamespace"] = mc.MetricNamespace
+ }
+ if mc.TimeAggregation != nil {
+ objectMap["timeAggregation"] = mc.TimeAggregation
+ }
+ if mc.Dimensions != nil {
+ objectMap["dimensions"] = mc.Dimensions
+ }
+ if mc.CriterionType != "" {
+ objectMap["criterionType"] = mc.CriterionType
+ }
+ for k, v := range mc.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria.
+func (mc MetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) {
+ return &mc, true
+}
+
+// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria.
+func (mc MetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) {
+ return nil, false
+}
+
+// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria.
+func (mc MetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) {
+ return nil, false
+}
+
+// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MetricCriteria.
+func (mc MetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) {
+ return &mc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for MetricCriteria struct.
+func (mc *MetricCriteria) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "operator":
+ if v != nil {
+ var operator interface{}
+ err = json.Unmarshal(*v, &operator)
+ if err != nil {
+ return err
+ }
+ mc.Operator = operator
+ }
+ case "threshold":
+ if v != nil {
+ var threshold float64
+ err = json.Unmarshal(*v, &threshold)
+ if err != nil {
+ return err
+ }
+ mc.Threshold = &threshold
+ }
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if mc.AdditionalProperties == nil {
+ mc.AdditionalProperties = make(map[string]interface{})
+ }
+ mc.AdditionalProperties[k] = additionalProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mc.Name = &name
+ }
+ case "metricName":
+ if v != nil {
+ var metricName string
+ err = json.Unmarshal(*v, &metricName)
+ if err != nil {
+ return err
+ }
+ mc.MetricName = &metricName
+ }
+ case "metricNamespace":
+ if v != nil {
+ var metricNamespace string
+ err = json.Unmarshal(*v, &metricNamespace)
+ if err != nil {
+ return err
+ }
+ mc.MetricNamespace = &metricNamespace
+ }
+ case "timeAggregation":
+ if v != nil {
+ var timeAggregation interface{}
+ err = json.Unmarshal(*v, &timeAggregation)
+ if err != nil {
+ return err
+ }
+ mc.TimeAggregation = timeAggregation
+ }
+ case "dimensions":
+ if v != nil {
+ var dimensions []MetricDimension
+ err = json.Unmarshal(*v, &dimensions)
+ if err != nil {
+ return err
+ }
+ mc.Dimensions = &dimensions
+ }
+ case "criterionType":
+ if v != nil {
+ var criterionType CriterionType
+ err = json.Unmarshal(*v, &criterionType)
+ if err != nil {
+ return err
+ }
+ mc.CriterionType = criterionType
+ }
+ }
+ }
+
+ return nil
}
// MetricDefinition metric definition class specifies the metadata for a metric.
@@ -3646,7 +4171,7 @@ type MetricDefinition struct {
IsDimensionRequired *bool `json:"isDimensionRequired,omitempty"`
// ResourceID - the resource identifier of the resource that emitted the metric.
ResourceID *string `json:"resourceId,omitempty"`
- // Namespace - the namespace the metric blongs to.
+ // Namespace - the namespace the metric belongs to.
Namespace *string `json:"namespace,omitempty"`
// Name - the name and the display name of the metric, i.e. it is a localizable string.
Name *LocalizableString `json:"name,omitempty"`
@@ -3675,7 +4200,7 @@ type MetricDefinitionCollection struct {
type MetricDimension struct {
// Name - Name of the dimension.
Name *string `json:"name,omitempty"`
- // Operator - the dimension operator.
+ // Operator - the dimension operator. Only 'Include' and 'Exclude' are supported
Operator *string `json:"operator,omitempty"`
// Values - list of dimension values.
Values *[]string `json:"values,omitempty"`
@@ -3729,6 +4254,201 @@ type MetricValue struct {
Count *int64 `json:"count,omitempty"`
}
+// BasicMultiMetricCriteria the types of conditions for a multi resource alert.
+type BasicMultiMetricCriteria interface {
+ AsMetricCriteria() (*MetricCriteria, bool)
+ AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool)
+ AsMultiMetricCriteria() (*MultiMetricCriteria, bool)
+}
+
+// MultiMetricCriteria the types of conditions for a multi resource alert.
+type MultiMetricCriteria struct {
+ // AdditionalProperties - Unmatched properties from the message are deserialized this collection
+ AdditionalProperties map[string]interface{} `json:""`
+ // Name - Name of the criteria.
+ Name *string `json:"name,omitempty"`
+ // MetricName - Name of the metric.
+ MetricName *string `json:"metricName,omitempty"`
+ // MetricNamespace - Namespace of the metric.
+ MetricNamespace *string `json:"metricNamespace,omitempty"`
+ // TimeAggregation - the criteria time aggregation types.
+ TimeAggregation interface{} `json:"timeAggregation,omitempty"`
+ // Dimensions - List of dimension conditions.
+ Dimensions *[]MetricDimension `json:"dimensions,omitempty"`
+ // CriterionType - Possible values include: 'CriterionTypeMultiMetricCriteria', 'CriterionTypeStaticThresholdCriterion', 'CriterionTypeDynamicThresholdCriterion'
+ CriterionType CriterionType `json:"criterionType,omitempty"`
+}
+
+func unmarshalBasicMultiMetricCriteria(body []byte) (BasicMultiMetricCriteria, error) {
+ var m map[string]interface{}
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return nil, err
+ }
+
+ switch m["criterionType"] {
+ case string(CriterionTypeStaticThresholdCriterion):
+ var mc MetricCriteria
+ err := json.Unmarshal(body, &mc)
+ return mc, err
+ case string(CriterionTypeDynamicThresholdCriterion):
+ var dmc DynamicMetricCriteria
+ err := json.Unmarshal(body, &dmc)
+ return dmc, err
+ default:
+ var mmc MultiMetricCriteria
+ err := json.Unmarshal(body, &mmc)
+ return mmc, err
+ }
+}
+func unmarshalBasicMultiMetricCriteriaArray(body []byte) ([]BasicMultiMetricCriteria, error) {
+ var rawMessages []*json.RawMessage
+ err := json.Unmarshal(body, &rawMessages)
+ if err != nil {
+ return nil, err
+ }
+
+ mmcArray := make([]BasicMultiMetricCriteria, len(rawMessages))
+
+ for index, rawMessage := range rawMessages {
+ mmc, err := unmarshalBasicMultiMetricCriteria(*rawMessage)
+ if err != nil {
+ return nil, err
+ }
+ mmcArray[index] = mmc
+ }
+ return mmcArray, nil
+}
+
+// MarshalJSON is the custom marshaler for MultiMetricCriteria.
+func (mmc MultiMetricCriteria) MarshalJSON() ([]byte, error) {
+ mmc.CriterionType = CriterionTypeMultiMetricCriteria
+ objectMap := make(map[string]interface{})
+ if mmc.Name != nil {
+ objectMap["name"] = mmc.Name
+ }
+ if mmc.MetricName != nil {
+ objectMap["metricName"] = mmc.MetricName
+ }
+ if mmc.MetricNamespace != nil {
+ objectMap["metricNamespace"] = mmc.MetricNamespace
+ }
+ if mmc.TimeAggregation != nil {
+ objectMap["timeAggregation"] = mmc.TimeAggregation
+ }
+ if mmc.Dimensions != nil {
+ objectMap["dimensions"] = mmc.Dimensions
+ }
+ if mmc.CriterionType != "" {
+ objectMap["criterionType"] = mmc.CriterionType
+ }
+ for k, v := range mmc.AdditionalProperties {
+ objectMap[k] = v
+ }
+ return json.Marshal(objectMap)
+}
+
+// AsMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria.
+func (mmc MultiMetricCriteria) AsMetricCriteria() (*MetricCriteria, bool) {
+ return nil, false
+}
+
+// AsDynamicMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria.
+func (mmc MultiMetricCriteria) AsDynamicMetricCriteria() (*DynamicMetricCriteria, bool) {
+ return nil, false
+}
+
+// AsMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria.
+func (mmc MultiMetricCriteria) AsMultiMetricCriteria() (*MultiMetricCriteria, bool) {
+ return &mmc, true
+}
+
+// AsBasicMultiMetricCriteria is the BasicMultiMetricCriteria implementation for MultiMetricCriteria.
+func (mmc MultiMetricCriteria) AsBasicMultiMetricCriteria() (BasicMultiMetricCriteria, bool) {
+ return &mmc, true
+}
+
+// UnmarshalJSON is the custom unmarshaler for MultiMetricCriteria struct.
+func (mmc *MultiMetricCriteria) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ default:
+ if v != nil {
+ var additionalProperties interface{}
+ err = json.Unmarshal(*v, &additionalProperties)
+ if err != nil {
+ return err
+ }
+ if mmc.AdditionalProperties == nil {
+ mmc.AdditionalProperties = make(map[string]interface{})
+ }
+ mmc.AdditionalProperties[k] = additionalProperties
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ mmc.Name = &name
+ }
+ case "metricName":
+ if v != nil {
+ var metricName string
+ err = json.Unmarshal(*v, &metricName)
+ if err != nil {
+ return err
+ }
+ mmc.MetricName = &metricName
+ }
+ case "metricNamespace":
+ if v != nil {
+ var metricNamespace string
+ err = json.Unmarshal(*v, &metricNamespace)
+ if err != nil {
+ return err
+ }
+ mmc.MetricNamespace = &metricNamespace
+ }
+ case "timeAggregation":
+ if v != nil {
+ var timeAggregation interface{}
+ err = json.Unmarshal(*v, &timeAggregation)
+ if err != nil {
+ return err
+ }
+ mmc.TimeAggregation = timeAggregation
+ }
+ case "dimensions":
+ if v != nil {
+ var dimensions []MetricDimension
+ err = json.Unmarshal(*v, &dimensions)
+ if err != nil {
+ return err
+ }
+ mmc.Dimensions = &dimensions
+ }
+ case "criterionType":
+ if v != nil {
+ var criterionType CriterionType
+ err = json.Unmarshal(*v, &criterionType)
+ if err != nil {
+ return err
+ }
+ mmc.CriterionType = criterionType
+ }
+ }
+ }
+
+ return nil
+}
+
// Operation microsoft Insights API operation definition.
type Operation struct {
// Name - Operation name: {provider}/{resource}/{operation}
@@ -3759,16 +4479,16 @@ type OperationListResult struct {
// ProxyOnlyResource a proxy only azure resource object
type ProxyOnlyResource struct {
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
}
-// Recurrence the repeating times at which this profile begins. This element is not used if the FixedDate element
-// is used.
+// Recurrence the repeating times at which this profile begins. This element is not used if the FixedDate
+// element is used.
type Recurrence struct {
// Frequency - the recurrence frequency. How often the schedule profile should take effect. This value must be Week, meaning each week will have the same set of profiles. For example, to set a daily schedule, set **schedule** to every day of the week. The frequency property specifies that the schedule is repeated weekly. Possible values include: 'RecurrenceFrequencyNone', 'RecurrenceFrequencySecond', 'RecurrenceFrequencyMinute', 'RecurrenceFrequencyHour', 'RecurrenceFrequencyDay', 'RecurrenceFrequencyWeek', 'RecurrenceFrequencyMonth', 'RecurrenceFrequencyYear'
Frequency RecurrenceFrequency `json:"frequency,omitempty"`
@@ -3778,7 +4498,7 @@ type Recurrence struct {
// RecurrentSchedule the scheduling constraints for when the profile begins.
type RecurrentSchedule struct {
- // TimeZone - the timezone for the hours of the profile. Some examples of valid timezones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. 
Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time
+ // TimeZone - the timezone for the hours of the profile. Some examples of valid time zones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. 
Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time
TimeZone *string `json:"timeZone,omitempty"`
// Days - the collection of days that the profile takes effect on. Possible values are Sunday through Saturday.
Days *[]string `json:"days,omitempty"`
@@ -3790,11 +4510,11 @@ type RecurrentSchedule struct {
// Resource an azure resource object
type Resource struct {
- // ID - Azure resource Id
+ // ID - READ-ONLY; Azure resource Id
ID *string `json:"id,omitempty"`
- // Name - Azure resource name
+ // Name - READ-ONLY; Azure resource name
Name *string `json:"name,omitempty"`
- // Type - Azure resource type
+ // Type - READ-ONLY; Azure resource type
Type *string `json:"type,omitempty"`
// Location - Resource location
Location *string `json:"location,omitempty"`
@@ -3805,15 +4525,6 @@ type Resource struct {
// MarshalJSON is the custom marshaler for Resource.
func (r Resource) MarshalJSON() ([]byte, error) {
objectMap := make(map[string]interface{})
- if r.ID != nil {
- objectMap["id"] = r.ID
- }
- if r.Name != nil {
- objectMap["name"] = r.Name
- }
- if r.Type != nil {
- objectMap["type"] = r.Type
- }
if r.Location != nil {
objectMap["location"] = r.Location
}
@@ -3828,7 +4539,7 @@ type Response struct {
autorest.Response `json:"-"`
// Cost - The integer value representing the cost of the query, for data case.
Cost *float64 `json:"cost,omitempty"`
- // Timespan - The timespan for which the data was retrieved. Its value consists of two datatimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested.
+ // Timespan - The timespan for which the data was retrieved. Its value consists of two datetimes concatenated, separated by '/'. This may be adjusted in the future and returned back from what was originally requested.
Timespan *string `json:"timespan,omitempty"`
// Interval - The interval (window size) for which the metric data was returned in. This may be adjusted in the future and returned back from what was originally requested. This is not present if a metadata request was made.
Interval *string `json:"interval,omitempty"`
@@ -4152,8 +4863,8 @@ func (rds RuleDataSource) AsBasicRuleDataSource() (BasicRuleDataSource, bool) {
return &rds, true
}
-// RuleEmailAction specifies the action to send email when the rule condition is evaluated. The discriminator is
-// always RuleEmailAction in this case.
+// RuleEmailAction specifies the action to send email when the rule condition is evaluated. The
+// discriminator is always RuleEmailAction in this case.
type RuleEmailAction struct {
// SendToServiceOwners - Whether the administrators (service and co-administrators) of the service should be notified when the alert is activated.
SendToServiceOwners *bool `json:"sendToServiceOwners,omitempty"`
@@ -4292,8 +5003,8 @@ func (rmeds RuleManagementEventDataSource) AsBasicRuleDataSource() (BasicRuleDat
return &rmeds, true
}
-// RuleMetricDataSource a rule metric data source. The discriminator value is always RuleMetricDataSource in this
-// case.
+// RuleMetricDataSource a rule metric data source. The discriminator value is always RuleMetricDataSource
+// in this case.
type RuleMetricDataSource struct {
// MetricName - the name of the metric that defines what the rule monitors.
MetricName *string `json:"metricName,omitempty"`
@@ -4424,8 +5135,9 @@ type Schedule struct {
TimeWindowInMinutes *int32 `json:"timeWindowInMinutes,omitempty"`
}
-// SenderAuthorization the authorization used by the user who has performed the operation that led to this event.
-// This captures the RBAC properties of the event. These usually include the 'action', 'role' and the 'scope'
+// SenderAuthorization the authorization used by the user who has performed the operation that led to this
+// event. This captures the RBAC properties of the event. These usually include the 'action', 'role' and
+// the 'scope'
type SenderAuthorization struct {
// Action - the permissible actions. For instance: microsoft.support/supporttickets/write
Action *string `json:"action,omitempty"`
@@ -4443,13 +5155,13 @@ type SmsReceiver struct {
CountryCode *string `json:"countryCode,omitempty"`
// PhoneNumber - The phone number of the SMS receiver.
PhoneNumber *string `json:"phoneNumber,omitempty"`
- // Status - The status of the receiver. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled'
+ // Status - READ-ONLY; The status of the receiver. Possible values include: 'ReceiverStatusNotSpecified', 'ReceiverStatusEnabled', 'ReceiverStatusDisabled'
Status ReceiverStatus `json:"status,omitempty"`
}
// Source specifies the log search query.
type Source struct {
- // Query - Log search query.
+ // Query - Log search query. Required for action type - AlertingAction
Query *string `json:"query,omitempty"`
// AuthorizedResources - List of Resource referred into query
AuthorizedResources *[]string `json:"authorizedResources,omitempty"`
@@ -4611,7 +5323,7 @@ type TimeSeriesInformation struct {
// TimeWindow a specific date-time for the profile.
type TimeWindow struct {
- // TimeZone - the timezone of the start and end times for the profile. Some examples of valid timezones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. 
Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time
+ // TimeZone - the timezone of the start and end times for the profile. Some examples of valid time zones are: Dateline Standard Time, UTC-11, Hawaiian Standard Time, Alaskan Standard Time, Pacific Standard Time (Mexico), Pacific Standard Time, US Mountain Standard Time, Mountain Standard Time (Mexico), Mountain Standard Time, Central America Standard Time, Central Standard Time, Central Standard Time (Mexico), Canada Central Standard Time, SA Pacific Standard Time, Eastern Standard Time, US Eastern Standard Time, Venezuela Standard Time, Paraguay Standard Time, Atlantic Standard Time, Central Brazilian Standard Time, SA Western Standard Time, Pacific SA Standard Time, Newfoundland Standard Time, E. South America Standard Time, Argentina Standard Time, SA Eastern Standard Time, Greenland Standard Time, Montevideo Standard Time, Bahia Standard Time, UTC-02, Mid-Atlantic Standard Time, Azores Standard Time, Cape Verde Standard Time, Morocco Standard Time, UTC, GMT Standard Time, Greenwich Standard Time, W. Europe Standard Time, Central Europe Standard Time, Romance Standard Time, Central European Standard Time, W. Central Africa Standard Time, Namibia Standard Time, Jordan Standard Time, GTB Standard Time, Middle East Standard Time, Egypt Standard Time, Syria Standard Time, E. Europe Standard Time, South Africa Standard Time, FLE Standard Time, Turkey Standard Time, Israel Standard Time, Kaliningrad Standard Time, Libya Standard Time, Arabic Standard Time, Arab Standard Time, Belarus Standard Time, Russian Standard Time, E. Africa Standard Time, Iran Standard Time, Arabian Standard Time, Azerbaijan Standard Time, Russia Time Zone 3, Mauritius Standard Time, Georgian Standard Time, Caucasus Standard Time, Afghanistan Standard Time, West Asia Standard Time, Ekaterinburg Standard Time, Pakistan Standard Time, India Standard Time, Sri Lanka Standard Time, Nepal Standard Time, Central Asia Standard Time, Bangladesh Standard Time, N. 
Central Asia Standard Time, Myanmar Standard Time, SE Asia Standard Time, North Asia Standard Time, China Standard Time, North Asia East Standard Time, Singapore Standard Time, W. Australia Standard Time, Taipei Standard Time, Ulaanbaatar Standard Time, Tokyo Standard Time, Korea Standard Time, Yakutsk Standard Time, Cen. Australia Standard Time, AUS Central Standard Time, E. Australia Standard Time, AUS Eastern Standard Time, West Pacific Standard Time, Tasmania Standard Time, Magadan Standard Time, Vladivostok Standard Time, Russia Time Zone 10, Central Pacific Standard Time, Russia Time Zone 11, New Zealand Standard Time, UTC+12, Fiji Standard Time, Kamchatka Standard Time, Tonga Standard Time, Samoa Standard Time, Line Islands Standard Time
TimeZone *string `json:"timeZone,omitempty"`
// Start - the start time for the profile in ISO 8601 format.
Start *date.Time `json:"start,omitempty"`
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go
index bcfbfcf9..2d0faf13 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/operations.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -41,6 +42,16 @@ func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) Opera
// List lists all of the available operations from Microsoft.Insights provider.
func (client OperationsClient) List(ctx context.Context) (result OperationListResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListPreparer(ctx)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.OperationsClient", "List", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go
index d8cc7649..67495b64 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/scheduledqueryrules.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -46,14 +47,22 @@ func NewScheduledQueryRulesClientWithBaseURI(baseURI string, subscriptionID stri
// ruleName - the name of the rule.
// parameters - the parameters of the rule to create or update.
func (client ScheduledQueryRulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResource) (result LogSearchRuleResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.LogSearchRule", Name: validation.Null, Rule: true,
Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source", Name: validation.Null, Rule: true,
- Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source.Query", Name: validation.Null, Rule: true, Chain: nil},
- {Target: "parameters.LogSearchRule.Source.DataSourceID", Name: validation.Null, Rule: true, Chain: nil},
- }},
- {Target: "parameters.LogSearchRule.Schedule", Name: validation.Null, Rule: true,
+ Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Source.DataSourceID", Name: validation.Null, Rule: true, Chain: nil}}},
+ {Target: "parameters.LogSearchRule.Schedule", Name: validation.Null, Rule: false,
Chain: []validation.Constraint{{Target: "parameters.LogSearchRule.Schedule.FrequencyInMinutes", Name: validation.Null, Rule: true, Chain: nil},
{Target: "parameters.LogSearchRule.Schedule.TimeWindowInMinutes", Name: validation.Null, Rule: true, Chain: nil},
}},
@@ -131,6 +140,16 @@ func (client ScheduledQueryRulesClient) CreateOrUpdateResponder(resp *http.Respo
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client ScheduledQueryRulesClient) Delete(ctx context.Context, resourceGroupName string, ruleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.DeletePreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Delete", nil, "Failure preparing request")
@@ -197,6 +216,16 @@ func (client ScheduledQueryRulesClient) DeleteResponder(resp *http.Response) (re
// resourceGroupName - the name of the resource group.
// ruleName - the name of the rule.
func (client ScheduledQueryRulesClient) Get(ctx context.Context, resourceGroupName string, ruleName string) (result LogSearchRuleResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.GetPreparer(ctx, resourceGroupName, ruleName)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Get", nil, "Failure preparing request")
@@ -265,6 +294,16 @@ func (client ScheduledQueryRulesClient) GetResponder(resp *http.Response) (resul
// filter - the filter to apply on the operation. For more information please see
// https://msdn.microsoft.com/en-us/library/azure/dn931934.aspx
func (client ScheduledQueryRulesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string, filter string) (result LogSearchRuleResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListByResourceGroupPreparer(ctx, resourceGroupName, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListByResourceGroup", nil, "Failure preparing request")
@@ -334,6 +373,16 @@ func (client ScheduledQueryRulesClient) ListByResourceGroupResponder(resp *http.
// filter - the filter to apply on the operation. For more information please see
// https://msdn.microsoft.com/en-us/library/azure/dn931934.aspx
func (client ScheduledQueryRulesClient) ListBySubscription(ctx context.Context, filter string) (result LogSearchRuleResourceCollection, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.ListBySubscription")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.ListBySubscriptionPreparer(ctx, filter)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "ListBySubscription", nil, "Failure preparing request")
@@ -403,6 +452,16 @@ func (client ScheduledQueryRulesClient) ListBySubscriptionResponder(resp *http.R
// ruleName - the name of the rule.
// parameters - the parameters of the rule to update.
func (client ScheduledQueryRulesClient) Update(ctx context.Context, resourceGroupName string, ruleName string, parameters LogSearchRuleResourcePatch) (result LogSearchRuleResource, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ScheduledQueryRulesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
req, err := client.UpdatePreparer(ctx, resourceGroupName, ruleName, parameters)
if err != nil {
err = autorest.NewErrorWithError(err, "insights.ScheduledQueryRulesClient", "Update", nil, "Failure preparing request")
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go
index 78a629f7..14e2c2b1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/preview/monitor/mgmt/2018-03-01/insights/tenantactivitylogs.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -62,6 +63,16 @@ func NewTenantActivityLogsClientWithBaseURI(baseURI string, subscriptionID strin
// *operationId*, *operationName*, *properties*, *resourceGroupName*, *resourceProviderName*, *resourceId*,
// *status*, *submissionTimestamp*, *subStatus*, *subscriptionId*
func (client TenantActivityLogsClient) List(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TenantActivityLogsClient.List")
+ defer func() {
+ sc := -1
+ if result.edc.Response.Response != nil {
+ sc = result.edc.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, filter, selectParameter)
if err != nil {
@@ -126,8 +137,8 @@ func (client TenantActivityLogsClient) ListResponder(resp *http.Response) (resul
}
// listNextResults retrieves the next set of results, if any.
-func (client TenantActivityLogsClient) listNextResults(lastResults EventDataCollection) (result EventDataCollection, err error) {
- req, err := lastResults.eventDataCollectionPreparer()
+func (client TenantActivityLogsClient) listNextResults(ctx context.Context, lastResults EventDataCollection) (result EventDataCollection, err error) {
+ req, err := lastResults.eventDataCollectionPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "insights.TenantActivityLogsClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -148,6 +159,16 @@ func (client TenantActivityLogsClient) listNextResults(lastResults EventDataColl
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client TenantActivityLogsClient) ListComplete(ctx context.Context, filter string, selectParameter string) (result EventDataCollectionIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TenantActivityLogsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx, filter, selectParameter)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go
index 5c5ea8b8..f96329e6 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/disasterrecoveryconfigs.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -47,6 +48,16 @@ func NewDisasterRecoveryConfigsClientWithBaseURI(baseURI string, subscriptionID
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
func (client DisasterRecoveryConfigsClient) BreakPairing(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.BreakPairing")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -128,6 +139,16 @@ func (client DisasterRecoveryConfigsClient) BreakPairingResponder(resp *http.Res
// namespaceName - the namespace name
// parameters - parameters to check availability of the given namespace name
func (client DisasterRecoveryConfigsClient) CheckNameAvailabilityMethod(ctx context.Context, resourceGroupName string, namespaceName string, parameters CheckNameAvailability) (result CheckNameAvailabilityResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.CheckNameAvailabilityMethod")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -211,6 +232,16 @@ func (client DisasterRecoveryConfigsClient) CheckNameAvailabilityMethodResponder
// alias - the Disaster Recovery configuration name
// parameters - parameters required to create an Alias(Disaster Recovery configuration)
func (client DisasterRecoveryConfigsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, alias string, parameters ArmDisasterRecovery) (result ArmDisasterRecovery, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -295,6 +326,16 @@ func (client DisasterRecoveryConfigsClient) CreateOrUpdateResponder(resp *http.R
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
func (client DisasterRecoveryConfigsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -370,12 +411,22 @@ func (client DisasterRecoveryConfigsClient) DeleteResponder(resp *http.Response)
return
}
-// FailOver envokes GEO DR failover and reconfigure the alias to point to the secondary namespace
+// FailOver invokes GEO DR failover and reconfigure the alias to point to the secondary namespace
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
func (client DisasterRecoveryConfigsClient) FailOver(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.FailOver")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -457,6 +508,16 @@ func (client DisasterRecoveryConfigsClient) FailOverResponder(resp *http.Respons
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
func (client DisasterRecoveryConfigsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result ArmDisasterRecovery, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -538,8 +599,18 @@ func (client DisasterRecoveryConfigsClient) GetResponder(resp *http.Response) (r
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client DisasterRecoveryConfigsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.GetAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -625,6 +696,16 @@ func (client DisasterRecoveryConfigsClient) GetAuthorizationRuleResponder(resp *
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client DisasterRecoveryConfigsClient) List(ctx context.Context, resourceGroupName string, namespaceName string) (result ArmDisasterRecoveryListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.List")
+ defer func() {
+ sc := -1
+ if result.adrlr.Response.Response != nil {
+ sc = result.adrlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -699,8 +780,8 @@ func (client DisasterRecoveryConfigsClient) ListResponder(resp *http.Response) (
}
// listNextResults retrieves the next set of results, if any.
-func (client DisasterRecoveryConfigsClient) listNextResults(lastResults ArmDisasterRecoveryListResult) (result ArmDisasterRecoveryListResult, err error) {
- req, err := lastResults.armDisasterRecoveryListResultPreparer()
+func (client DisasterRecoveryConfigsClient) listNextResults(ctx context.Context, lastResults ArmDisasterRecoveryListResult) (result ArmDisasterRecoveryListResult, err error) {
+ req, err := lastResults.armDisasterRecoveryListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.DisasterRecoveryConfigsClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -721,6 +802,16 @@ func (client DisasterRecoveryConfigsClient) listNextResults(lastResults ArmDisas
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client DisasterRecoveryConfigsClient) ListComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result ArmDisasterRecoveryListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx, resourceGroupName, namespaceName)
return
}
@@ -731,6 +822,16 @@ func (client DisasterRecoveryConfigsClient) ListComplete(ctx context.Context, re
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
func (client DisasterRecoveryConfigsClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result SBAuthorizationRuleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.sarlr.Response.Response != nil {
+ sc = result.sarlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -809,8 +910,8 @@ func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesResponder(resp
}
// listAuthorizationRulesNextResults retrieves the next set of results, if any.
-func (client DisasterRecoveryConfigsClient) listAuthorizationRulesNextResults(lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
- req, err := lastResults.sBAuthorizationRuleListResultPreparer()
+func (client DisasterRecoveryConfigsClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
+ req, err := lastResults.sBAuthorizationRuleListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.DisasterRecoveryConfigsClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request")
}
@@ -831,6 +932,16 @@ func (client DisasterRecoveryConfigsClient) listAuthorizationRulesNextResults(la
// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required.
func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string, alias string) (result SBAuthorizationRuleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName, alias)
return
}
@@ -840,8 +951,18 @@ func (client DisasterRecoveryConfigsClient) ListAuthorizationRulesComplete(ctx c
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// alias - the Disaster Recovery configuration name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client DisasterRecoveryConfigsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, alias string, authorizationRuleName string) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/DisasterRecoveryConfigsClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/eventhubs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/eventhubs.go
index c0b9ea63..7eab9951 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/eventhubs.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/eventhubs.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -45,6 +46,16 @@ func NewEventHubsClientWithBaseURI(baseURI string, subscriptionID string) EventH
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client EventHubsClient) ListByNamespace(ctx context.Context, resourceGroupName string, namespaceName string) (result EventHubListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListByNamespace")
+ defer func() {
+ sc := -1
+ if result.ehlr.Response.Response != nil {
+ sc = result.ehlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -119,8 +130,8 @@ func (client EventHubsClient) ListByNamespaceResponder(resp *http.Response) (res
}
// listByNamespaceNextResults retrieves the next set of results, if any.
-func (client EventHubsClient) listByNamespaceNextResults(lastResults EventHubListResult) (result EventHubListResult, err error) {
- req, err := lastResults.eventHubListResultPreparer()
+func (client EventHubsClient) listByNamespaceNextResults(ctx context.Context, lastResults EventHubListResult) (result EventHubListResult, err error) {
+ req, err := lastResults.eventHubListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.EventHubsClient", "listByNamespaceNextResults", nil, "Failure preparing next results request")
}
@@ -141,6 +152,16 @@ func (client EventHubsClient) listByNamespaceNextResults(lastResults EventHubLis
// ListByNamespaceComplete enumerates all values, automatically crossing page boundaries as required.
func (client EventHubsClient) ListByNamespaceComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result EventHubListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventHubsClient.ListByNamespace")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListByNamespace(ctx, resourceGroupName, namespaceName)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go
index 0849340f..208f2c86 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/migrationconfigs.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -41,12 +42,22 @@ func NewMigrationConfigsClientWithBaseURI(baseURI string, subscriptionID string)
}
// CompleteMigration this operation Completes Migration of entities by pointing the connection strings to Premium
-// namespace and any enties created after the operation will be under Premium Namespace. CompleteMigration operation
+// namespace and any entities created after the operation will be under Premium Namespace. CompleteMigration operation
// will fail when entity migration is in-progress.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client MigrationConfigsClient) CompleteMigration(ctx context.Context, resourceGroupName string, namespaceName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.CompleteMigration")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -119,13 +130,23 @@ func (client MigrationConfigsClient) CompleteMigrationResponder(resp *http.Respo
return
}
-// CreateAndStartMigration creates Migration configuration and starts migration of enties from Standard to Premium
+// CreateAndStartMigration creates Migration configuration and starts migration of entities from Standard to Premium
// namespace
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// parameters - parameters required to create Migration Configuration
func (client MigrationConfigsClient) CreateAndStartMigration(ctx context.Context, resourceGroupName string, namespaceName string, parameters MigrationConfigProperties) (result MigrationConfigsCreateAndStartMigrationFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.CreateAndStartMigration")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -189,10 +210,6 @@ func (client MigrationConfigsClient) CreateAndStartMigrationSender(req *http.Req
if err != nil {
return
}
- err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated))
- if err != nil {
- return
- }
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@@ -215,6 +232,16 @@ func (client MigrationConfigsClient) CreateAndStartMigrationResponder(resp *http
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client MigrationConfigsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -292,6 +319,16 @@ func (client MigrationConfigsClient) DeleteResponder(resp *http.Response) (resul
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client MigrationConfigsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string) (result MigrationConfigProperties, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -370,6 +407,16 @@ func (client MigrationConfigsClient) GetResponder(resp *http.Response) (result M
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client MigrationConfigsClient) List(ctx context.Context, resourceGroupName string, namespaceName string) (result MigrationConfigListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.List")
+ defer func() {
+ sc := -1
+ if result.mclr.Response.Response != nil {
+ sc = result.mclr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -444,8 +491,8 @@ func (client MigrationConfigsClient) ListResponder(resp *http.Response) (result
}
// listNextResults retrieves the next set of results, if any.
-func (client MigrationConfigsClient) listNextResults(lastResults MigrationConfigListResult) (result MigrationConfigListResult, err error) {
- req, err := lastResults.migrationConfigListResultPreparer()
+func (client MigrationConfigsClient) listNextResults(ctx context.Context, lastResults MigrationConfigListResult) (result MigrationConfigListResult, err error) {
+ req, err := lastResults.migrationConfigListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.MigrationConfigsClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -466,6 +513,16 @@ func (client MigrationConfigsClient) listNextResults(lastResults MigrationConfig
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client MigrationConfigsClient) ListComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result MigrationConfigListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx, resourceGroupName, namespaceName)
return
}
@@ -475,6 +532,16 @@ func (client MigrationConfigsClient) ListComplete(ctx context.Context, resourceG
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client MigrationConfigsClient) Revert(ctx context.Context, resourceGroupName string, namespaceName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigsClient.Revert")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go
index be823f3a..8af2acb1 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/models.go
@@ -18,14 +18,19 @@ package servicebus
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
+ "context"
"encoding/json"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/date"
"github.com/Azure/go-autorest/autorest/to"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
+// The package's fully qualified name.
+const fqdn = "github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus"
+
// AccessRights enumerates the values for access rights.
type AccessRights string
@@ -43,6 +48,21 @@ func PossibleAccessRightsValues() []AccessRights {
return []AccessRights{Listen, Manage, Send}
}
+// DefaultAction enumerates the values for default action.
+type DefaultAction string
+
+const (
+ // Allow ...
+ Allow DefaultAction = "Allow"
+ // Deny ...
+ Deny DefaultAction = "Deny"
+)
+
+// PossibleDefaultActionValues returns an array of possible values for the DefaultAction const type.
+func PossibleDefaultActionValues() []DefaultAction {
+ return []DefaultAction{Allow, Deny}
+}
+
// EncodingCaptureDescription enumerates the values for encoding capture description.
type EncodingCaptureDescription string
@@ -102,6 +122,21 @@ func PossibleFilterTypeValues() []FilterType {
return []FilterType{FilterTypeCorrelationFilter, FilterTypeSQLFilter}
}
+// IPAction enumerates the values for ip action.
+type IPAction string
+
+const (
+ // Accept ...
+ Accept IPAction = "Accept"
+ // Reject ...
+ Reject IPAction = "Reject"
+)
+
+// PossibleIPActionValues returns an array of possible values for the IPAction const type.
+func PossibleIPActionValues() []IPAction {
+ return []IPAction{Accept, Reject}
+}
+
// KeyType enumerates the values for key type.
type KeyType string
@@ -117,6 +152,40 @@ func PossibleKeyTypeValues() []KeyType {
return []KeyType{PrimaryKey, SecondaryKey}
}
+// NameSpaceType enumerates the values for name space type.
+type NameSpaceType string
+
+const (
+ // EventHub ...
+ EventHub NameSpaceType = "EventHub"
+ // Messaging ...
+ Messaging NameSpaceType = "Messaging"
+ // Mixed ...
+ Mixed NameSpaceType = "Mixed"
+ // NotificationHub ...
+ NotificationHub NameSpaceType = "NotificationHub"
+ // Relay ...
+ Relay NameSpaceType = "Relay"
+)
+
+// PossibleNameSpaceTypeValues returns an array of possible values for the NameSpaceType const type.
+func PossibleNameSpaceTypeValues() []NameSpaceType {
+ return []NameSpaceType{EventHub, Messaging, Mixed, NotificationHub, Relay}
+}
+
+// NetworkRuleIPAction enumerates the values for network rule ip action.
+type NetworkRuleIPAction string
+
+const (
+ // NetworkRuleIPActionAllow ...
+ NetworkRuleIPActionAllow NetworkRuleIPAction = "Allow"
+)
+
+// PossibleNetworkRuleIPActionValues returns an array of possible values for the NetworkRuleIPAction const type.
+func PossibleNetworkRuleIPActionValues() []NetworkRuleIPAction {
+ return []NetworkRuleIPAction{NetworkRuleIPActionAllow}
+}
+
// ProvisioningStateDR enumerates the values for provisioning state dr.
type ProvisioningStateDR string
@@ -211,24 +280,24 @@ func PossibleUnavailableReasonValues() []UnavailableReason {
// AccessKeys namespace/ServiceBus Connection String
type AccessKeys struct {
autorest.Response `json:"-"`
- // PrimaryConnectionString - Primary connection string of the created namespace authorization rule.
+ // PrimaryConnectionString - READ-ONLY; Primary connection string of the created namespace authorization rule.
PrimaryConnectionString *string `json:"primaryConnectionString,omitempty"`
- // SecondaryConnectionString - Secondary connection string of the created namespace authorization rule.
+ // SecondaryConnectionString - READ-ONLY; Secondary connection string of the created namespace authorization rule.
SecondaryConnectionString *string `json:"secondaryConnectionString,omitempty"`
- // AliasPrimaryConnectionString - Primary connection string of the alias if GEO DR is enabled
+ // AliasPrimaryConnectionString - READ-ONLY; Primary connection string of the alias if GEO DR is enabled
AliasPrimaryConnectionString *string `json:"aliasPrimaryConnectionString,omitempty"`
- // AliasSecondaryConnectionString - Secondary connection string of the alias if GEO DR is enabled
+ // AliasSecondaryConnectionString - READ-ONLY; Secondary connection string of the alias if GEO DR is enabled
AliasSecondaryConnectionString *string `json:"aliasSecondaryConnectionString,omitempty"`
- // PrimaryKey - A base64-encoded 256-bit primary key for signing and validating the SAS token.
+ // PrimaryKey - READ-ONLY; A base64-encoded 256-bit primary key for signing and validating the SAS token.
PrimaryKey *string `json:"primaryKey,omitempty"`
- // SecondaryKey - A base64-encoded 256-bit primary key for signing and validating the SAS token.
+ // SecondaryKey - READ-ONLY; A base64-encoded 256-bit primary key for signing and validating the SAS token.
SecondaryKey *string `json:"secondaryKey,omitempty"`
- // KeyName - A string that describes the authorization rule.
+ // KeyName - READ-ONLY; A string that describes the authorization rule.
KeyName *string `json:"keyName,omitempty"`
}
-// Action represents the filter actions which are allowed for the transformation of a message that have been
-// matched by a filter expression.
+// Action represents the filter actions which are allowed for the transformation of a message that have
+// been matched by a filter expression.
type Action struct {
// SQLExpression - SQL expression. e.g. MyProperty='ABC'
SQLExpression *string `json:"sqlExpression,omitempty"`
@@ -243,11 +312,11 @@ type ArmDisasterRecovery struct {
autorest.Response `json:"-"`
// ArmDisasterRecoveryProperties - Properties required to the Create Or Update Alias(Disaster Recovery configurations)
*ArmDisasterRecoveryProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -257,15 +326,6 @@ func (adr ArmDisasterRecovery) MarshalJSON() ([]byte, error) {
if adr.ArmDisasterRecoveryProperties != nil {
objectMap["properties"] = adr.ArmDisasterRecoveryProperties
}
- if adr.ID != nil {
- objectMap["id"] = adr.ID
- }
- if adr.Name != nil {
- objectMap["name"] = adr.Name
- }
- if adr.Type != nil {
- objectMap["type"] = adr.Type
- }
return json.Marshal(objectMap)
}
@@ -325,24 +385,35 @@ type ArmDisasterRecoveryListResult struct {
autorest.Response `json:"-"`
// Value - List of Alias(Disaster Recovery configurations)
Value *[]ArmDisasterRecovery `json:"value,omitempty"`
- // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of Alias(Disaster Recovery configuration)
+ // NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of Alias(Disaster Recovery configuration)
NextLink *string `json:"nextLink,omitempty"`
}
-// ArmDisasterRecoveryListResultIterator provides access to a complete listing of ArmDisasterRecovery values.
+// ArmDisasterRecoveryListResultIterator provides access to a complete listing of ArmDisasterRecovery
+// values.
type ArmDisasterRecoveryListResultIterator struct {
i int
page ArmDisasterRecoveryListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *ArmDisasterRecoveryListResultIterator) Next() error {
+func (iter *ArmDisasterRecoveryListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ArmDisasterRecoveryListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -351,6 +422,13 @@ func (iter *ArmDisasterRecoveryListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *ArmDisasterRecoveryListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter ArmDisasterRecoveryListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -370,6 +448,11 @@ func (iter ArmDisasterRecoveryListResultIterator) Value() ArmDisasterRecovery {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the ArmDisasterRecoveryListResultIterator type.
+func NewArmDisasterRecoveryListResultIterator(page ArmDisasterRecoveryListResultPage) ArmDisasterRecoveryListResultIterator {
+ return ArmDisasterRecoveryListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (adrlr ArmDisasterRecoveryListResult) IsEmpty() bool {
return adrlr.Value == nil || len(*adrlr.Value) == 0
@@ -377,11 +460,11 @@ func (adrlr ArmDisasterRecoveryListResult) IsEmpty() bool {
// armDisasterRecoveryListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (adrlr ArmDisasterRecoveryListResult) armDisasterRecoveryListResultPreparer() (*http.Request, error) {
+func (adrlr ArmDisasterRecoveryListResult) armDisasterRecoveryListResultPreparer(ctx context.Context) (*http.Request, error) {
if adrlr.NextLink == nil || len(to.String(adrlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(adrlr.NextLink)))
@@ -389,14 +472,24 @@ func (adrlr ArmDisasterRecoveryListResult) armDisasterRecoveryListResultPreparer
// ArmDisasterRecoveryListResultPage contains a page of ArmDisasterRecovery values.
type ArmDisasterRecoveryListResultPage struct {
- fn func(ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error)
+ fn func(context.Context, ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error)
adrlr ArmDisasterRecoveryListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *ArmDisasterRecoveryListResultPage) Next() error {
- next, err := page.fn(page.adrlr)
+func (page *ArmDisasterRecoveryListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/ArmDisasterRecoveryListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.adrlr)
if err != nil {
return err
}
@@ -404,6 +497,13 @@ func (page *ArmDisasterRecoveryListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *ArmDisasterRecoveryListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page ArmDisasterRecoveryListResultPage) NotDone() bool {
return !page.adrlr.IsEmpty()
@@ -422,18 +522,23 @@ func (page ArmDisasterRecoveryListResultPage) Values() []ArmDisasterRecovery {
return *page.adrlr.Value
}
+// Creates a new instance of the ArmDisasterRecoveryListResultPage type.
+func NewArmDisasterRecoveryListResultPage(getNextPage func(context.Context, ArmDisasterRecoveryListResult) (ArmDisasterRecoveryListResult, error)) ArmDisasterRecoveryListResultPage {
+ return ArmDisasterRecoveryListResultPage{fn: getNextPage}
+}
+
// ArmDisasterRecoveryProperties properties required to the Create Or Update Alias(Disaster Recovery
// configurations)
type ArmDisasterRecoveryProperties struct {
- // ProvisioningState - Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted' or 'Succeeded' or 'Failed'. Possible values include: 'Accepted', 'Succeeded', 'Failed'
+ // ProvisioningState - READ-ONLY; Provisioning state of the Alias(Disaster Recovery configuration) - possible values 'Accepted' or 'Succeeded' or 'Failed'. Possible values include: 'Accepted', 'Succeeded', 'Failed'
ProvisioningState ProvisioningStateDR `json:"provisioningState,omitempty"`
- // PendingReplicationOperationsCount - Number of entities pending to be replicated.
+ // PendingReplicationOperationsCount - READ-ONLY; Number of entities pending to be replicated.
PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"`
- // PartnerNamespace - ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairning
+ // PartnerNamespace - ARM Id of the Primary/Secondary eventhub namespace name, which is part of GEO DR pairing
PartnerNamespace *string `json:"partnerNamespace,omitempty"`
- // AlternateName - Primary/Secondary eventhub namespace name, which is part of GEO DR pairning
+ // AlternateName - Primary/Secondary eventhub namespace name, which is part of GEO DR pairing
AlternateName *string `json:"alternateName,omitempty"`
- // Role - role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary'. Possible values include: 'Primary', 'PrimaryNotReplicating', 'Secondary'
+ // Role - READ-ONLY; role of namespace in GEO DR - possible values 'Primary' or 'PrimaryNotReplicating' or 'Secondary'. Possible values include: 'Primary', 'PrimaryNotReplicating', 'Secondary'
Role RoleDisasterRecovery `json:"role,omitempty"`
}
@@ -459,14 +564,14 @@ type CaptureDescription struct {
// CheckNameAvailability description of a Check Name availability request properties.
type CheckNameAvailability struct {
- // Name - The Name to check the namespce name availability and The namespace name can contain only letters, numbers, and hyphens. The namespace must start with a letter, and it must end with a letter or number.
+ // Name - The Name to check the namespace name availability and The namespace name can contain only letters, numbers, and hyphens. The namespace must start with a letter, and it must end with a letter or number.
Name *string `json:"name,omitempty"`
}
// CheckNameAvailabilityResult description of a Check Name availability request properties.
type CheckNameAvailabilityResult struct {
autorest.Response `json:"-"`
- // Message - The detailed info regarding the reason associated with the namespace.
+ // Message - READ-ONLY; The detailed info regarding the reason associated with the namespace.
Message *string `json:"message,omitempty"`
// NameAvailable - Value indicating namespace is availability, true if the namespace is available; otherwise, false.
NameAvailable *bool `json:"nameAvailable,omitempty"`
@@ -538,7 +643,7 @@ func (cf CorrelationFilter) MarshalJSON() ([]byte, error) {
type Destination struct {
// Name - Name for capture destination
Name *string `json:"name,omitempty"`
- // DestinationProperties - Properties describing the storage account, blob container and acrchive name format for capture destination
+ // DestinationProperties - Properties describing the storage account, blob container and archive name format for capture destination
*DestinationProperties `json:"properties,omitempty"`
}
@@ -587,8 +692,8 @@ func (d *Destination) UnmarshalJSON(body []byte) error {
return nil
}
-// DestinationProperties properties describing the storage account, blob container and acrchive name format for
-// capture destination
+// DestinationProperties properties describing the storage account, blob container and archive name format
+// for capture destination
type DestinationProperties struct {
// StorageAccountResourceID - Resource id of the storage account to be used to create the blobs
StorageAccountResourceID *string `json:"storageAccountResourceId,omitempty"`
@@ -598,8 +703,8 @@ type DestinationProperties struct {
ArchiveNameFormat *string `json:"archiveNameFormat,omitempty"`
}
-// ErrorResponse error reponse indicates ServiceBus service is not able to process the incoming request. The reason
-// is provided in the error message.
+// ErrorResponse error response indicates ServiceBus service is not able to process the incoming request.
+// The reason is provided in the error message.
type ErrorResponse struct {
// Code - Error code.
Code *string `json:"code,omitempty"`
@@ -611,11 +716,11 @@ type ErrorResponse struct {
type Eventhub struct {
// EventhubProperties - Properties supplied to the Create Or Update Event Hub operation.
*EventhubProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -625,15 +730,6 @@ func (e Eventhub) MarshalJSON() ([]byte, error) {
if e.EventhubProperties != nil {
objectMap["properties"] = e.EventhubProperties
}
- if e.ID != nil {
- objectMap["id"] = e.ID
- }
- if e.Name != nil {
- objectMap["name"] = e.Name
- }
- if e.Type != nil {
- objectMap["type"] = e.Type
- }
return json.Marshal(objectMap)
}
@@ -693,7 +789,7 @@ type EventHubListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List EventHubs operation.
Value *[]Eventhub `json:"value,omitempty"`
- // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of EventHubs.
+ // NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of EventHubs.
NextLink *string `json:"nextLink,omitempty"`
}
@@ -703,14 +799,24 @@ type EventHubListResultIterator struct {
page EventHubListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *EventHubListResultIterator) Next() error {
+func (iter *EventHubListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventHubListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -719,6 +825,13 @@ func (iter *EventHubListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *EventHubListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter EventHubListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -738,6 +851,11 @@ func (iter EventHubListResultIterator) Value() Eventhub {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the EventHubListResultIterator type.
+func NewEventHubListResultIterator(page EventHubListResultPage) EventHubListResultIterator {
+ return EventHubListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (ehlr EventHubListResult) IsEmpty() bool {
return ehlr.Value == nil || len(*ehlr.Value) == 0
@@ -745,11 +863,11 @@ func (ehlr EventHubListResult) IsEmpty() bool {
// eventHubListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (ehlr EventHubListResult) eventHubListResultPreparer() (*http.Request, error) {
+func (ehlr EventHubListResult) eventHubListResultPreparer(ctx context.Context) (*http.Request, error) {
if ehlr.NextLink == nil || len(to.String(ehlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(ehlr.NextLink)))
@@ -757,14 +875,24 @@ func (ehlr EventHubListResult) eventHubListResultPreparer() (*http.Request, erro
// EventHubListResultPage contains a page of Eventhub values.
type EventHubListResultPage struct {
- fn func(EventHubListResult) (EventHubListResult, error)
+ fn func(context.Context, EventHubListResult) (EventHubListResult, error)
ehlr EventHubListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *EventHubListResultPage) Next() error {
- next, err := page.fn(page.ehlr)
+func (page *EventHubListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/EventHubListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.ehlr)
if err != nil {
return err
}
@@ -772,6 +900,13 @@ func (page *EventHubListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *EventHubListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page EventHubListResultPage) NotDone() bool {
return !page.ehlr.IsEmpty()
@@ -790,13 +925,18 @@ func (page EventHubListResultPage) Values() []Eventhub {
return *page.ehlr.Value
}
+// Creates a new instance of the EventHubListResultPage type.
+func NewEventHubListResultPage(getNextPage func(context.Context, EventHubListResult) (EventHubListResult, error)) EventHubListResultPage {
+ return EventHubListResultPage{fn: getNextPage}
+}
+
// EventhubProperties properties supplied to the Create Or Update Event Hub operation.
type EventhubProperties struct {
- // PartitionIds - Current number of shards on the Event Hub.
+ // PartitionIds - READ-ONLY; Current number of shards on the Event Hub.
PartitionIds *[]string `json:"partitionIds,omitempty"`
- // CreatedAt - Exact time the Event Hub was created.
+ // CreatedAt - READ-ONLY; Exact time the Event Hub was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
- // UpdatedAt - The exact time the message was updated.
+ // UpdatedAt - READ-ONLY; The exact time the message was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
// MessageRetentionInDays - Number of days to retain the events for this Event Hub, value should be 1 to 7 days
MessageRetentionInDays *int64 `json:"messageRetentionInDays,omitempty"`
@@ -808,17 +948,211 @@ type EventhubProperties struct {
CaptureDescription *CaptureDescription `json:"captureDescription,omitempty"`
}
+// IPFilterRule single item in a List or Get IpFilterRules operation
+type IPFilterRule struct {
+ autorest.Response `json:"-"`
+ // IPFilterRuleProperties - Properties supplied to create or update IpFilterRules
+ *IPFilterRuleProperties `json:"properties,omitempty"`
+ // ID - Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - Resource type
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for IPFilterRule.
+func (ifr IPFilterRule) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if ifr.IPFilterRuleProperties != nil {
+ objectMap["properties"] = ifr.IPFilterRuleProperties
+ }
+ if ifr.ID != nil {
+ objectMap["id"] = ifr.ID
+ }
+ if ifr.Name != nil {
+ objectMap["name"] = ifr.Name
+ }
+ if ifr.Type != nil {
+ objectMap["type"] = ifr.Type
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for IPFilterRule struct.
+func (ifr *IPFilterRule) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var IPFilterRuleProperties IPFilterRuleProperties
+ err = json.Unmarshal(*v, &IPFilterRuleProperties)
+ if err != nil {
+ return err
+ }
+ ifr.IPFilterRuleProperties = &IPFilterRuleProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ ifr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ ifr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ ifr.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// IPFilterRuleListResult the response from the List namespace operation.
+type IPFilterRuleListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Result of the List IpFilter Rules operation.
+ Value *[]IPFilterRule `json:"value,omitempty"`
+ // NextLink - Link to the next set of results. Not empty if Value contains an incomplete list of IpFilter Rules
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// IPFilterRuleListResultIterator provides access to a complete listing of IPFilterRule values.
+type IPFilterRuleListResultIterator struct {
+ i int
+ page IPFilterRuleListResultPage
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *IPFilterRuleListResultIterator) Next() error {
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err := iter.page.Next()
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter IPFilterRuleListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter IPFilterRuleListResultIterator) Response() IPFilterRuleListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter IPFilterRuleListResultIterator) Value() IPFilterRule {
+ if !iter.page.NotDone() {
+ return IPFilterRule{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (ifrlr IPFilterRuleListResult) IsEmpty() bool {
+ return ifrlr.Value == nil || len(*ifrlr.Value) == 0
+}
+
+// iPFilterRuleListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (ifrlr IPFilterRuleListResult) iPFilterRuleListResultPreparer() (*http.Request, error) {
+ if ifrlr.NextLink == nil || len(to.String(ifrlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(ifrlr.NextLink)))
+}
+
+// IPFilterRuleListResultPage contains a page of IPFilterRule values.
+type IPFilterRuleListResultPage struct {
+ fn func(IPFilterRuleListResult) (IPFilterRuleListResult, error)
+ ifrlr IPFilterRuleListResult
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *IPFilterRuleListResultPage) Next() error {
+ next, err := page.fn(page.ifrlr)
+ if err != nil {
+ return err
+ }
+ page.ifrlr = next
+ return nil
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page IPFilterRuleListResultPage) NotDone() bool {
+ return !page.ifrlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page IPFilterRuleListResultPage) Response() IPFilterRuleListResult {
+ return page.ifrlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page IPFilterRuleListResultPage) Values() []IPFilterRule {
+ if page.ifrlr.IsEmpty() {
+ return nil
+ }
+ return *page.ifrlr.Value
+}
+
+// IPFilterRuleProperties properties supplied to create or update IpFilterRules
+type IPFilterRuleProperties struct {
+ // IPMask - IP Mask
+ IPMask *string `json:"ipMask,omitempty"`
+ // Action - The IP Filter Action. Possible values include: 'Accept', 'Reject'
+ Action IPAction `json:"action,omitempty"`
+ // FilterName - IP Filter name
+ FilterName *string `json:"filterName,omitempty"`
+}
+
// MessageCountDetails message Count Details.
type MessageCountDetails struct {
- // ActiveMessageCount - Number of active messages in the queue, topic, or subscription.
+ // ActiveMessageCount - READ-ONLY; Number of active messages in the queue, topic, or subscription.
ActiveMessageCount *int64 `json:"activeMessageCount,omitempty"`
- // DeadLetterMessageCount - Number of messages that are dead lettered.
+ // DeadLetterMessageCount - READ-ONLY; Number of messages that are dead lettered.
DeadLetterMessageCount *int64 `json:"deadLetterMessageCount,omitempty"`
- // ScheduledMessageCount - Number of scheduled messages.
+ // ScheduledMessageCount - READ-ONLY; Number of scheduled messages.
ScheduledMessageCount *int64 `json:"scheduledMessageCount,omitempty"`
- // TransferMessageCount - Number of messages transferred to another queue, topic, or subscription.
+ // TransferMessageCount - READ-ONLY; Number of messages transferred to another queue, topic, or subscription.
TransferMessageCount *int64 `json:"transferMessageCount,omitempty"`
- // TransferDeadLetterMessageCount - Number of messages transferred into dead letters.
+ // TransferDeadLetterMessageCount - READ-ONLY; Number of messages transferred into dead letters.
TransferDeadLetterMessageCount *int64 `json:"transferDeadLetterMessageCount,omitempty"`
}
@@ -827,24 +1161,35 @@ type MigrationConfigListResult struct {
autorest.Response `json:"-"`
// Value - List of Migration Configs
Value *[]MigrationConfigProperties `json:"value,omitempty"`
- // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of migrationConfigurations
+ // NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of migrationConfigurations
NextLink *string `json:"nextLink,omitempty"`
}
-// MigrationConfigListResultIterator provides access to a complete listing of MigrationConfigProperties values.
+// MigrationConfigListResultIterator provides access to a complete listing of MigrationConfigProperties
+// values.
type MigrationConfigListResultIterator struct {
i int
page MigrationConfigListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *MigrationConfigListResultIterator) Next() error {
+func (iter *MigrationConfigListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -853,6 +1198,13 @@ func (iter *MigrationConfigListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *MigrationConfigListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter MigrationConfigListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -872,6 +1224,11 @@ func (iter MigrationConfigListResultIterator) Value() MigrationConfigProperties
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the MigrationConfigListResultIterator type.
+func NewMigrationConfigListResultIterator(page MigrationConfigListResultPage) MigrationConfigListResultIterator {
+ return MigrationConfigListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (mclr MigrationConfigListResult) IsEmpty() bool {
return mclr.Value == nil || len(*mclr.Value) == 0
@@ -879,11 +1236,11 @@ func (mclr MigrationConfigListResult) IsEmpty() bool {
// migrationConfigListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (mclr MigrationConfigListResult) migrationConfigListResultPreparer() (*http.Request, error) {
+func (mclr MigrationConfigListResult) migrationConfigListResultPreparer(ctx context.Context) (*http.Request, error) {
if mclr.NextLink == nil || len(to.String(mclr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(mclr.NextLink)))
@@ -891,14 +1248,24 @@ func (mclr MigrationConfigListResult) migrationConfigListResultPreparer() (*http
// MigrationConfigListResultPage contains a page of MigrationConfigProperties values.
type MigrationConfigListResultPage struct {
- fn func(MigrationConfigListResult) (MigrationConfigListResult, error)
+ fn func(context.Context, MigrationConfigListResult) (MigrationConfigListResult, error)
mclr MigrationConfigListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *MigrationConfigListResultPage) Next() error {
- next, err := page.fn(page.mclr)
+func (page *MigrationConfigListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/MigrationConfigListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.mclr)
if err != nil {
return err
}
@@ -906,6 +1273,13 @@ func (page *MigrationConfigListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *MigrationConfigListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page MigrationConfigListResultPage) NotDone() bool {
return !page.mclr.IsEmpty()
@@ -924,16 +1298,21 @@ func (page MigrationConfigListResultPage) Values() []MigrationConfigProperties {
return *page.mclr.Value
}
+// Creates a new instance of the MigrationConfigListResultPage type.
+func NewMigrationConfigListResultPage(getNextPage func(context.Context, MigrationConfigListResult) (MigrationConfigListResult, error)) MigrationConfigListResultPage {
+ return MigrationConfigListResultPage{fn: getNextPage}
+}
+
// MigrationConfigProperties single item in List or Get Migration Config operation
type MigrationConfigProperties struct {
autorest.Response `json:"-"`
// MigrationConfigPropertiesProperties - Properties required to the Create Migration Configuration
*MigrationConfigPropertiesProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -943,15 +1322,6 @@ func (mcp MigrationConfigProperties) MarshalJSON() ([]byte, error) {
if mcp.MigrationConfigPropertiesProperties != nil {
objectMap["properties"] = mcp.MigrationConfigPropertiesProperties
}
- if mcp.ID != nil {
- objectMap["id"] = mcp.ID
- }
- if mcp.Name != nil {
- objectMap["name"] = mcp.Name
- }
- if mcp.Type != nil {
- objectMap["type"] = mcp.Type
- }
return json.Marshal(objectMap)
}
@@ -1008,18 +1378,20 @@ func (mcp *MigrationConfigProperties) UnmarshalJSON(body []byte) error {
// MigrationConfigPropertiesProperties properties required to the Create Migration Configuration
type MigrationConfigPropertiesProperties struct {
- // ProvisioningState - Provisioning state of Migration Configuration
+ // ProvisioningState - READ-ONLY; Provisioning state of Migration Configuration
ProvisioningState *string `json:"provisioningState,omitempty"`
- // PendingReplicationOperationsCount - Number of entities pending to be replicated.
+ // PendingReplicationOperationsCount - READ-ONLY; Number of entities pending to be replicated.
PendingReplicationOperationsCount *int64 `json:"pendingReplicationOperationsCount,omitempty"`
// TargetNamespace - Existing premium Namespace ARM Id name which has no entities, will be used for migration
TargetNamespace *string `json:"targetNamespace,omitempty"`
// PostMigrationName - Name to access Standard Namespace after migration
PostMigrationName *string `json:"postMigrationName,omitempty"`
+ // MigrationState - READ-ONLY; State in which Standard to Premium Migration is, possible values : Unknown, Reverting, Completing, Initiating, Syncing, Active
+ MigrationState *string `json:"migrationState,omitempty"`
}
-// MigrationConfigsCreateAndStartMigrationFuture an abstraction for monitoring and retrieving the results of a
-// long-running operation.
+// MigrationConfigsCreateAndStartMigrationFuture an abstraction for monitoring and retrieving the results
+// of a long-running operation.
type MigrationConfigsCreateAndStartMigrationFuture struct {
azure.Future
}
@@ -1028,7 +1400,7 @@ type MigrationConfigsCreateAndStartMigrationFuture struct {
// If the operation has not completed it will return an error.
func (future *MigrationConfigsCreateAndStartMigrationFuture) Result(client MigrationConfigsClient) (mcp MigrationConfigProperties, err error) {
var done bool
- done, err = future.Done(client)
+ done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.MigrationConfigsCreateAndStartMigrationFuture", "Result", future.Response(), "Polling failure")
return
@@ -1047,8 +1419,8 @@ func (future *MigrationConfigsCreateAndStartMigrationFuture) Result(client Migra
return
}
-// NamespacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a long-running
-// operation.
+// NamespacesCreateOrUpdateFuture an abstraction for monitoring and retrieving the results of a
+// long-running operation.
type NamespacesCreateOrUpdateFuture struct {
azure.Future
}
@@ -1057,7 +1429,7 @@ type NamespacesCreateOrUpdateFuture struct {
// If the operation has not completed it will return an error.
func (future *NamespacesCreateOrUpdateFuture) Result(client NamespacesClient) (sn SBNamespace, err error) {
var done bool
- done, err = future.Done(client)
+ done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.NamespacesCreateOrUpdateFuture", "Result", future.Response(), "Polling failure")
return
@@ -1076,7 +1448,8 @@ func (future *NamespacesCreateOrUpdateFuture) Result(client NamespacesClient) (s
return
}
-// NamespacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running operation.
+// NamespacesDeleteFuture an abstraction for monitoring and retrieving the results of a long-running
+// operation.
type NamespacesDeleteFuture struct {
azure.Future
}
@@ -1085,7 +1458,7 @@ type NamespacesDeleteFuture struct {
// If the operation has not completed it will return an error.
func (future *NamespacesDeleteFuture) Result(client NamespacesClient) (ar autorest.Response, err error) {
var done bool
- done, err = future.Done(client)
+ done, err = future.DoneWithContext(context.Background(), client)
if err != nil {
err = autorest.NewErrorWithError(err, "servicebus.NamespacesDeleteFuture", "Result", future.Response(), "Polling failure")
return
@@ -1098,9 +1471,108 @@ func (future *NamespacesDeleteFuture) Result(client NamespacesClient) (ar autore
return
}
-// Operation a ServiceBus REST API operation
-type Operation struct {
- // Name - Operation name: {provider}/{resource}/{operation}
+// NetworkRuleSet description of NetworkRuleSet resource.
+type NetworkRuleSet struct {
+ autorest.Response `json:"-"`
+ // NetworkRuleSetProperties - NetworkRuleSet properties
+ *NetworkRuleSetProperties `json:"properties,omitempty"`
+ // ID - READ-ONLY; Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - READ-ONLY; Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - READ-ONLY; Resource type
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for NetworkRuleSet.
+func (nrs NetworkRuleSet) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if nrs.NetworkRuleSetProperties != nil {
+ objectMap["properties"] = nrs.NetworkRuleSetProperties
+ }
+ return json.Marshal(objectMap)
+}
+
+// UnmarshalJSON is the custom unmarshaler for NetworkRuleSet struct.
+func (nrs *NetworkRuleSet) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var networkRuleSetProperties NetworkRuleSetProperties
+ err = json.Unmarshal(*v, &networkRuleSetProperties)
+ if err != nil {
+ return err
+ }
+ nrs.NetworkRuleSetProperties = &networkRuleSetProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ nrs.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ nrs.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ nrs.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// NetworkRuleSetProperties networkRuleSet properties
+type NetworkRuleSetProperties struct {
+ // DefaultAction - Default Action for Network Rule Set. Possible values include: 'Allow', 'Deny'
+ DefaultAction DefaultAction `json:"defaultAction,omitempty"`
+ // VirtualNetworkRules - List VirtualNetwork Rules
+ VirtualNetworkRules *[]NWRuleSetVirtualNetworkRules `json:"virtualNetworkRules,omitempty"`
+ // IPRules - List of IpRules
+ IPRules *[]NWRuleSetIPRules `json:"ipRules,omitempty"`
+}
+
+// NWRuleSetIPRules description of NetWorkRuleSet - IpRules resource.
+type NWRuleSetIPRules struct {
+ // IPMask - IP Mask
+ IPMask *string `json:"ipMask,omitempty"`
+ // Action - The IP Filter Action. Possible values include: 'NetworkRuleIPActionAllow'
+ Action NetworkRuleIPAction `json:"action,omitempty"`
+}
+
+// NWRuleSetVirtualNetworkRules description of VirtualNetworkRules - NetworkRules resource.
+type NWRuleSetVirtualNetworkRules struct {
+ // Subnet - Subnet properties
+ Subnet *Subnet `json:"subnet,omitempty"`
+ // IgnoreMissingVnetServiceEndpoint - Value that indicates whether to ignore missing VNet Service Endpoint
+ IgnoreMissingVnetServiceEndpoint *bool `json:"ignoreMissingVnetServiceEndpoint,omitempty"`
+}
+
+// Operation a ServiceBus REST API operation
+type Operation struct {
+ // Name - READ-ONLY; Operation name: {provider}/{resource}/{operation}
Name *string `json:"name,omitempty"`
// Display - The object that represents the operation.
Display *OperationDisplay `json:"display,omitempty"`
@@ -1108,21 +1580,21 @@ type Operation struct {
// OperationDisplay the object that represents the operation.
type OperationDisplay struct {
- // Provider - Service provider: Microsoft.ServiceBus
+ // Provider - READ-ONLY; Service provider: Microsoft.ServiceBus
Provider *string `json:"provider,omitempty"`
- // Resource - Resource on which the operation is performed: Invoice, etc.
+ // Resource - READ-ONLY; Resource on which the operation is performed: Invoice, etc.
Resource *string `json:"resource,omitempty"`
- // Operation - Operation type: Read, write, delete, etc.
+ // Operation - READ-ONLY; Operation type: Read, write, delete, etc.
Operation *string `json:"operation,omitempty"`
}
-// OperationListResult result of the request to list ServiceBus operations. It contains a list of operations and a
-// URL link to get the next set of results.
+// OperationListResult result of the request to list ServiceBus operations. It contains a list of
+// operations and a URL link to get the next set of results.
type OperationListResult struct {
autorest.Response `json:"-"`
- // Value - List of ServiceBus operations supported by the Microsoft.ServiceBus resource provider.
+ // Value - READ-ONLY; List of ServiceBus operations supported by the Microsoft.ServiceBus resource provider.
Value *[]Operation `json:"value,omitempty"`
- // NextLink - URL to get the next set of operation list results if there are any.
+ // NextLink - READ-ONLY; URL to get the next set of operation list results if there are any.
NextLink *string `json:"nextLink,omitempty"`
}
@@ -1132,14 +1604,24 @@ type OperationListResultIterator struct {
page OperationListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *OperationListResultIterator) Next() error {
+func (iter *OperationListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -1148,6 +1630,13 @@ func (iter *OperationListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *OperationListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter OperationListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -1167,6 +1656,11 @@ func (iter OperationListResultIterator) Value() Operation {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the OperationListResultIterator type.
+func NewOperationListResultIterator(page OperationListResultPage) OperationListResultIterator {
+ return OperationListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (olr OperationListResult) IsEmpty() bool {
return olr.Value == nil || len(*olr.Value) == 0
@@ -1174,11 +1668,11 @@ func (olr OperationListResult) IsEmpty() bool {
// operationListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (olr OperationListResult) operationListResultPreparer() (*http.Request, error) {
+func (olr OperationListResult) operationListResultPreparer(ctx context.Context) (*http.Request, error) {
if olr.NextLink == nil || len(to.String(olr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(olr.NextLink)))
@@ -1186,14 +1680,24 @@ func (olr OperationListResult) operationListResultPreparer() (*http.Request, err
// OperationListResultPage contains a page of Operation values.
type OperationListResultPage struct {
- fn func(OperationListResult) (OperationListResult, error)
+ fn func(context.Context, OperationListResult) (OperationListResult, error)
olr OperationListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *OperationListResultPage) Next() error {
- next, err := page.fn(page.olr)
+func (page *OperationListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.olr)
if err != nil {
return err
}
@@ -1201,6 +1705,13 @@ func (page *OperationListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *OperationListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page OperationListResultPage) NotDone() bool {
return !page.olr.IsEmpty()
@@ -1219,6 +1730,11 @@ func (page OperationListResultPage) Values() []Operation {
return *page.olr.Value
}
+// Creates a new instance of the OperationListResultPage type.
+func NewOperationListResultPage(getNextPage func(context.Context, OperationListResult) (OperationListResult, error)) OperationListResultPage {
+ return OperationListResultPage{fn: getNextPage}
+}
+
// PremiumMessagingRegions premium Messaging Region
type PremiumMessagingRegions struct {
Properties *PremiumMessagingRegionsProperties `json:"properties,omitempty"`
@@ -1226,11 +1742,11 @@ type PremiumMessagingRegions struct {
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -1246,15 +1762,6 @@ func (pmr PremiumMessagingRegions) MarshalJSON() ([]byte, error) {
if pmr.Tags != nil {
objectMap["tags"] = pmr.Tags
}
- if pmr.ID != nil {
- objectMap["id"] = pmr.ID
- }
- if pmr.Name != nil {
- objectMap["name"] = pmr.Name
- }
- if pmr.Type != nil {
- objectMap["type"] = pmr.Type
- }
return json.Marshal(objectMap)
}
@@ -1263,25 +1770,35 @@ type PremiumMessagingRegionsListResult struct {
autorest.Response `json:"-"`
// Value - Result of the List PremiumMessagingRegions type.
Value *[]PremiumMessagingRegions `json:"value,omitempty"`
- // NextLink - Link to the next set of results. Not empty if Value contains incomplete list of PremiumMessagingRegions.
+ // NextLink - READ-ONLY; Link to the next set of results. Not empty if Value contains incomplete list of PremiumMessagingRegions.
NextLink *string `json:"nextLink,omitempty"`
}
-// PremiumMessagingRegionsListResultIterator provides access to a complete listing of PremiumMessagingRegions
-// values.
+// PremiumMessagingRegionsListResultIterator provides access to a complete listing of
+// PremiumMessagingRegions values.
type PremiumMessagingRegionsListResultIterator struct {
i int
page PremiumMessagingRegionsListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *PremiumMessagingRegionsListResultIterator) Next() error {
+func (iter *PremiumMessagingRegionsListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PremiumMessagingRegionsListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -1290,6 +1807,13 @@ func (iter *PremiumMessagingRegionsListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *PremiumMessagingRegionsListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter PremiumMessagingRegionsListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -1309,6 +1833,11 @@ func (iter PremiumMessagingRegionsListResultIterator) Value() PremiumMessagingRe
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the PremiumMessagingRegionsListResultIterator type.
+func NewPremiumMessagingRegionsListResultIterator(page PremiumMessagingRegionsListResultPage) PremiumMessagingRegionsListResultIterator {
+ return PremiumMessagingRegionsListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (pmrlr PremiumMessagingRegionsListResult) IsEmpty() bool {
return pmrlr.Value == nil || len(*pmrlr.Value) == 0
@@ -1316,11 +1845,11 @@ func (pmrlr PremiumMessagingRegionsListResult) IsEmpty() bool {
// premiumMessagingRegionsListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (pmrlr PremiumMessagingRegionsListResult) premiumMessagingRegionsListResultPreparer() (*http.Request, error) {
+func (pmrlr PremiumMessagingRegionsListResult) premiumMessagingRegionsListResultPreparer(ctx context.Context) (*http.Request, error) {
if pmrlr.NextLink == nil || len(to.String(pmrlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(pmrlr.NextLink)))
@@ -1328,14 +1857,24 @@ func (pmrlr PremiumMessagingRegionsListResult) premiumMessagingRegionsListResult
// PremiumMessagingRegionsListResultPage contains a page of PremiumMessagingRegions values.
type PremiumMessagingRegionsListResultPage struct {
- fn func(PremiumMessagingRegionsListResult) (PremiumMessagingRegionsListResult, error)
+ fn func(context.Context, PremiumMessagingRegionsListResult) (PremiumMessagingRegionsListResult, error)
pmrlr PremiumMessagingRegionsListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *PremiumMessagingRegionsListResultPage) Next() error {
- next, err := page.fn(page.pmrlr)
+func (page *PremiumMessagingRegionsListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PremiumMessagingRegionsListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.pmrlr)
if err != nil {
return err
}
@@ -1343,6 +1882,13 @@ func (page *PremiumMessagingRegionsListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *PremiumMessagingRegionsListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page PremiumMessagingRegionsListResultPage) NotDone() bool {
return !page.pmrlr.IsEmpty()
@@ -1361,16 +1907,21 @@ func (page PremiumMessagingRegionsListResultPage) Values() []PremiumMessagingReg
return *page.pmrlr.Value
}
+// Creates a new instance of the PremiumMessagingRegionsListResultPage type.
+func NewPremiumMessagingRegionsListResultPage(getNextPage func(context.Context, PremiumMessagingRegionsListResult) (PremiumMessagingRegionsListResult, error)) PremiumMessagingRegionsListResultPage {
+ return PremiumMessagingRegionsListResultPage{fn: getNextPage}
+}
+
// PremiumMessagingRegionsProperties ...
type PremiumMessagingRegionsProperties struct {
- // Code - Region code
+ // Code - READ-ONLY; Region code
Code *string `json:"code,omitempty"`
- // FullName - Full name of the region
+ // FullName - READ-ONLY; Full name of the region
FullName *string `json:"fullName,omitempty"`
}
-// RegenerateAccessKeyParameters parameters supplied to the Regenerate Authorization Rule operation, specifies
-// which key neeeds to be reset.
+// RegenerateAccessKeyParameters parameters supplied to the Regenerate Authorization Rule operation,
+// specifies which key needs to be reset.
type RegenerateAccessKeyParameters struct {
// KeyType - The access key to regenerate. Possible values include: 'PrimaryKey', 'SecondaryKey'
KeyType KeyType `json:"keyType,omitempty"`
@@ -1380,11 +1931,11 @@ type RegenerateAccessKeyParameters struct {
// Resource the Resource definition for other than namespace.
type Resource struct {
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -1394,11 +1945,11 @@ type ResourceNamespacePatch struct {
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -1411,15 +1962,6 @@ func (rnp ResourceNamespacePatch) MarshalJSON() ([]byte, error) {
if rnp.Tags != nil {
objectMap["tags"] = rnp.Tags
}
- if rnp.ID != nil {
- objectMap["id"] = rnp.ID
- }
- if rnp.Name != nil {
- objectMap["name"] = rnp.Name
- }
- if rnp.Type != nil {
- objectMap["type"] = rnp.Type
- }
return json.Marshal(objectMap)
}
@@ -1428,11 +1970,11 @@ type Rule struct {
autorest.Response `json:"-"`
// Ruleproperties - Properties of Rule resource
*Ruleproperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -1442,15 +1984,6 @@ func (r Rule) MarshalJSON() ([]byte, error) {
if r.Ruleproperties != nil {
objectMap["properties"] = r.Ruleproperties
}
- if r.ID != nil {
- objectMap["id"] = r.ID
- }
- if r.Name != nil {
- objectMap["name"] = r.Name
- }
- if r.Type != nil {
- objectMap["type"] = r.Type
- }
return json.Marshal(objectMap)
}
@@ -1520,14 +2053,24 @@ type RuleListResultIterator struct {
page RuleListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *RuleListResultIterator) Next() error {
+func (iter *RuleListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RuleListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -1536,6 +2079,13 @@ func (iter *RuleListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *RuleListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter RuleListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -1555,6 +2105,11 @@ func (iter RuleListResultIterator) Value() Rule {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the RuleListResultIterator type.
+func NewRuleListResultIterator(page RuleListResultPage) RuleListResultIterator {
+ return RuleListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (rlr RuleListResult) IsEmpty() bool {
return rlr.Value == nil || len(*rlr.Value) == 0
@@ -1562,11 +2117,11 @@ func (rlr RuleListResult) IsEmpty() bool {
// ruleListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (rlr RuleListResult) ruleListResultPreparer() (*http.Request, error) {
+func (rlr RuleListResult) ruleListResultPreparer(ctx context.Context) (*http.Request, error) {
if rlr.NextLink == nil || len(to.String(rlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(rlr.NextLink)))
@@ -1574,14 +2129,24 @@ func (rlr RuleListResult) ruleListResultPreparer() (*http.Request, error) {
// RuleListResultPage contains a page of Rule values.
type RuleListResultPage struct {
- fn func(RuleListResult) (RuleListResult, error)
+ fn func(context.Context, RuleListResult) (RuleListResult, error)
rlr RuleListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *RuleListResultPage) Next() error {
- next, err := page.fn(page.rlr)
+func (page *RuleListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RuleListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.rlr)
if err != nil {
return err
}
@@ -1589,6 +2154,13 @@ func (page *RuleListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *RuleListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page RuleListResultPage) NotDone() bool {
return !page.rlr.IsEmpty()
@@ -1607,6 +2179,11 @@ func (page RuleListResultPage) Values() []Rule {
return *page.rlr.Value
}
+// Creates a new instance of the RuleListResultPage type.
+func NewRuleListResultPage(getNextPage func(context.Context, RuleListResult) (RuleListResult, error)) RuleListResultPage {
+ return RuleListResultPage{fn: getNextPage}
+}
+
// Ruleproperties description of Rule Resource.
type Ruleproperties struct {
// Action - Represents the filter actions which are allowed for the transformation of a message that have been matched by a filter expression.
@@ -1624,11 +2201,11 @@ type SBAuthorizationRule struct {
autorest.Response `json:"-"`
// SBAuthorizationRuleProperties - AuthorizationRule properties.
*SBAuthorizationRuleProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -1638,15 +2215,6 @@ func (sar SBAuthorizationRule) MarshalJSON() ([]byte, error) {
if sar.SBAuthorizationRuleProperties != nil {
objectMap["properties"] = sar.SBAuthorizationRuleProperties
}
- if sar.ID != nil {
- objectMap["id"] = sar.ID
- }
- if sar.Name != nil {
- objectMap["name"] = sar.Name
- }
- if sar.Type != nil {
- objectMap["type"] = sar.Type
- }
return json.Marshal(objectMap)
}
@@ -1710,20 +2278,31 @@ type SBAuthorizationRuleListResult struct {
NextLink *string `json:"nextLink,omitempty"`
}
-// SBAuthorizationRuleListResultIterator provides access to a complete listing of SBAuthorizationRule values.
+// SBAuthorizationRuleListResultIterator provides access to a complete listing of SBAuthorizationRule
+// values.
type SBAuthorizationRuleListResultIterator struct {
i int
page SBAuthorizationRuleListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *SBAuthorizationRuleListResultIterator) Next() error {
+func (iter *SBAuthorizationRuleListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBAuthorizationRuleListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -1732,6 +2311,13 @@ func (iter *SBAuthorizationRuleListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *SBAuthorizationRuleListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBAuthorizationRuleListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -1751,6 +2337,11 @@ func (iter SBAuthorizationRuleListResultIterator) Value() SBAuthorizationRule {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the SBAuthorizationRuleListResultIterator type.
+func NewSBAuthorizationRuleListResultIterator(page SBAuthorizationRuleListResultPage) SBAuthorizationRuleListResultIterator {
+ return SBAuthorizationRuleListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (sarlr SBAuthorizationRuleListResult) IsEmpty() bool {
return sarlr.Value == nil || len(*sarlr.Value) == 0
@@ -1758,11 +2349,11 @@ func (sarlr SBAuthorizationRuleListResult) IsEmpty() bool {
// sBAuthorizationRuleListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (sarlr SBAuthorizationRuleListResult) sBAuthorizationRuleListResultPreparer() (*http.Request, error) {
+func (sarlr SBAuthorizationRuleListResult) sBAuthorizationRuleListResultPreparer(ctx context.Context) (*http.Request, error) {
if sarlr.NextLink == nil || len(to.String(sarlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(sarlr.NextLink)))
@@ -1770,14 +2361,24 @@ func (sarlr SBAuthorizationRuleListResult) sBAuthorizationRuleListResultPreparer
// SBAuthorizationRuleListResultPage contains a page of SBAuthorizationRule values.
type SBAuthorizationRuleListResultPage struct {
- fn func(SBAuthorizationRuleListResult) (SBAuthorizationRuleListResult, error)
+ fn func(context.Context, SBAuthorizationRuleListResult) (SBAuthorizationRuleListResult, error)
sarlr SBAuthorizationRuleListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *SBAuthorizationRuleListResultPage) Next() error {
- next, err := page.fn(page.sarlr)
+func (page *SBAuthorizationRuleListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBAuthorizationRuleListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.sarlr)
if err != nil {
return err
}
@@ -1785,6 +2386,13 @@ func (page *SBAuthorizationRuleListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SBAuthorizationRuleListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBAuthorizationRuleListResultPage) NotDone() bool {
return !page.sarlr.IsEmpty()
@@ -1803,6 +2411,11 @@ func (page SBAuthorizationRuleListResultPage) Values() []SBAuthorizationRule {
return *page.sarlr.Value
}
+// Creates a new instance of the SBAuthorizationRuleListResultPage type.
+func NewSBAuthorizationRuleListResultPage(getNextPage func(context.Context, SBAuthorizationRuleListResult) (SBAuthorizationRuleListResult, error)) SBAuthorizationRuleListResultPage {
+ return SBAuthorizationRuleListResultPage{fn: getNextPage}
+}
+
// SBAuthorizationRuleProperties authorizationRule properties.
type SBAuthorizationRuleProperties struct {
// Rights - The rights associated with the rule.
@@ -1812,7 +2425,7 @@ type SBAuthorizationRuleProperties struct {
// SBNamespace description of a namespace resource.
type SBNamespace struct {
autorest.Response `json:"-"`
- // Sku - Porperties of Sku
+ // Sku - Properties of Sku
Sku *SBSku `json:"sku,omitempty"`
// SBNamespaceProperties - Properties of the namespace.
*SBNamespaceProperties `json:"properties,omitempty"`
@@ -1820,11 +2433,11 @@ type SBNamespace struct {
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -1843,15 +2456,6 @@ func (sn SBNamespace) MarshalJSON() ([]byte, error) {
if sn.Tags != nil {
objectMap["tags"] = sn.Tags
}
- if sn.ID != nil {
- objectMap["id"] = sn.ID
- }
- if sn.Name != nil {
- objectMap["name"] = sn.Name
- }
- if sn.Type != nil {
- objectMap["type"] = sn.Type
- }
return json.Marshal(objectMap)
}
@@ -1948,14 +2552,24 @@ type SBNamespaceListResultIterator struct {
page SBNamespaceListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *SBNamespaceListResultIterator) Next() error {
+func (iter *SBNamespaceListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBNamespaceListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -1964,6 +2578,13 @@ func (iter *SBNamespaceListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *SBNamespaceListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBNamespaceListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -1983,6 +2604,11 @@ func (iter SBNamespaceListResultIterator) Value() SBNamespace {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the SBNamespaceListResultIterator type.
+func NewSBNamespaceListResultIterator(page SBNamespaceListResultPage) SBNamespaceListResultIterator {
+ return SBNamespaceListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (snlr SBNamespaceListResult) IsEmpty() bool {
return snlr.Value == nil || len(*snlr.Value) == 0
@@ -1990,11 +2616,11 @@ func (snlr SBNamespaceListResult) IsEmpty() bool {
// sBNamespaceListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (snlr SBNamespaceListResult) sBNamespaceListResultPreparer() (*http.Request, error) {
+func (snlr SBNamespaceListResult) sBNamespaceListResultPreparer(ctx context.Context) (*http.Request, error) {
if snlr.NextLink == nil || len(to.String(snlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(snlr.NextLink)))
@@ -2002,14 +2628,24 @@ func (snlr SBNamespaceListResult) sBNamespaceListResultPreparer() (*http.Request
// SBNamespaceListResultPage contains a page of SBNamespace values.
type SBNamespaceListResultPage struct {
- fn func(SBNamespaceListResult) (SBNamespaceListResult, error)
+ fn func(context.Context, SBNamespaceListResult) (SBNamespaceListResult, error)
snlr SBNamespaceListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *SBNamespaceListResultPage) Next() error {
- next, err := page.fn(page.snlr)
+func (page *SBNamespaceListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBNamespaceListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.snlr)
if err != nil {
return err
}
@@ -2017,6 +2653,13 @@ func (page *SBNamespaceListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SBNamespaceListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBNamespaceListResultPage) NotDone() bool {
return !page.snlr.IsEmpty()
@@ -2035,23 +2678,34 @@ func (page SBNamespaceListResultPage) Values() []SBNamespace {
return *page.snlr.Value
}
+// Creates a new instance of the SBNamespaceListResultPage type.
+func NewSBNamespaceListResultPage(getNextPage func(context.Context, SBNamespaceListResult) (SBNamespaceListResult, error)) SBNamespaceListResultPage {
+ return SBNamespaceListResultPage{fn: getNextPage}
+}
+
+// SBNamespaceMigrate namespace Migrate Object
+type SBNamespaceMigrate struct {
+ // TargetNamespaceType - Type of namespaces. Possible values include: 'Messaging', 'NotificationHub', 'Mixed', 'EventHub', 'Relay'
+ TargetNamespaceType NameSpaceType `json:"targetNamespaceType,omitempty"`
+}
+
// SBNamespaceProperties properties of the namespace.
type SBNamespaceProperties struct {
- // ProvisioningState - Provisioning state of the namespace.
+ // ProvisioningState - READ-ONLY; Provisioning state of the namespace.
ProvisioningState *string `json:"provisioningState,omitempty"`
- // CreatedAt - The time the namespace was created.
+ // CreatedAt - READ-ONLY; The time the namespace was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
- // UpdatedAt - The time the namespace was updated.
+ // UpdatedAt - READ-ONLY; The time the namespace was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
- // ServiceBusEndpoint - Endpoint you can use to perform Service Bus operations.
+ // ServiceBusEndpoint - READ-ONLY; Endpoint you can use to perform Service Bus operations.
ServiceBusEndpoint *string `json:"serviceBusEndpoint,omitempty"`
- // MetricID - Identifier for Azure Insights metrics
+ // MetricID - READ-ONLY; Identifier for Azure Insights metrics
MetricID *string `json:"metricId,omitempty"`
}
// SBNamespaceUpdateParameters description of a namespace resource.
type SBNamespaceUpdateParameters struct {
- // Sku - Porperties of Sku
+ // Sku - Properties of Sku
Sku *SBSku `json:"sku,omitempty"`
// SBNamespaceProperties - Properties of the namespace.
*SBNamespaceProperties `json:"properties,omitempty"`
@@ -2059,11 +2713,11 @@ type SBNamespaceUpdateParameters struct {
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -2082,15 +2736,6 @@ func (snup SBNamespaceUpdateParameters) MarshalJSON() ([]byte, error) {
if snup.Tags != nil {
objectMap["tags"] = snup.Tags
}
- if snup.ID != nil {
- objectMap["id"] = snup.ID
- }
- if snup.Name != nil {
- objectMap["name"] = snup.Name
- }
- if snup.Type != nil {
- objectMap["type"] = snup.Type
- }
return json.Marshal(objectMap)
}
@@ -2177,11 +2822,11 @@ type SBQueue struct {
autorest.Response `json:"-"`
// SBQueueProperties - Queue Properties
*SBQueueProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -2191,15 +2836,6 @@ func (sq SBQueue) MarshalJSON() ([]byte, error) {
if sq.SBQueueProperties != nil {
objectMap["properties"] = sq.SBQueueProperties
}
- if sq.ID != nil {
- objectMap["id"] = sq.ID
- }
- if sq.Name != nil {
- objectMap["name"] = sq.Name
- }
- if sq.Type != nil {
- objectMap["type"] = sq.Type
- }
return json.Marshal(objectMap)
}
@@ -2269,14 +2905,24 @@ type SBQueueListResultIterator struct {
page SBQueueListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *SBQueueListResultIterator) Next() error {
+func (iter *SBQueueListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBQueueListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -2285,6 +2931,13 @@ func (iter *SBQueueListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *SBQueueListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBQueueListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -2304,6 +2957,11 @@ func (iter SBQueueListResultIterator) Value() SBQueue {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the SBQueueListResultIterator type.
+func NewSBQueueListResultIterator(page SBQueueListResultPage) SBQueueListResultIterator {
+ return SBQueueListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (sqlr SBQueueListResult) IsEmpty() bool {
return sqlr.Value == nil || len(*sqlr.Value) == 0
@@ -2311,11 +2969,11 @@ func (sqlr SBQueueListResult) IsEmpty() bool {
// sBQueueListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (sqlr SBQueueListResult) sBQueueListResultPreparer() (*http.Request, error) {
+func (sqlr SBQueueListResult) sBQueueListResultPreparer(ctx context.Context) (*http.Request, error) {
if sqlr.NextLink == nil || len(to.String(sqlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(sqlr.NextLink)))
@@ -2323,14 +2981,24 @@ func (sqlr SBQueueListResult) sBQueueListResultPreparer() (*http.Request, error)
// SBQueueListResultPage contains a page of SBQueue values.
type SBQueueListResultPage struct {
- fn func(SBQueueListResult) (SBQueueListResult, error)
+ fn func(context.Context, SBQueueListResult) (SBQueueListResult, error)
sqlr SBQueueListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *SBQueueListResultPage) Next() error {
- next, err := page.fn(page.sqlr)
+func (page *SBQueueListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBQueueListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.sqlr)
if err != nil {
return err
}
@@ -2338,6 +3006,13 @@ func (page *SBQueueListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SBQueueListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBQueueListResultPage) NotDone() bool {
return !page.sqlr.IsEmpty()
@@ -2356,19 +3031,24 @@ func (page SBQueueListResultPage) Values() []SBQueue {
return *page.sqlr.Value
}
+// Creates a new instance of the SBQueueListResultPage type.
+func NewSBQueueListResultPage(getNextPage func(context.Context, SBQueueListResult) (SBQueueListResult, error)) SBQueueListResultPage {
+ return SBQueueListResultPage{fn: getNextPage}
+}
+
// SBQueueProperties the Queue Properties definition.
type SBQueueProperties struct {
- // CountDetails - Message Count Details.
+ // CountDetails - READ-ONLY; Message Count Details.
CountDetails *MessageCountDetails `json:"countDetails,omitempty"`
- // CreatedAt - The exact time the message was created.
+ // CreatedAt - READ-ONLY; The exact time the message was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
- // UpdatedAt - The exact time the message was updated.
+ // UpdatedAt - READ-ONLY; The exact time the message was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
- // AccessedAt - Last time a message was sent, or the last time there was a receive request to this queue.
+ // AccessedAt - READ-ONLY; Last time a message was sent, or the last time there was a receive request to this queue.
AccessedAt *date.Time `json:"accessedAt,omitempty"`
- // SizeInBytes - The size of the queue, in bytes.
+ // SizeInBytes - READ-ONLY; The size of the queue, in bytes.
SizeInBytes *int64 `json:"sizeInBytes,omitempty"`
- // MessageCount - The number of messages in the queue.
+ // MessageCount - READ-ONLY; The number of messages in the queue.
MessageCount *int64 `json:"messageCount,omitempty"`
// LockDuration - ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default value is 1 minute.
LockDuration *string `json:"lockDuration,omitempty"`
@@ -2417,11 +3097,11 @@ type SBSubscription struct {
autorest.Response `json:"-"`
// SBSubscriptionProperties - Properties of subscriptions resource.
*SBSubscriptionProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -2431,15 +3111,6 @@ func (ss SBSubscription) MarshalJSON() ([]byte, error) {
if ss.SBSubscriptionProperties != nil {
objectMap["properties"] = ss.SBSubscriptionProperties
}
- if ss.ID != nil {
- objectMap["id"] = ss.ID
- }
- if ss.Name != nil {
- objectMap["name"] = ss.Name
- }
- if ss.Type != nil {
- objectMap["type"] = ss.Type
- }
return json.Marshal(objectMap)
}
@@ -2509,14 +3180,24 @@ type SBSubscriptionListResultIterator struct {
page SBSubscriptionListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *SBSubscriptionListResultIterator) Next() error {
+func (iter *SBSubscriptionListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBSubscriptionListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -2525,6 +3206,13 @@ func (iter *SBSubscriptionListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *SBSubscriptionListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBSubscriptionListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -2544,6 +3232,11 @@ func (iter SBSubscriptionListResultIterator) Value() SBSubscription {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the SBSubscriptionListResultIterator type.
+func NewSBSubscriptionListResultIterator(page SBSubscriptionListResultPage) SBSubscriptionListResultIterator {
+ return SBSubscriptionListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (sslr SBSubscriptionListResult) IsEmpty() bool {
return sslr.Value == nil || len(*sslr.Value) == 0
@@ -2551,11 +3244,11 @@ func (sslr SBSubscriptionListResult) IsEmpty() bool {
// sBSubscriptionListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (sslr SBSubscriptionListResult) sBSubscriptionListResultPreparer() (*http.Request, error) {
+func (sslr SBSubscriptionListResult) sBSubscriptionListResultPreparer(ctx context.Context) (*http.Request, error) {
if sslr.NextLink == nil || len(to.String(sslr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(sslr.NextLink)))
@@ -2563,14 +3256,24 @@ func (sslr SBSubscriptionListResult) sBSubscriptionListResultPreparer() (*http.R
// SBSubscriptionListResultPage contains a page of SBSubscription values.
type SBSubscriptionListResultPage struct {
- fn func(SBSubscriptionListResult) (SBSubscriptionListResult, error)
+ fn func(context.Context, SBSubscriptionListResult) (SBSubscriptionListResult, error)
sslr SBSubscriptionListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *SBSubscriptionListResultPage) Next() error {
- next, err := page.fn(page.sslr)
+func (page *SBSubscriptionListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBSubscriptionListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.sslr)
if err != nil {
return err
}
@@ -2578,6 +3281,13 @@ func (page *SBSubscriptionListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SBSubscriptionListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBSubscriptionListResultPage) NotDone() bool {
return !page.sslr.IsEmpty()
@@ -2596,17 +3306,22 @@ func (page SBSubscriptionListResultPage) Values() []SBSubscription {
return *page.sslr.Value
}
+// Creates a new instance of the SBSubscriptionListResultPage type.
+func NewSBSubscriptionListResultPage(getNextPage func(context.Context, SBSubscriptionListResult) (SBSubscriptionListResult, error)) SBSubscriptionListResultPage {
+ return SBSubscriptionListResultPage{fn: getNextPage}
+}
+
// SBSubscriptionProperties description of Subscription Resource.
type SBSubscriptionProperties struct {
- // MessageCount - Number of messages.
+ // MessageCount - READ-ONLY; Number of messages.
MessageCount *int64 `json:"messageCount,omitempty"`
- // CreatedAt - Exact time the message was created.
+ // CreatedAt - READ-ONLY; Exact time the message was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
- // AccessedAt - Last time there was a receive request to this subscription.
+ // AccessedAt - READ-ONLY; Last time there was a receive request to this subscription.
AccessedAt *date.Time `json:"accessedAt,omitempty"`
- // UpdatedAt - The exact time the message was updated.
+ // UpdatedAt - READ-ONLY; The exact time the message was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
- // CountDetails - Message count details
+ // CountDetails - READ-ONLY; Message count details
CountDetails *MessageCountDetails `json:"countDetails,omitempty"`
// LockDuration - ISO 8061 lock duration timespan for the subscription. The default value is 1 minute.
LockDuration *string `json:"lockDuration,omitempty"`
@@ -2639,11 +3354,11 @@ type SBTopic struct {
autorest.Response `json:"-"`
// SBTopicProperties - Properties of topic resource.
*SBTopicProperties `json:"properties,omitempty"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -2653,15 +3368,6 @@ func (st SBTopic) MarshalJSON() ([]byte, error) {
if st.SBTopicProperties != nil {
objectMap["properties"] = st.SBTopicProperties
}
- if st.ID != nil {
- objectMap["id"] = st.ID
- }
- if st.Name != nil {
- objectMap["name"] = st.Name
- }
- if st.Type != nil {
- objectMap["type"] = st.Type
- }
return json.Marshal(objectMap)
}
@@ -2731,14 +3437,24 @@ type SBTopicListResultIterator struct {
page SBTopicListResultPage
}
-// Next advances to the next value. If there was an error making
+// NextWithContext advances to the next value. If there was an error making
// the request the iterator does not advance and the error is returned.
-func (iter *SBTopicListResultIterator) Next() error {
+func (iter *SBTopicListResultIterator) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBTopicListResultIterator.NextWithContext")
+ defer func() {
+ sc := -1
+ if iter.Response().Response.Response != nil {
+ sc = iter.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
iter.i++
if iter.i < len(iter.page.Values()) {
return nil
}
- err := iter.page.Next()
+ err = iter.page.NextWithContext(ctx)
if err != nil {
iter.i--
return err
@@ -2747,6 +3463,13 @@ func (iter *SBTopicListResultIterator) Next() error {
return nil
}
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (iter *SBTopicListResultIterator) Next() error {
+ return iter.NextWithContext(context.Background())
+}
+
// NotDone returns true if the enumeration should be started or is not yet complete.
func (iter SBTopicListResultIterator) NotDone() bool {
return iter.page.NotDone() && iter.i < len(iter.page.Values())
@@ -2766,6 +3489,11 @@ func (iter SBTopicListResultIterator) Value() SBTopic {
return iter.page.Values()[iter.i]
}
+// Creates a new instance of the SBTopicListResultIterator type.
+func NewSBTopicListResultIterator(page SBTopicListResultPage) SBTopicListResultIterator {
+ return SBTopicListResultIterator{page: page}
+}
+
// IsEmpty returns true if the ListResult contains no values.
func (stlr SBTopicListResult) IsEmpty() bool {
return stlr.Value == nil || len(*stlr.Value) == 0
@@ -2773,11 +3501,11 @@ func (stlr SBTopicListResult) IsEmpty() bool {
// sBTopicListResultPreparer prepares a request to retrieve the next set of results.
// It returns nil if no more results exist.
-func (stlr SBTopicListResult) sBTopicListResultPreparer() (*http.Request, error) {
+func (stlr SBTopicListResult) sBTopicListResultPreparer(ctx context.Context) (*http.Request, error) {
if stlr.NextLink == nil || len(to.String(stlr.NextLink)) < 1 {
return nil, nil
}
- return autorest.Prepare(&http.Request{},
+ return autorest.Prepare((&http.Request{}).WithContext(ctx),
autorest.AsJSON(),
autorest.AsGet(),
autorest.WithBaseURL(to.String(stlr.NextLink)))
@@ -2785,14 +3513,24 @@ func (stlr SBTopicListResult) sBTopicListResultPreparer() (*http.Request, error)
// SBTopicListResultPage contains a page of SBTopic values.
type SBTopicListResultPage struct {
- fn func(SBTopicListResult) (SBTopicListResult, error)
+ fn func(context.Context, SBTopicListResult) (SBTopicListResult, error)
stlr SBTopicListResult
}
-// Next advances to the next page of values. If there was an error making
+// NextWithContext advances to the next page of values. If there was an error making
// the request the page does not advance and the error is returned.
-func (page *SBTopicListResultPage) Next() error {
- next, err := page.fn(page.stlr)
+func (page *SBTopicListResultPage) NextWithContext(ctx context.Context) (err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SBTopicListResultPage.NextWithContext")
+ defer func() {
+ sc := -1
+ if page.Response().Response.Response != nil {
+ sc = page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ next, err := page.fn(ctx, page.stlr)
if err != nil {
return err
}
@@ -2800,6 +3538,13 @@ func (page *SBTopicListResultPage) Next() error {
return nil
}
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+// Deprecated: Use NextWithContext() instead.
+func (page *SBTopicListResultPage) Next() error {
+ return page.NextWithContext(context.Background())
+}
+
// NotDone returns true if the page enumeration should be started or is not yet complete.
func (page SBTopicListResultPage) NotDone() bool {
return !page.stlr.IsEmpty()
@@ -2818,19 +3563,24 @@ func (page SBTopicListResultPage) Values() []SBTopic {
return *page.stlr.Value
}
-// SBTopicProperties the Tpoic Properties definition.
+// Creates a new instance of the SBTopicListResultPage type.
+func NewSBTopicListResultPage(getNextPage func(context.Context, SBTopicListResult) (SBTopicListResult, error)) SBTopicListResultPage {
+ return SBTopicListResultPage{fn: getNextPage}
+}
+
+// SBTopicProperties the Topic Properties definition.
type SBTopicProperties struct {
- // SizeInBytes - Size of the topic, in bytes.
+ // SizeInBytes - READ-ONLY; Size of the topic, in bytes.
SizeInBytes *int64 `json:"sizeInBytes,omitempty"`
- // CreatedAt - Exact time the message was created.
+ // CreatedAt - READ-ONLY; Exact time the message was created.
CreatedAt *date.Time `json:"createdAt,omitempty"`
- // UpdatedAt - The exact time the message was updated.
+ // UpdatedAt - READ-ONLY; The exact time the message was updated.
UpdatedAt *date.Time `json:"updatedAt,omitempty"`
- // AccessedAt - Last time the message was sent, or a request was received, for this topic.
+ // AccessedAt - READ-ONLY; Last time the message was sent, or a request was received, for this topic.
AccessedAt *date.Time `json:"accessedAt,omitempty"`
- // SubscriptionCount - Number of subscriptions.
+ // SubscriptionCount - READ-ONLY; Number of subscriptions.
SubscriptionCount *int32 `json:"subscriptionCount,omitempty"`
- // CountDetails - Message count deatils
+ // CountDetails - READ-ONLY; Message count details
CountDetails *MessageCountDetails `json:"countDetails,omitempty"`
// DefaultMessageTimeToLive - ISO 8601 Default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
DefaultMessageTimeToLive *string `json:"defaultMessageTimeToLive,omitempty"`
@@ -2854,12 +3604,12 @@ type SBTopicProperties struct {
EnableExpress *bool `json:"enableExpress,omitempty"`
}
-// SQLFilter represents a filter which is a composition of an expression and an action that is executed in the
-// pub/sub pipeline.
+// SQLFilter represents a filter which is a composition of an expression and an action that is executed in
+// the pub/sub pipeline.
type SQLFilter struct {
// SQLExpression - The SQL expression. e.g. MyProperty='ABC'
SQLExpression *string `json:"sqlExpression,omitempty"`
- // CompatibilityLevel - This property is reserved for future use. An integer value showing the compatibility level, currently hard-coded to 20.
+ // CompatibilityLevel - READ-ONLY; This property is reserved for future use. An integer value showing the compatibility level, currently hard-coded to 20.
CompatibilityLevel *int32 `json:"compatibilityLevel,omitempty"`
// RequiresPreprocessing - Value that indicates whether the rule action requires preprocessing.
RequiresPreprocessing *bool `json:"requiresPreprocessing,omitempty"`
@@ -2876,17 +3626,23 @@ type SQLRuleAction struct {
RequiresPreprocessing *bool `json:"requiresPreprocessing,omitempty"`
}
+// Subnet properties supplied for Subnet
+type Subnet struct {
+ // ID - Resource ID of Virtual Network Subnet
+ ID *string `json:"id,omitempty"`
+}
+
// TrackedResource the Resource definition.
type TrackedResource struct {
// Location - The Geo-location where the resource lives
Location *string `json:"location,omitempty"`
// Tags - Resource tags
Tags map[string]*string `json:"tags"`
- // ID - Resource Id
+ // ID - READ-ONLY; Resource Id
ID *string `json:"id,omitempty"`
- // Name - Resource name
+ // Name - READ-ONLY; Resource name
Name *string `json:"name,omitempty"`
- // Type - Resource type
+ // Type - READ-ONLY; Resource type
Type *string `json:"type,omitempty"`
}
@@ -2899,14 +3655,195 @@ func (tr TrackedResource) MarshalJSON() ([]byte, error) {
if tr.Tags != nil {
objectMap["tags"] = tr.Tags
}
- if tr.ID != nil {
- objectMap["id"] = tr.ID
+ return json.Marshal(objectMap)
+}
+
+// VirtualNetworkRule single item in a List or Get VirtualNetworkRules operation
+type VirtualNetworkRule struct {
+ autorest.Response `json:"-"`
+ // VirtualNetworkRuleProperties - Properties supplied to create or update VirtualNetworkRules
+ *VirtualNetworkRuleProperties `json:"properties,omitempty"`
+ // ID - Resource Id
+ ID *string `json:"id,omitempty"`
+ // Name - Resource name
+ Name *string `json:"name,omitempty"`
+ // Type - Resource type
+ Type *string `json:"type,omitempty"`
+}
+
+// MarshalJSON is the custom marshaler for VirtualNetworkRule.
+func (vnr VirtualNetworkRule) MarshalJSON() ([]byte, error) {
+ objectMap := make(map[string]interface{})
+ if vnr.VirtualNetworkRuleProperties != nil {
+ objectMap["properties"] = vnr.VirtualNetworkRuleProperties
+ }
+ if vnr.ID != nil {
+ objectMap["id"] = vnr.ID
}
- if tr.Name != nil {
- objectMap["name"] = tr.Name
+ if vnr.Name != nil {
+ objectMap["name"] = vnr.Name
}
- if tr.Type != nil {
- objectMap["type"] = tr.Type
+ if vnr.Type != nil {
+ objectMap["type"] = vnr.Type
}
return json.Marshal(objectMap)
}
+
+// UnmarshalJSON is the custom unmarshaler for VirtualNetworkRule struct.
+func (vnr *VirtualNetworkRule) UnmarshalJSON(body []byte) error {
+ var m map[string]*json.RawMessage
+ err := json.Unmarshal(body, &m)
+ if err != nil {
+ return err
+ }
+ for k, v := range m {
+ switch k {
+ case "properties":
+ if v != nil {
+ var virtualNetworkRuleProperties VirtualNetworkRuleProperties
+ err = json.Unmarshal(*v, &virtualNetworkRuleProperties)
+ if err != nil {
+ return err
+ }
+ vnr.VirtualNetworkRuleProperties = &virtualNetworkRuleProperties
+ }
+ case "id":
+ if v != nil {
+ var ID string
+ err = json.Unmarshal(*v, &ID)
+ if err != nil {
+ return err
+ }
+ vnr.ID = &ID
+ }
+ case "name":
+ if v != nil {
+ var name string
+ err = json.Unmarshal(*v, &name)
+ if err != nil {
+ return err
+ }
+ vnr.Name = &name
+ }
+ case "type":
+ if v != nil {
+ var typeVar string
+ err = json.Unmarshal(*v, &typeVar)
+ if err != nil {
+ return err
+ }
+ vnr.Type = &typeVar
+ }
+ }
+ }
+
+ return nil
+}
+
+// VirtualNetworkRuleListResult the response from the List namespace operation.
+type VirtualNetworkRuleListResult struct {
+ autorest.Response `json:"-"`
+ // Value - Result of the List VirtualNetwork Rules operation.
+ Value *[]VirtualNetworkRule `json:"value,omitempty"`
+ // NextLink - Link to the next set of results. Not empty if Value contains an incomplete list of VirtualNetwork Rules
+ NextLink *string `json:"nextLink,omitempty"`
+}
+
+// VirtualNetworkRuleListResultIterator provides access to a complete listing of VirtualNetworkRule values.
+type VirtualNetworkRuleListResultIterator struct {
+ i int
+ page VirtualNetworkRuleListResultPage
+}
+
+// Next advances to the next value. If there was an error making
+// the request the iterator does not advance and the error is returned.
+func (iter *VirtualNetworkRuleListResultIterator) Next() error {
+ iter.i++
+ if iter.i < len(iter.page.Values()) {
+ return nil
+ }
+ err := iter.page.Next()
+ if err != nil {
+ iter.i--
+ return err
+ }
+ iter.i = 0
+ return nil
+}
+
+// NotDone returns true if the enumeration should be started or is not yet complete.
+func (iter VirtualNetworkRuleListResultIterator) NotDone() bool {
+ return iter.page.NotDone() && iter.i < len(iter.page.Values())
+}
+
+// Response returns the raw server response from the last page request.
+func (iter VirtualNetworkRuleListResultIterator) Response() VirtualNetworkRuleListResult {
+ return iter.page.Response()
+}
+
+// Value returns the current value or a zero-initialized value if the
+// iterator has advanced beyond the end of the collection.
+func (iter VirtualNetworkRuleListResultIterator) Value() VirtualNetworkRule {
+ if !iter.page.NotDone() {
+ return VirtualNetworkRule{}
+ }
+ return iter.page.Values()[iter.i]
+}
+
+// IsEmpty returns true if the ListResult contains no values.
+func (vnrlr VirtualNetworkRuleListResult) IsEmpty() bool {
+ return vnrlr.Value == nil || len(*vnrlr.Value) == 0
+}
+
+// virtualNetworkRuleListResultPreparer prepares a request to retrieve the next set of results.
+// It returns nil if no more results exist.
+func (vnrlr VirtualNetworkRuleListResult) virtualNetworkRuleListResultPreparer() (*http.Request, error) {
+ if vnrlr.NextLink == nil || len(to.String(vnrlr.NextLink)) < 1 {
+ return nil, nil
+ }
+ return autorest.Prepare(&http.Request{},
+ autorest.AsJSON(),
+ autorest.AsGet(),
+ autorest.WithBaseURL(to.String(vnrlr.NextLink)))
+}
+
+// VirtualNetworkRuleListResultPage contains a page of VirtualNetworkRule values.
+type VirtualNetworkRuleListResultPage struct {
+ fn func(VirtualNetworkRuleListResult) (VirtualNetworkRuleListResult, error)
+ vnrlr VirtualNetworkRuleListResult
+}
+
+// Next advances to the next page of values. If there was an error making
+// the request the page does not advance and the error is returned.
+func (page *VirtualNetworkRuleListResultPage) Next() error {
+ next, err := page.fn(page.vnrlr)
+ if err != nil {
+ return err
+ }
+ page.vnrlr = next
+ return nil
+}
+
+// NotDone returns true if the page enumeration should be started or is not yet complete.
+func (page VirtualNetworkRuleListResultPage) NotDone() bool {
+ return !page.vnrlr.IsEmpty()
+}
+
+// Response returns the raw server response from the last page request.
+func (page VirtualNetworkRuleListResultPage) Response() VirtualNetworkRuleListResult {
+ return page.vnrlr
+}
+
+// Values returns the slice of values for the current page or nil if there are no values.
+func (page VirtualNetworkRuleListResultPage) Values() []VirtualNetworkRule {
+ if page.vnrlr.IsEmpty() {
+ return nil
+ }
+ return *page.vnrlr.Value
+}
+
+// VirtualNetworkRuleProperties properties supplied to create or update VirtualNetworkRules
+type VirtualNetworkRuleProperties struct {
+ // VirtualNetworkSubnetID - Resource ID of Virtual Network Subnet
+ VirtualNetworkSubnetID *string `json:"virtualNetworkSubnetId,omitempty"`
+}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go
index 1bbefa9b..e8bcc473 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/namespaces.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -44,6 +45,16 @@ func NewNamespacesClientWithBaseURI(baseURI string, subscriptionID string) Names
// Parameters:
// parameters - parameters to check availability of the given namespace name
func (client NamespacesClient) CheckNameAvailabilityMethod(ctx context.Context, parameters CheckNameAvailability) (result CheckNameAvailabilityResult, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CheckNameAvailabilityMethod")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: parameters,
Constraints: []validation.Constraint{{Target: "parameters.Name", Name: validation.Null, Rule: true, Chain: nil}}}}); err != nil {
@@ -119,6 +130,16 @@ func (client NamespacesClient) CheckNameAvailabilityMethodResponder(resp *http.R
// namespaceName - the namespace name.
// parameters - parameters supplied to create a namespace resource.
func (client NamespacesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, parameters SBNamespace) (result NamespacesCreateOrUpdateFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -173,10 +194,6 @@ func (client NamespacesClient) CreateOrUpdateSender(req *http.Request) (future N
if err != nil {
return
}
- err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated, http.StatusAccepted))
- if err != nil {
- return
- }
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@@ -198,9 +215,19 @@ func (client NamespacesClient) CreateOrUpdateResponder(resp *http.Response) (res
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
// parameters - the shared access authorization rule.
func (client NamespacesClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters SBAuthorizationRule) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CreateOrUpdateAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -282,11 +309,111 @@ func (client NamespacesClient) CreateOrUpdateAuthorizationRuleResponder(resp *ht
return
}
+// CreateOrUpdateNetworkRuleSet create or update NetworkRuleSet for a Namespace.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// namespaceName - the namespace name
+// parameters - the Namespace IpFilterRule.
+func (client NamespacesClient) CreateOrUpdateNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string, parameters NetworkRuleSet) (result NetworkRuleSet, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.CreateOrUpdateNetworkRuleSet")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: namespaceName,
+ Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("servicebus.NamespacesClient", "CreateOrUpdateNetworkRuleSet", err.Error())
+ }
+
+ req, err := client.CreateOrUpdateNetworkRuleSetPreparer(ctx, resourceGroupName, namespaceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateNetworkRuleSet", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.CreateOrUpdateNetworkRuleSetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateNetworkRuleSet", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.CreateOrUpdateNetworkRuleSetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "CreateOrUpdateNetworkRuleSet", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// CreateOrUpdateNetworkRuleSetPreparer prepares the CreateOrUpdateNetworkRuleSet request.
+func (client NamespacesClient) CreateOrUpdateNetworkRuleSetPreparer(ctx context.Context, resourceGroupName string, namespaceName string, parameters NetworkRuleSet) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "namespaceName": autorest.Encode("path", namespaceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPut(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/networkRuleSets/default", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// CreateOrUpdateNetworkRuleSetSender sends the CreateOrUpdateNetworkRuleSet request. The method will close the
+// http.Response Body if it receives an error.
+func (client NamespacesClient) CreateOrUpdateNetworkRuleSetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req,
+ azure.DoRetryWithRegistration(client.Client))
+}
+
+// CreateOrUpdateNetworkRuleSetResponder handles the response to the CreateOrUpdateNetworkRuleSet request. The method always
+// closes the http.Response Body.
+func (client NamespacesClient) CreateOrUpdateNetworkRuleSetResponder(resp *http.Response) (result NetworkRuleSet, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// Delete deletes an existing namespace. This operation also removes all associated resources under the namespace.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client NamespacesClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string) (result NamespacesDeleteFuture, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response() != nil {
+ sc = result.Response().StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -342,10 +469,6 @@ func (client NamespacesClient) DeleteSender(req *http.Request) (future Namespace
if err != nil {
return
}
- err = autorest.Respond(resp, azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusAccepted, http.StatusNoContent))
- if err != nil {
- return
- }
future.Future, err = azure.NewFutureFromResponse(resp)
return
}
@@ -366,8 +489,18 @@ func (client NamespacesClient) DeleteResponder(resp *http.Response) (result auto
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client NamespacesClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.DeleteAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -448,6 +581,16 @@ func (client NamespacesClient) DeleteAuthorizationRuleResponder(resp *http.Respo
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client NamespacesClient) Get(ctx context.Context, resourceGroupName string, namespaceName string) (result SBNamespace, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -524,8 +667,18 @@ func (client NamespacesClient) GetResponder(resp *http.Response) (result SBNames
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client NamespacesClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.GetAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -602,8 +755,105 @@ func (client NamespacesClient) GetAuthorizationRuleResponder(resp *http.Response
return
}
+// GetNetworkRuleSet gets NetworkRuleSet for a Namespace.
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// namespaceName - the namespace name
+func (client NamespacesClient) GetNetworkRuleSet(ctx context.Context, resourceGroupName string, namespaceName string) (result NetworkRuleSet, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.GetNetworkRuleSet")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: namespaceName,
+ Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("servicebus.NamespacesClient", "GetNetworkRuleSet", err.Error())
+ }
+
+ req, err := client.GetNetworkRuleSetPreparer(ctx, resourceGroupName, namespaceName)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetNetworkRuleSet", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.GetNetworkRuleSetSender(req)
+ if err != nil {
+ result.Response = autorest.Response{Response: resp}
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetNetworkRuleSet", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.GetNetworkRuleSetResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "GetNetworkRuleSet", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// GetNetworkRuleSetPreparer prepares the GetNetworkRuleSet request.
+func (client NamespacesClient) GetNetworkRuleSetPreparer(ctx context.Context, resourceGroupName string, namespaceName string) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "namespaceName": autorest.Encode("path", namespaceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsGet(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/networkRuleSets/default", pathParameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// GetNetworkRuleSetSender sends the GetNetworkRuleSet request. The method will close the
+// http.Response Body if it receives an error.
+func (client NamespacesClient) GetNetworkRuleSetSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req,
+ azure.DoRetryWithRegistration(client.Client))
+}
+
+// GetNetworkRuleSetResponder handles the response to the GetNetworkRuleSet request. The method always
+// closes the http.Response Body.
+func (client NamespacesClient) GetNetworkRuleSetResponder(resp *http.Response) (result NetworkRuleSet, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByUnmarshallingJSON(&result),
+ autorest.ByClosing())
+ result.Response = autorest.Response{Response: resp}
+ return
+}
+
// List gets all the available namespaces within the subscription, irrespective of the resource groups.
func (client NamespacesClient) List(ctx context.Context) (result SBNamespaceListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.List")
+ defer func() {
+ sc := -1
+ if result.snlr.Response.Response != nil {
+ sc = result.snlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
@@ -666,8 +916,8 @@ func (client NamespacesClient) ListResponder(resp *http.Response) (result SBName
}
// listNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listNextResults(lastResults SBNamespaceListResult) (result SBNamespaceListResult, err error) {
- req, err := lastResults.sBNamespaceListResultPreparer()
+func (client NamespacesClient) listNextResults(ctx context.Context, lastResults SBNamespaceListResult) (result SBNamespaceListResult, err error) {
+ req, err := lastResults.sBNamespaceListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -688,6 +938,16 @@ func (client NamespacesClient) listNextResults(lastResults SBNamespaceListResult
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client NamespacesClient) ListComplete(ctx context.Context) (result SBNamespaceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx)
return
}
@@ -697,6 +957,16 @@ func (client NamespacesClient) ListComplete(ctx context.Context) (result SBNames
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
func (client NamespacesClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string) (result SBAuthorizationRuleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.sarlr.Response.Response != nil {
+ sc = result.sarlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -771,8 +1041,8 @@ func (client NamespacesClient) ListAuthorizationRulesResponder(resp *http.Respon
}
// listAuthorizationRulesNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listAuthorizationRulesNextResults(lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
- req, err := lastResults.sBAuthorizationRuleListResultPreparer()
+func (client NamespacesClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
+ req, err := lastResults.sBAuthorizationRuleListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request")
}
@@ -793,6 +1063,16 @@ func (client NamespacesClient) listAuthorizationRulesNextResults(lastResults SBA
// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required.
func (client NamespacesClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string) (result SBAuthorizationRuleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName)
return
}
@@ -801,6 +1081,16 @@ func (client NamespacesClient) ListAuthorizationRulesComplete(ctx context.Contex
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
func (client NamespacesClient) ListByResourceGroup(ctx context.Context, resourceGroupName string) (result SBNamespaceListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.snlr.Response.Response != nil {
+ sc = result.snlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -871,8 +1161,8 @@ func (client NamespacesClient) ListByResourceGroupResponder(resp *http.Response)
}
// listByResourceGroupNextResults retrieves the next set of results, if any.
-func (client NamespacesClient) listByResourceGroupNextResults(lastResults SBNamespaceListResult) (result SBNamespaceListResult, err error) {
- req, err := lastResults.sBNamespaceListResultPreparer()
+func (client NamespacesClient) listByResourceGroupNextResults(ctx context.Context, lastResults SBNamespaceListResult) (result SBNamespaceListResult, err error) {
+ req, err := lastResults.sBNamespaceListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "listByResourceGroupNextResults", nil, "Failure preparing next results request")
}
@@ -893,6 +1183,16 @@ func (client NamespacesClient) listByResourceGroupNextResults(lastResults SBName
// ListByResourceGroupComplete enumerates all values, automatically crossing page boundaries as required.
func (client NamespacesClient) ListByResourceGroupComplete(ctx context.Context, resourceGroupName string) (result SBNamespaceListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListByResourceGroup")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListByResourceGroup(ctx, resourceGroupName)
return
}
@@ -901,8 +1201,18 @@ func (client NamespacesClient) ListByResourceGroupComplete(ctx context.Context,
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client NamespacesClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -979,13 +1289,112 @@ func (client NamespacesClient) ListKeysResponder(resp *http.Response) (result Ac
return
}
+// Migrate this operation Migrate the given namespace to provided name type
+// Parameters:
+// resourceGroupName - name of the Resource group within the Azure subscription.
+// namespaceName - the namespace name
+// parameters - parameters supplied to migrate namespace type.
+func (client NamespacesClient) Migrate(ctx context.Context, resourceGroupName string, namespaceName string, parameters SBNamespaceMigrate) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Migrate")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
+ if err := validation.Validate([]validation.Validation{
+ {TargetValue: resourceGroupName,
+ Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
+ {Target: "resourceGroupName", Name: validation.MinLength, Rule: 1, Chain: nil}}},
+ {TargetValue: namespaceName,
+ Constraints: []validation.Constraint{{Target: "namespaceName", Name: validation.MaxLength, Rule: 50, Chain: nil},
+ {Target: "namespaceName", Name: validation.MinLength, Rule: 6, Chain: nil}}}}); err != nil {
+ return result, validation.NewError("servicebus.NamespacesClient", "Migrate", err.Error())
+ }
+
+ req, err := client.MigratePreparer(ctx, resourceGroupName, namespaceName, parameters)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Migrate", nil, "Failure preparing request")
+ return
+ }
+
+ resp, err := client.MigrateSender(req)
+ if err != nil {
+ result.Response = resp
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Migrate", resp, "Failure sending request")
+ return
+ }
+
+ result, err = client.MigrateResponder(resp)
+ if err != nil {
+ err = autorest.NewErrorWithError(err, "servicebus.NamespacesClient", "Migrate", resp, "Failure responding to request")
+ }
+
+ return
+}
+
+// MigratePreparer prepares the Migrate request.
+func (client NamespacesClient) MigratePreparer(ctx context.Context, resourceGroupName string, namespaceName string, parameters SBNamespaceMigrate) (*http.Request, error) {
+ pathParameters := map[string]interface{}{
+ "namespaceName": autorest.Encode("path", namespaceName),
+ "resourceGroupName": autorest.Encode("path", resourceGroupName),
+ "subscriptionId": autorest.Encode("path", client.SubscriptionID),
+ }
+
+ const APIVersion = "2017-04-01"
+ queryParameters := map[string]interface{}{
+ "api-version": APIVersion,
+ }
+
+ preparer := autorest.CreatePreparer(
+ autorest.AsContentType("application/json; charset=utf-8"),
+ autorest.AsPost(),
+ autorest.WithBaseURL(client.BaseURI),
+ autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ServiceBus/namespaces/{namespaceName}/migrate", pathParameters),
+ autorest.WithJSON(parameters),
+ autorest.WithQueryParameters(queryParameters))
+ return preparer.Prepare((&http.Request{}).WithContext(ctx))
+}
+
+// MigrateSender sends the Migrate request. The method will close the
+// http.Response Body if it receives an error.
+func (client NamespacesClient) MigrateSender(req *http.Request) (*http.Response, error) {
+ return autorest.SendWithSender(client, req,
+ azure.DoRetryWithRegistration(client.Client))
+}
+
+// MigrateResponder handles the response to the Migrate request. The method always
+// closes the http.Response Body.
+func (client NamespacesClient) MigrateResponder(resp *http.Response) (result autorest.Response, err error) {
+ err = autorest.Respond(
+ resp,
+ client.ByInspecting(),
+ azure.WithErrorUnlessStatusCode(http.StatusOK),
+ autorest.ByClosing())
+ result.Response = resp
+ return
+}
+
// RegenerateKeys regenerates the primary or secondary connection strings for the namespace.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
// parameters - parameters supplied to regenerate the authorization rule.
func (client NamespacesClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.RegenerateKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -1071,6 +1480,16 @@ func (client NamespacesClient) RegenerateKeysResponder(resp *http.Response) (res
// namespaceName - the namespace name
// parameters - parameters supplied to update a namespace resource.
func (client NamespacesClient) Update(ctx context.Context, resourceGroupName string, namespaceName string, parameters SBNamespaceUpdateParameters) (result SBNamespace, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/NamespacesClient.Update")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/operations.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/operations.go
index 3de1195f..a0de46f5 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/operations.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/operations.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -41,6 +42,16 @@ func NewOperationsClientWithBaseURI(baseURI string, subscriptionID string) Opera
// List lists all of the available ServiceBus REST API operations.
func (client OperationsClient) List(ctx context.Context) (result OperationListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.olr.Response.Response != nil {
+ sc = result.olr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
@@ -99,8 +110,8 @@ func (client OperationsClient) ListResponder(resp *http.Response) (result Operat
}
// listNextResults retrieves the next set of results, if any.
-func (client OperationsClient) listNextResults(lastResults OperationListResult) (result OperationListResult, err error) {
- req, err := lastResults.operationListResultPreparer()
+func (client OperationsClient) listNextResults(ctx context.Context, lastResults OperationListResult) (result OperationListResult, err error) {
+ req, err := lastResults.operationListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.OperationsClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -121,6 +132,16 @@ func (client OperationsClient) listNextResults(lastResults OperationListResult)
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client OperationsClient) ListComplete(ctx context.Context) (result OperationListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/OperationsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/premiummessagingregions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/premiummessagingregions.go
index d7c02676..8a1e4b12 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/premiummessagingregions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/premiummessagingregions.go
@@ -21,6 +21,7 @@ import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -41,6 +42,16 @@ func NewPremiumMessagingRegionsClientWithBaseURI(baseURI string, subscriptionID
// List gets the available premium messaging regions for servicebus
func (client PremiumMessagingRegionsClient) List(ctx context.Context) (result PremiumMessagingRegionsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PremiumMessagingRegionsClient.List")
+ defer func() {
+ sc := -1
+ if result.pmrlr.Response.Response != nil {
+ sc = result.pmrlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx)
if err != nil {
@@ -103,8 +114,8 @@ func (client PremiumMessagingRegionsClient) ListResponder(resp *http.Response) (
}
// listNextResults retrieves the next set of results, if any.
-func (client PremiumMessagingRegionsClient) listNextResults(lastResults PremiumMessagingRegionsListResult) (result PremiumMessagingRegionsListResult, err error) {
- req, err := lastResults.premiumMessagingRegionsListResultPreparer()
+func (client PremiumMessagingRegionsClient) listNextResults(ctx context.Context, lastResults PremiumMessagingRegionsListResult) (result PremiumMessagingRegionsListResult, err error) {
+ req, err := lastResults.premiumMessagingRegionsListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.PremiumMessagingRegionsClient", "listNextResults", nil, "Failure preparing next results request")
}
@@ -125,6 +136,16 @@ func (client PremiumMessagingRegionsClient) listNextResults(lastResults PremiumM
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client PremiumMessagingRegionsClient) ListComplete(ctx context.Context) (result PremiumMessagingRegionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/PremiumMessagingRegionsClient.List")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.List(ctx)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go
index 78843ee9..983277f0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/queues.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -47,6 +48,16 @@ func NewQueuesClientWithBaseURI(baseURI string, subscriptionID string) QueuesCli
// queueName - the queue name.
// parameters - parameters supplied to create or update a queue resource.
func (client QueuesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, parameters SBQueue) (result SBQueue, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -129,9 +140,19 @@ func (client QueuesClient) CreateOrUpdateResponder(resp *http.Response) (result
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// queueName - the queue name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
// parameters - the shared access authorization rule.
func (client QueuesClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters SBAuthorizationRule) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.CreateOrUpdateAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -222,6 +243,16 @@ func (client QueuesClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.R
// namespaceName - the namespace name
// queueName - the queue name.
func (client QueuesClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, queueName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -301,8 +332,18 @@ func (client QueuesClient) DeleteResponder(resp *http.Response) (result autorest
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// queueName - the queue name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client QueuesClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.DeleteAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -387,6 +428,16 @@ func (client QueuesClient) DeleteAuthorizationRuleResponder(resp *http.Response)
// namespaceName - the namespace name
// queueName - the queue name.
func (client QueuesClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, queueName string) (result SBQueue, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -467,8 +518,18 @@ func (client QueuesClient) GetResponder(resp *http.Response) (result SBQueue, er
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// queueName - the queue name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client QueuesClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.GetAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -554,6 +615,16 @@ func (client QueuesClient) GetAuthorizationRuleResponder(resp *http.Response) (r
// namespaceName - the namespace name
// queueName - the queue name.
func (client QueuesClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string, queueName string) (result SBAuthorizationRuleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.sarlr.Response.Response != nil {
+ sc = result.sarlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -631,8 +702,8 @@ func (client QueuesClient) ListAuthorizationRulesResponder(resp *http.Response)
}
// listAuthorizationRulesNextResults retrieves the next set of results, if any.
-func (client QueuesClient) listAuthorizationRulesNextResults(lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
- req, err := lastResults.sBAuthorizationRuleListResultPreparer()
+func (client QueuesClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
+ req, err := lastResults.sBAuthorizationRuleListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request")
}
@@ -653,6 +724,16 @@ func (client QueuesClient) listAuthorizationRulesNextResults(lastResults SBAutho
// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required.
func (client QueuesClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string, queueName string) (result SBAuthorizationRuleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName, queueName)
return
}
@@ -666,6 +747,16 @@ func (client QueuesClient) ListAuthorizationRulesComplete(ctx context.Context, r
// starting point to use for subsequent calls.
// top - may be used to limit the number of results to the most recent N usageDetails.
func (client QueuesClient) ListByNamespace(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (result SBQueueListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.ListByNamespace")
+ defer func() {
+ sc := -1
+ if result.sqlr.Response.Response != nil {
+ sc = result.sqlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -756,8 +847,8 @@ func (client QueuesClient) ListByNamespaceResponder(resp *http.Response) (result
}
// listByNamespaceNextResults retrieves the next set of results, if any.
-func (client QueuesClient) listByNamespaceNextResults(lastResults SBQueueListResult) (result SBQueueListResult, err error) {
- req, err := lastResults.sBQueueListResultPreparer()
+func (client QueuesClient) listByNamespaceNextResults(ctx context.Context, lastResults SBQueueListResult) (result SBQueueListResult, err error) {
+ req, err := lastResults.sBQueueListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.QueuesClient", "listByNamespaceNextResults", nil, "Failure preparing next results request")
}
@@ -778,6 +869,16 @@ func (client QueuesClient) listByNamespaceNextResults(lastResults SBQueueListRes
// ListByNamespaceComplete enumerates all values, automatically crossing page boundaries as required.
func (client QueuesClient) ListByNamespaceComplete(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (result SBQueueListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.ListByNamespace")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListByNamespace(ctx, resourceGroupName, namespaceName, skip, top)
return
}
@@ -787,8 +888,18 @@ func (client QueuesClient) ListByNamespaceComplete(ctx context.Context, resource
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// queueName - the queue name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client QueuesClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -873,9 +984,19 @@ func (client QueuesClient) ListKeysResponder(resp *http.Response) (result Access
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// queueName - the queue name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
// parameters - parameters supplied to regenerate the authorization rule.
func (client QueuesClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, queueName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/QueuesClient.RegenerateKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/regions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/regions.go
index ac86b2c6..a1fac2ae 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/regions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/regions.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -44,6 +45,16 @@ func NewRegionsClientWithBaseURI(baseURI string, subscriptionID string) RegionsC
// Parameters:
// sku - the sku type.
func (client RegionsClient) ListBySku(ctx context.Context, sku string) (result PremiumMessagingRegionsListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RegionsClient.ListBySku")
+ defer func() {
+ sc := -1
+ if result.pmrlr.Response.Response != nil {
+ sc = result.pmrlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: sku,
Constraints: []validation.Constraint{{Target: "sku", Name: validation.MaxLength, Rule: 50, Chain: nil},
@@ -114,8 +125,8 @@ func (client RegionsClient) ListBySkuResponder(resp *http.Response) (result Prem
}
// listBySkuNextResults retrieves the next set of results, if any.
-func (client RegionsClient) listBySkuNextResults(lastResults PremiumMessagingRegionsListResult) (result PremiumMessagingRegionsListResult, err error) {
- req, err := lastResults.premiumMessagingRegionsListResultPreparer()
+func (client RegionsClient) listBySkuNextResults(ctx context.Context, lastResults PremiumMessagingRegionsListResult) (result PremiumMessagingRegionsListResult, err error) {
+ req, err := lastResults.premiumMessagingRegionsListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.RegionsClient", "listBySkuNextResults", nil, "Failure preparing next results request")
}
@@ -136,6 +147,16 @@ func (client RegionsClient) listBySkuNextResults(lastResults PremiumMessagingReg
// ListBySkuComplete enumerates all values, automatically crossing page boundaries as required.
func (client RegionsClient) ListBySkuComplete(ctx context.Context, sku string) (result PremiumMessagingRegionsListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RegionsClient.ListBySku")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListBySku(ctx, sku)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/rules.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/rules.go
index 4cbe705f..9d0725cf 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/rules.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/rules.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -49,6 +50,16 @@ func NewRulesClientWithBaseURI(baseURI string, subscriptionID string) RulesClien
// ruleName - the rule name.
// parameters - parameters supplied to create a rule.
func (client RulesClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string, ruleName string, parameters Rule) (result Rule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RulesClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -142,6 +153,16 @@ func (client RulesClient) CreateOrUpdateResponder(resp *http.Response) (result R
// subscriptionName - the subscription name.
// ruleName - the rule name.
func (client RulesClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string, ruleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RulesClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -232,6 +253,16 @@ func (client RulesClient) DeleteResponder(resp *http.Response) (result autorest.
// subscriptionName - the subscription name.
// ruleName - the rule name.
func (client RulesClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string, ruleName string) (result Rule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RulesClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -326,6 +357,16 @@ func (client RulesClient) GetResponder(resp *http.Response) (result Rule, err er
// starting point to use for subsequent calls.
// top - may be used to limit the number of results to the most recent N usageDetails.
func (client RulesClient) ListBySubscriptions(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string, skip *int32, top *int32) (result RuleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RulesClient.ListBySubscriptions")
+ defer func() {
+ sc := -1
+ if result.rlr.Response.Response != nil {
+ sc = result.rlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -423,8 +464,8 @@ func (client RulesClient) ListBySubscriptionsResponder(resp *http.Response) (res
}
// listBySubscriptionsNextResults retrieves the next set of results, if any.
-func (client RulesClient) listBySubscriptionsNextResults(lastResults RuleListResult) (result RuleListResult, err error) {
- req, err := lastResults.ruleListResultPreparer()
+func (client RulesClient) listBySubscriptionsNextResults(ctx context.Context, lastResults RuleListResult) (result RuleListResult, err error) {
+ req, err := lastResults.ruleListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.RulesClient", "listBySubscriptionsNextResults", nil, "Failure preparing next results request")
}
@@ -445,6 +486,16 @@ func (client RulesClient) listBySubscriptionsNextResults(lastResults RuleListRes
// ListBySubscriptionsComplete enumerates all values, automatically crossing page boundaries as required.
func (client RulesClient) ListBySubscriptionsComplete(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string, skip *int32, top *int32) (result RuleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/RulesClient.ListBySubscriptions")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListBySubscriptions(ctx, resourceGroupName, namespaceName, topicName, subscriptionName, skip, top)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/subscriptions.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/subscriptions.go
index 1a087cb8..64af03c0 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/subscriptions.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/subscriptions.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -48,6 +49,16 @@ func NewSubscriptionsClientWithBaseURI(baseURI string, subscriptionID string) Su
// subscriptionName - the subscription name.
// parameters - parameters supplied to create a subscription resource.
func (client SubscriptionsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string, parameters SBSubscription) (result SBSubscription, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -136,6 +147,16 @@ func (client SubscriptionsClient) CreateOrUpdateResponder(resp *http.Response) (
// topicName - the topic name.
// subscriptionName - the subscription name.
func (client SubscriptionsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -221,6 +242,16 @@ func (client SubscriptionsClient) DeleteResponder(resp *http.Response) (result a
// topicName - the topic name.
// subscriptionName - the subscription name.
func (client SubscriptionsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, subscriptionName string) (result SBSubscription, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -310,6 +341,16 @@ func (client SubscriptionsClient) GetResponder(resp *http.Response) (result SBSu
// starting point to use for subsequent calls.
// top - may be used to limit the number of results to the most recent N usageDetails.
func (client SubscriptionsClient) ListByTopic(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, skip *int32, top *int32) (result SBSubscriptionListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByTopic")
+ defer func() {
+ sc := -1
+ if result.sslr.Response.Response != nil {
+ sc = result.sslr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -403,8 +444,8 @@ func (client SubscriptionsClient) ListByTopicResponder(resp *http.Response) (res
}
// listByTopicNextResults retrieves the next set of results, if any.
-func (client SubscriptionsClient) listByTopicNextResults(lastResults SBSubscriptionListResult) (result SBSubscriptionListResult, err error) {
- req, err := lastResults.sBSubscriptionListResultPreparer()
+func (client SubscriptionsClient) listByTopicNextResults(ctx context.Context, lastResults SBSubscriptionListResult) (result SBSubscriptionListResult, err error) {
+ req, err := lastResults.sBSubscriptionListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.SubscriptionsClient", "listByTopicNextResults", nil, "Failure preparing next results request")
}
@@ -425,6 +466,16 @@ func (client SubscriptionsClient) listByTopicNextResults(lastResults SBSubscript
// ListByTopicComplete enumerates all values, automatically crossing page boundaries as required.
func (client SubscriptionsClient) ListByTopicComplete(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, skip *int32, top *int32) (result SBSubscriptionListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/SubscriptionsClient.ListByTopic")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListByTopic(ctx, resourceGroupName, namespaceName, topicName, skip, top)
return
}
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go
index ce8b0c1a..d8838672 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/services/servicebus/mgmt/2017-04-01/servicebus/topics.go
@@ -22,6 +22,7 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/validation"
+ "github.com/Azure/go-autorest/tracing"
"net/http"
)
@@ -47,6 +48,16 @@ func NewTopicsClientWithBaseURI(baseURI string, subscriptionID string) TopicsCli
// topicName - the topic name.
// parameters - parameters supplied to create a topic resource.
func (client TopicsClient) CreateOrUpdate(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, parameters SBTopic) (result SBTopic, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.CreateOrUpdate")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -124,14 +135,24 @@ func (client TopicsClient) CreateOrUpdateResponder(resp *http.Response) (result
return
}
-// CreateOrUpdateAuthorizationRule creates an authorizatio rule for the specified topic.
+// CreateOrUpdateAuthorizationRule creates an authorization rule for the specified topic.
// Parameters:
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// topicName - the topic name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
// parameters - the shared access authorization rule.
func (client TopicsClient) CreateOrUpdateAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters SBAuthorizationRule) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.CreateOrUpdateAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -222,6 +243,16 @@ func (client TopicsClient) CreateOrUpdateAuthorizationRuleResponder(resp *http.R
// namespaceName - the namespace name
// topicName - the topic name.
func (client TopicsClient) Delete(ctx context.Context, resourceGroupName string, namespaceName string, topicName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.Delete")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -301,8 +332,18 @@ func (client TopicsClient) DeleteResponder(resp *http.Response) (result autorest
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// topicName - the topic name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client TopicsClient) DeleteAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result autorest.Response, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.DeleteAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response != nil {
+ sc = result.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -387,6 +428,16 @@ func (client TopicsClient) DeleteAuthorizationRuleResponder(resp *http.Response)
// namespaceName - the namespace name
// topicName - the topic name.
func (client TopicsClient) Get(ctx context.Context, resourceGroupName string, namespaceName string, topicName string) (result SBTopic, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.Get")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -467,8 +518,18 @@ func (client TopicsClient) GetResponder(resp *http.Response) (result SBTopic, er
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// topicName - the topic name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client TopicsClient) GetAuthorizationRule(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result SBAuthorizationRule, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.GetAuthorizationRule")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -554,6 +615,16 @@ func (client TopicsClient) GetAuthorizationRuleResponder(resp *http.Response) (r
// namespaceName - the namespace name
// topicName - the topic name.
func (client TopicsClient) ListAuthorizationRules(ctx context.Context, resourceGroupName string, namespaceName string, topicName string) (result SBAuthorizationRuleListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.sarlr.Response.Response != nil {
+ sc = result.sarlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -631,8 +702,8 @@ func (client TopicsClient) ListAuthorizationRulesResponder(resp *http.Response)
}
// listAuthorizationRulesNextResults retrieves the next set of results, if any.
-func (client TopicsClient) listAuthorizationRulesNextResults(lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
- req, err := lastResults.sBAuthorizationRuleListResultPreparer()
+func (client TopicsClient) listAuthorizationRulesNextResults(ctx context.Context, lastResults SBAuthorizationRuleListResult) (result SBAuthorizationRuleListResult, err error) {
+ req, err := lastResults.sBAuthorizationRuleListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "listAuthorizationRulesNextResults", nil, "Failure preparing next results request")
}
@@ -653,6 +724,16 @@ func (client TopicsClient) listAuthorizationRulesNextResults(lastResults SBAutho
// ListAuthorizationRulesComplete enumerates all values, automatically crossing page boundaries as required.
func (client TopicsClient) ListAuthorizationRulesComplete(ctx context.Context, resourceGroupName string, namespaceName string, topicName string) (result SBAuthorizationRuleListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.ListAuthorizationRules")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListAuthorizationRules(ctx, resourceGroupName, namespaceName, topicName)
return
}
@@ -666,6 +747,16 @@ func (client TopicsClient) ListAuthorizationRulesComplete(ctx context.Context, r
// starting point to use for subsequent calls.
// top - may be used to limit the number of results to the most recent N usageDetails.
func (client TopicsClient) ListByNamespace(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (result SBTopicListResultPage, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.ListByNamespace")
+ defer func() {
+ sc := -1
+ if result.stlr.Response.Response != nil {
+ sc = result.stlr.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -756,8 +847,8 @@ func (client TopicsClient) ListByNamespaceResponder(resp *http.Response) (result
}
// listByNamespaceNextResults retrieves the next set of results, if any.
-func (client TopicsClient) listByNamespaceNextResults(lastResults SBTopicListResult) (result SBTopicListResult, err error) {
- req, err := lastResults.sBTopicListResultPreparer()
+func (client TopicsClient) listByNamespaceNextResults(ctx context.Context, lastResults SBTopicListResult) (result SBTopicListResult, err error) {
+ req, err := lastResults.sBTopicListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "servicebus.TopicsClient", "listByNamespaceNextResults", nil, "Failure preparing next results request")
}
@@ -778,6 +869,16 @@ func (client TopicsClient) listByNamespaceNextResults(lastResults SBTopicListRes
// ListByNamespaceComplete enumerates all values, automatically crossing page boundaries as required.
func (client TopicsClient) ListByNamespaceComplete(ctx context.Context, resourceGroupName string, namespaceName string, skip *int32, top *int32) (result SBTopicListResultIterator, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.ListByNamespace")
+ defer func() {
+ sc := -1
+ if result.Response().Response.Response != nil {
+ sc = result.page.Response().Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
result.page, err = client.ListByNamespace(ctx, resourceGroupName, namespaceName, skip, top)
return
}
@@ -787,8 +888,18 @@ func (client TopicsClient) ListByNamespaceComplete(ctx context.Context, resource
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// topicName - the topic name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
func (client TopicsClient) ListKeys(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.ListKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
@@ -873,9 +984,19 @@ func (client TopicsClient) ListKeysResponder(resp *http.Response) (result Access
// resourceGroupName - name of the Resource group within the Azure subscription.
// namespaceName - the namespace name
// topicName - the topic name.
-// authorizationRuleName - the authorizationrule name.
+// authorizationRuleName - the authorization rule name.
// parameters - parameters supplied to regenerate the authorization rule.
func (client TopicsClient) RegenerateKeys(ctx context.Context, resourceGroupName string, namespaceName string, topicName string, authorizationRuleName string, parameters RegenerateAccessKeyParameters) (result AccessKeys, err error) {
+ if tracing.IsEnabled() {
+ ctx = tracing.StartSpan(ctx, fqdn+"/TopicsClient.RegenerateKeys")
+ defer func() {
+ sc := -1
+ if result.Response.Response != nil {
+ sc = result.Response.Response.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ }
if err := validation.Validate([]validation.Validation{
{TargetValue: resourceGroupName,
Constraints: []validation.Constraint{{Target: "resourceGroupName", Name: validation.MaxLength, Rule: 90, Chain: nil},
diff --git a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
index e9aa658b..6ae14a5f 100644
--- a/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
+++ b/vendor/github.com/Azure/azure-sdk-for-go/version/version.go
@@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// Number contains the semantic version of this SDK.
-const Number = "v21.0.0"
+const Number = "v30.1.0"
diff --git a/vendor/github.com/Azure/azure-service-bus-go/.gitignore b/vendor/github.com/Azure/azure-service-bus-go/.gitignore
new file mode 100644
index 00000000..a12a3eed
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/.gitignore
@@ -0,0 +1,29 @@
+# Binaries for programs and plugins
+*.exe
+*.dll
+*.so
+*.dylib
+
+# Project-local configuration
+.env
+
+# Test Infrastructure
+terraform.tfvars
+*.auto.tfvars
+*.tfstate
+*.tfstate.backup
+.terraform/
+.terraform.tfstate.lock.info
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
+.glide/
+
+.idea
+
+vendor/
diff --git a/vendor/github.com/Azure/azure-service-bus-go/.travis.yml b/vendor/github.com/Azure/azure-service-bus-go/.travis.yml
new file mode 100644
index 00000000..b5505a52
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/.travis.yml
@@ -0,0 +1,40 @@
+language: go
+sudo: false
+go:
+ - 1.x
+ - 1.11.x
+
+before_install:
+- cd ${TRAVIS_HOME}
+- go get github.com/mattn/goveralls
+- go get golang.org/x/tools/cmd/cover
+- go get github.com/fzipp/gocyclo
+- go get golang.org/x/lint/golint
+- cd ${TRAVIS_BUILD_DIR}
+
+jobs:
+ include:
+ - stage: integration tests
+ if: type IN (push, cron)
+ env:
+ - secure: S2xZc20LIhEis1gQXTrQID/oezgnpdzEHOfCRAbGsFmnsoyuwoRH4gGPM6Qso9JkJo5yf3qCWzp+yKfH/toK2JSLnU6kNlQeNfc0IJkWXgUgatrBSm1NY9nW56h4J46kFquE3fxgfbVIdZk1RGGzXQksnaW0CvK0+/54QYiyTjwn9CQR5H8pzODgmFeEMsoEqGpTFMWAUE9YlXFvkVFnttNZUeOBfUPrPPvDDg8Q0fRTbH+rDNMXFdMdW/0nmQrCMlEv9EL+wVsTZKiMCXk8pAzx37PNYHZfgibYgvf2QBqN9otTqyP92ZdprweEAMk4lexQL1kvU7k//SRRSL2hxTOxCTXGyZBcLtb9n5Llax/FBxHMrxFDDSQQd4dYMfq1BzHyY0zwJeJ9nDrIxqQEx4qUTRBH577C3PZReERIpd+r5KkDubmkLPItc9t+cAZCBSSbYq+9XLIVnp3Uqx+6bWrfamMHFya1uvn8oKnoyCuMGwyo65kITACFovxu5vGd04NXRi7Pzsq+9kRuZIKulgp6w1tPgj38kanKmHOjhZhhX0crYMnWiUY0qrQTAP1bEk4bh0AhZ/aDb+RoX1fwK5xufkS3RNQyMn35bkFAeXy7eyGKijpGQG1p0J9/pfIqmxMPZsR9VOjQP3Y5vsROFrlZ54xS9cE8s9E8HZkQFBE=
+ - secure: HP6RES1wZGT1+W80mNLJQbj41BfYpI0pCCkJViu61opSetzr+1uzvyzt6J4STl30p9EMgANIp/rubYgEri0qRroqZxIm4RyNJqlKN7EiTh6VOAbCkL5vsWY63U+x6OJhoZSBTwGrQO4yH/nF2+39ED3Fy5m4I5qWyS+W3adEAew/0OweLAQkaSCPvEvYiEZwliWlfzZD7yVZEj2cGzRcvWG0CjzabfCmqvldImfZYU9ZfPNf9SBO6fiu8S8/UBp/fM/qS+Sc6Do3QlZF55Tll+hhbbbIaxWKkeAbyMtmmVTD+b5Mu8+GY8KmQt++ZzvVaO1sAUlazFYkaPLBkHK2pEHxcxK1H9T0nr2kE8Lzo+Ae+gPH5Sne9bEbsc9J1rG67U0GPs8aUSKgJo122SlY+IUWGCMxaLCP6lggkoRqg/1yXQOJNrztjNezqzzaxR2Vln6rXIPLCxrFrMoxjhS0MiEyL2JP/tiUKTSV4EN3xBHH9HUhobUkOs3/qnzl1josRBNiST2MGvfaq5AJJFeAaxbKDHeTBBxxSds+SnM0NKpQBcRUUeYwlVY0ecUD0BK8C5MvKqdJm+uI8m5RfqEzQ+cbJRl1PtRfrVr4Dy+mUibDxGspU4pMTxSJhOASOxAwRMQ7Q1/hd++7CiV28xmPAj9w2trBTJ0VPALooxnN2OU=
+ - secure: eH38UIvLw2kDpvRuT6QkqB4GkJA0df3/Tba6Qy7HluLgIfsJYeq+xgok4oqx1EQlbv4UF5GmeLd5/3HTwB0ljh9XJjGKwQ9lZT5FyuCoM0ovPf+yX+KemhLUvgn7kyAOErvdc+vgzcuw8cAVy2HidVY3lCVEhwvJLimhq4srISAfJu0rtAQTFlzX1fIaDY5zB5tkE2u4VSBqJyLSQgzol7fYV3FGwzByTSmTcF/taxgW3wRtlFBD6v2rMYK+jDrCEYhmS6sy5povvEUwbNX/ouGsmA/2kyzh9NnbFiva1RLPytDCHjf7cJ1blFYBlZJHXKC2/UXwuWtQWlwGXh6ccziLmqn5W13k120hKHOhLxcU82so5eAUMp6mGbxhv4WUhUUfjUmLJDjtzjsTeuQMsamZE1wTbtED0Hj4tr8fXV9d/+IhBksK2WfSm/TjHKMja+aWxZMXJ+AGYsiyetqVc3qtXhWo3o/errIF2/wl6Z40NFMizzzSbT5VS8Xn1/eS/YtVhg99GLHL6a7kXS/sUsK7kedySSmaOoTNkwoKEGMU8egV4P6PqlR0MM8oR3nCZedew1CRkEN/JHKM2ZV+Wp6re9o345rguIhVSlZNP/DEVQ38KuQ9x6cDKFziQaT1LHg743tAGZGlRUfXeN7p9l5xRvtMavYt5zFHG/74d1Y=
+ - secure: U5U8AaP0O/SQdWOfhpGsSCrTxGVi8g+VtyNILy37PEoWgG8dm36XUw7k213m4G6QjZJjhDTsfDkMa2Lfb9DF1yitI2sgECwMnn22XWoPvZUS8JLSbsnMe4kIfZhOqCXbD/SQdyX/oL/5hjJHNPkfJgPpUzfsKCMMrzd4NV6EjxvVuKs8cBecIMt3lIBeOnGEBfMUGQz/gfKXwZ8rMWmX752oVX4bQwQOEMs+stTeESfn2l11olqDyWlzy9Roqz/FEVPeq6d7RTVnJXcwP8BPnc6OM0UpZ2OZbFbVUfvlHywvfq8x31RQpmOidG2TrbbC8XVcEbDLDP78kyR4JgCr+aQz8FChI+aRmpVbNbKdNEHii6XArnygMuzHBqypZIDjDj4zAnODjFmTTuXRvV22691khbdryTDE5nlpilv8icq7HlSWIAdnx2gq9AqpOPYW/5/v1O2A7FSn4wFXKogx37azfDiKSMUIcxrn7rnPAW/M5JTec/k3aVrezLwegzrGOIv4UzLwDSCfd2AjBvAHdy2WZlx71I/awWl3BmSRJA23JaN84+1DkNxUAuRsyES5FOK440c3LZblLs62AThSG5Y1FNufsuRy2WRZV/kjKypuNl5drj2IsmF2+xA7bUmijRobMaq2CwFKXh9mnxmRtCGRnn2sxYibf+fgDn0aUy0=
+ script:
+ - curl -sLo /tmp/terraform.zip https://releases.hashicorp.com/terraform/0.12.4/terraform_0.12.4_linux_amd64.zip
+ - unzip /tmp/terraform.zip -d /tmp
+ - mkdir -p ~/bin
+ - mv /tmp/terraform ~/bin
+ - export PATH="~/bin:$PATH"
+ - export ARM_SUBSCRIPTION_ID=$(echo $AZURE_SUBSCRIPTION_ID)
+ - export ARM_CLIENT_ID=$(echo $AZURE_CLIENT_ID)
+ - export ARM_CLIENT_SECRET=$(echo $AZURE_CLIENT_SECRET)
+ - export ARM_TENANT_ID=$(echo $AZURE_TENANT_ID)
+ - export GO111MODULE=on
+ - make test-cover
+ - goveralls -coverprofile=cover.out -service=travis-ci
+ - make destroy-sb
+script:
+ - export GO111MODULE=on
+ - make
diff --git a/vendor/github.com/Azure/azure-service-bus-go/CONTRIBUTING.md b/vendor/github.com/Azure/azure-service-bus-go/CONTRIBUTING.md
new file mode 100644
index 00000000..77410a63
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/CONTRIBUTING.md
@@ -0,0 +1,87 @@
+# Contributing
+
+## Getting Started
+
+First of all, welcome. Thank you for your interest in furthering Go support for Azure Service Bus.
+
+### Workstation Requirements
+
+The following programs should be installed on your machine before you begin developing.
+
+> Note: Adhering to the linters below is enforced in CI. It is not required to have the tools locally, but contributors
+are expected to fix those issues found in CI.
+
+| Tool | Necessary | Description |
+| :---------------------------------------------------: | :-----: | :---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
+| [go](https://godoc.org) | Required |This is a Go project, and as such you should have Go installed on your machine. We use modules for package management, so we recommend at least using go1.11. However, you may also use go1.9.7+ or go1.10.3+. |
+| [git](https://git-scm.com) | Required |azure-service-bus-go uses Git as its source control management solution. |
+| [az](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli?view=azure-cli-latest) | Optional | The Azure CLI is used for its ability to authenticate against Azure. Tests themselves only need a connection string and other metadata about the Service Bus namespace, but tooling to automatically setup the necessary infrastructure to run the tests may depend on the Azure CLI. |
+| [terraform](https://terraform.io) | Optional | `terraform` is used to help provision the Azure infrastructure needed to run our tests both in CI and on your local machine. If you have already provisioned a Service Bus Namespace, and created the necessary Service Bus Queues, you do not need terraform. |
+| [golint](https://godoc.org/golang.org/x/lint/golint) | Optional |`golint` is a linter that finds basic stylistic mistakes in Go programs. |
+| [gocyclo](https://github.com/fzipp/gocyclo) | Optional |`gocyclo` checks for programmatic complexity, to ensure code readability. |
+| [megacheck](https://honnef.co/go/tools/cmd/megacheck) | Optional | `megacheck` is a linter that checks for a broader set of errors than `go vet` or `golint`. |
+
+#### Editors
+
+Feel free to use your editor of choice for modifying this project! We use both [VS Code](https://code.visualstudio.com)
+and [Goland](https://www.jetbrains.com/go/). Whichever editor you choose, please do not commit any project configuration
+files such as the contents of the `.vscode/` or `.idea/` directories.
+
+### License Agreement
+
+In order for us to accept your contribution, you must have signed the [Microsoft Open Source Contribution License
+Agreement](https://cla.opensource.microsoft.com/Azure/azure-service-bus-go). It only takes a minute, and is attached to
+your GitHub account. Sign once and commit to any Microsoft Open Source Project.
+
+## Running Tests
+
+1. Ensure that you have an App Registration (Service Principal) with a Key setup with access to your subscription.
+ - [Azure AAD Application Documentation](https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-create-service-principal-portal)
+ - [HashiCorp Azure Service Principal Documentation](https://www.terraform.io/docs/providers/azurerm/authenticating_via_service_principal.html)
+1. Set the following environment variables:
+
+ | Name | Contents |
+ | :----------------------------: | :------------------------------------------------------------------------------------------------------------ |
+ | SERVICEBUS_CONNECTION_STRING | The escaped connection string associated with the Service Bus namespace that should be targeted. |
+ | AZURE_CLIENT_ID | The Application ID of the App Registration (i.e. Service Principal) to be used to create test infrastructure. |
+ | AZURE_CLIENT_SECRET | The Key associated with the App Registration to be used to create test infrastructure. |
+ | AZURE_SUBSCRIPTION_ID | The Azure Subscription to be used to run your tests. |
+ | AZURE_TENANT_ID | The UUID used to identify the tenant your Azure Subscription belongs to. |
+ | TEST_SERVICEBUS_RESOURCE_GROUP | The Azure Resource Group that holds the infrastructure needed to run the tests. |
+ | TEST_SERVICEBUS_LOCATION | The Azure Region containing your resource group. (e.g. "eastus", "westus2", etc.) |
+1. Authenticate using the CLI by running the command `az login`.
+ > Note: Alternatively, set environment variables `ARM_CLIENT_ID`, `ARM_CLIENT_SECRET`, `ARM_TENANT_ID`, and `ARM_SUBSCRIPTION_ID` equal to their AZURE counterparts.
+1. Run `terraform apply` to make sure that all of the infrastructure needed to run the tests is available.
+ > Note: you can save values that it asks you for by defining them in a [file named `terraform.tfvars`](https://www.terraform.io/intro/getting-started/variables.html).
+1. Run the tests by executing `go test` from the repository's root directory.
+
+
+
+#### Linux + MacOS
+
+Running the command `make test` will automatically run all linting rules, terraform, and `go test` for you.
+
+
+## Filing Issues
+
+If you feel that you've found a way to improve Azure Service Bus itself, or with this library, feel free to open an
+issue here in GitHub. We'll see that it gets into the hands of the appropriate people, whomever that is.
+
+### Bugs
+
+When filing an issue to bring awareness to a bug, please provide the following information:
+- The OS and Go version you are using. i.e. the output of running `go version`.
+- The version of Azure-Service-Bus-Go you are using.
+
+It also significantly speeds things up if you can provide the minimum amount of code it takes to reproduce the bug in
+the form of a [GitHub Gist](https://gist.github.com) or [Go Playground](https://play.golang.org) snippet.
+
+### Feature Requests
+
+For expanded capabilities, please describe what you'd like to see and whom you believe would benefit.
+
+## Reference
+
+- [Clemens Vaster explains AMQP 1.0 - youtube.com](https://www.youtube.com/playlist?list=PLmE4bZU0qx-wAP02i0I7PJWvDWoCytEjD)
+- [Service Bus AMQP Protocol Guide - docs.microsoft.com](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-amqp-protocol-guide)
+- [.NET Service Bus Library - github.com](https://github.com/Azure/azure-service-bus-dotnet)
diff --git a/vendor/github.com/Azure/azure-service-bus-go/LICENSE b/vendor/github.com/Azure/azure-service-bus-go/LICENSE
new file mode 100644
index 00000000..21071075
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/LICENSE
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE
diff --git a/vendor/github.com/Azure/azure-service-bus-go/Makefile b/vendor/github.com/Azure/azure-service-bus-go/Makefile
new file mode 100644
index 00000000..2b34965c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/Makefile
@@ -0,0 +1,97 @@
+PACKAGE = github.com/Azure/azure-service-bus-go
+DATE ?= $(shell date +%FT%T%z)
+VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \
+ cat $(CURDIR)/.version 2> /dev/null || echo v0)
+BIN = $(GOPATH)/bin
+BASE = $(CURDIR)
+PKGS = $(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... | grep -vE "^$(PACKAGE)/templates/"))
+TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS))
+GO_FILES = find . -iname '*.go' -type f
+
+GO = go
+GODOC = godoc
+GOFMT = gofmt
+GOCYCLO = gocyclo
+
+V = 0
+Q = $(if $(filter 1,$V),,@)
+M = $(shell printf "\033[34;1m▶\033[0m")
+TIMEOUT = 1100
+
+.PHONY: all
+all: fmt lint vet tidy ; $(info $(M) building library…) @ ## Build program
+ $Q cd $(BASE) && $(GO) build -tags release
+
+# Tools
+
+GOLINT = $(BIN)/golint
+$(BIN)/golint: ; $(info $(M) building golint…)
+ $Q go get github.com/golang/lint/golint
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-verbose test-race test-debug test-cover
+.PHONY: $(TEST_TARGETS) test-xml check test tests
+test-bench: ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks
+test-verbose: ARGS=-v ## Run tests in verbose mode
+test-debug: ARGS=-v -debug ## Run tests in verbose mode with debug output
+test-race: ARGS=-race ## Run tests with race detector
+test-cover: ARGS=-cover -coverprofile=cover.out -v ## Run tests in verbose mode with coverage
+$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%)
+$(TEST_TARGETS): test
+check test tests: cyclo lint vet terraform.tfstate; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests
+ $Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) $(TESTPKGS)
+
+.PHONY: vet
+vet: $(GOLINT) ; $(info $(M) running vet…) @ ## Run vet
+ $Q cd $(BASE) && $(GO) vet ./...
+
+.PHONY: tidy
+tidy: ; $(info $(M) running tidy…) @ ## Run tidy
+ $Q $(GO) mod tidy
+
+.PHONY: lint
+lint: $(GOLINT) ; $(info $(M) running golint…) @ ## Run golint
+ $Q cd $(BASE) && ret=0 && for pkg in $(PKGS); do \
+ test -z "$$($(GOLINT) $$pkg | tee /dev/stderr)" || ret=1 ; \
+ done ; exit $$ret
+
+.PHONY: fmt
+fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files
+ @ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./...); do \
+ $(GOFMT) -l -w $$d/*.go || ret=$$? ; \
+ done ; exit $$ret
+
+.PHONY: cyclo
+cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files
+ $Q cd $(BASE) && $(GOCYCLO) -over 19 $$($(GO_FILES))
+
+terraform.tfstate: azuredeploy.tf $(wildcard terraform.tfvars) .terraform ; $(info $(M) running terraform...) @ ## Run terraform to provision infrastructure needed for testing
+ $Q TF_VAR_azure_client_secret="$${ARM_CLIENT_SECRET}" terraform apply -auto-approve
+ $Q terraform output -json | jq -r 'keys[] as $$k | "\($$k) = \(.[$$k].value)"' > .env
+
+.terraform:
+ $Q terraform init
+
+.Phony: destroy-sb
+destroy-sb: ; $(info $(M) running sb destroy...)
+ $(Q) terraform destroy -auto-approve
+
+# Dependency management
+go.sum: go.mod
+ $Q cd $(BASE) && $(GO) mod tidy
+
+# Misc
+
+.PHONY: clean
+clean: ; $(info $(M) cleaning…) @ ## Cleanup everything
+ @rm -rf test/tests.* test/coverage.*
+
+.PHONY: help
+help:
+ @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
+ awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: version
+version:
+ @echo $(VERSION)
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-service-bus-go/README.md b/vendor/github.com/Azure/azure-service-bus-go/README.md
new file mode 100644
index 00000000..b6424c2e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/README.md
@@ -0,0 +1,45 @@
+# Microsoft Azure Service Bus Client for Golang
+[](https://goreportcard.com/report/github.com/Azure/azure-service-bus-go)
+[](https://godoc.org/github.com/Azure/azure-service-bus-go)
+[](https://travis-ci.org/Azure/azure-service-bus-go)
+[](https://coveralls.io/github/Azure/azure-service-bus-go?branch=master)
+
+Microsoft Azure Service Bus is a reliable cloud messaging service (MaaS) which simplifies enterprise cloud messaging. It
+enables developers to build scalable cloud solutions and implement complex messaging workflows over an efficient binary
+protocol called AMQP.
+
+This library provides a simple interface for sending, receiving and managing Service Bus entities such as Queues, Topics
+and Subscriptions.
+
+For more information about Service Bus, check out the [Azure documentation](https://azure.microsoft.com/en-us/services/service-bus/).
+
+This library is a pure Golang implementation of Azure Service Bus over AMQP.
+
+## Preview of Service Bus for Golang
+This library is currently a preview. There may be breaking interface changes until it reaches semantic version `v1.0.0`.
+If you run into an issue, please don't hesitate to log a
+[new issue](https://github.com/Azure/azure-service-bus-go/issues/new) or open a pull request.
+
+## Install using Go modules
+
+``` bash
+go get -u github.com/Azure/azure-service-bus-go
+```
+
+If you need to install Go, follow [the official instructions](https://golang.org/dl/)
+
+### Examples
+
+Find up-to-date examples and documentation on [godoc.org](https://godoc.org/github.com/Azure/azure-service-bus-go#pkg-examples).
+
+### Have questions?
+
+The developers of this library are all active on the [Gopher Slack](https://gophers.slack.com), it is likely easiest to
+get our attention in the [Microsoft Channel](https://gophers.slack.com/messages/C6NH8V2E9). We'll also find your issue
+if you ask on [Stack Overflow](https://stackoverflow.com/questions/tagged/go+azure) with the tags `azure` and `go`.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/azure-service-bus-go/action.go b/vendor/github.com/Azure/azure-service-bus-go/action.go
new file mode 100644
index 00000000..107ed4fd
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/action.go
@@ -0,0 +1,25 @@
+package servicebus
+
+type (
+ // SQLAction represents a SQL language-based action expression that is evaluated against a BrokeredMessage. A
+ // SQLAction supports a subset of the SQL-92 standard.
+ //
+ // With SQL filter conditions, you can define an action that can annotate the message by adding, removing, or
+ // replacing properties and their values. The action uses a SQL-like expression that loosely leans on the SQL
+ // UPDATE statement syntax. The action is performed on the message after it has been matched and before the message
+ // is selected into the subscription. The changes to the message properties are private to the message copied into
+ // the subscription.
+ //
+ // see: https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-messaging-sql-filter
+ SQLAction struct {
+ Expression string
+ }
+)
+
+// ToActionDescription will transform the SqlAction into a ActionDescription
+func (sf SQLAction) ToActionDescription() ActionDescription {
+ return ActionDescription{
+ Type: "SqlRuleAction",
+ SQLExpression: sf.Expression,
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/atom/atom.go b/vendor/github.com/Azure/azure-service-bus-go/atom/atom.go
new file mode 100644
index 00000000..78e3c347
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/atom/atom.go
@@ -0,0 +1,53 @@
+package atom
+
+import (
+ "encoding/xml"
+
+ "github.com/Azure/go-autorest/autorest/date"
+)
+
+type (
+ // Feed is an Atom feed which contains entries
+ Feed struct {
+ XMLName xml.Name `xml:"feed"`
+ ID string `xml:"id"`
+ Title string `xml:"title"`
+ Updated *date.Time `xml:"updated,omitempty"`
+ Entries []Entry `xml:"entry"`
+ }
+
+ // Entry is the Atom wrapper for a management request
+ Entry struct {
+ XMLName xml.Name `xml:"entry"`
+ ID string `xml:"id,omitempty"`
+ Title string `xml:"title,omitempty"`
+ Published *date.Time `xml:"published,omitempty"`
+ Updated *date.Time `xml:"updated,omitempty"`
+ Author *Author `xml:"author,omitempty"`
+ Link *Link `xml:"link,omitempty"`
+ Content *Content `xml:"content"`
+ DataServiceSchema string `xml:"xmlns:d,attr,omitempty"`
+ DataServiceMetadataSchema string `xml:"xmlns:m,attr,omitempty"`
+ AtomSchema string `xml:"xmlns,attr"`
+ }
+
+ // Author is an Atom author used in an entry
+ Author struct {
+ XMLName xml.Name `xml:"author"`
+ Name *string `xml:"name,omitempty"`
+ }
+
+ // Link is an Atom link used in an entry
+ Link struct {
+ XMLName xml.Name `xml:"link"`
+ Rel string `xml:"rel,attr"`
+ HREF string `xml:"href,attr"`
+ }
+
+ // Content is a generic body for an Atom entry
+ Content struct {
+ XMLName xml.Name `xml:"content"`
+ Type string `xml:"type,attr"`
+ Body string `xml:",innerxml"`
+ }
+)
diff --git a/vendor/github.com/Azure/azure-service-bus-go/azuredeploy.tf b/vendor/github.com/Azure/azure-service-bus-go/azuredeploy.tf
new file mode 100644
index 00000000..c54cbac1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/azuredeploy.tf
@@ -0,0 +1,163 @@
+provider "azuread" {
+ version = "~> 0.4"
+}
+
+provider "azurerm" {
+ version = "~> 1.31"
+}
+
+provider "random" {
+ version = "~> 2.1"
+}
+
+variable "location" {
+ description = "Azure datacenter to deploy to."
+ default = "westus2"
+}
+
+variable "servicebus_name_prefix" {
+ description = "Input your unique Azure Service Bus Namespace name"
+ default = "azuresbtests"
+}
+
+variable "resource_group_name_prefix" {
+ description = "Resource group to provision test infrastructure in."
+ default = "servicebus-go-tests"
+}
+
+variable "azure_client_secret" {
+ description = "(Optional) piped in from env var so .env will be updated if there is an existing client secret"
+ default = "foo"
+}
+
+resource "random_string" "name" {
+ length = 8
+ upper = false
+ special = false
+ number = false
+}
+
+# Create resource group for all of the things
+resource "azurerm_resource_group" "test" {
+ name = "${var.resource_group_name_prefix}-${random_string.name.result}"
+ location = var.location
+}
+
+resource "azurerm_servicebus_namespace" "test" {
+ name = "${var.servicebus_name_prefix}-${random_string.name.result}"
+ location = azurerm_resource_group.test.location
+ resource_group_name = azurerm_resource_group.test.name
+ sku = "standard"
+}
+
+# Generate a random secret for the service principal
+resource "random_string" "secret" {
+ count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0
+ length = 32
+ upper = true
+ special = true
+ number = true
+}
+
+// Application for AAD authentication
+resource "azuread_application" "test" {
+ count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0
+ name = "servicebustest"
+ homepage = "https://servicebustest"
+ identifier_uris = ["https://servicebustest"]
+ reply_urls = ["https://servicebustest"]
+ available_to_other_tenants = false
+ oauth2_allow_implicit_flow = true
+}
+
+# Create a service principal, which represents a linkage between the AAD application and the password
+resource "azuread_service_principal" "test" {
+ count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0
+ application_id = azuread_application.test[0].application_id
+}
+
+# Create a new service principal password which will be the AZURE_CLIENT_SECRET env var
+resource "azuread_service_principal_password" "test" {
+ count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0
+ service_principal_id = azuread_service_principal.test[0].id
+ value = random_string.secret[0].result
+ end_date = "2030-01-01T01:02:03Z"
+}
+
+# This provides the new AAD application the rights to manage, send to, and receive from the Service Bus namespace
+resource "azurerm_role_assignment" "service_principal_eh" {
+ count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0
+ scope = "subscriptions/${data.azurerm_client_config.current.subscription_id}/resourceGroups/${azurerm_resource_group.test.name}/providers/Microsoft.ServiceBus/namespaces/${azurerm_servicebus_namespace.test.name}"
+ role_definition_name = "Owner"
+ principal_id = azuread_service_principal.test[0].id
+}
+
+# This provides the new AAD application the rights to manage the resource group
+resource "azurerm_role_assignment" "service_principal_rg" {
+ count = data.azurerm_client_config.current.service_principal_application_id == "" ? 1 : 0
+ scope = "subscriptions/${data.azurerm_client_config.current.subscription_id}/resourceGroups/${azurerm_resource_group.test.name}"
+ role_definition_name = "Owner"
+ principal_id = azuread_service_principal.test[0].id
+}
+
+# Most tests should create and destroy their own Queues, Topics, and Subscriptions. However, to keep examples from being
+# bloated, the items below are created externally by Terraform.
+
+resource "azurerm_servicebus_queue" "scheduledMessages" {
+ name = "scheduledmessages"
+ resource_group_name = azurerm_resource_group.test.name
+ namespace_name = azurerm_servicebus_namespace.test.name
+}
+
+resource "azurerm_servicebus_queue" "queueSchedule" {
+ name = "schedulewithqueue"
+ resource_group_name = azurerm_resource_group.test.name
+ namespace_name = azurerm_servicebus_namespace.test.name
+}
+
+resource "azurerm_servicebus_queue" "helloworld" {
+ name = "helloworld"
+ resource_group_name = azurerm_resource_group.test.name
+ namespace_name = azurerm_servicebus_namespace.test.name
+}
+
+resource "azurerm_servicebus_queue" "receiveSession" {
+ name = "receivesession"
+ resource_group_name = azurerm_resource_group.test.name
+ namespace_name = azurerm_servicebus_namespace.test.name
+ default_message_ttl = "PT300S"
+ requires_session = true
+}
+
+# Data resources used to get SubID and Tenant Info
+data "azurerm_client_config" "current" {}
+
+output "TEST_SERVICEBUS_RESOURCE_GROUP" {
+ value = azurerm_resource_group.test.name
+}
+
+output "SERVICEBUS_CONNECTION_STRING" {
+ value = "Endpoint=sb://${azurerm_servicebus_namespace.test.name}.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=${azurerm_servicebus_namespace.test.default_primary_key}"
+ sensitive = true
+}
+
+output "AZURE_SUBSCRIPTION_ID" {
+ value = data.azurerm_client_config.current.subscription_id
+}
+
+output "TEST_SERVICEBUS_LOCATION" {
+ value = azurerm_servicebus_namespace.test.location
+}
+
+output "AZURE_TENANT_ID" {
+ value = data.azurerm_client_config.current.tenant_id
+}
+
+output "AZURE_CLIENT_ID" {
+ value = compact(concat(azuread_application.test.*.application_id, list(data.azurerm_client_config.current.client_id)))[0]
+}
+
+output "AZURE_CLIENT_SECRET" {
+ value = compact(concat(azuread_service_principal_password.test.*.value, list(var.azure_client_secret)))[0]
+ sensitive = true
+}
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-service-bus-go/batch.go b/vendor/github.com/Azure/azure-service-bus-go/batch.go
new file mode 100644
index 00000000..ec1983c1
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/batch.go
@@ -0,0 +1,168 @@
+package servicebus
+
+import (
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "pack.ag/amqp"
+)
+
+type (
+ // BatchOptions are optional information to add to a batch of messages
+ BatchOptions struct {
+ SessionID *string
+ }
+
+ // BatchIterator offers a simple mechanism for batching a list of messages
+ BatchIterator interface {
+ Done() bool
+ Next(messageID string, opts *BatchOptions) (*MessageBatch, error)
+ }
+
+ // MessageBatchIterator provides an easy way to iterate over a slice of messages to reliably create batches
+ MessageBatchIterator struct {
+ Messages []*Message
+ Cursor int
+ MaxSize MaxMessageSizeInBytes
+ }
+
+ // MessageBatch represents a batch of messages to send to Service Bus in a single message
+ MessageBatch struct {
+ *Message
+ marshaledMessages [][]byte
+ MaxSize MaxMessageSizeInBytes
+ size int
+ }
+
+ // MaxMessageSizeInBytes is the max number of bytes allowed by Azure Service Bus
+ MaxMessageSizeInBytes int
+)
+
+const (
+ // StandardMaxMessageSizeInBytes is the maximum number of bytes in a message for the Standard tier
+ StandardMaxMessageSizeInBytes MaxMessageSizeInBytes = 256000
+ // PremiumMaxMessageSizeInBytes is the maximum number of bytes in a message for the Premium tier
+ PremiumMaxMessageSizeInBytes MaxMessageSizeInBytes = 1000000
+
+ batchMessageFormat uint32 = 0x80013700
+
+ batchMessageWrapperSize = 100
+)
+
+// NewMessageBatchIterator wraps a slice of Message pointers to allow it to be made into a MessageIterator.
+func NewMessageBatchIterator(maxBatchSize MaxMessageSizeInBytes, msgs ...*Message) *MessageBatchIterator {
+ return &MessageBatchIterator{
+ Messages: msgs,
+ MaxSize: maxBatchSize,
+ }
+}
+
+// Done communicates whether there are more messages remaining to be iterated over.
+func (mbi *MessageBatchIterator) Done() bool {
+ return len(mbi.Messages) == mbi.Cursor
+}
+
+// Next fetches the batch of messages in the message slice at a position one larger than the last one accessed.
+func (mbi *MessageBatchIterator) Next(messageID string, opts *BatchOptions) (*MessageBatch, error) {
+ if mbi.Done() {
+ return nil, ErrNoMessages{}
+ }
+
+ if opts == nil {
+ opts = &BatchOptions{}
+ }
+
+ mb := NewMessageBatch(mbi.MaxSize, messageID, opts)
+ for mbi.Cursor < len(mbi.Messages) {
+ ok, err := mb.Add(mbi.Messages[mbi.Cursor])
+ if err != nil {
+ return nil, err
+ }
+
+ if !ok {
+ return mb, nil
+ }
+ mbi.Cursor++
+ }
+ return mb, nil
+}
+
+// NewMessageBatch builds a new message batch with a default standard max message size
+func NewMessageBatch(maxSize MaxMessageSizeInBytes, messageID string, opts *BatchOptions) *MessageBatch {
+ if opts == nil {
+ opts = &BatchOptions{}
+ }
+
+ mb := &MessageBatch{
+ MaxSize: maxSize,
+ Message: &Message{
+ ID: messageID,
+ SessionID: opts.SessionID,
+ },
+ }
+
+ return mb
+}
+
+// Add adds a message to the batch if the message will not exceed the max size of the batch
+func (mb *MessageBatch) Add(m *Message) (bool, error) {
+ msg, err := m.toMsg()
+ if err != nil {
+ return false, err
+ }
+
+ if msg.Properties.MessageID == nil || msg.Properties.MessageID == "" {
+ uid, err := uuid.NewV4()
+ if err != nil {
+ return false, err
+ }
+ msg.Properties.MessageID = uid.String()
+ }
+
+ if mb.SessionID != nil {
+ msg.Properties.GroupID = *mb.SessionID
+ }
+
+ bin, err := msg.MarshalBinary()
+ if err != nil {
+ return false, err
+ }
+
+ if mb.Size()+len(bin) > int(mb.MaxSize) {
+ return false, nil
+ }
+
+ mb.size += len(bin)
+ mb.marshaledMessages = append(mb.marshaledMessages, bin)
+ return true, nil
+}
+
+// Clear will zero out the batch size and clear the buffered messages
+func (mb *MessageBatch) Clear() {
+ mb.marshaledMessages = [][]byte{}
+ mb.size = 0
+}
+
+// Size is the number of bytes in the message batch
+func (mb *MessageBatch) Size() int {
+ // calculated data size + batch message wrapper + data wrapper portions of the message
+ return mb.size + batchMessageWrapperSize + (len(mb.marshaledMessages) * 5)
+}
+
+func (mb *MessageBatch) toMsg() (*amqp.Message, error) {
+ batchMessage := mb.amqpBatchMessage()
+
+ batchMessage.Data = make([][]byte, len(mb.marshaledMessages))
+ for idx, bytes := range mb.marshaledMessages {
+ batchMessage.Data[idx] = bytes
+ }
+ return batchMessage, nil
+}
+
+func (mb *MessageBatch) amqpBatchMessage() *amqp.Message {
+ return &amqp.Message{
+ Data: make([][]byte, len(mb.marshaledMessages)),
+ Format: batchMessageFormat,
+ Properties: &amqp.MessageProperties{
+ MessageID: mb.ID,
+ },
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/batch_disposition.go b/vendor/github.com/Azure/azure-service-bus-go/batch_disposition.go
new file mode 100644
index 00000000..3a05b01d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/batch_disposition.go
@@ -0,0 +1,98 @@
+package servicebus
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+)
+
+type (
+ // MessageStatus defines an acceptable Message disposition status.
+ MessageStatus dispositionStatus
+ // BatchDispositionIterator provides an iterator over LockTokenIDs
+ BatchDispositionIterator struct {
+ LockTokenIDs []*uuid.UUID
+ Status MessageStatus
+ cursor int
+ }
+ // BatchDispositionError is an error which returns a collection of DispositionError.
+ BatchDispositionError struct {
+ Errors []DispositionError
+ }
+ // DispositionError is an error associated with a LockTokenID.
+ DispositionError struct {
+ LockTokenID *uuid.UUID
+ err error
+ }
+)
+
+const (
+ // Complete exposes completedDisposition
+ Complete MessageStatus = MessageStatus(completedDisposition)
+ // Abort exposes abandonedDisposition
+ Abort MessageStatus = MessageStatus(abandonedDisposition)
+)
+
+func (bde BatchDispositionError) Error() string {
+ msg := ""
+ if len(bde.Errors) != 0 {
+ msg = fmt.Sprintf("Operation failed, %d error(s) reported.", len(bde.Errors))
+ }
+ return msg
+}
+
+func (de DispositionError) Error() string {
+ return de.err.Error()
+}
+
+// UnWrap will return the private error.
+func (de DispositionError) UnWrap() error {
+ return de.err
+}
+
+// Done communicates whether there are more messages remaining to be iterated over.
+func (bdi *BatchDispositionIterator) Done() bool {
+ return len(bdi.LockTokenIDs) == bdi.cursor
+}
+
+// Next iterates to the next LockToken
+func (bdi *BatchDispositionIterator) Next() (uuid *uuid.UUID) {
+ if done := bdi.Done(); done == false {
+ uuid = bdi.LockTokenIDs[bdi.cursor]
+ bdi.cursor++
+ }
+ return uuid
+}
+
+func (bdi *BatchDispositionIterator) doUpdate(ctx context.Context, ec entityConnector) BatchDispositionError {
+ batchError := BatchDispositionError{}
+ for !bdi.Done() {
+ if id := bdi.Next(); id != nil {
+ m := &Message{
+ LockToken: id,
+ }
+ m.ec = ec
+ err := m.sendDisposition(ctx, bdi.Status)
+ if err != nil {
+ batchError.Errors = append(batchError.Errors, DispositionError{
+ LockTokenID: id,
+ err: err,
+ })
+ }
+ }
+ }
+ return batchError
+}
+
+func (m *Message) sendDisposition(ctx context.Context, dispositionStatus MessageStatus) (err error) {
+ switch dispositionStatus {
+ case Complete:
+ err = m.Complete(ctx)
+ case Abort:
+ err = m.Abandon(ctx)
+ default:
+ err = fmt.Errorf("unsupported bulk disposition status %q", dispositionStatus)
+ }
+ return err
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/changelog.md b/vendor/github.com/Azure/azure-service-bus-go/changelog.md
new file mode 100644
index 00000000..421cd61c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/changelog.md
@@ -0,0 +1,53 @@
+# Change Log
+
+## `head`
+
+## `v0.9.1`
+- bump common version to v2.1.0
+
+## `v0.9.0`
+- periodically refresh claims based auth for connections to resolve [issue #116](https://github.com/Azure/azure-service-bus-go/issues/116)
+- refactor management functionality for entities into composition structs
+- fix session deferral for queues and subscriptions
+- add topic scheduled messages
+
+## `v0.8.0`
+- tab for tracing and logging which supports both opencensus and opentracing. To use opencensus, just add a
+ `_ "github.com/devigned/tab/opencensus"`. To use opentracing, just add a `_ "github.com/devigned/tab/opentracing"`
+- target azure-amqp-common-go/v2
+
+## `v0.7.0`
+- [add batch disposition errors](https://github.com/Azure/azure-service-bus-go/pull/129)
+
+## `v0.6.0`
+- add namespace TLS configuration option
+- update to Azure SDK v28 and AutoRest 12
+
+## `v0.5.1`
+- update the Azure Resource Manager dependency to the latest to help protect people not using a dependency
+ management tool such as `dep` or `vgo`.
+
+## `v0.5.0`
+- add support for websockets
+
+## `v0.4.1`
+- fix issue with sender when SB returns a different receiver disposition [#119](https://github.com/Azure/azure-service-bus-go/issues/119)
+
+## `v0.4.0`
+- Update to AMQP 0.11.0 which introduces strict settlement mode
+ ([#111](https://github.com/Azure/azure-service-bus-go/issues/111))
+
+## `v0.3.0`
+- Add disposition batching
+- Add NotFound errors for mgmt API
+- Fix go routine leak when listening for messages upon context close
+- Add batch sends for Topics
+
+## `v0.2.0`
+- Refactor disposition handler so that errors can be handled in handlers
+- Add dead letter queues for entities
+- Fix connection leaks when using multiple calls to Receive
+- Ensure senders wait for message disposition before returning
+
+## `v0.1.0`
+- initial tag for Service Bus which includes Queues, Topics and Subscriptions using AMQP
\ No newline at end of file
diff --git a/vendor/github.com/Azure/azure-service-bus-go/deadletter.go b/vendor/github.com/Azure/azure-service-bus-go/deadletter.go
new file mode 100644
index 00000000..29d6d296
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/deadletter.go
@@ -0,0 +1,161 @@
+package servicebus
+
+import (
+ "context"
+ "sync"
+
+ "github.com/devigned/tab"
+)
+
+type (
+ // Closer provides the ability to close an entity
+ Closer interface {
+ Close(ctx context.Context) error
+ }
+
+ // ReceiveOner provides the ability to receive and handle events
+ ReceiveOner interface {
+ Closer
+ ReceiveOne(ctx context.Context, handler Handler) error
+ }
+
+ // DeadLetterBuilder provides the ability to create a new receiver addressed to a given entity's dead letter queue.
+ DeadLetterBuilder interface {
+ NewDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error)
+ }
+
+ // TransferDeadLetterBuilder provides the ability to create a new receiver addressed to a given entity's transfer
+ // dead letter queue.
+ TransferDeadLetterBuilder interface {
+ NewTransferDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error)
+ }
+
+ // DeadLetter represents a dead letter queue in Azure Service Bus.
+ //
+ // Azure Service Bus queues, topics and subscriptions provide a secondary sub-queue, called a dead-letter queue
+ // (DLQ). The dead-letter queue does not need to be explicitly created and cannot be deleted or otherwise managed
+ // independent of the main entity.
+ //
+ // The purpose of the dead-letter queue is to hold messages that cannot be delivered to any receiver, or messages
+ // that could not be processed. Messages can then be removed from the DLQ and inspected. An application might, with
+ // help of an operator, correct issues and resubmit the message, log the fact that there was an error, and take
+ // corrective action.
+ //
+ // From an API and protocol perspective, the DLQ is mostly similar to any other queue, except that messages can only
+ // be submitted via the dead-letter operation of the parent entity. In addition, time-to-live is not observed, and
+ // you can't dead-letter a message from a DLQ. The dead-letter queue fully supports peek-lock delivery and
+ // transactional operations.
+ //
+ // Note that there is no automatic cleanup of the DLQ. Messages remain in the DLQ until you explicitly retrieve
+ // them from the DLQ and call Complete() on the dead-letter message.
+ DeadLetter struct {
+ builder DeadLetterBuilder
+ rMu sync.Mutex
+ receiver ReceiveOner
+ }
+
+ // TransferDeadLetter represents a transfer dead letter queue in Azure Service Bus.
+ //
+ // Messages will be sent to the transfer dead-letter queue under the following conditions:
+ // - A message passes through more than 3 queues or topics that are chained together.
+ // - The destination queue or topic is disabled or deleted.
+ // - The destination queue or topic exceeds the maximum entity size.
+ TransferDeadLetter struct {
+ builder TransferDeadLetterBuilder
+ rMu sync.Mutex
+ receiver ReceiveOner
+ }
+)
+
+// NewDeadLetter constructs an instance of DeadLetter which represents a dead letter queue in Azure Service Bus
+func NewDeadLetter(builder DeadLetterBuilder) *DeadLetter {
+ return &DeadLetter{
+ builder: builder,
+ }
+}
+
+// ReceiveOne will receive one message from the dead letter queue
+func (dl *DeadLetter) ReceiveOne(ctx context.Context, handler Handler) error {
+ if err := dl.ensureReceiver(ctx); err != nil {
+ return err
+ }
+
+ return dl.receiver.ReceiveOne(ctx, handler)
+}
+
+// Close the underlying connection to Service Bus
+func (dl *DeadLetter) Close(ctx context.Context) error {
+ dl.rMu.Lock()
+ defer dl.rMu.Unlock()
+
+ if dl.receiver == nil {
+ return nil
+ }
+
+ if err := dl.receiver.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ return nil
+}
+
+func (dl *DeadLetter) ensureReceiver(ctx context.Context) error {
+ dl.rMu.Lock()
+ defer dl.rMu.Unlock()
+
+ r, err := dl.builder.NewDeadLetterReceiver(ctx)
+ if err != nil {
+ return err
+ }
+
+ dl.receiver = r
+ return nil
+}
+
+// NewTransferDeadLetter constructs an instance of DeadLetter which represents a transfer dead letter queue in
+// Azure Service Bus
+func NewTransferDeadLetter(builder TransferDeadLetterBuilder) *TransferDeadLetter {
+ return &TransferDeadLetter{
+ builder: builder,
+ }
+}
+
+// ReceiveOne will receive one message from the dead letter queue
+func (dl *TransferDeadLetter) ReceiveOne(ctx context.Context, handler Handler) error {
+ if err := dl.ensureReceiver(ctx); err != nil {
+ return err
+ }
+
+ return dl.receiver.ReceiveOne(ctx, handler)
+}
+
+// Close the underlying connection to Service Bus
+func (dl *TransferDeadLetter) Close(ctx context.Context) error {
+ dl.rMu.Lock()
+ defer dl.rMu.Unlock()
+
+ if dl.receiver == nil {
+ return nil
+ }
+
+ if err := dl.receiver.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ return nil
+}
+
+func (dl *TransferDeadLetter) ensureReceiver(ctx context.Context) error {
+ dl.rMu.Lock()
+ defer dl.rMu.Unlock()
+
+ r, err := dl.builder.NewTransferDeadLetterReceiver(ctx)
+ if err != nil {
+ return err
+ }
+
+ dl.receiver = r
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/entity.go b/vendor/github.com/Azure/azure-service-bus-go/entity.go
new file mode 100644
index 00000000..2a254bd8
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/entity.go
@@ -0,0 +1,262 @@
+package servicebus
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/devigned/tab"
+)
+
+type (
+ entity struct {
+ Name string
+ managementPath string
+ namespace *Namespace
+ rpcClient *rpcClient
+ rpcClientMu sync.RWMutex
+ }
+
+ sendingEntity struct {
+ *entity
+ }
+
+ receivingEntity struct {
+ renewMessageLockMutex sync.Mutex
+ *entity
+ }
+
+ sendAndReceiveEntity struct {
+ *entity
+ *sendingEntity
+ *receivingEntity
+ }
+)
+
+func newEntity(name string, managementPath string, ns *Namespace) *entity {
+ return &entity{
+ Name: name,
+ managementPath: managementPath,
+ namespace: ns,
+ }
+}
+
+func newReceivingEntity(e *entity) *receivingEntity {
+ return &receivingEntity{
+ entity: e,
+ }
+}
+
+func newSendingEntity(e *entity) *sendingEntity {
+ return &sendingEntity{
+ entity: e,
+ }
+}
+
+func newSendAndReceiveEntity(entity *entity) *sendAndReceiveEntity {
+ return &sendAndReceiveEntity{
+ entity: entity,
+ receivingEntity: newReceivingEntity(entity),
+ sendingEntity: newSendingEntity(entity),
+ }
+}
+
+func (e *entity) GetRPCClient(ctx context.Context) (*rpcClient, error) {
+ if err := e.ensureRPCClient(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ return e.rpcClient, nil
+}
+
+// ManagementPath is the relative uri to address the entity's management functionality
+func (e *entity) ManagementPath() string {
+ return e.managementPath
+}
+
+func (e *entity) Namespace() *Namespace {
+ return e.namespace
+}
+
+func (e *entity) getEntity() *entity {
+ return e
+}
+
+// Peek fetches a list of Messages from the Service Bus broker without acquiring a lock or committing to a disposition.
+// The messages are delivered as close to sequence order as possible.
+//
+// The MessageIterator that is returned has the following properties:
+// - Messages are fetched from the server in pages. Page size is configurable with PeekOptions.
+// - The MessageIterator will always return "false" for Done().
+// - When Next() is called, it will return either: a slice of messages and no error, nil with an error related to being
+// unable to complete the operation, or an empty slice of messages and an instance of "ErrNoMessages" signifying that
+// there are currently no messages in the queue with a sequence ID larger than previously viewed ones.
+func (re *receivingEntity) Peek(ctx context.Context, options ...PeekOption) (MessageIterator, error) {
+ ctx, span := re.entity.startSpanFromContext(ctx, "sb.entity.Peek")
+ defer span.End()
+
+ return newPeekIterator(re.entity, options...)
+}
+
+// PeekOne fetches a single Message from the Service Bus broker without acquiring a lock or committing to a disposition.
+func (re *receivingEntity) PeekOne(ctx context.Context, options ...PeekOption) (*Message, error) {
+ ctx, span := re.entity.startSpanFromContext(ctx, "sb.receivingEntity.PeekOne")
+ defer span.End()
+
+ // Adding PeekWithPageSize(1) as the last option assures that either:
+ // - creating the iterator will fail because two of the same option will be applied.
+ // - PeekWithPageSize(1) will be applied after all others, so we will not wastefully pull down messages destined to
+ // be unread.
+ options = append(options, PeekWithPageSize(1))
+
+ it, err := newPeekIterator(re.entity, options...)
+ if err != nil {
+ return nil, err
+ }
+ return it.Next(ctx)
+}
+
+// ReceiveDeferred will receive and handle a set of deferred messages
+//
+// When a queue or subscription client receives a message that it is willing to process, but for which processing is
+// not currently possible due to special circumstances inside of the application, it has the option of "deferring"
+// retrieval of the message to a later point. The message remains in the queue or subscription, but it is set aside.
+//
+// Deferral is a feature specifically created for workflow processing scenarios. Workflow frameworks may require certain
+// operations to be processed in a particular order, and may have to postpone processing of some received messages
+// until prescribed prior work that is informed by other messages has been completed.
+//
+// A simple illustrative example is an order processing sequence in which a payment notification from an external
+// payment provider appears in a system before the matching purchase order has been propagated from the store front
+// to the fulfillment system. In that case, the fulfillment system might defer processing the payment notification
+// until there is an order with which to associate it. In rendezvous scenarios, where messages from different sources
+// drive a workflow forward, the real-time execution order may indeed be correct, but the messages reflecting the
+// outcomes may arrive out of order.
+//
+// Ultimately, deferral aids in reordering messages from the arrival order into an order in which they can be
+// processed, while leaving those messages safely in the message store for which processing needs to be postponed.
+func (re *receivingEntity) ReceiveDeferred(ctx context.Context, handler Handler, sequenceNumbers ...int64) error {
+ ctx, span := re.startSpanFromContext(ctx, "sb.receivingEntity.ReceiveDeferred")
+ defer span.End()
+
+ return re.ReceiveDeferredWithMode(ctx, handler, PeekLockMode, sequenceNumbers...)
+}
+
+// ReceiveDeferredWithMode will receive and handle a set of deferred messages
+//
+// When a queue or subscription client receives a message that it is willing to process, but for which processing is
+// not currently possible due to special circumstances inside of the application, it has the option of "deferring"
+// retrieval of the message to a later point. The message remains in the queue or subscription, but it is set aside.
+//
+// Deferral is a feature specifically created for workflow processing scenarios. Workflow frameworks may require certain
+// operations to be processed in a particular order, and may have to postpone processing of some received messages
+// until prescribed prior work that is informed by other messages has been completed.
+//
+// A simple illustrative example is an order processing sequence in which a payment notification from an external
+// payment provider appears in a system before the matching purchase order has been propagated from the store front
+// to the fulfillment system. In that case, the fulfillment system might defer processing the payment notification
+// until there is an order with which to associate it. In rendezvous scenarios, where messages from different sources
+// drive a workflow forward, the real-time execution order may indeed be correct, but the messages reflecting the
+// outcomes may arrive out of order.
+//
+// Ultimately, deferral aids in reordering messages from the arrival order into an order in which they can be
+// processed, while leaving those messages safely in the message store for which processing needs to be postponed.
+func (re *receivingEntity) ReceiveDeferredWithMode(ctx context.Context, handler Handler, mode ReceiveMode, sequenceNumbers ...int64) error {
+ ctx, span := re.startSpanFromContext(ctx, "sb.receivingEntity.ReceiveDeferred")
+ defer span.End()
+
+ rpcClient, err := re.entity.GetRPCClient(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ messages, err := rpcClient.ReceiveDeferred(ctx, mode, sequenceNumbers...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ for _, msg := range messages {
+ if err := handler.Handle(ctx, msg); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+ return nil
+}
+
+// RenewLocks renews the locks on messages provided
+func (re *receivingEntity) RenewLocks(ctx context.Context, messages ...*Message) error {
+ ctx, span := re.startSpanFromContext(ctx, "sb.receivingEntity.RenewLocks")
+ defer span.End()
+
+ re.renewMessageLockMutex.Lock()
+ defer re.renewMessageLockMutex.Unlock()
+
+ client, err := re.entity.GetRPCClient(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ return client.RenewLocks(ctx, messages...)
+}
+
+// SendBatchDisposition updates the LockTokenIDs to the disposition status.
+func (re *receivingEntity) SendBatchDisposition(ctx context.Context, iterator BatchDispositionIterator) error {
+ ctx, span := re.startSpanFromContext(ctx, "sb.receivingEntity.SendBatchDisposition")
+ defer span.End()
+ return iterator.doUpdate(ctx, re)
+}
+
+// ScheduleAt will send a batch of messages to a Queue, schedule them to be enqueued, and return the sequence numbers
+// that can be used to cancel each message.
+func (se *sendingEntity) ScheduleAt(ctx context.Context, enqueueTime time.Time, messages ...*Message) ([]int64, error) {
+ ctx, span := se.startSpanFromContext(ctx, "sb.sendingEntity.ScheduleAt")
+ defer span.End()
+
+ client, err := se.entity.GetRPCClient(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ return client.ScheduleAt(ctx, enqueueTime, messages...)
+}
+
+// CancelScheduled allows for removal of messages that have been handed to the Service Bus broker for later delivery,
+// but have not yet been enqueued.
+func (se *sendingEntity) CancelScheduled(ctx context.Context, seq ...int64) error {
+ ctx, span := se.startSpanFromContext(ctx, "sb.sendingEntity.CancelScheduled")
+ defer span.End()
+
+ client, err := se.entity.GetRPCClient(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ return client.CancelScheduled(ctx, seq...)
+}
+
+func (e *entity) ensureRPCClient(ctx context.Context) error {
+ ctx, span := e.startSpanFromContext(ctx, "sb.entity.ensureRPCClient")
+ defer span.End()
+
+ e.rpcClientMu.Lock()
+ defer e.rpcClientMu.Unlock()
+
+ if e.rpcClient != nil {
+ return nil
+ }
+
+ client, err := newRPCClient(ctx, e)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ e.rpcClient = client
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/errors.go b/vendor/github.com/Azure/azure-service-bus-go/errors.go
new file mode 100644
index 00000000..135761eb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/errors.go
@@ -0,0 +1,82 @@
+package servicebus
+
+import (
+ "fmt"
+ "reflect"
+
+ "github.com/Azure/azure-amqp-common-go/v2/rpc"
+)
+
+type (
+ // ErrMissingField indicates that an expected property was missing from an AMQP message. This should only be
+ // encountered when there is an error with this library, or the server has altered its behavior unexpectedly.
+ ErrMissingField string
+
+ // ErrMalformedMessage indicates that a message was expected in the form of []byte was not a []byte. This is likely
+ // a bug and should be reported.
+ ErrMalformedMessage string
+
+ // ErrIncorrectType indicates that type assertion failed. This should only be encountered when there is an error
+ // with this library, or the server has altered its behavior unexpectedly.
+ ErrIncorrectType struct {
+ Key string
+ ExpectedType reflect.Type
+ ActualValue interface{}
+ }
+
+ // ErrAMQP indicates that the server communicated an AMQP error with a particular
+ ErrAMQP rpc.Response
+
+ // ErrNoMessages is returned when an operation returned no messages. It is not indicative that there will not be
+ // more messages in the future.
+ ErrNoMessages struct{}
+
+ // ErrNotFound is returned when an entity is not found (404)
+ ErrNotFound struct {
+ EntityPath string
+ }
+)
+
+func (e ErrMissingField) Error() string {
+ return fmt.Sprintf("missing value %q", string(e))
+}
+
+func (e ErrMalformedMessage) Error() string {
+ return fmt.Sprintf("message was expected in the form of []byte was not a []byte")
+}
+
+// NewErrIncorrectType lets you skip using the `reflect` package. Just provide a variable of the desired type as
+// 'expected'.
+func newErrIncorrectType(key string, expected, actual interface{}) ErrIncorrectType {
+ return ErrIncorrectType{
+ Key: key,
+ ExpectedType: reflect.TypeOf(expected),
+ ActualValue: actual,
+ }
+}
+
+func (e ErrIncorrectType) Error() string {
+ return fmt.Sprintf(
+ "value at %q was expected to be of type %q but was actually of type %q",
+ e.Key,
+ e.ExpectedType,
+ reflect.TypeOf(e.ActualValue))
+}
+
+func (e ErrAMQP) Error() string {
+ return fmt.Sprintf("server says (%d) %s", e.Code, e.Description)
+}
+
+func (e ErrNoMessages) Error() string {
+ return "no messages available"
+}
+
+func (e ErrNotFound) Error() string {
+ return fmt.Sprintf("entity at %s not found", e.EntityPath)
+}
+
+// IsErrNotFound returns true if the error argument is an ErrNotFound type
+func IsErrNotFound(err error) bool {
+ _, ok := err.(ErrNotFound)
+ return ok
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/filter.go b/vendor/github.com/Azure/azure-service-bus-go/filter.go
new file mode 100644
index 00000000..20abb1f9
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/filter.go
@@ -0,0 +1,68 @@
+package servicebus
+
+type (
+ // TrueFilter represents a always true sql expression which will accept all messages
+ TrueFilter struct{}
+
+ // FalseFilter represents a always false sql expression which will deny all messages
+ FalseFilter struct{}
+
+ // SQLFilter represents a SQL language-based filter expression that is evaluated against a BrokeredMessage. A
+ // SQLFilter supports a subset of the SQL-92 standard.
+ //
+ // see: https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-messaging-sql-filter
+ SQLFilter struct {
+ Expression string
+ }
+
+ // CorrelationFilter holds a set of conditions that are matched against one or more of an arriving message's user
+ // and system properties. A common use is to match against the CorrelationId property, but the application can also
+ // choose to match against ContentType, Label, MessageId, ReplyTo, ReplyToSessionId, SessionId, To, and any
+ // user-defined properties. A match exists when an arriving message's value for a property is equal to the value
+ // specified in the correlation filter. For string expressions, the comparison is case-sensitive. When specifying
+ // multiple match properties, the filter combines them as a logical AND condition, meaning for the filter to match,
+ // all conditions must match.
+ CorrelationFilter struct {
+ CorrelationID *string `xml:"CorrelationId,omitempty"`
+ MessageID *string `xml:"MessageId,omitempty"`
+ To *string `xml:"To,omitempty"`
+ ReplyTo *string `xml:"ReplyTo,omitempty"`
+ Label *string `xml:"Label,omitempty"`
+ SessionID *string `xml:"SessionId,omitempty"`
+ ReplyToSessionID *string `xml:"ReplyToSessionId,omitempty"`
+ ContentType *string `xml:"ContentType,omitempty"`
+ Properties map[string]interface{} `xml:"Properties,omitempty"`
+ }
+)
+
+// ToFilterDescription will transform the TrueFilter into a FilterDescription
+func (tf TrueFilter) ToFilterDescription() FilterDescription {
+ return FilterDescription{
+ Type: "TrueFilter",
+ SQLExpression: ptrString("1=1"),
+ }
+}
+
+// ToFilterDescription will transform the FalseFilter into a FilterDescription
+func (ff FalseFilter) ToFilterDescription() FilterDescription {
+ return FilterDescription{
+ Type: "FalseFilter",
+ SQLExpression: ptrString("1!=1"),
+ }
+}
+
+// ToFilterDescription will transform the SqlFilter into a FilterDescription
+func (sf SQLFilter) ToFilterDescription() FilterDescription {
+ return FilterDescription{
+ Type: "SqlFilter",
+ SQLExpression: &sf.Expression,
+ }
+}
+
+// ToFilterDescription will transform the CorrelationFilter into a FilterDescription
+func (cf CorrelationFilter) ToFilterDescription() FilterDescription {
+ return FilterDescription{
+ Type: "CorrelationFilter",
+ CorrelationFilter: cf,
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/go.mod b/vendor/github.com/Azure/azure-service-bus-go/go.mod
new file mode 100644
index 00000000..02283d4a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/go.mod
@@ -0,0 +1,24 @@
+module github.com/Azure/azure-service-bus-go
+
+go 1.12
+
+require (
+ github.com/Azure/azure-amqp-common-go/v2 v2.1.0
+ github.com/Azure/azure-sdk-for-go v30.1.0+incompatible
+ github.com/Azure/go-autorest v12.0.0+incompatible
+ github.com/devigned/tab v0.1.1
+ github.com/fortytw2/leaktest v1.3.0 // indirect
+ github.com/grpc-ecosystem/grpc-gateway v1.9.2 // indirect
+ github.com/joho/godotenv v1.3.0
+ github.com/mitchellh/mapstructure v1.1.2
+ github.com/pkg/errors v0.8.1 // indirect
+ github.com/stretchr/objx v0.2.0 // indirect
+ github.com/stretchr/testify v1.3.0
+ go.opencensus.io v0.22.0 // indirect
+ golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b
+ golang.org/x/sys v0.0.0-20190620070143-6f217b454f45 // indirect
+ google.golang.org/api v0.6.0 // indirect
+ google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 // indirect
+ google.golang.org/grpc v1.21.1 // indirect
+ pack.ag/amqp v0.11.2
+)
diff --git a/vendor/github.com/Azure/azure-service-bus-go/go.sum b/vendor/github.com/Azure/azure-service-bus-go/go.sum
new file mode 100644
index 00000000..64bba5fb
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/go.sum
@@ -0,0 +1,154 @@
+cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ=
+contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0=
+github.com/Azure/azure-amqp-common-go/v2 v2.1.0 h1:+QbFgmWCnPzdaRMfsI0Yb6GrRdBj5jVL8N3EXuEUcBQ=
+github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible h1:CYPU39ULbGjQBo3gXIqiWouK0C4F+Pt2Zx5CqGvqknE=
+github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v30.1.0+incompatible h1:HyYPft8wXpxMd0kfLtXo6etWcO+XuPbLkcgx9g2cqxU=
+github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/go-autorest v12.0.0+incompatible h1:N+VqClcomLGD/sHb3smbSYYtNMgKpVV3Cd5r5i8z6bQ=
+github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
+github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/census-instrumentation/opencensus-proto v0.2.0 h1:LzQXZOgg4CQfE6bFvXGM30YZL1WW/M337pXml+GrcZ4=
+github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/devigned/tab v0.1.1 h1:3mD6Kb1mUOYeLpJvTVSDwSg5ZsfSxfvxGRTxRsJsITA=
+github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
+github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/fortytw2/leaktest v1.2.0 h1:cj6GCiwJDH7l3tMHLjZDo0QqPtrXJiWSI9JgpeQKw+Q=
+github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
+github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
+github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
+github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
+github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
+github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
+github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
+github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
+github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
+github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/grpc-ecosystem/grpc-gateway v1.9.2 h1:S+ef0492XaIknb8LMjcwgW2i3cNTzDYMmDrOThOJNWc=
+github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
+github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo=
+github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU=
+github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
+github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc=
+github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
+github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
+github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
+github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
+github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48=
+github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
+github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
+github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg=
+go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
+go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4=
+go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
+golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
+golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
+golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=
+golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b h1:lkjdUzSyJ5P1+eal9fxXX9Xg2BTfswsonKUse48C0uE=
+golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
+golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI=
+golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190620070143-6f217b454f45 h1:Dl2hc890lrizvUppGbRWhnIh2f8jOTCQpY5IKWRS0oM=
+golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
+golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
+golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
+golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
+golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
+google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI=
+google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
+google.golang.org/api v0.6.0 h1:2tJEkRfnZL5g1GeBUlITh/rqT5HG3sFcoVCUUxmgJ2g=
+google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4=
+google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
+google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
+google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk=
+google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
+google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601 h1:9VBRTdmgQxbs6HE0sUnMrSWNePppAJU07NYvX5dIB04=
+google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s=
+google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
+google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
+google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
+google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8=
+google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
+gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
+honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
+pack.ag/amqp v0.11.2 h1:cuNDWLUTbKRtEZwhB0WQBXf9pGbm87pUBXQhvcFxBWg=
+pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4=
diff --git a/vendor/github.com/Azure/azure-service-bus-go/handler.go b/vendor/github.com/Azure/azure-service-bus-go/handler.go
new file mode 100644
index 00000000..6d5e0625
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/handler.go
@@ -0,0 +1,65 @@
+package servicebus
+
+import "context"
+
+type (
+ // TODO: message session should be available for each of the handler methods
+
+ // TODO: write a session manager that will handle a max concurrent session
+
+ // Handler exposes the functionality required to process a Service Bus message.
+ Handler interface {
+ Handle(context.Context, *Message) error
+ }
+
+ // HandlerFunc is a type converter that allows a func to be used as a `Handler`
+ HandlerFunc func(context.Context, *Message) error
+
+ // SessionHandler exposes a manner of handling a group of messages together. Instances of SessionHandler should be
+ // passed to a Receiver such as a Queue or Subscription.
+ SessionHandler interface {
+ Handler
+
+ // Start is called when a Receiver is informed that has acquired a lock on a Service Bus Session.
+ Start(*MessageSession) error
+
+ // End is called when a Receiver is informed that the last message of a Session has been passed to it.
+ End()
+ }
+)
+
+// Handle redirects this call to the func that was provided.
+func (hf HandlerFunc) Handle(ctx context.Context, msg *Message) error {
+ return hf(ctx, msg)
+}
+
+type defaultSessionHandler struct {
+ Handler
+ start func(*MessageSession) error
+ end func()
+}
+
+// NewSessionHandler is a type converter that allows three funcs to be tied together into a type that fulfills the
+// SessionHandler interface.
+func NewSessionHandler(base Handler, start func(*MessageSession) error, end func()) SessionHandler {
+ return &defaultSessionHandler{
+ Handler: base,
+ start: start,
+ end: end,
+ }
+}
+
+// Start calls the func() that was provided to `NewSessionHandler` when a new session lock is established.
+func (dsh defaultSessionHandler) Start(ms *MessageSession) error {
+ if dsh.start != nil {
+ return dsh.start(ms)
+ }
+ return nil
+}
+
+// End calls the func() that was provided to `NewSessionHandler` when a session is finished processing for any reason.
+func (dsh defaultSessionHandler) End() {
+ if dsh.end != nil {
+ dsh.end()
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/iterator.go b/vendor/github.com/Azure/azure-service-bus-go/iterator.go
new file mode 100644
index 00000000..4a6be304
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/iterator.go
@@ -0,0 +1,163 @@
+package servicebus
+
+import (
+ "context"
+ "errors"
+
+ "github.com/devigned/tab"
+)
+
+type (
+ // MessageIterator offers a simple mechanism for iterating over a list of
+ MessageIterator interface {
+ Done() bool
+ Next(context.Context) (*Message, error)
+ }
+
+ // MessageSliceIterator is a wrapper, which lets any slice of Message pointers be used as a MessageIterator.
+ MessageSliceIterator struct {
+ Target []*Message
+ Cursor int
+ }
+
+ peekIterator struct {
+ entity *entity
+ buffer chan *Message
+ lastSequenceNumber int64
+ }
+
+ // PeekOption allows customization of parameters when querying a Service Bus entity for messages without committing
+ // to processing them.
+ PeekOption func(*peekIterator) error
+)
+
+const (
+ defaultPeekPageSize = 10
+)
+
+// AsMessageSliceIterator wraps a slice of Message pointers to allow it to be made into a MessageIterator.
+func AsMessageSliceIterator(target []*Message) *MessageSliceIterator {
+ return &MessageSliceIterator{
+ Target: target,
+ }
+}
+
+// Done communicates whether there are more messages remaining to be iterated over.
+func (ms MessageSliceIterator) Done() bool {
+ return ms.Cursor >= len(ms.Target)
+}
+
+// Next fetches the Message in the slice at a position one larger than the last one accessed.
+func (ms *MessageSliceIterator) Next(_ context.Context) (*Message, error) {
+ if ms.Done() {
+ return nil, ErrNoMessages{}
+ }
+
+ retval := ms.Target[ms.Cursor]
+ ms.Cursor++
+ return retval, nil
+}
+
+func newPeekIterator(entity *entity, options ...PeekOption) (*peekIterator, error) {
+ retval := &peekIterator{
+ entity: entity,
+ }
+
+ foundPageSize := false
+ for i := range options {
+ if err := options[i](retval); err != nil {
+ return nil, err
+ }
+
+ if retval.buffer != nil {
+ foundPageSize = true
+ }
+ }
+
+ if !foundPageSize {
+ err := PeekWithPageSize(defaultPeekPageSize)(retval)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return retval, nil
+}
+
+// PeekWithPageSize adjusts how many messages are fetched at once while peeking from the server.
+func PeekWithPageSize(pageSize int) PeekOption {
+ return func(pi *peekIterator) error {
+ if pageSize < 0 {
+ return errors.New("page size must not be less than zero")
+ }
+
+ if pi.buffer != nil {
+ return errors.New("cannot modify an existing peekIterator's buffer")
+ }
+
+ pi.buffer = make(chan *Message, pageSize)
+ return nil
+ }
+}
+
+// PeekFromSequenceNumber adds a filter to the Peek operation, so that no messages with a Sequence Number less than
+// 'seq' are returned.
+func PeekFromSequenceNumber(seq int64) PeekOption {
+ return func(pi *peekIterator) error {
+ pi.lastSequenceNumber = seq + 1
+ return nil
+ }
+}
+
+func (pi peekIterator) Done() bool {
+ return false
+}
+
+func (pi *peekIterator) Next(ctx context.Context) (*Message, error) {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.peekIterator.Next")
+ defer span.End()
+
+ if len(pi.buffer) == 0 {
+ if err := pi.getNextPage(ctx); err != nil {
+ return nil, err
+ }
+ }
+
+ select {
+ case next := <-pi.buffer:
+ return next, nil
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ }
+}
+
+func (pi *peekIterator) getNextPage(ctx context.Context) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.peekIterator.getNextPage")
+ defer span.End()
+
+ client, err := pi.entity.GetRPCClient(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ msgs, err := client.GetNextPage(ctx, pi.lastSequenceNumber, int32(cap(pi.buffer)))
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ for i := range msgs {
+ select {
+ case pi.buffer <- msgs[i]:
+ // Intentionally Left Blank
+ case <-ctx.Done():
+ return ctx.Err()
+ }
+ }
+
+ // Update last seen sequence number so that the next read starts from where this ended.
+ lastMsg := msgs[len(msgs)-1]
+ pi.lastSequenceNumber = *lastMsg.SystemProperties.SequenceNumber + 1
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/message.go b/vendor/github.com/Azure/azure-service-bus-go/message.go
new file mode 100644
index 00000000..db5cbfce
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/message.go
@@ -0,0 +1,546 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "github.com/devigned/tab"
+ "github.com/mitchellh/mapstructure"
+ "pack.ag/amqp"
+)
+
+type (
+ // Message is an Service Bus message to be sent or received
+ Message struct {
+ ContentType string
+ CorrelationID string
+ Data []byte
+ DeliveryCount uint32
+ SessionID *string
+ GroupSequence *uint32
+ ID string
+ Label string
+ ReplyTo string
+ ReplyToGroupID string
+ To string
+ TTL *time.Duration
+ LockToken *uuid.UUID
+ SystemProperties *SystemProperties
+ UserProperties map[string]interface{}
+ Format uint32
+ message *amqp.Message
+ ec entityConnector // if an entityConnector is present, a message should send disposition via mgmt
+ useSession bool
+ sessionID *string
+ }
+
+ // DispositionAction represents the action to notify Azure Service Bus of the Message's disposition
+ DispositionAction func(ctx context.Context) error
+
+ // MessageErrorCondition represents a well-known collection of AMQP errors
+ MessageErrorCondition string
+
+ // SystemProperties are used to store properties that are set by the system.
+ SystemProperties struct {
+ LockedUntil *time.Time `mapstructure:"x-opt-locked-until"`
+ SequenceNumber *int64 `mapstructure:"x-opt-sequence-number"`
+ PartitionID *int16 `mapstructure:"x-opt-partition-id"`
+ PartitionKey *string `mapstructure:"x-opt-partition-key"`
+ EnqueuedTime *time.Time `mapstructure:"x-opt-enqueued-time"`
+ DeadLetterSource *string `mapstructure:"x-opt-deadletter-source"`
+ ScheduledEnqueueTime *time.Time `mapstructure:"x-opt-scheduled-enqueue-time"`
+ EnqueuedSequenceNumber *int64 `mapstructure:"x-opt-enqueue-sequence-number"`
+ ViaPartitionKey *string `mapstructure:"x-opt-via-partition-key"`
+ }
+
+ mapStructureTag struct {
+ Name string
+ PersistEmpty bool
+ }
+
+ dispositionStatus string
+
+ disposition struct {
+ Status dispositionStatus
+ LockTokens []*uuid.UUID
+ DeadLetterReason *string
+ DeadLetterDescription *string
+ }
+)
+
+// Error Conditions
+const (
+ ErrorInternalError MessageErrorCondition = "amqp:internal-error"
+ ErrorNotFound MessageErrorCondition = "amqp:not-found"
+ ErrorUnauthorizedAccess MessageErrorCondition = "amqp:unauthorized-access"
+ ErrorDecodeError MessageErrorCondition = "amqp:decode-error"
+ ErrorResourceLimitExceeded MessageErrorCondition = "amqp:resource-limit-exceeded"
+ ErrorNotAllowed MessageErrorCondition = "amqp:not-allowed"
+ ErrorInvalidField MessageErrorCondition = "amqp:invalid-field"
+ ErrorNotImplemented MessageErrorCondition = "amqp:not-implemented"
+ ErrorResourceLocked MessageErrorCondition = "amqp:resource-locked"
+ ErrorPreconditionFailed MessageErrorCondition = "amqp:precondition-failed"
+ ErrorResourceDeleted MessageErrorCondition = "amqp:resource-deleted"
+ ErrorIllegalState MessageErrorCondition = "amqp:illegal-state"
+
+ completedDisposition dispositionStatus = "completed"
+ abandonedDisposition dispositionStatus = "abandoned"
+ suspendedDisposition dispositionStatus = "suspended"
+)
+
+const (
+ lockTokenName = "x-opt-lock-token"
+)
+
+// NewMessageFromString builds an Message from a string message
+func NewMessageFromString(message string) *Message {
+ return NewMessage([]byte(message))
+}
+
+// NewMessage builds an Message from a slice of data
+func NewMessage(data []byte) *Message {
+ return &Message{
+ Data: data,
+ }
+}
+
+// CompleteAction will notify Azure Service Bus that the message was successfully handled and should be deleted from the
+// queue
+func (m *Message) CompleteAction() DispositionAction {
+ return func(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.CompleteAction")
+ defer span.End()
+
+ return m.Complete(ctx)
+ }
+}
+
+// AbandonAction will notify Azure Service Bus the message failed but should be re-queued for delivery.
+func (m *Message) AbandonAction() DispositionAction {
+ return func(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.AbandonAction")
+ defer span.End()
+
+ return m.Abandon(ctx)
+ }
+}
+
+// DeadLetterAction will notify Azure Service Bus the message failed and should not re-queued
+func (m *Message) DeadLetterAction(err error) DispositionAction {
+ return func(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.DeadLetterAction")
+ defer span.End()
+
+ return m.DeadLetter(ctx, err)
+ }
+}
+
+// DeadLetterWithInfoAction will notify Azure Service Bus the message failed and should not be re-queued with additional
+// context
+func (m *Message) DeadLetterWithInfoAction(err error, condition MessageErrorCondition, additionalData map[string]string) DispositionAction {
+ return func(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.DeadLetterWithInfoAction")
+ defer span.End()
+
+ return m.DeadLetterWithInfo(ctx, err, condition, additionalData)
+ }
+}
+
+// Complete will notify Azure Service Bus that the message was successfully handled and should be deleted from the queue
+func (m *Message) Complete(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.Complete")
+ defer span.End()
+
+ if m.ec != nil {
+ return sendMgmtDisposition(ctx, m, disposition{Status: completedDisposition})
+ }
+
+ return m.message.Accept()
+}
+
+// Abandon will notify Azure Service Bus the message failed but should be re-queued for delivery.
+func (m *Message) Abandon(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.Abandon")
+ defer span.End()
+
+ if m.ec != nil {
+ d := disposition{
+ Status: abandonedDisposition,
+ }
+ return sendMgmtDisposition(ctx, m, d)
+ }
+
+ return m.message.Modify(false, false, nil)
+}
+
+// Defer will set aside the message for later processing
+//
+// When a queue or subscription client receives a message that it is willing to process, but for which processing is
+// not currently possible due to special circumstances inside of the application, it has the option of "deferring"
+// retrieval of the message to a later point. The message remains in the queue or subscription, but it is set aside.
+//
+// Deferral is a feature specifically created for workflow processing scenarios. Workflow frameworks may require certain
+// operations to be processed in a particular order, and may have to postpone processing of some received messages
+// until prescribed prior work that is informed by other messages has been completed.
+//
+// A simple illustrative example is an order processing sequence in which a payment notification from an external
+// payment provider appears in a system before the matching purchase order has been propagated from the store front
+// to the fulfillment system. In that case, the fulfillment system might defer processing the payment notification
+// until there is an order with which to associate it. In rendezvous scenarios, where messages from different sources
+// drive a workflow forward, the real-time execution order may indeed be correct, but the messages reflecting the
+// outcomes may arrive out of order.
+//
+// Ultimately, deferral aids in reordering messages from the arrival order into an order in which they can be
+// processed, while leaving those messages safely in the message store for which processing needs to be postponed.
+func (m *Message) Defer(ctx context.Context) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.Defer")
+ defer span.End()
+
+ return m.message.Modify(true, true, nil)
+}
+
+// Release will notify Azure Service Bus the message should be re-queued without failure.
+//func (m *Message) Release() DispositionAction {
+// return func(ctx context.Context) {
+// span, _ := m.startSpanFromContext(ctx, "sb.Message.Release")
+// defer span.Finish()
+//
+// m.message.Release()
+// }
+//}
+
+// DeadLetter will notify Azure Service Bus the message failed and should not re-queued
+func (m *Message) DeadLetter(ctx context.Context, err error) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.DeadLetter")
+ defer span.End()
+
+ if m.ec != nil {
+ d := disposition{
+ Status: suspendedDisposition,
+ DeadLetterDescription: ptrString(err.Error()),
+ DeadLetterReason: ptrString("amqp:error"),
+ }
+ return sendMgmtDisposition(ctx, m, d)
+ }
+
+ amqpErr := amqp.Error{
+ Condition: amqp.ErrorCondition(ErrorInternalError),
+ Description: err.Error(),
+ }
+ return m.message.Reject(&amqpErr)
+
+}
+
+// DeadLetterWithInfo will notify Azure Service Bus the message failed and should not be re-queued with additional
+// context
+func (m *Message) DeadLetterWithInfo(ctx context.Context, err error, condition MessageErrorCondition, additionalData map[string]string) error {
+ _, span := m.startSpanFromContext(ctx, "sb.Message.DeadLetterWithInfo")
+ defer span.End()
+
+ if m.ec != nil {
+ d := disposition{
+ Status: suspendedDisposition,
+ DeadLetterDescription: ptrString(err.Error()),
+ DeadLetterReason: ptrString("amqp:error"),
+ }
+ return sendMgmtDisposition(ctx, m, d)
+ }
+
+ var info map[string]interface{}
+ if additionalData != nil {
+ info = make(map[string]interface{}, len(additionalData))
+ for key, val := range additionalData {
+ info[key] = val
+ }
+ }
+
+ amqpErr := amqp.Error{
+ Condition: amqp.ErrorCondition(condition),
+ Description: err.Error(),
+ Info: info,
+ }
+ return m.message.Reject(&amqpErr)
+}
+
+// ScheduleAt will ensure Azure Service Bus delivers the message after the time specified
+// (usually within 1 minute after the specified time)
+func (m *Message) ScheduleAt(t time.Time) {
+ if m.SystemProperties == nil {
+ m.SystemProperties = new(SystemProperties)
+ }
+ utcTime := t.UTC()
+ m.SystemProperties.ScheduledEnqueueTime = &utcTime
+}
+
+// Set implements tab.Carrier
+func (m *Message) Set(key string, value interface{}) {
+ if m.UserProperties == nil {
+ m.UserProperties = make(map[string]interface{})
+ }
+ m.UserProperties[key] = value
+}
+
+// GetKeyValues implements tab.Carrier
+func (m *Message) GetKeyValues() map[string]interface{} {
+ return m.UserProperties
+}
+
+func sendMgmtDisposition(ctx context.Context, m *Message, state disposition) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.sendMgmtDisposition")
+ defer span.End()
+
+ client, err := m.ec.getEntity().GetRPCClient(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ return client.SendDisposition(ctx, m, state)
+}
+
+func (m *Message) toMsg() (*amqp.Message, error) {
+ amqpMsg := m.message
+ if amqpMsg == nil {
+ amqpMsg = amqp.NewMessage(m.Data)
+ }
+
+ if m.TTL != nil {
+ if amqpMsg.Header == nil {
+ amqpMsg.Header = new(amqp.MessageHeader)
+ }
+ amqpMsg.Header.TTL = *m.TTL
+ }
+
+ amqpMsg.Properties = &amqp.MessageProperties{
+ MessageID: m.ID,
+ }
+
+ if m.SessionID != nil {
+ amqpMsg.Properties.GroupID = *m.SessionID
+ }
+
+ if m.GroupSequence != nil {
+ amqpMsg.Properties.GroupSequence = *m.GroupSequence
+ }
+
+ amqpMsg.Properties.CorrelationID = m.CorrelationID
+ amqpMsg.Properties.ContentType = m.ContentType
+ amqpMsg.Properties.Subject = m.Label
+ amqpMsg.Properties.To = m.To
+ amqpMsg.Properties.ReplyTo = m.ReplyTo
+ amqpMsg.Properties.ReplyToGroupID = m.ReplyToGroupID
+
+ if len(m.UserProperties) > 0 {
+ amqpMsg.ApplicationProperties = make(map[string]interface{})
+ for key, value := range m.UserProperties {
+ amqpMsg.ApplicationProperties[key] = value
+ }
+ }
+
+ if m.SystemProperties != nil {
+ sysPropMap, err := encodeStructureToMap(m.SystemProperties)
+ if err != nil {
+ return nil, err
+ }
+ amqpMsg.Annotations = annotationsFromMap(sysPropMap)
+ }
+
+ if m.LockToken != nil {
+ if amqpMsg.DeliveryAnnotations == nil {
+ amqpMsg.DeliveryAnnotations = make(amqp.Annotations)
+ }
+ amqpMsg.DeliveryAnnotations[lockTokenName] = *m.LockToken
+ }
+
+ return amqpMsg, nil
+}
+
+func annotationsFromMap(m map[string]interface{}) amqp.Annotations {
+ a := make(amqp.Annotations)
+ for key, val := range m {
+ a[key] = val
+ }
+ return a
+}
+
+func messageFromAMQPMessage(msg *amqp.Message) (*Message, error) {
+ return newMessage(msg.Data[0], msg)
+}
+
+func newMessage(data []byte, amqpMsg *amqp.Message) (*Message, error) {
+ msg := &Message{
+ Data: data,
+ message: amqpMsg,
+ }
+
+ if amqpMsg == nil {
+ return msg, nil
+ }
+
+ if amqpMsg.Properties != nil {
+ if id, ok := amqpMsg.Properties.MessageID.(string); ok {
+ msg.ID = id
+ }
+ msg.SessionID = &amqpMsg.Properties.GroupID
+ msg.GroupSequence = &amqpMsg.Properties.GroupSequence
+ if id, ok := amqpMsg.Properties.CorrelationID.(string); ok {
+ msg.CorrelationID = id
+ }
+ msg.ContentType = amqpMsg.Properties.ContentType
+ msg.Label = amqpMsg.Properties.Subject
+ msg.To = amqpMsg.Properties.To
+ msg.ReplyTo = amqpMsg.Properties.ReplyTo
+ msg.ReplyToGroupID = amqpMsg.Properties.ReplyToGroupID
+ if amqpMsg.Header != nil {
+ msg.DeliveryCount = amqpMsg.Header.DeliveryCount + 1
+ msg.TTL = &amqpMsg.Header.TTL
+ }
+ }
+
+ if amqpMsg.ApplicationProperties != nil {
+ msg.UserProperties = make(map[string]interface{}, len(amqpMsg.ApplicationProperties))
+ for key, value := range amqpMsg.ApplicationProperties {
+ msg.UserProperties[key] = value
+ }
+ }
+
+ if amqpMsg.Annotations != nil {
+ if err := mapstructure.Decode(amqpMsg.Annotations, &msg.SystemProperties); err != nil {
+ return msg, err
+ }
+ }
+
+ if amqpMsg.DeliveryTag != nil && len(amqpMsg.DeliveryTag) > 0 {
+ lockToken, err := lockTokenFromMessageTag(amqpMsg)
+ if err != nil {
+ return msg, err
+ }
+ msg.LockToken = lockToken
+ }
+
+ if token, ok := amqpMsg.DeliveryAnnotations[lockTokenName]; ok {
+ if id, ok := token.(amqp.UUID); ok {
+ sid := uuid.UUID([16]byte(id))
+ msg.LockToken = &sid
+ }
+ }
+
+ msg.Format = amqpMsg.Format
+ return msg, nil
+}
+
+func lockTokenFromMessageTag(msg *amqp.Message) (*uuid.UUID, error) {
+ return uuidFromLockTokenBytes(msg.DeliveryTag)
+}
+
+func uuidFromLockTokenBytes(bytes []byte) (*uuid.UUID, error) {
+ if len(bytes) != 16 {
+ return nil, fmt.Errorf("invalid lock token, token was not 16 bytes long")
+ }
+
+ var swapIndex = func(indexOne, indexTwo int, array *[16]byte) {
+ v1 := array[indexOne]
+ array[indexOne] = array[indexTwo]
+ array[indexTwo] = v1
+ }
+
+ // Get lock token from the deliveryTag
+ var lockTokenBytes [16]byte
+ copy(lockTokenBytes[:], bytes[:16])
+ // translate from .net guid byte serialisation format to amqp rfc standard
+ swapIndex(0, 3, &lockTokenBytes)
+ swapIndex(1, 2, &lockTokenBytes)
+ swapIndex(4, 5, &lockTokenBytes)
+ swapIndex(6, 7, &lockTokenBytes)
+ amqpUUID := uuid.UUID(lockTokenBytes)
+
+ return &amqpUUID, nil
+}
+
+func encodeStructureToMap(structPointer interface{}) (map[string]interface{}, error) {
+ valueOfStruct := reflect.ValueOf(structPointer)
+ s := valueOfStruct.Elem()
+ if s.Kind() != reflect.Struct {
+ return nil, fmt.Errorf("must provide a struct")
+ }
+
+ encoded := make(map[string]interface{})
+ for i := 0; i < s.NumField(); i++ {
+ f := s.Field(i)
+ if f.IsValid() && f.CanSet() {
+ tf := s.Type().Field(i)
+ tag, err := parseMapStructureTag(tf.Tag)
+ if err != nil {
+ return nil, err
+ }
+
+ if tag != nil {
+ switch f.Kind() {
+ case reflect.Ptr:
+ if !f.IsNil() || tag.PersistEmpty {
+ if f.IsNil() {
+ encoded[tag.Name] = nil
+ } else {
+ encoded[tag.Name] = f.Elem().Interface()
+ }
+ }
+ default:
+ if f.Interface() != reflect.Zero(f.Type()).Interface() || tag.PersistEmpty {
+ encoded[tag.Name] = f.Interface()
+ }
+ }
+ }
+ }
+ }
+
+ return encoded, nil
+}
+
+func parseMapStructureTag(tag reflect.StructTag) (*mapStructureTag, error) {
+ str, ok := tag.Lookup("mapstructure")
+ if !ok {
+ return nil, nil
+ }
+
+ mapTag := new(mapStructureTag)
+ split := strings.Split(str, ",")
+ mapTag.Name = strings.TrimSpace(split[0])
+
+ if len(split) > 1 {
+ for _, tagKey := range split[1:] {
+ switch tagKey {
+ case "persistempty":
+ mapTag.PersistEmpty = true
+ default:
+ return nil, fmt.Errorf("key %q is not understood", tagKey)
+ }
+ }
+ }
+ return mapTag, nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/message_session.go b/vendor/github.com/Azure/azure-service-bus-go/message_session.go
new file mode 100644
index 00000000..0e2f9e5a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/message_session.go
@@ -0,0 +1,208 @@
+package servicebus
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2/rpc"
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+)
+
+// MessageSession represents and allows for interaction with a Service Bus Session.
+type MessageSession struct {
+ *Receiver
+ entity EntityManagementAddresser
+ mu sync.RWMutex
+ sessionID *string
+ lockExpiration time.Time
+ done chan struct {
+ }
+ cancel sync.Once
+}
+
+func newMessageSession(r *Receiver, entity EntityManagementAddresser, sessionID *string) (retval *MessageSession, _ error) {
+ retval = &MessageSession{
+ Receiver: r,
+ entity: entity,
+ sessionID: sessionID,
+ lockExpiration: time.Now(),
+ done: make(chan struct{}),
+ }
+
+ return
+}
+
+// Close communicates that Handler receiving messages should no longer continue to be executed. This can happen when:
+// - A Handler recognizes that no further messages will come to this session.
+// - A Handler has given up on receiving more messages before a session. Future messages should be delegated to the next
+// available session client.
+func (ms *MessageSession) Close() {
+ ms.cancel.Do(func() {
+ close(ms.done)
+ })
+}
+
+// LockedUntil fetches the moment in time when the Session lock held by this Receiver will expire.
+func (ms *MessageSession) LockedUntil() time.Time {
+ ms.mu.RLock()
+ defer ms.mu.RUnlock()
+
+ return ms.lockExpiration
+}
+
+// RenewLock requests that the Service Bus Server renews this client's lock on an existing Session.
+func (ms *MessageSession) RenewLock(ctx context.Context) error {
+ ms.mu.Lock()
+ defer ms.mu.Unlock()
+
+ link, err := rpc.NewLinkWithSession(ms.Receiver.session.Session, ms.entity.ManagementPath())
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ "operation": "com.microsoft:renew-session-lock",
+ },
+ Value: map[string]interface{}{
+ "session-id": ms.SessionID(),
+ },
+ }
+
+ if deadline, ok := ctx.Deadline(); ok {
+ msg.ApplicationProperties["com.microsoft:server-timeout"] = uint(time.Until(deadline) / time.Millisecond)
+ }
+
+ resp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ if rawMessageValue, ok := resp.Message.Value.(map[string]interface{}); ok {
+ if rawExpiration, ok := rawMessageValue["expiration"]; ok {
+ if ms.lockExpiration, ok = rawExpiration.(time.Time); ok {
+ return nil
+ }
+ return errors.New("\"expiration\" not of expected type time.Time")
+ }
+ return errors.New("missing expected property \"expiration\" in \"Value\"")
+
+ }
+ return errors.New("value not of expected type map[string]interface{}")
+}
+
+// ListSessions will list all of the sessions available
+func (ms *MessageSession) ListSessions(ctx context.Context) ([]byte, error) {
+ link, err := rpc.NewLink(ms.Receiver.client, ms.entity.ManagementPath())
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ "operation": "com.microsoft:get-message-sessions",
+ },
+ Value: map[string]interface{}{
+ "last-updated-time": time.Now().UTC().Add(-30 * time.Minute),
+ "skip": int32(0),
+ "top": int32(100),
+ },
+ }
+
+ rsp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ if rsp.Code != 200 {
+ err := fmt.Errorf("amqp error (%d): %q", rsp.Code, rsp.Description)
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ return rsp.Message.Data[0], nil
+}
+
+// SetState updates the current State associated with this Session.
+func (ms *MessageSession) SetState(ctx context.Context, state []byte) error {
+ link, err := rpc.NewLinkWithSession(ms.Receiver.session.Session, ms.entity.ManagementPath())
+ if err != nil {
+ return err
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ "operation": "com.microsoft:set-session-state",
+ },
+ Properties: &amqp.MessageProperties{
+ GroupID: *ms.SessionID(),
+ },
+ Value: map[string]interface{}{
+ "session-id": ms.SessionID(),
+ "session-state": state,
+ },
+ }
+
+ rsp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ return err
+ }
+
+ if rsp.Code != 200 {
+ return fmt.Errorf("amqp error (%d): %q", rsp.Code, rsp.Description)
+ }
+ return nil
+}
+
+// State retrieves the current State associated with this Session.
+// https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-amqp-request-response#get-session-state
+func (ms *MessageSession) State(ctx context.Context) ([]byte, error) {
+ const sessionStateField = "session-state"
+
+ link, err := rpc.NewLinkWithSession(ms.Receiver.session.Session, ms.entity.ManagementPath())
+ if err != nil {
+ return []byte{}, err
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ "operation": "com.microsoft:get-session-state",
+ },
+ Value: map[string]interface{}{
+ "session-id": ms.SessionID(),
+ },
+ }
+
+ rsp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ return []byte{}, err
+ }
+
+ if rsp.Code != 200 {
+ return []byte{}, fmt.Errorf("amqp error (%d): %q", rsp.Code, rsp.Description)
+ }
+
+ if val, ok := rsp.Message.Value.(map[string]interface{}); ok {
+ if rawState, ok := val[sessionStateField]; ok {
+ if state, ok := rawState.([]byte); ok || rawState == nil {
+ return state, nil
+ }
+ return nil, newErrIncorrectType(sessionStateField, []byte{}, rawState)
+ }
+ return nil, ErrMissingField(sessionStateField)
+ }
+ return nil, newErrIncorrectType("value", map[string]interface{}{}, rsp.Message.Value)
+}
+
+// SessionID gets the unique identifier of the session being interacted with by this MessageSession.
+func (ms *MessageSession) SessionID() *string {
+ return ms.sessionID
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/mgmt.go b/vendor/github.com/Azure/azure-service-bus-go/mgmt.go
new file mode 100644
index 00000000..c2d08ebf
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/mgmt.go
@@ -0,0 +1,348 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "bytes"
+ "context"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/http/httputil"
+ "strings"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2/auth"
+ "github.com/devigned/tab"
+)
+
+const (
+ serviceBusSchema = "http://schemas.microsoft.com/netservices/2010/10/servicebus/connect"
+ schemaInstance = "http://www.w3.org/2001/XMLSchema-instance"
+ atomSchema = "http://www.w3.org/2005/Atom"
+ applicationXML = "application/xml"
+)
+
+type (
+ // entityManager provides CRUD functionality for Service Bus entities (Queues, Topics, Subscriptions...)
+ entityManager struct {
+ tokenProvider auth.TokenProvider
+ Host string
+ mwStack []MiddlewareFunc
+ }
+
+ // BaseEntityDescription provides common fields which are part of Queues, Topics and Subscriptions
+ BaseEntityDescription struct {
+ InstanceMetadataSchema *string `xml:"xmlns:i,attr,omitempty"`
+ ServiceBusSchema *string `xml:"xmlns,attr,omitempty"`
+ }
+
+ managementError struct {
+ XMLName xml.Name `xml:"Error"`
+ Code int `xml:"Code"`
+ Detail string `xml:"Detail"`
+ }
+
+ // CountDetails has current active (and other) messages for queue/topic.
+ CountDetails struct {
+ XMLName xml.Name `xml:"CountDetails"`
+ ActiveMessageCount *int32 `xml:"ActiveMessageCount,omitempty"`
+ DeadLetterMessageCount *int32 `xml:"DeadLetterMessageCount,omitempty"`
+ ScheduledMessageCount *int32 `xml:"ScheduledMessageCount,omitempty"`
+ TransferDeadLetterMessageCount *int32 `xml:"TransferDeadLetterMessageCount,omitempty"`
+ TransferMessageCount *int32 `xml:"TransferMessageCount,omitempty"`
+ }
+
+ // EntityStatus enumerates the values for entity status.
+ EntityStatus string
+
+ // MiddlewareFunc allows a consumer of the entity manager to inject handlers within the request / response pipeline
+ //
+ // The example below adds the atom xml content type to the request, calls the next middleware and returns the
+ // result.
+ //
+ // addAtomXMLContentType MiddlewareFunc = func(next RestHandler) RestHandler {
+ // return func(ctx context.Context, req *http.Request) (res *http.Response, e error) {
+ // if req.Method != http.MethodGet && req.Method != http.MethodHead {
+ // req.Header.Add("content-Type", "application/atom+xml;type=entry;charset=utf-8")
+ // }
+ // return next(ctx, req)
+ // }
+ // }
+ MiddlewareFunc func(next RestHandler) RestHandler
+
+ // RestHandler is used to transform a request and response within the http pipeline
+ RestHandler func(ctx context.Context, req *http.Request) (*http.Response, error)
+)
+
+var (
+ addAtomXMLContentType MiddlewareFunc = func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (res *http.Response, e error) {
+ if req.Method != http.MethodGet && req.Method != http.MethodHead {
+ req.Header.Add("content-Type", "application/atom+xml;type=entry;charset=utf-8")
+ }
+ return next(ctx, req)
+ }
+ }
+
+ addAPIVersion201704 MiddlewareFunc = func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (*http.Response, error) {
+ q := req.URL.Query()
+ q.Add("api-version", "2017-04")
+ req.URL.RawQuery = q.Encode()
+ return next(ctx, req)
+ }
+ }
+
+ applyTracing MiddlewareFunc = func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (*http.Response, error) {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.Middleware.ApplyTracing")
+ defer span.End()
+
+ applyRequestInfo(span, req)
+ res, err := next(ctx, req)
+ applyResponseInfo(span, res)
+ return res, err
+ }
+ }
+)
+
+const (
+ // Active ...
+ Active EntityStatus = "Active"
+ // Creating ...
+ Creating EntityStatus = "Creating"
+ // Deleting ...
+ Deleting EntityStatus = "Deleting"
+ // Disabled ...
+ Disabled EntityStatus = "Disabled"
+ // ReceiveDisabled ...
+ ReceiveDisabled EntityStatus = "ReceiveDisabled"
+ // Renaming ...
+ Renaming EntityStatus = "Renaming"
+ // Restoring ...
+ Restoring EntityStatus = "Restoring"
+ // SendDisabled ...
+ SendDisabled EntityStatus = "SendDisabled"
+ // Unknown ...
+ Unknown EntityStatus = "Unknown"
+)
+
+func (m *managementError) String() string {
+ return fmt.Sprintf("Code: %d, Details: %s", m.Code, m.Detail)
+}
+
+// newEntityManager creates a new instance of an entityManager given a token provider and host
+func newEntityManager(host string, tokenProvider auth.TokenProvider) *entityManager {
+ return &entityManager{
+ Host: host,
+ tokenProvider: tokenProvider,
+ mwStack: []MiddlewareFunc{
+ addAPIVersion201704,
+ addAtomXMLContentType,
+ addAuthorization(tokenProvider),
+ applyTracing,
+ },
+ }
+}
+
+// Get performs an HTTP Get for a given entity path
+func (em *entityManager) Get(ctx context.Context, entityPath string, mw ...MiddlewareFunc) (*http.Response, error) {
+ ctx, span := em.startSpanFromContext(ctx, "sb.EntityManger.Get")
+ defer span.End()
+
+ return em.Execute(ctx, http.MethodGet, entityPath, http.NoBody, mw...)
+}
+
+// Put performs an HTTP PUT for a given entity path and body
+func (em *entityManager) Put(ctx context.Context, entityPath string, body []byte, mw ...MiddlewareFunc) (*http.Response, error) {
+ ctx, span := em.startSpanFromContext(ctx, "sb.EntityManger.Put")
+ defer span.End()
+
+ return em.Execute(ctx, http.MethodPut, entityPath, bytes.NewReader(body), mw...)
+}
+
+// Delete performs an HTTP DELETE for a given entity path
+func (em *entityManager) Delete(ctx context.Context, entityPath string, mw ...MiddlewareFunc) (*http.Response, error) {
+ ctx, span := em.startSpanFromContext(ctx, "sb.EntityManger.Delete")
+ defer span.End()
+
+ return em.Execute(ctx, http.MethodDelete, entityPath, http.NoBody, mw...)
+}
+
+// Post performs an HTTP POST for a given entity path and body
+func (em *entityManager) Post(ctx context.Context, entityPath string, body []byte, mw ...MiddlewareFunc) (*http.Response, error) {
+ ctx, span := em.startSpanFromContext(ctx, "sb.EntityManger.Post")
+ defer span.End()
+
+ return em.Execute(ctx, http.MethodPost, entityPath, bytes.NewReader(body), mw...)
+}
+
+func (em *entityManager) Execute(ctx context.Context, method string, entityPath string, body io.Reader, mw ...MiddlewareFunc) (*http.Response, error) {
+ ctx, span := em.startSpanFromContext(ctx, "sb.EntityManger.Execute")
+ defer span.End()
+
+ req, err := http.NewRequest(method, em.Host+strings.TrimPrefix(entityPath, "/"), body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ final := func(_ RestHandler) RestHandler {
+ return func(reqCtx context.Context, request *http.Request) (*http.Response, error) {
+ client := &http.Client{
+ Timeout: 60 * time.Second,
+ }
+ request = request.WithContext(reqCtx)
+ return client.Do(request)
+ }
+ }
+
+ mwStack := []MiddlewareFunc{final}
+ sl := len(em.mwStack) - 1
+ for i := sl; i >= 0; i-- {
+ mwStack = append(mwStack, em.mwStack[i])
+ }
+
+ for i := len(mw) - 1; i >= 0; i-- {
+ mwStack = append(mwStack, mw[i])
+ }
+
+ var h RestHandler
+ for _, mw := range mwStack {
+ h = mw(h)
+ }
+
+ return h(ctx, req)
+}
+
+// Use adds middleware to the middleware mwStack
+func (em *entityManager) Use(mw ...MiddlewareFunc) {
+ em.mwStack = append(em.mwStack, mw...)
+}
+
+// TokenProvider generates authorization tokens for communicating with the Service Bus management API
+func (em *entityManager) TokenProvider() auth.TokenProvider {
+ return em.tokenProvider
+}
+
+func addAuthorization(tp auth.TokenProvider) MiddlewareFunc {
+ return func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (*http.Response, error) {
+ signature, err := tp.GetToken(req.URL.String())
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Add("Authorization", signature.Token)
+ return next(ctx, req)
+ }
+ }
+}
+
+func addSupplementalAuthorization(supplementalURI string, tp auth.TokenProvider) MiddlewareFunc {
+ return func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (*http.Response, error) {
+ signature, err := tp.GetToken(supplementalURI)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Add("ServiceBusSupplementaryAuthorization", signature.Token)
+ return next(ctx, req)
+ }
+ }
+}
+
+func addDeadLetterSupplementalAuthorization(targetURI string, tp auth.TokenProvider) MiddlewareFunc {
+ return func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (response *http.Response, e error) {
+ signature, err := tp.GetToken(targetURI)
+ if err != nil {
+ return nil, err
+ }
+
+ req.Header.Add("ServiceBusDlqSupplementaryAuthorization", signature.Token)
+ return next(ctx, req)
+ }
+ }
+}
+
+// TraceReqAndResponseMiddleware will print the dump of the management request and response.
+//
+// This should only be used for debugging or educational purposes.
+func TraceReqAndResponseMiddleware() MiddlewareFunc {
+ return func(next RestHandler) RestHandler {
+ return func(ctx context.Context, req *http.Request) (*http.Response, error) {
+ if dump, err := httputil.DumpRequest(req, true); err == nil {
+ fmt.Println(string(dump))
+ }
+
+ res, err := next(ctx, req)
+
+ if dump, err := httputil.DumpResponse(res, true); err == nil {
+ fmt.Println(string(dump))
+ }
+
+ return res, err
+ }
+ }
+}
+
+func isEmptyFeed(b []byte) bool {
+ var emptyFeed queueFeed
+ feedErr := xml.Unmarshal(b, &emptyFeed)
+ return feedErr == nil && emptyFeed.Title == "Publicly Listed Services"
+}
+
+func xmlDoc(content []byte) []byte {
+ return []byte(xml.Header + string(content))
+}
+
+// ptrBool takes a boolean and returns a pointer to that bool. For use in literal pointers, ptrBool(true) -> *bool
+func ptrBool(toPtr bool) *bool {
+ return &toPtr
+}
+
+// ptrString takes a string and returns a pointer to that string. For use in literal pointers,
+// ptrString(fmt.Sprintf("..", foo)) -> *string
+func ptrString(toPtr string) *string {
+ return &toPtr
+}
+
+// durationTo8601Seconds takes a duration and returns a string period of whole seconds (int cast of float)
+func durationTo8601Seconds(duration time.Duration) string {
+ return fmt.Sprintf("PT%dS", duration/time.Second)
+}
+
+func formatManagementError(body []byte) error {
+ var mgmtError managementError
+ unmarshalErr := xml.Unmarshal(body, &mgmtError)
+ if unmarshalErr != nil {
+ return errors.New(string(body))
+ }
+
+ return fmt.Errorf("error code: %d, Details: %s", mgmtError.Code, mgmtError.Detail)
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/namespace.go b/vendor/github.com/Azure/azure-service-bus-go/namespace.go
new file mode 100644
index 00000000..e3b45da6
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/namespace.go
@@ -0,0 +1,225 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "runtime"
+ "strings"
+
+ "github.com/Azure/azure-amqp-common-go/v2/auth"
+ "github.com/Azure/azure-amqp-common-go/v2/cbs"
+ "github.com/Azure/azure-amqp-common-go/v2/conn"
+ "github.com/Azure/azure-amqp-common-go/v2/sas"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "golang.org/x/net/websocket"
+ "pack.ag/amqp"
+)
+
+const (
+ // banner = `
+ // _____ _ ____
+ // / ___/___ ______ __(_)________ / __ )__ _______
+ // \__ \/ _ \/ ___/ | / / // ___/ _ \ / __ / / / / ___/
+ // ___/ / __/ / | |/ / // /__/ __/ / /_/ / /_/ (__ )
+ ///____/\___/_/ |___/_/ \___/\___/ /_____/\__,_/____/
+ //`
+
+ // Version is the semantic version number
+ Version = "0.9.1"
+
+ rootUserAgent = "/golang-service-bus"
+)
+
+type (
+ // Namespace provides a simplified facade over the AMQP implementation of Azure Service Bus and is the entry point
+ // for using Queues, Topics and Subscriptions
+ Namespace struct {
+ Name string
+ Suffix string
+ TokenProvider auth.TokenProvider
+ Environment azure.Environment
+ tlsConfig *tls.Config
+ userAgent string
+ useWebSocket bool
+ }
+
+ // NamespaceOption provides structure for configuring a new Service Bus namespace
+ NamespaceOption func(h *Namespace) error
+)
+
+// NamespaceWithConnectionString configures a namespace with the information provided in a Service Bus connection string
+func NamespaceWithConnectionString(connStr string) NamespaceOption {
+ return func(ns *Namespace) error {
+ parsed, err := conn.ParsedConnectionFromStr(connStr)
+ if err != nil {
+ return err
+ }
+
+ if parsed.Namespace != "" {
+ ns.Name = parsed.Namespace
+ }
+
+ if parsed.Suffix != "" {
+ ns.Suffix = parsed.Suffix
+ }
+
+ provider, err := sas.NewTokenProvider(sas.TokenProviderWithKey(parsed.KeyName, parsed.Key))
+ if err != nil {
+ return err
+ }
+
+ ns.TokenProvider = provider
+ return nil
+ }
+}
+
+// NamespaceWithTLSConfig appends to the TLS config.
+func NamespaceWithTLSConfig(tlsConfig *tls.Config) NamespaceOption {
+ return func(ns *Namespace) error {
+ ns.tlsConfig = tlsConfig
+ return nil
+ }
+}
+
+// NamespaceWithUserAgent appends to the root user-agent value.
+func NamespaceWithUserAgent(userAgent string) NamespaceOption {
+ return func(ns *Namespace) error {
+ ns.userAgent = userAgent
+ return nil
+ }
+}
+
+// NamespaceWithWebSocket configures the namespace and all entities to use wss:// rather than amqps://
+func NamespaceWithWebSocket() NamespaceOption {
+ return func(ns *Namespace) error {
+ ns.useWebSocket = true
+ return nil
+ }
+}
+
+// NewNamespace creates a new namespace configured through NamespaceOption(s)
+func NewNamespace(opts ...NamespaceOption) (*Namespace, error) {
+ ns := &Namespace{
+ Environment: azure.PublicCloud,
+ }
+
+ for _, opt := range opts {
+ err := opt(ns)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ return ns, nil
+}
+
+func (ns *Namespace) newClient() (*amqp.Client, error) {
+ defaultConnOptions := []amqp.ConnOption{
+ amqp.ConnSASLAnonymous(),
+ amqp.ConnMaxSessions(65535),
+ amqp.ConnProperty("product", "MSGolangClient"),
+ amqp.ConnProperty("version", Version),
+ amqp.ConnProperty("platform", runtime.GOOS),
+ amqp.ConnProperty("framework", runtime.Version()),
+ amqp.ConnProperty("user-agent", ns.getUserAgent()),
+ }
+
+ if ns.tlsConfig != nil {
+ defaultConnOptions = append(
+ defaultConnOptions,
+ amqp.ConnTLS(true),
+ amqp.ConnTLSConfig(ns.tlsConfig),
+ )
+ }
+
+ if ns.useWebSocket {
+ wssHost := ns.getWSSHostURI() + "$servicebus/websocket"
+ wssConn, err := websocket.Dial(wssHost, "amqp", "http://localhost/")
+ if err != nil {
+ return nil, err
+ }
+
+ wssConn.PayloadType = websocket.BinaryFrame
+ return amqp.New(wssConn, append(defaultConnOptions, amqp.ConnServerHostname(ns.getHostname()))...)
+ }
+
+ return amqp.Dial(ns.getAMQPHostURI(), defaultConnOptions...)
+}
+
+func (ns *Namespace) negotiateClaim(ctx context.Context, client *amqp.Client, entityPath string) error {
+ ctx, span := ns.startSpanFromContext(ctx, "sb.namespace.negotiateClaim")
+ defer span.End()
+
+ audience := ns.getEntityAudience(entityPath)
+ return cbs.NegotiateClaim(ctx, audience, client, ns.TokenProvider)
+}
+
+func (ns *Namespace) getWSSHostURI() string {
+ suffix := ns.resolveSuffix()
+ if strings.HasSuffix(suffix, "onebox.windows-int.net") {
+ return fmt.Sprintf("wss://%s:4446/", ns.getHostname())
+ }
+ return fmt.Sprintf("wss://%s/", ns.getHostname())
+}
+
+func (ns *Namespace) getAMQPHostURI() string {
+ return fmt.Sprintf("amqps://%s/", ns.getHostname())
+}
+
+func (ns *Namespace) getHTTPSHostURI() string {
+ suffix := ns.resolveSuffix()
+ if strings.HasSuffix(suffix, "onebox.windows-int.net") {
+ return fmt.Sprintf("https://%s:4446/", ns.getHostname())
+ }
+ return fmt.Sprintf("https://%s/", ns.getHostname())
+}
+
+func (ns *Namespace) getHostname() string {
+ return strings.Join([]string{ns.Name, ns.resolveSuffix()}, ".")
+}
+
+func (ns *Namespace) getEntityAudience(entityPath string) string {
+ return ns.getAMQPHostURI() + entityPath
+}
+
+func (ns *Namespace) getUserAgent() string {
+ userAgent := rootUserAgent
+ if ns.userAgent != "" {
+ userAgent = fmt.Sprintf("%s/%s", userAgent, ns.userAgent)
+ }
+ return userAgent
+}
+
+func (ns *Namespace) resolveSuffix() string {
+ var suffix string
+ if ns.Suffix != "" {
+ suffix = ns.Suffix
+ } else {
+ suffix = azure.PublicCloud.ServiceBusEndpointSuffix
+ }
+
+ return suffix
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/operation_constants.go b/vendor/github.com/Azure/azure-service-bus-go/operation_constants.go
new file mode 100644
index 00000000..d50b4c47
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/operation_constants.go
@@ -0,0 +1,18 @@
+package servicebus
+
+const vendorPrefix = "com.microsoft:"
+
+// Operations
+const (
+ lockRenewalOperationName = vendorPrefix + "renew-lock"
+ peekMessageOperationID = vendorPrefix + "peek-message"
+ scheduleMessageOperationID = vendorPrefix + "schedule-message"
+ cancelScheduledOperationID = vendorPrefix + "cancel-scheduled-message"
+)
+
+// Field Descriptions
+const (
+ operationFieldName = "operation"
+ lockTokensFieldName = "lock-tokens"
+ serverTimeoutFieldName = vendorPrefix + "server-timeout"
+)
diff --git a/vendor/github.com/Azure/azure-service-bus-go/queue.go b/vendor/github.com/Azure/azure-service-bus-go/queue.go
new file mode 100644
index 00000000..f2724a05
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/queue.go
@@ -0,0 +1,416 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "encoding/xml"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/devigned/tab"
+)
+
+type (
+
+ // Queue represents a Service Bus Queue entity, which offers First In, First Out (FIFO) message delivery to one or
+ // more competing consumers. That is, messages are typically expected to be received and processed by the receivers
+ // in the order in which they were added to the queue, and each message is received and processed by only one
+ // message consumer.
+ Queue struct {
+ *sendAndReceiveEntity
+ sender *Sender
+ receiver *Receiver
+ receiverMu sync.Mutex
+ senderMu sync.Mutex
+ receiveMode ReceiveMode
+ prefetchCount *uint32
+ }
+
+ // queueContent is a specialized Queue body for an Atom entry
+ queueContent struct {
+ XMLName xml.Name `xml:"content"`
+ Type string `xml:"type,attr"`
+ QueueDescription QueueDescription `xml:"QueueDescription"`
+ }
+
+ // QueueDescription is the content type for Queue management requests
+ QueueDescription struct {
+ XMLName xml.Name `xml:"QueueDescription"`
+ BaseEntityDescription
+ LockDuration *string `xml:"LockDuration,omitempty"` // LockDuration - ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default value is 1 minute.
+ MaxSizeInMegabytes *int32 `xml:"MaxSizeInMegabytes,omitempty"` // MaxSizeInMegabytes - The maximum size of the queue in megabytes, which is the size of memory allocated for the queue. Default is 1024.
+ RequiresDuplicateDetection *bool `xml:"RequiresDuplicateDetection,omitempty"` // RequiresDuplicateDetection - A value indicating if this queue requires duplicate detection.
+ RequiresSession *bool `xml:"RequiresSession,omitempty"`
+ DefaultMessageTimeToLive *string `xml:"DefaultMessageTimeToLive,omitempty"` // DefaultMessageTimeToLive - ISO 8601 default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
+ DeadLetteringOnMessageExpiration *bool `xml:"DeadLetteringOnMessageExpiration,omitempty"` // DeadLetteringOnMessageExpiration - A value that indicates whether this queue has dead letter support when a message expires.
+ DuplicateDetectionHistoryTimeWindow *string `xml:"DuplicateDetectionHistoryTimeWindow,omitempty"` // DuplicateDetectionHistoryTimeWindow - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
+ MaxDeliveryCount *int32 `xml:"MaxDeliveryCount,omitempty"` // MaxDeliveryCount - The maximum delivery count. A message is automatically deadlettered after this number of deliveries. default value is 10.
+ EnableBatchedOperations *bool `xml:"EnableBatchedOperations,omitempty"` // EnableBatchedOperations - Value that indicates whether server-side batched operations are enabled.
+ SizeInBytes *int64 `xml:"SizeInBytes,omitempty"` // SizeInBytes - The size of the queue, in bytes.
+ MessageCount *int64 `xml:"MessageCount,omitempty"` // MessageCount - The number of messages in the queue.
+ IsAnonymousAccessible *bool `xml:"IsAnonymousAccessible,omitempty"`
+ Status *EntityStatus `xml:"Status,omitempty"`
+ CreatedAt *date.Time `xml:"CreatedAt,omitempty"`
+ UpdatedAt *date.Time `xml:"UpdatedAt,omitempty"`
+ SupportOrdering *bool `xml:"SupportOrdering,omitempty"`
+ AutoDeleteOnIdle *string `xml:"AutoDeleteOnIdle,omitempty"`
+ EnablePartitioning *bool `xml:"EnablePartitioning,omitempty"`
+ EnableExpress *bool `xml:"EnableExpress,omitempty"`
+ CountDetails *CountDetails `xml:"CountDetails,omitempty"`
+ ForwardTo *string `xml:"ForwardTo,omitempty"`
+ ForwardDeadLetteredMessagesTo *string `xml:"ForwardDeadLetteredMessagesTo,omitempty"` // ForwardDeadLetteredMessagesTo - absolute URI of the entity to forward dead letter messages
+ }
+
+ // QueueOption represents named options for assisting Queue message handling
+ QueueOption func(*Queue) error
+
+ // ReceiveMode represents the behavior when consuming a message from a queue
+ ReceiveMode int
+
+ entityConnector interface {
+ EntityManagementAddresser
+ Namespace() *Namespace
+ getEntity() *entity
+ }
+)
+
+const (
+ // PeekLockMode causes a Receiver to peek at a message, lock it so no others can consume and have the queue wait for
+ // the DispositionAction
+ PeekLockMode ReceiveMode = 0
+ // ReceiveAndDeleteMode causes a Receiver to pop messages off of the queue without waiting for DispositionAction
+ ReceiveAndDeleteMode ReceiveMode = 1
+
+ // DeadLetterQueueName is the name of the dead letter queue to be appended to the entity path
+ DeadLetterQueueName = "$DeadLetterQueue"
+
+ // TransferDeadLetterQueueName is the name of the transfer dead letter queue which is appended to the entity name to
+ // build the full address of the transfer dead letter queue.
+ TransferDeadLetterQueueName = "$Transfer/" + DeadLetterQueueName
+)
+
+// QueueWithReceiveAndDelete configures a queue to pop and delete messages off of the queue upon receiving the message.
+// This differs from the default, PeekLock, where PeekLock receives a message, locks it for a period of time, then sends
+// a disposition to the broker when the message has been processed.
+func QueueWithReceiveAndDelete() QueueOption {
+ return func(q *Queue) error {
+ q.receiveMode = ReceiveAndDeleteMode
+ return nil
+ }
+}
+
+// QueueWithPrefetchCount configures the queue to attempt to fetch the number of messages specified by the
+// prefetch count at one time.
+//
+// The default is 1 message at a time.
+//
+// Caution: Using PeekLock, messages have a set lock timeout, which can be renewed. By setting a high prefetch count, a
+// local queue of messages could build up and cause message locks to expire before the message lands in the handler. If
+// this happens, the message disposition will fail and will be re-queued and processed again.
+func QueueWithPrefetchCount(prefetch uint32) QueueOption {
+ return func(q *Queue) error {
+ q.prefetchCount = &prefetch
+ return nil
+ }
+}
+
+// NewQueue creates a new Queue Sender / Receiver
+func (ns *Namespace) NewQueue(name string, opts ...QueueOption) (*Queue, error) {
+ entity := newEntity(name, queueManagementPath(name), ns)
+ queue := &Queue{
+ sendAndReceiveEntity: newSendAndReceiveEntity(entity),
+ receiveMode: PeekLockMode,
+ }
+
+ for _, opt := range opts {
+ if err := opt(queue); err != nil {
+ return nil, err
+ }
+ }
+ return queue, nil
+}
+
+// Send sends messages to the Queue
+func (q *Queue) Send(ctx context.Context, msg *Message) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.Send")
+ defer span.End()
+
+ err := q.ensureSender(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ return q.sender.Send(ctx, msg)
+}
+
+// SendBatch sends a batch of messages to the Queue
+func (q *Queue) SendBatch(ctx context.Context, iterator BatchIterator) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.SendBatch")
+ defer span.End()
+
+ err := q.ensureSender(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ for !iterator.Done() {
+ id, err := uuid.NewV4()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ batch, err := iterator.Next(id.String(), &BatchOptions{
+ SessionID: q.sender.sessionID,
+ })
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ if err := q.sender.trySend(ctx, batch); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// ReceiveOne will listen to receive a single message. ReceiveOne will only wait as long as the context allows.
+//
+// Handler must call a disposition action such as Complete, Abandon, Deadletter on the message. If the messages does not
+// have a disposition set, the Queue's DefaultDisposition will be used.
+func (q *Queue) ReceiveOne(ctx context.Context, handler Handler) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.ReceiveOne")
+ defer span.End()
+
+ if err := q.ensureReceiver(ctx); err != nil {
+ return err
+ }
+
+ return q.receiver.ReceiveOne(ctx, handler)
+}
+
+// Receive subscribes for messages sent to the Queue. If the messages not within a session, messages will arrive
+// unordered.
+//
+// Handler must call a disposition action such as Complete, Abandon, Deadletter on the message. If the messages does not
+// have a disposition set, the Queue's DefaultDisposition will be used.
+//
+// If the handler returns an error, the receive loop will be terminated.
+func (q *Queue) Receive(ctx context.Context, handler Handler) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.Receive")
+ defer span.End()
+
+ err := q.ensureReceiver(ctx)
+ if err != nil {
+ return err
+ }
+
+ handle := q.receiver.Listen(ctx, handler)
+ <-handle.Done()
+ return handle.Err()
+}
+
+// NewSession will create a new session based receiver and sender for the queue
+//
+// Microsoft Azure Service Bus sessions enable joint and ordered handling of unbounded sequences of related messages.
+// To realize a FIFO guarantee in Service Bus, use Sessions. Service Bus is not prescriptive about the nature of the
+// relationship between the messages, and also does not define a particular model for determining where a message
+// sequence starts or ends.
+func (q *Queue) NewSession(sessionID *string) *QueueSession {
+ return NewQueueSession(q, sessionID)
+}
+
+// NewReceiver will create a new Receiver for receiving messages off of a queue
+func (q *Queue) NewReceiver(ctx context.Context, opts ...ReceiverOption) (*Receiver, error) {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.NewReceiver")
+ defer span.End()
+
+ opts = append(opts, ReceiverWithReceiveMode(q.receiveMode))
+ return q.namespace.NewReceiver(ctx, q.Name, opts...)
+}
+
+// NewSender will create a new Sender for sending messages to the queue
+func (q *Queue) NewSender(ctx context.Context, opts ...SenderOption) (*Sender, error) {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.NewSender")
+ defer span.End()
+
+ return q.namespace.NewSender(ctx, q.Name)
+}
+
+// NewDeadLetter creates an entity that represents the dead letter sub queue of the queue
+//
+// Azure Service Bus queues and topic subscriptions provide a secondary sub-queue, called a dead-letter queue
+// (DLQ). The dead-letter queue does not need to be explicitly created and cannot be deleted or otherwise managed
+// independent of the main entity.
+//
+// The purpose of the dead-letter queue is to hold messages that cannot be delivered to any receiver, or messages
+// that could not be processed. Messages can then be removed from the DLQ and inspected. An application might, with
+// help of an operator, correct issues and resubmit the message, log the fact that there was an error, and take
+// corrective action.
+//
+// From an API and protocol perspective, the DLQ is mostly similar to any other queue, except that messages can only
+// be submitted via the dead-letter operation of the parent entity. In addition, time-to-live is not observed, and
+// you can't dead-letter a message from a DLQ. The dead-letter queue fully supports peek-lock delivery and
+// transactional operations.
+//
+// Note that there is no automatic cleanup of the DLQ. Messages remain in the DLQ until you explicitly retrieve
+// them from the DLQ and call Complete() on the dead-letter message.
+func (q *Queue) NewDeadLetter() *DeadLetter {
+ return NewDeadLetter(q)
+}
+
+// NewDeadLetterReceiver builds a receiver for the Queue's dead letter queue
+func (q *Queue) NewDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error) {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.NewDeadLetterReceiver")
+ defer span.End()
+
+ deadLetterEntityPath := strings.Join([]string{q.Name, DeadLetterQueueName}, "/")
+ return q.namespace.NewReceiver(ctx, deadLetterEntityPath, opts...)
+}
+
+// NewTransferDeadLetter creates an entity that represents the transfer dead letter sub queue of the queue
+//
+// Messages will be sent to the transfer dead-letter queue under the following conditions:
+// - A message passes through more than 3 queues or topics that are chained together.
+// - The destination queue or topic is disabled or deleted.
+// - The destination queue or topic exceeds the maximum entity size.
+func (q *Queue) NewTransferDeadLetter() *TransferDeadLetter {
+ return NewTransferDeadLetter(q)
+}
+
+// NewTransferDeadLetterReceiver builds a receiver for the Queue's transfer dead letter queue
+//
+// Messages will be sent to the transfer dead-letter queue under the following conditions:
+// - A message passes through more than 3 queues or topics that are chained together.
+// - The destination queue or topic is disabled or deleted.
+// - The destination queue or topic exceeds the maximum entity size.
+func (q *Queue) NewTransferDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error) {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.NewTransferDeadLetterReceiver")
+ defer span.End()
+
+ transferDeadLetterEntityPath := strings.Join([]string{q.Name, TransferDeadLetterQueueName}, "/")
+ return q.namespace.NewReceiver(ctx, transferDeadLetterEntityPath, opts...)
+}
+
+// Close the underlying connection to Service Bus
+func (q *Queue) Close(ctx context.Context) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.Close")
+ defer span.End()
+
+ var lastErr error
+ if q.receiver != nil {
+ if err := q.receiver.Close(ctx); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ q.receiver = nil
+ }
+
+ if q.sender != nil {
+ if err := q.sender.Close(ctx); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ q.sender = nil
+ }
+
+ if q.rpcClient != nil {
+ if err := q.rpcClient.Close(); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ q.rpcClient = nil
+ }
+
+ return lastErr
+}
+
+func isConnectionClosed(err error) bool {
+ return err.Error() == "amqp: connection closed"
+}
+
+func (q *Queue) newReceiver(ctx context.Context, opts ...ReceiverOption) (*Receiver, error) {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.NewReceiver")
+ defer span.End()
+
+ opts = append(opts, ReceiverWithReceiveMode(q.receiveMode))
+
+ if q.prefetchCount != nil {
+ opts = append(opts, ReceiverWithPrefetchCount(*q.prefetchCount))
+ }
+
+ return q.namespace.NewReceiver(ctx, q.Name, opts...)
+}
+
+func (q *Queue) ensureReceiver(ctx context.Context, opts ...ReceiverOption) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.ensureReceiver")
+ defer span.End()
+
+ q.receiverMu.Lock()
+ defer q.receiverMu.Unlock()
+
+ if q.receiver != nil {
+ return nil
+ }
+
+ receiver, err := q.newReceiver(ctx, opts...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ q.receiver = receiver
+ return nil
+}
+
+func (q *Queue) ensureSender(ctx context.Context) error {
+ ctx, span := q.startSpanFromContext(ctx, "sb.Queue.ensureSender")
+ defer span.End()
+
+ q.senderMu.Lock()
+ defer q.senderMu.Unlock()
+
+ if q.sender != nil {
+ return nil
+ }
+
+ s, err := q.NewSender(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ q.sender = s
+ return nil
+}
+
+func queueManagementPath(qName string) string {
+ return fmt.Sprintf("%s/$management", qName)
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/queue_manager.go b/vendor/github.com/Azure/azure-service-bus-go/queue_manager.go
new file mode 100644
index 00000000..38aa3321
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/queue_manager.go
@@ -0,0 +1,367 @@
+package servicebus
+
+import (
+ "context"
+ "encoding/xml"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/devigned/tab"
+
+ "github.com/Azure/azure-service-bus-go/atom"
+)
+
+type (
+ // QueueManager provides CRUD functionality for Service Bus Queues
+ QueueManager struct {
+ *entityManager
+ }
+
+ // Entity is represents the most basic form of an Azure Service Bus entity.
+ Entity struct {
+ Name string
+ ID string
+ }
+
+ // QueueEntity is the Azure Service Bus description of a Queue for management activities
+ QueueEntity struct {
+ *QueueDescription
+ *Entity
+ }
+
+ // queueFeed is a specialized feed containing QueueEntries
+ queueFeed struct {
+ *atom.Feed
+ Entries []queueEntry `xml:"entry"`
+ }
+
+ // queueEntry is a specialized Queue feed entry
+ queueEntry struct {
+ *atom.Entry
+ Content *queueContent `xml:"content"`
+ }
+
+ // QueueManagementOption represents named configuration options for queue mutation
+ QueueManagementOption func(*QueueDescription) error
+
+ // Targetable provides the ability to forward messages to the entity
+ Targetable interface {
+ TargetURI() string
+ }
+)
+
+// TargetURI provides an absolute address to a target entity
+func (e Entity) TargetURI() string {
+ split := strings.Split(e.ID, "?")
+ return split[0]
+}
+
+func queueEntryToEntity(entry *queueEntry) *QueueEntity {
+ return &QueueEntity{
+ QueueDescription: &entry.Content.QueueDescription,
+ Entity: &Entity{
+ Name: entry.Title,
+ ID: entry.ID,
+ },
+ }
+}
+
+/*
+QueueEntityWithPartitioning ensure the created queue will be a partitioned queue. Partitioned queues offer increased
+storage and availability compared to non-partitioned queues with the trade-off of requiring the following to ensure
+FIFO message retrieval:
+
+SessionId. If a message has the SessionId property set, then Service Bus uses the SessionId property as the
+partition key. This way, all messages that belong to the same session are assigned to the same fragment and handled
+by the same message broker. This allows Service Bus to guarantee message ordering as well as the consistency of
+session states.
+
+PartitionKey. If a message has the PartitionKey property set but not the SessionId property, then Service Bus uses
+the PartitionKey property as the partition key. Use the PartitionKey property to send non-sessionful transactional
+messages. The partition key ensures that all messages that are sent within a transaction are handled by the same
+messaging broker.
+
+MessageId. If the queue has the RequiresDuplicationDetection property set to true, then the MessageId
+property serves as the partition key if the SessionId or a PartitionKey properties are not set. This ensures that
+all copies of the same message are handled by the same message broker and, thus, allows Service Bus to detect and
+eliminate duplicate messages
+*/
+func QueueEntityWithPartitioning() QueueManagementOption {
+ return func(queue *QueueDescription) error {
+ queue.EnablePartitioning = ptrBool(true)
+ return nil
+ }
+}
+
+// QueueEntityWithMaxSizeInMegabytes configures the maximum size of the queue in megabytes (1 * 1024 - 5 * 1024), which is the size of
+// the memory allocated for the queue. Default is 1 MB (1 * 1024).
+//
+// size must be between 1024 and 5 * 1024 for the Standard sku and up to 80 * 1024 for Premium sku
+func QueueEntityWithMaxSizeInMegabytes(size int) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ if size < 1024 || size > 80*1024 {
+ return errors.New("QueueEntityWithMaxSizeInMegabytes: must be between 1024 and 5 * 1024 for the Standard sku and up to 80 * 1024 for Premium sku")
+ }
+ int32Size := int32(size)
+ q.MaxSizeInMegabytes = &int32Size
+ return nil
+ }
+}
+
+// QueueEntityWithDuplicateDetection configures the queue to detect duplicates for a given time window. If window
+// is not specified, then it uses the default of 10 minutes.
+func QueueEntityWithDuplicateDetection(window *time.Duration) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ q.RequiresDuplicateDetection = ptrBool(true)
+ if window != nil {
+ q.DuplicateDetectionHistoryTimeWindow = ptrString(durationTo8601Seconds(*window))
+ }
+ return nil
+ }
+}
+
+// QueueEntityWithRequiredSessions will ensure the queue requires senders and receivers to have sessionIDs
+func QueueEntityWithRequiredSessions() QueueManagementOption {
+ return func(q *QueueDescription) error {
+ q.RequiresSession = ptrBool(true)
+ return nil
+ }
+}
+
+// QueueEntityWithDeadLetteringOnMessageExpiration will ensure the queue sends expired messages to the dead letter queue
+func QueueEntityWithDeadLetteringOnMessageExpiration() QueueManagementOption {
+ return func(q *QueueDescription) error {
+ q.DeadLetteringOnMessageExpiration = ptrBool(true)
+ return nil
+ }
+}
+
+// QueueEntityWithAutoDeleteOnIdle configures the queue to automatically delete after the specified idle interval. The
+// minimum duration is 5 minutes.
+func QueueEntityWithAutoDeleteOnIdle(window *time.Duration) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ if window != nil {
+ if window.Minutes() < 5 {
+ return errors.New("QueueEntityWithAutoDeleteOnIdle: window must be greater than 5 minutes")
+ }
+ q.AutoDeleteOnIdle = ptrString(durationTo8601Seconds(*window))
+ }
+ return nil
+ }
+}
+
+// QueueEntityWithMessageTimeToLive configures the queue to set a time to live on messages. This is the duration after which
+// the message expires, starting from when the message is sent to Service Bus. This is the default value used when
+// TimeToLive is not set on a message itself. If nil, defaults to 14 days.
+func QueueEntityWithMessageTimeToLive(window *time.Duration) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ if window == nil {
+ duration := time.Duration(14 * 24 * time.Hour)
+ window = &duration
+ }
+ q.DefaultMessageTimeToLive = ptrString(durationTo8601Seconds(*window))
+ return nil
+ }
+}
+
+// QueueEntityWithLockDuration configures the queue to have a duration of a peek-lock; that is, the amount of time that the
+// message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default value is 1
+// minute.
+func QueueEntityWithLockDuration(window *time.Duration) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ if window == nil {
+ duration := time.Duration(1 * time.Minute)
+ window = &duration
+ }
+ q.LockDuration = ptrString(durationTo8601Seconds(*window))
+ return nil
+ }
+}
+
+// QueueEntityWithAutoForward configures the queue to automatically forward messages to the specified target.
+//
+// The ability to AutoForward to a target requires the connection have management authorization. If the connection
+// string or Azure Active Directory identity used does not have management authorization, an unauthorized error will be
+// returned on the PUT.
+func QueueEntityWithAutoForward(target Targetable) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ uri := target.TargetURI()
+ q.ForwardTo = &uri
+ return nil
+ }
+}
+
+// QueueEntityWithForwardDeadLetteredMessagesTo configures the queue to automatically forward dead letter messages to
+// the specified target.
+//
+// The ability to forward dead letter messages to a target requires the connection have management authorization. If
+// the connection string or Azure Active Directory identity used does not have management authorization, an unauthorized
+// error will be returned on the PUT.
+func QueueEntityWithForwardDeadLetteredMessagesTo(target Targetable) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ uri := target.TargetURI()
+ q.ForwardDeadLetteredMessagesTo = &uri
+ return nil
+ }
+}
+
+// QueueEntityWithMaxDeliveryCount configures the queue to have a maximum number of delivery attempts before
+// dead-lettering the message
+func QueueEntityWithMaxDeliveryCount(count int32) QueueManagementOption {
+ return func(q *QueueDescription) error {
+ q.MaxDeliveryCount = &count
+ return nil
+ }
+}
+
+// NewQueueManager creates a new QueueManager for a Service Bus Namespace
+func (ns *Namespace) NewQueueManager() *QueueManager {
+ return &QueueManager{
+ entityManager: newEntityManager(ns.getHTTPSHostURI(), ns.TokenProvider),
+ }
+}
+
+// Delete deletes a Service Bus Queue entity by name
+func (qm *QueueManager) Delete(ctx context.Context, name string) error {
+ ctx, span := qm.startSpanFromContext(ctx, "sb.QueueManager.Delete")
+ defer span.End()
+
+ res, err := qm.entityManager.Delete(ctx, "/"+name)
+ defer closeRes(ctx, res)
+
+ return err
+}
+
+// Put creates or updates a Service Bus Queue
+func (qm *QueueManager) Put(ctx context.Context, name string, opts ...QueueManagementOption) (*QueueEntity, error) {
+ ctx, span := qm.startSpanFromContext(ctx, "sb.QueueManager.Put")
+ defer span.End()
+
+ qd := new(QueueDescription)
+ for _, opt := range opts {
+ if err := opt(qd); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+ }
+
+ qd.ServiceBusSchema = to.StringPtr(serviceBusSchema)
+
+ qe := &queueEntry{
+ Entry: &atom.Entry{
+ AtomSchema: atomSchema,
+ },
+ Content: &queueContent{
+ Type: applicationXML,
+ QueueDescription: *qd,
+ },
+ }
+
+ var mw []MiddlewareFunc
+ if qd.ForwardTo != nil {
+ mw = append(mw, addSupplementalAuthorization(*qd.ForwardTo, qm.TokenProvider()))
+ }
+
+ if qd.ForwardDeadLetteredMessagesTo != nil {
+ mw = append(mw, addDeadLetterSupplementalAuthorization(*qd.ForwardDeadLetteredMessagesTo, qm.TokenProvider()))
+ }
+
+ reqBytes, err := xml.Marshal(qe)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ reqBytes = xmlDoc(reqBytes)
+ res, err := qm.entityManager.Put(ctx, "/"+name, reqBytes, mw...)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var entry queueEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+ return queueEntryToEntity(&entry), nil
+}
+
+// List fetches all of the queues for a Service Bus Namespace
+func (qm *QueueManager) List(ctx context.Context) ([]*QueueEntity, error) {
+ ctx, span := qm.startSpanFromContext(ctx, "sb.QueueManager.List")
+ defer span.End()
+
+ res, err := qm.entityManager.Get(ctx, `/$Resources/Queues`)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var feed queueFeed
+ err = xml.Unmarshal(b, &feed)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+
+ qd := make([]*QueueEntity, len(feed.Entries))
+ for idx, entry := range feed.Entries {
+ qd[idx] = queueEntryToEntity(&entry)
+ }
+ return qd, nil
+}
+
+// Get fetches a Service Bus Queue entity by name
+func (qm *QueueManager) Get(ctx context.Context, name string) (*QueueEntity, error) {
+ ctx, span := qm.startSpanFromContext(ctx, "sb.QueueManager.Get")
+ defer span.End()
+
+ res, err := qm.entityManager.Get(ctx, name)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ if res.StatusCode == http.StatusNotFound {
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var entry queueEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ if isEmptyFeed(b) {
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+ return nil, formatManagementError(b)
+ }
+
+ return queueEntryToEntity(&entry), nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/receiver.go b/vendor/github.com/Azure/azure-service-bus-go/receiver.go
new file mode 100644
index 00000000..1fef267a
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/receiver.go
@@ -0,0 +1,478 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "sync"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2"
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+)
+
+type (
+ // Receiver provides connection, session and link handling for a receiving to an entity path
+ Receiver struct {
+ namespace *Namespace
+ client *amqp.Client
+ clientMu sync.RWMutex
+ session *session
+ receiver *amqp.Receiver
+ entityPath string
+ doneListening func()
+ Name string
+ useSessions bool
+ sessionID *string
+ lastError error
+ mode ReceiveMode
+ prefetch uint32
+ DefaultDisposition DispositionAction
+ Closed bool
+ doneRefreshingAuth func()
+ }
+
+ // ReceiverOption provides a structure for configuring receivers
+ ReceiverOption func(receiver *Receiver) error
+
+ // ListenerHandle provides the ability to close or listen to the close of a Receiver
+ ListenerHandle struct {
+ r *Receiver
+ ctx context.Context
+ }
+)
+
+// ReceiverWithSession configures a Receiver to use a session
+func ReceiverWithSession(sessionID *string) ReceiverOption {
+ return func(r *Receiver) error {
+ r.sessionID = sessionID
+ r.useSessions = true
+ return nil
+ }
+}
+
+// ReceiverWithReceiveMode configures a Receiver to use the specified receive mode
+func ReceiverWithReceiveMode(mode ReceiveMode) ReceiverOption {
+ return func(r *Receiver) error {
+ r.mode = mode
+ return nil
+ }
+}
+
+// ReceiverWithPrefetchCount configures the receiver to attempt to fetch the number of messages specified by the prefect
+// at one time.
+//
+// The default is 1 message at a time.
+//
+// Caution: Using PeekLock, messages have a set lock timeout, which can be renewed. By setting a high prefetch count, a
+// local queue of messages could build up and cause message locks to expire before the message lands in the handler. If
+// this happens, the message disposition will fail and will be re-queued and processed again.
+func ReceiverWithPrefetchCount(prefetch uint32) ReceiverOption {
+ return func(receiver *Receiver) error {
+ receiver.prefetch = prefetch
+ return nil
+ }
+}
+
+// NewReceiver creates a new Service Bus message listener given an AMQP client and an entity path
+func (ns *Namespace) NewReceiver(ctx context.Context, entityPath string, opts ...ReceiverOption) (*Receiver, error) {
+ ctx, span := ns.startSpanFromContext(ctx, "sb.Namespace.NewReceiver")
+ defer span.End()
+
+ r := &Receiver{
+ namespace: ns,
+ entityPath: entityPath,
+ mode: PeekLockMode,
+ prefetch: 1,
+ }
+
+ for _, opt := range opts {
+ if err := opt(r); err != nil {
+ return nil, err
+ }
+ }
+
+ err := r.newSessionAndLink(ctx)
+ if err != nil {
+ _ = r.Close(ctx)
+ return nil, err
+ }
+
+ r.periodicallyRefreshAuth()
+
+ return r, nil
+}
+
+// Close will close the AMQP session and link of the Receiver
+func (r *Receiver) Close(ctx context.Context) error {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.Close")
+ defer span.End()
+
+ r.clientMu.Lock()
+ defer r.clientMu.Unlock()
+
+ if r.doneListening != nil {
+ r.doneListening()
+ }
+
+ if r.doneRefreshingAuth != nil {
+ r.doneRefreshingAuth()
+ }
+
+ r.Closed = true
+
+ var lastErr error
+ if r.receiver != nil {
+ lastErr = r.receiver.Close(ctx)
+ if lastErr != nil {
+ tab.For(ctx).Error(lastErr)
+ }
+ }
+
+ if r.session != nil {
+ if err := r.session.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ if r.client != nil {
+ if err := r.client.Close(); err != nil {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ r.receiver = nil
+ r.session = nil
+ r.client = nil
+
+ return lastErr
+}
+
+// Recover will attempt to close the current session and link, then rebuild them
+func (r *Receiver) Recover(ctx context.Context) error {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.Recover")
+ defer span.End()
+
+ // we expect the Sender, session or client is in an error state, ignore errors
+ closeCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ closeCtx = tab.NewContext(closeCtx, span)
+ defer cancel()
+ _ = r.Close(ctx)
+ return r.newSessionAndLink(ctx)
+}
+
+// ReceiveOne will receive one message from the link
+func (r *Receiver) ReceiveOne(ctx context.Context, handler Handler) error {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.ReceiveOne")
+ defer span.End()
+
+ amqpMsg, err := r.listenForMessage(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ r.handleMessage(ctx, amqpMsg, handler)
+
+ return nil
+}
+
+// Listen start a listener for messages sent to the entity path
+func (r *Receiver) Listen(ctx context.Context, handler Handler) *ListenerHandle {
+ ctx, done := context.WithCancel(ctx)
+ r.doneListening = done
+
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.Listen")
+ defer span.End()
+
+ messages := make(chan *amqp.Message)
+ go r.listenForMessages(ctx, messages)
+ go r.handleMessages(ctx, messages, handler)
+
+ return &ListenerHandle{
+ r: r,
+ ctx: ctx,
+ }
+}
+
+func (r *Receiver) handleMessages(ctx context.Context, messages chan *amqp.Message, handler Handler) {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.handleMessages")
+ defer span.End()
+ for msg := range messages {
+ r.handleMessage(ctx, msg, handler)
+ }
+}
+
+func (r *Receiver) handleMessage(ctx context.Context, msg *amqp.Message, handler Handler) {
+ const optName = "sb.Receiver.handleMessage"
+
+ event, err := messageFromAMQPMessage(msg)
+ if err != nil {
+ _, span := r.startConsumerSpanFromContext(ctx, optName)
+ span.Logger().Error(err)
+ r.lastError = err
+ r.doneListening()
+ return
+ }
+
+ ctx, span := tab.StartSpanWithRemoteParent(ctx, optName, event)
+ defer span.End()
+
+ id := messageID(msg)
+ if idStr, ok := id.(string); ok {
+ span.AddAttributes(tab.StringAttribute("amqp.message.id", idStr))
+ }
+
+ if err := handler.Handle(ctx, event); err != nil {
+ // stop handling messages since the message consumer ran into an unexpected error
+ r.lastError = err
+ r.doneListening()
+ return
+ }
+
+ // nothing more to be done. The message was settled when it was accepted by the Receiver
+ if r.mode == ReceiveAndDeleteMode {
+ return
+ }
+
+ // nothing more to be done. The Receiver has no default disposition, so the handler is solely responsible for
+ // disposition
+ if r.DefaultDisposition == nil {
+ return
+ }
+
+ // default disposition is set, so try to send the disposition. If the message disposition has already been set, the
+ // underlying AMQP library will ignore the second disposition respecting the disposition of the handler func.
+ if err := r.DefaultDisposition(ctx); err != nil {
+ // if an error is returned by the default disposition, then we must alert the message consumer as we can't
+ // be sure the final message disposition.
+ tab.For(ctx).Error(err)
+ r.lastError = err
+ r.doneListening()
+ return
+ }
+}
+
+func (r *Receiver) listenForMessages(ctx context.Context, msgChan chan *amqp.Message) {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.listenForMessages")
+ defer span.End()
+
+ for {
+ msg, err := r.listenForMessage(ctx)
+ if err == nil {
+ msgChan <- msg
+ continue
+ }
+
+ select {
+ case <-ctx.Done():
+ tab.For(ctx).Debug("context done")
+ close(msgChan)
+ return
+ default:
+ _, retryErr := common.Retry(10, 10*time.Second, func() (interface{}, error) {
+ ctx, sp := r.startConsumerSpanFromContext(ctx, "sb.Receiver.listenForMessages.tryRecover")
+ defer sp.End()
+
+ tab.For(ctx).Debug("recovering connection")
+ err := r.Recover(ctx)
+ if err == nil {
+ tab.For(ctx).Debug("recovered connection")
+ return nil, nil
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ return nil, common.Retryable(err.Error())
+ }
+ })
+
+ if retryErr != nil {
+ tab.For(ctx).Debug("retried, but error was unrecoverable")
+ r.lastError = retryErr
+ if err := r.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ }
+ close(msgChan)
+ return
+ }
+ }
+ }
+}
+
+func (r *Receiver) listenForMessage(ctx context.Context) (*amqp.Message, error) {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.listenForMessage")
+ defer span.End()
+
+ msg, err := r.receiver.Receive(ctx)
+ if err != nil {
+ tab.For(ctx).Debug(err.Error())
+ return nil, err
+ }
+
+ id := messageID(msg)
+ if idStr, ok := id.(string); ok {
+ span.AddAttributes(tab.StringAttribute("amqp.message.id", idStr))
+ }
+
+ return msg, nil
+}
+
+// newSessionAndLink will replace the session and link on the Receiver
+func (r *Receiver) newSessionAndLink(ctx context.Context) error {
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.newSessionAndLink")
+ defer span.End()
+
+ r.clientMu.Lock()
+ defer r.clientMu.Unlock()
+
+ client, err := r.namespace.newClient()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ r.client = client
+
+ err = r.namespace.negotiateClaim(ctx, client, r.entityPath)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ amqpSession, err := client.NewSession()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ r.session, err = newSession(amqpSession)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ receiveMode := amqp.ModeSecond
+ if r.mode == ReceiveAndDeleteMode {
+ receiveMode = amqp.ModeFirst
+ }
+
+ opts := []amqp.LinkOption{
+ amqp.LinkSourceAddress(r.entityPath),
+ amqp.LinkReceiverSettle(receiveMode),
+ amqp.LinkCredit(r.prefetch),
+ }
+
+ if r.mode == ReceiveAndDeleteMode {
+ opts = append(opts, amqp.LinkSenderSettle(amqp.ModeSettled))
+ }
+
+ if opt, ok := r.getSessionFilterLinkOption(); ok {
+ opts = append(opts, opt)
+ }
+
+ amqpReceiver, err := amqpSession.NewReceiver(opts...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ r.receiver = amqpReceiver
+ return nil
+}
+
+func (r *Receiver) getSessionFilterLinkOption() (amqp.LinkOption, bool) {
+ const name = "com.microsoft:session-filter"
+ const code = uint64(0x00000137000000C)
+
+ if !r.useSessions {
+ return nil, false
+ }
+
+ if r.sessionID == nil {
+ return amqp.LinkSourceFilter(name, code, nil), true
+ }
+
+ return amqp.LinkSourceFilter(name, code, r.sessionID), true
+}
+
+func (r *Receiver) periodicallyRefreshAuth() {
+ ctx, done := context.WithCancel(context.Background())
+ r.doneRefreshingAuth = done
+
+ ctx, span := r.startConsumerSpanFromContext(ctx, "sb.Receiver.periodicallyRefreshAuth")
+ defer span.End()
+
+ doNegotiateClaimLocked := func(ctx context.Context, r *Receiver) {
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ if r.client != nil {
+ if err := r.namespace.negotiateClaim(ctx, r.client, r.entityPath); err != nil {
+ tab.For(ctx).Error(err)
+ }
+ }
+ }
+
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ time.Sleep(5 * time.Minute)
+ doNegotiateClaimLocked(ctx, r)
+ }
+ }
+ }()
+}
+
+func messageID(msg *amqp.Message) interface{} {
+ var id interface{} = "null"
+ if msg.Properties != nil {
+ id = msg.Properties.MessageID
+ }
+ return id
+}
+
+// Close will close the listener
+func (lc *ListenerHandle) Close(ctx context.Context) error {
+ return lc.r.Close(ctx)
+}
+
+// Done will close the channel when the listener has stopped
+func (lc *ListenerHandle) Done() <-chan struct{} {
+ return lc.ctx.Done()
+}
+
+// Err will return the last error encountered
+func (lc *ListenerHandle) Err() error {
+ if lc.r.lastError != nil {
+ return lc.r.lastError
+ }
+ return lc.ctx.Err()
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/rpc.go b/vendor/github.com/Azure/azure-service-bus-go/rpc.go
new file mode 100644
index 00000000..29302561
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/rpc.go
@@ -0,0 +1,643 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2/rpc"
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+)
+
+type (
+ rpcClient struct {
+ ec entityConnector
+ client *amqp.Client
+ clientMu sync.RWMutex
+ sessionID *string
+ isSessionFilterSet bool
+ doneRefreshingAuth func()
+ }
+
+ rpcClientOption func(*rpcClient) error
+)
+
+func newRPCClient(ctx context.Context, ec entityConnector, opts ...rpcClientOption) (*rpcClient, error) {
+ r := &rpcClient{
+ ec: ec,
+ }
+
+ for _, opt := range opts {
+ if err := opt(r); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+ }
+
+ return r, nil
+}
+
+// Recover will attempt to close the current session and link, then rebuild them
+func (r *rpcClient) Recover(ctx context.Context) error {
+ ctx, span := r.startSpanFromContext(ctx, "sb.rpcClient.Recover")
+ defer span.End()
+
+ _ = r.Close()
+ return r.ensureConn(ctx)
+}
+
+// Close will close the AMQP connection
+func (r *rpcClient) Close() error {
+ r.clientMu.Lock()
+ defer r.clientMu.Unlock()
+
+ return r.client.Close()
+}
+
+func (r *rpcClient) ensureConn(ctx context.Context) error {
+ ctx, span := r.startSpanFromContext(ctx, "sb.rpcClient.ensureConn")
+ defer span.End()
+
+ if r.client != nil {
+ return nil
+ }
+
+ r.clientMu.Lock()
+ defer r.clientMu.Unlock()
+
+ client, err := r.ec.Namespace().newClient()
+ err = r.ec.Namespace().negotiateClaim(ctx, client, r.ec.ManagementPath())
+ if err != nil {
+ tab.For(ctx).Error(err)
+ _ = client.Close()
+ return err
+ }
+
+ r.client = client
+ return err
+}
+
+func (r *rpcClient) ReceiveDeferred(ctx context.Context, mode ReceiveMode, sequenceNumbers ...int64) ([]*Message, error) {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.rpcClient.ReceiveDeferred")
+ defer span.End()
+
+ if err := r.ensureConn(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ const messagesField, messageField = "messages", "message"
+
+ backwardsMode := uint32(0)
+ if mode == PeekLockMode {
+ backwardsMode = 1
+ }
+
+ values := map[string]interface{}{
+ "sequence-numbers": sequenceNumbers,
+ "receiver-settle-mode": uint32(backwardsMode), // pick up messages with peek lock
+ }
+
+ var opts []rpc.LinkOption
+ if r.isSessionFilterSet {
+ opts = append(opts, rpc.LinkWithSessionFilter(r.sessionID))
+ values["session-id"] = r.sessionID
+ }
+
+ link, err := rpc.NewLink(r.client, r.ec.ManagementPath(), opts...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ operationFieldName: "com.microsoft:receive-by-sequence-number",
+ },
+ Value: values,
+ }
+
+ rsp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ if rsp.Code == 204 {
+ return nil, ErrNoMessages{}
+ }
+
+ // Deferred messages come back in a relatively convoluted manner:
+ // a map (always with one key: "messages")
+ // of arrays
+ // of maps (always with one key: "message")
+ // of an array with raw encoded Service Bus messages
+ val, ok := rsp.Message.Value.(map[string]interface{})
+ if !ok {
+ return nil, newErrIncorrectType(messageField, map[string]interface{}{}, rsp.Message.Value)
+ }
+
+ rawMessages, ok := val[messagesField]
+ if !ok {
+ return nil, ErrMissingField(messagesField)
+ }
+
+ messages, ok := rawMessages.([]interface{})
+ if !ok {
+ return nil, newErrIncorrectType(messagesField, []interface{}{}, rawMessages)
+ }
+
+ transformedMessages := make([]*Message, len(messages))
+ for i := range messages {
+ rawEntry, ok := messages[i].(map[string]interface{})
+ if !ok {
+ return nil, newErrIncorrectType(messageField, map[string]interface{}{}, messages[i])
+ }
+
+ rawMessage, ok := rawEntry[messageField]
+ if !ok {
+ return nil, ErrMissingField(messageField)
+ }
+
+ marshaled, ok := rawMessage.([]byte)
+ if !ok {
+ return nil, new(ErrMalformedMessage)
+ }
+
+ var rehydrated amqp.Message
+ err = rehydrated.UnmarshalBinary(marshaled)
+ if err != nil {
+ return nil, err
+ }
+
+ transformedMessages[i], err = messageFromAMQPMessage(&rehydrated)
+ if err != nil {
+ return nil, err
+ }
+
+ transformedMessages[i].ec = r.ec
+ transformedMessages[i].useSession = r.isSessionFilterSet
+ transformedMessages[i].sessionID = r.sessionID
+ }
+
+ // This sort is done to ensure that folks wanting to peek messages in sequence order may do so.
+ sort.Slice(transformedMessages, func(i, j int) bool {
+ iSeq := *transformedMessages[i].SystemProperties.SequenceNumber
+ jSeq := *transformedMessages[j].SystemProperties.SequenceNumber
+ return iSeq < jSeq
+ })
+
+ return transformedMessages, nil
+}
+
+func (r *rpcClient) GetNextPage(ctx context.Context, fromSequenceNumber int64, messageCount int32) ([]*Message, error) {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.rpcClient.GetNextPage")
+ defer span.End()
+
+ if err := r.ensureConn(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ const messagesField, messageField = "messages", "message"
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ operationFieldName: peekMessageOperationID,
+ },
+ Value: map[string]interface{}{
+ "from-sequence-number": fromSequenceNumber,
+ "message-count": messageCount,
+ },
+ }
+
+ if deadline, ok := ctx.Deadline(); ok {
+ msg.ApplicationProperties["server-timeout"] = uint(time.Until(deadline) / time.Millisecond)
+ }
+
+ link, err := rpc.NewLink(r.client, r.ec.ManagementPath())
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ rsp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ if rsp.Code == 204 {
+ return nil, ErrNoMessages{}
+ }
+
+ // Peeked messages come back in a relatively convoluted manner:
+ // a map (always with one key: "messages")
+ // of arrays
+ // of maps (always with one key: "message")
+ // of an array with raw encoded Service Bus messages
+ val, ok := rsp.Message.Value.(map[string]interface{})
+ if !ok {
+ err = newErrIncorrectType(messageField, map[string]interface{}{}, rsp.Message.Value)
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ rawMessages, ok := val[messagesField]
+ if !ok {
+ err = ErrMissingField(messagesField)
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ messages, ok := rawMessages.([]interface{})
+ if !ok {
+ err = newErrIncorrectType(messagesField, []interface{}{}, rawMessages)
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ transformedMessages := make([]*Message, len(messages))
+ for i := range messages {
+ rawEntry, ok := messages[i].(map[string]interface{})
+ if !ok {
+ err = newErrIncorrectType(messageField, map[string]interface{}{}, messages[i])
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ rawMessage, ok := rawEntry[messageField]
+ if !ok {
+ err = ErrMissingField(messageField)
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ marshaled, ok := rawMessage.([]byte)
+ if !ok {
+ err = new(ErrMalformedMessage)
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var rehydrated amqp.Message
+ err = rehydrated.UnmarshalBinary(marshaled)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ transformedMessages[i], err = messageFromAMQPMessage(&rehydrated)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ transformedMessages[i].ec = r.ec
+ transformedMessages[i].useSession = r.isSessionFilterSet
+ transformedMessages[i].sessionID = r.sessionID
+ }
+
+ // This sort is done to ensure that folks wanting to peek messages in sequence order may do so.
+ sort.Slice(transformedMessages, func(i, j int) bool {
+ iSeq := *transformedMessages[i].SystemProperties.SequenceNumber
+ jSeq := *transformedMessages[j].SystemProperties.SequenceNumber
+ return iSeq < jSeq
+ })
+
+ return transformedMessages, nil
+}
+
+func (r *rpcClient) RenewLocks(ctx context.Context, messages ...*Message) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.RenewLocks")
+ defer span.End()
+
+ if err := r.ensureConn(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ lockTokens := make([]amqp.UUID, 0, len(messages))
+ for _, m := range messages {
+ if m.LockToken == nil {
+ tab.For(ctx).Error(fmt.Errorf("failed: message has nil lock token, cannot renew lock"), tab.StringAttribute("messageId", m.ID))
+ continue
+ }
+
+ amqpLockToken := amqp.UUID(*m.LockToken)
+ lockTokens = append(lockTokens, amqpLockToken)
+ }
+
+ if len(lockTokens) < 1 {
+ tab.For(ctx).Info("no lock tokens present to renew")
+ return nil
+ }
+
+ renewRequestMsg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ operationFieldName: lockRenewalOperationName,
+ },
+ Value: map[string]interface{}{
+ lockTokensFieldName: lockTokens,
+ },
+ }
+
+ rpcLink, err := rpc.NewLink(r.client, r.ec.ManagementPath())
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ response, err := rpcLink.RetryableRPC(ctx, 3, 1*time.Second, renewRequestMsg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ if response.Code != 200 {
+ err := fmt.Errorf("error renewing locks: %v", response.Description)
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ return nil
+}
+
+func (r *rpcClient) SendDisposition(ctx context.Context, m *Message, state disposition) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.rpcClient.SendDisposition")
+ defer span.End()
+
+ if err := r.ensureConn(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ if m.LockToken == nil {
+ err := errors.New("lock token on the message is not set, thus cannot send disposition")
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ var opts []rpc.LinkOption
+ value := map[string]interface{}{
+ "disposition-status": string(state.Status),
+ "lock-tokens": []amqp.UUID{amqp.UUID(*m.LockToken)},
+ }
+
+ if state.DeadLetterReason != nil {
+ value["deadletter-reason"] = state.DeadLetterReason
+ }
+
+ if state.DeadLetterDescription != nil {
+ value["deadletter-description"] = state.DeadLetterDescription
+ }
+
+ if m.useSession {
+ value["session-id"] = m.sessionID
+ opts = append(opts, rpc.LinkWithSessionFilter(m.sessionID))
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ operationFieldName: "com.microsoft:update-disposition",
+ },
+ Value: value,
+ }
+
+ link, err := rpc.NewLink(r.client, m.ec.ManagementPath(), opts...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ // no error, then it was successful
+ _, err = link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ return nil
+}
+
+// ScheduleAt will send a batch of messages to a Queue, schedule them to be enqueued, and return the sequence numbers
+// that can be used to cancel each message.
+func (r *rpcClient) ScheduleAt(ctx context.Context, enqueueTime time.Time, messages ...*Message) ([]int64, error) {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.rpcClient.ScheduleAt")
+ defer span.End()
+
+ if err := r.ensureConn(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ if len(messages) <= 0 {
+ return nil, errors.New("expected one or more messages")
+ }
+
+ transformed := make([]interface{}, 0, len(messages))
+ for i := range messages {
+ messages[i].ScheduleAt(enqueueTime)
+
+ if messages[i].ID == "" {
+ id, err := uuid.NewV4()
+ if err != nil {
+ return nil, err
+ }
+ messages[i].ID = id.String()
+ }
+
+ rawAmqp, err := messages[i].toMsg()
+ if err != nil {
+ return nil, err
+ }
+ encoded, err := rawAmqp.MarshalBinary()
+ if err != nil {
+ return nil, err
+ }
+
+ individualMessage := map[string]interface{}{
+ "message-id": messages[i].ID,
+ "message": encoded,
+ }
+ if messages[i].SessionID != nil {
+ individualMessage["session-id"] = *messages[i].SessionID
+ }
+ if partitionKey := messages[i].SystemProperties.PartitionKey; partitionKey != nil {
+ individualMessage["partition-key"] = *partitionKey
+ }
+ if viaPartitionKey := messages[i].SystemProperties.ViaPartitionKey; viaPartitionKey != nil {
+ individualMessage["via-partition-key"] = *viaPartitionKey
+ }
+
+ transformed = append(transformed, individualMessage)
+ }
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ operationFieldName: scheduleMessageOperationID,
+ },
+ Value: map[string]interface{}{
+ "messages": transformed,
+ },
+ }
+
+ if deadline, ok := ctx.Deadline(); ok {
+ msg.ApplicationProperties[serverTimeoutFieldName] = uint(time.Until(deadline) / time.Millisecond)
+ }
+
+ link, err := rpc.NewLink(r.client, r.ec.ManagementPath())
+ if err != nil {
+ return nil, err
+ }
+
+ resp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ return nil, err
+ }
+
+ if resp.Code != 200 {
+ return nil, ErrAMQP(*resp)
+ }
+
+ retval := make([]int64, 0, len(messages))
+ if rawVal, ok := resp.Message.Value.(map[string]interface{}); ok {
+ const sequenceFieldName = "sequence-numbers"
+ if rawArr, ok := rawVal[sequenceFieldName]; ok {
+ if arr, ok := rawArr.([]int64); ok {
+ for i := range arr {
+ retval = append(retval, arr[i])
+ }
+ return retval, nil
+ }
+ return nil, newErrIncorrectType(sequenceFieldName, []int64{}, rawArr)
+ }
+ return nil, ErrMissingField(sequenceFieldName)
+ }
+ return nil, newErrIncorrectType("value", map[string]interface{}{}, resp.Message.Value)
+}
+
+// CancelScheduled allows for removal of messages that have been handed to the Service Bus broker for later delivery,
+// but have not yet ben enqueued.
+func (r *rpcClient) CancelScheduled(ctx context.Context, seq ...int64) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.rpcClient.CancelScheduled")
+ defer span.End()
+
+ if err := r.ensureConn(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ msg := &amqp.Message{
+ ApplicationProperties: map[string]interface{}{
+ operationFieldName: cancelScheduledOperationID,
+ },
+ Value: map[string]interface{}{
+ "sequence-numbers": seq,
+ },
+ }
+
+ if deadline, ok := ctx.Deadline(); ok {
+ msg.ApplicationProperties[serverTimeoutFieldName] = uint(time.Until(deadline) / time.Millisecond)
+ }
+
+ link, err := rpc.NewLink(r.client, r.ec.ManagementPath())
+ if err != nil {
+ return err
+ }
+
+ resp, err := link.RetryableRPC(ctx, 5, 5*time.Second, msg)
+ if err != nil {
+ return err
+ }
+
+ if resp.Code != 200 {
+ return ErrAMQP(*resp)
+ }
+
+ return nil
+}
+
+func (r *rpcClient) periodicallyRefreshAuth() {
+ ctx, done := context.WithCancel(context.Background())
+ r.doneRefreshingAuth = done
+
+ ctx, span := r.startSpanFromContext(ctx, "sb.rpcClient.periodicallyRefreshAuth")
+ defer span.End()
+
+ doNegotiateClaimLocked := func(ctx context.Context, r *rpcClient) {
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ if r.client != nil {
+ if err := r.ec.Namespace().negotiateClaim(ctx, r.client, r.ec.ManagementPath()); err != nil {
+ tab.For(ctx).Error(err)
+ }
+ }
+ }
+
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ time.Sleep(5 * time.Minute)
+ doNegotiateClaimLocked(ctx, r)
+ }
+ }
+ }()
+}
+
+func rpcClientWithSession(sessionID *string) rpcClientOption {
+ return func(r *rpcClient) error {
+ r.sessionID = sessionID
+ r.isSessionFilterSet = true
+ return nil
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/sender.go b/vendor/github.com/Azure/azure-service-bus-go/sender.go
new file mode 100644
index 00000000..d0c8dbda
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/sender.go
@@ -0,0 +1,327 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "math/rand"
+ "sync"
+ "time"
+
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+)
+
+type (
+ // Sender provides connection, session and link handling for an sending to an entity path
+ Sender struct {
+ namespace *Namespace
+ client *amqp.Client
+ clientMu sync.RWMutex
+ session *session
+ sender *amqp.Sender
+ entityPath string
+ Name string
+ sessionID *string
+ doneRefreshingAuth func()
+ }
+
+ // SendOption provides a way to customize a message on sending
+ SendOption func(event *Message) error
+
+ eventer interface {
+ toMsg() (*amqp.Message, error)
+ GetKeyValues() map[string]interface{}
+ Set(key string, value interface{})
+ }
+
+ // SenderOption provides a way to customize a Sender
+ SenderOption func(*Sender) error
+)
+
+// NewSender creates a new Service Bus message Sender given an AMQP client and entity path
+func (ns *Namespace) NewSender(ctx context.Context, entityPath string, opts ...SenderOption) (*Sender, error) {
+ ctx, span := ns.startSpanFromContext(ctx, "sb.Namespace.NewSender")
+ defer span.End()
+
+ s := &Sender{
+ namespace: ns,
+ entityPath: entityPath,
+ }
+
+ for _, opt := range opts {
+ if err := opt(s); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+ }
+
+ err := s.newSessionAndLink(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ }
+
+ s.periodicallyRefreshAuth()
+
+ return s, err
+}
+
+// Recover will attempt to close the current session and link, then rebuild them
+func (s *Sender) Recover(ctx context.Context) error {
+ ctx, span := s.startProducerSpanFromContext(ctx, "sb.Sender.Recover")
+ defer span.End()
+
+ // we expect the Sender, session or client is in an error state, ignore errors
+ closeCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ closeCtx = tab.NewContext(closeCtx, span)
+ defer cancel()
+ _ = s.Close(ctx)
+ return s.newSessionAndLink(ctx)
+}
+
+// Close will close the AMQP connection, session and link of the Sender
+func (s *Sender) Close(ctx context.Context) error {
+ ctx, span := s.startProducerSpanFromContext(ctx, "sb.Sender.Close")
+ defer span.End()
+
+ s.clientMu.Lock()
+ defer s.clientMu.Unlock()
+
+ if s.doneRefreshingAuth != nil {
+ s.doneRefreshingAuth()
+ }
+
+ var lastErr error
+ if s.sender != nil {
+ if lastErr = s.sender.Close(ctx); lastErr != nil {
+ tab.For(ctx).Error(lastErr)
+ }
+ }
+
+ if s.session != nil {
+ if err := s.session.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ if s.client != nil {
+ if err := s.client.Close(); err != nil {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ s.sender = nil
+ s.session = nil
+ s.client = nil
+
+ return lastErr
+}
+
+// Send will send a message to the entity path with options
+//
+// This will retry sending the message if the server responds with a busy error.
+func (s *Sender) Send(ctx context.Context, msg *Message, opts ...SendOption) error {
+ ctx, span := s.startProducerSpanFromContext(ctx, "sb.Sender.Send")
+ defer span.End()
+
+ if msg.SessionID == nil {
+ msg.SessionID = &s.session.SessionID
+ next := s.session.getNext()
+ msg.GroupSequence = &next
+ }
+
+ if msg.ID == "" {
+ id, err := uuid.NewV4()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ msg.ID = id.String()
+ }
+
+ for _, opt := range opts {
+ err := opt(msg)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+
+ return s.trySend(ctx, msg)
+}
+
+func (s *Sender) trySend(ctx context.Context, evt eventer) error {
+ ctx, sp := s.startProducerSpanFromContext(ctx, "sb.Sender.trySend")
+ defer sp.End()
+
+ err := sp.Inject(evt)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ msg, err := evt.toMsg()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ if msg.Properties != nil {
+ sp.AddAttributes(tab.StringAttribute("sb.message.id", msg.Properties.MessageID.(string)))
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ if ctx.Err() != nil {
+ tab.For(ctx).Error(err)
+ }
+ return ctx.Err()
+ default:
+ // try as long as the context is not dead
+ err = s.sender.Send(ctx, msg)
+ if err == nil {
+ // successful send
+ return err
+ }
+
+ switch err.(type) {
+ case *amqp.Error, *amqp.DetachError:
+ tab.For(ctx).Debug("amqp error, delaying 4 seconds: " + err.Error())
+ skew := time.Duration(rand.Intn(1000)-500) * time.Millisecond
+ time.Sleep(4*time.Second + skew)
+ err := s.Recover(ctx)
+ if err != nil {
+ tab.For(ctx).Debug("failed to recover connection")
+ }
+ tab.For(ctx).Debug("recovered connection")
+ default:
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+ }
+}
+
+func (s *Sender) String() string {
+ return s.Name
+}
+
+func (s *Sender) getAddress() string {
+ return s.entityPath
+}
+
+func (s *Sender) getFullIdentifier() string {
+ return s.namespace.getEntityAudience(s.getAddress())
+}
+
+// newSessionAndLink will replace the existing session and link
+func (s *Sender) newSessionAndLink(ctx context.Context) error {
+ ctx, span := s.startProducerSpanFromContext(ctx, "sb.Sender.newSessionAndLink")
+ defer span.End()
+
+ s.clientMu.Lock()
+ defer s.clientMu.Unlock()
+
+ connection, err := s.namespace.newClient()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ s.client = connection
+
+ err = s.namespace.negotiateClaim(ctx, connection, s.getAddress())
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ amqpSession, err := connection.NewSession()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ amqpSender, err := amqpSession.NewSender(
+ amqp.LinkSenderSettle(amqp.ModeUnsettled),
+ amqp.LinkTargetAddress(s.getAddress()))
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ s.session, err = newSession(amqpSession)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ if s.sessionID != nil {
+ s.session.SessionID = *s.sessionID
+ }
+
+ s.sender = amqpSender
+ return nil
+}
+
+func (s *Sender) periodicallyRefreshAuth() {
+ ctx, done := context.WithCancel(context.Background())
+ s.doneRefreshingAuth = done
+
+ ctx, span := s.startProducerSpanFromContext(ctx, "sb.Sender.periodicallyRefreshAuth")
+ defer span.End()
+
+ doNegotiateClaimLocked := func(ctx context.Context, r *Sender) {
+ r.clientMu.RLock()
+ defer r.clientMu.RUnlock()
+
+ if r.client != nil {
+ if err := r.namespace.negotiateClaim(ctx, r.client, r.entityPath); err != nil {
+ tab.For(ctx).Error(err)
+ }
+ }
+ }
+
+ go func() {
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ default:
+ time.Sleep(5 * time.Minute)
+ doNegotiateClaimLocked(ctx, s)
+ }
+ }
+ }()
+}
+
+// SenderWithSession configures the message to send with a specific session and sequence. By default, a Sender has a
+// default session (uuid.NewV4()) and sequence generator.
+func SenderWithSession(sessionID *string) SenderOption {
+ return func(sender *Sender) error {
+ sender.sessionID = sessionID
+ return nil
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/session.go b/vendor/github.com/Azure/azure-service-bus-go/session.go
new file mode 100644
index 00000000..13cc5191
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/session.go
@@ -0,0 +1,574 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "sync"
+ "sync/atomic"
+
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "github.com/devigned/tab"
+ "pack.ag/amqp"
+)
+
+type (
+ // session is a wrapper for the AMQP session with some added information to help with Service Bus messaging
+ session struct {
+ *amqp.Session
+ SessionID string
+ counter uint32
+ }
+
+ sessionIdentifiable struct {
+ sessionID *string
+ }
+
+ lockedRPC struct {
+ rpcClient *rpcClient
+ rpcClientMu sync.Mutex
+ }
+
+ // QueueSession wraps Service Bus session functionality over a Queue
+ QueueSession struct {
+ sessionIdentifiable
+ lockedRPC
+ builder SendAndReceiveBuilder
+ builderMu sync.Mutex
+ receiver *Receiver
+ sender *Sender
+ }
+
+ // SubscriptionSession wraps Service Bus session functionality over a Subscription
+ SubscriptionSession struct {
+ sessionIdentifiable
+ lockedRPC
+ builder ReceiveBuilder
+ builderMu sync.Mutex
+ receiver *Receiver
+ }
+
+ // TopicSession wraps Service Bus session functionality over a Topic
+ TopicSession struct {
+ sessionIdentifiable
+ builder SenderBuilder
+ builderMu sync.Mutex
+ sender *Sender
+ }
+
+ // ReceiverBuilder describes the ability of an entity to build receiver links
+ ReceiverBuilder interface {
+ NewReceiver(ctx context.Context, opts ...ReceiverOption) (*Receiver, error)
+ }
+
+ // SenderBuilder describes the ability of an entity to build sender links
+ SenderBuilder interface {
+ NewSender(ctx context.Context, opts ...SenderOption) (*Sender, error)
+ }
+
+ // EntityManagementAddresser describes the ability of an entity to provide an addressable path to it's management
+ // endpoint
+ EntityManagementAddresser interface {
+ ManagementPath() string
+ }
+
+ // SendAndReceiveBuilder is a ReceiverBuilder, SenderBuilder and EntityManagementAddresser
+ SendAndReceiveBuilder interface {
+ ReceiveBuilder
+ SenderBuilder
+ }
+
+ // ReceiveBuilder is a ReceiverBuilder and EntityManagementAddresser
+ ReceiveBuilder interface {
+ ReceiverBuilder
+ entityConnector
+ }
+)
+
+// newSession is a constructor for a Service Bus session which will pre-populate the SessionID with a new UUID
+func newSession(amqpSession *amqp.Session) (*session, error) {
+ id, err := uuid.NewV4()
+ if err != nil {
+ return nil, err
+ }
+
+ return &session{
+ Session: amqpSession,
+ SessionID: id.String(),
+ counter: 0,
+ }, nil
+}
+
+// getNext gets and increments the next group sequence number for the session
+func (s *session) getNext() uint32 {
+ return atomic.AddUint32(&s.counter, 1)
+}
+
+func (s *session) String() string {
+ return s.SessionID
+}
+
+// NewQueueSession creates a new session sender and receiver to communicate with a Service Bus queue.
+//
+// Microsoft Azure Service Bus sessions enable joint and ordered handling of unbounded sequences of related messages.
+// To realize a FIFO guarantee in Service Bus, use Sessions. Service Bus is not prescriptive about the nature of the
+// relationship between the messages, and also does not define a particular model for determining where a message
+// sequence starts or ends.
+func NewQueueSession(builder SendAndReceiveBuilder, sessionID *string) *QueueSession {
+ return &QueueSession{
+ sessionIdentifiable: sessionIdentifiable{
+ sessionID: sessionID,
+ },
+ builder: builder,
+ }
+}
+
+// ReceiveOne waits for the lock on a particular session to become available, takes it, then process the session.
+// The session can contain multiple messages. ReceiveOne will receive all messages within that session.
+//
+// Handler must call a disposition action such as Complete, Abandon, Deadletter on the message. If the messages does not
+// have a disposition set, the Queue's DefaultDisposition will be used.
+//
+// If the handler returns an error, the receive loop will be terminated.
+func (qs *QueueSession) ReceiveOne(ctx context.Context, handler SessionHandler) error {
+ ctx, span := qs.startSpanFromContext(ctx, "sb.QueueSession.ReceiveOne")
+ defer span.End()
+
+ if err := qs.ensureReceiver(ctx); err != nil {
+ return err
+ }
+
+ ms, err := newMessageSession(qs.receiver, qs.builder, qs.sessionID)
+ if err != nil {
+ return err
+ }
+
+ err = handler.Start(ms)
+ if err != nil {
+ return err
+ }
+
+ defer handler.End()
+ handle := qs.receiver.Listen(ctx, handler)
+
+ select {
+ case <-handle.Done():
+ return handle.Err()
+ case <-ms.done:
+ return nil
+ }
+}
+
+// ReceiveDeferred will receive and handle a set of deferred messages
+//
+// When a queue or subscription client receives a message that it is willing to process, but for which processing is
+// not currently possible due to special circumstances inside of the application, it has the option of "deferring"
+// retrieval of the message to a later point. The message remains in the queue or subscription, but it is set aside.
+//
+// Deferral is a feature specifically created for workflow processing scenarios. Workflow frameworks may require certain
+// operations to be processed in a particular order, and may have to postpone processing of some received messages
+// until prescribed prior work that is informed by other messages has been completed.
+//
+// A simple illustrative example is an order processing sequence in which a payment notification from an external
+// payment provider appears in a system before the matching purchase order has been propagated from the store front
+// to the fulfillment system. In that case, the fulfillment system might defer processing the payment notification
+// until there is an order with which to associate it. In rendezvous scenarios, where messages from different sources
+// drive a workflow forward, the real-time execution order may indeed be correct, but the messages reflecting the
+// outcomes may arrive out of order.
+//
+// Ultimately, deferral aids in reordering messages from the arrival order into an order in which they can be
+// processed, while leaving those messages safely in the message store for which processing needs to be postponed.
+func (qs *QueueSession) ReceiveDeferred(ctx context.Context, handler Handler, mode ReceiveMode, sequenceNumbers ...int64) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.Queue.ReceiveDeferred")
+ defer span.End()
+
+ if err := qs.ensureRPCClient(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ messages, err := qs.rpcClient.ReceiveDeferred(ctx, mode, sequenceNumbers...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ for _, msg := range messages {
+ if err := handler.Handle(ctx, msg); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+ return nil
+}
+
+// Send the message to the queue within a session
+func (qs *QueueSession) Send(ctx context.Context, msg *Message) error {
+ ctx, span := qs.startSpanFromContext(ctx, "sb.QueueSession.Send")
+ defer span.End()
+
+ if err := qs.ensureSender(ctx); err != nil {
+ return err
+ }
+
+ if msg.SessionID == nil {
+ msg.SessionID = qs.sessionID
+ }
+ return qs.sender.Send(ctx, msg)
+}
+
+// Close the underlying connection to Service Bus
+func (qs *QueueSession) Close(ctx context.Context) error {
+ ctx, span := qs.startSpanFromContext(ctx, "sb.QueueSession.Close")
+ defer span.End()
+
+ var lastErr error
+ if qs.receiver != nil {
+ if err := qs.receiver.Close(ctx); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ if qs.sender != nil {
+ if err := qs.sender.Close(ctx); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ if qs.rpcClient != nil {
+ if err := qs.rpcClient.Close(); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ return lastErr
+}
+
+// SessionID is the identifier for the Service Bus session
+func (qs *QueueSession) SessionID() *string {
+ return qs.sessionID
+}
+
+// ManagementPath provides an addressable path to the Entity management endpoint
+func (qs *QueueSession) ManagementPath() string {
+ return qs.builder.ManagementPath()
+}
+
+func (qs *QueueSession) ensureRPCClient(ctx context.Context) error {
+ ctx, span := qs.startSpanFromContext(ctx, "sb.QueueSession.ensureRPCConn")
+ defer span.End()
+
+ qs.rpcClientMu.Lock()
+ defer qs.rpcClientMu.Unlock()
+
+ if qs.rpcClient != nil {
+ return nil
+ }
+
+ client, err := newRPCClient(ctx, qs.builder, rpcClientWithSession(qs.sessionID))
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ qs.rpcClient = client
+ return nil
+}
+
+func (qs *QueueSession) ensureSender(ctx context.Context) error {
+ ctx, span := qs.startSpanFromContext(ctx, "sb.QueueSession.ensureSender")
+ defer span.End()
+
+ qs.builderMu.Lock()
+ defer qs.builderMu.Unlock()
+
+ if qs.sender != nil {
+ return nil
+ }
+
+ s, err := qs.builder.NewSender(ctx, SenderWithSession(qs.sessionID))
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ qs.sender = s
+ return nil
+}
+
+func (qs *QueueSession) ensureReceiver(ctx context.Context) error {
+ ctx, span := qs.startSpanFromContext(ctx, "sb.QueueSession.ensureReceiver")
+ defer span.End()
+
+ qs.builderMu.Lock()
+ defer qs.builderMu.Unlock()
+
+ if qs.receiver != nil {
+ return nil
+ }
+
+ r, err := qs.builder.NewReceiver(ctx, ReceiverWithSession(qs.sessionID))
+ if err != nil {
+ return err
+ }
+
+ qs.receiver = r
+ return nil
+}
+
+// NewSubscriptionSession creates a new session receiver to receive from a Service Bus subscription.
+//
+// Microsoft Azure Service Bus sessions enable joint and ordered handling of unbounded sequences of related messages.
+// To realize a FIFO guarantee in Service Bus, use Sessions. Service Bus is not prescriptive about the nature of the
+// relationship between the messages, and also does not define a particular model for determining where a message
+// sequence starts or ends.
+func NewSubscriptionSession(builder ReceiveBuilder, sessionID *string) *SubscriptionSession {
+ return &SubscriptionSession{
+ sessionIdentifiable: sessionIdentifiable{
+ sessionID: sessionID,
+ },
+ builder: builder,
+ }
+}
+
+// ReceiveOne waits for the lock on a particular session to become available, takes it, then process the session.
+// The session can contain multiple messages. ReceiveOneSession will receive all messages within that session.
+//
+// Handler must call a disposition action such as Complete, Abandon, Deadletter on the message. If the messages does not
+// have a disposition set, the Queue's DefaultDisposition will be used.
+//
+// If the handler returns an error, the receive loop will be terminated.
+func (ss *SubscriptionSession) ReceiveOne(ctx context.Context, handler SessionHandler) error {
+ ctx, span := ss.startSpanFromContext(ctx, "sb.SubscriptionSession.ReceiveOne")
+ defer span.End()
+
+ if err := ss.ensureReceiver(ctx); err != nil {
+ return err
+ }
+
+ ms, err := newMessageSession(ss.receiver, ss.builder, ss.sessionID)
+ if err != nil {
+ return err
+ }
+
+ err = handler.Start(ms)
+ if err != nil {
+ return err
+ }
+
+ defer handler.End()
+ handle := ss.receiver.Listen(ctx, handler)
+
+ select {
+ case <-handle.Done():
+ err := handle.Err()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ _ = ss.receiver.Close(ctx)
+ }
+ return err
+ case <-ms.done:
+ return nil
+ }
+}
+
+// ReceiveDeferred will receive and handle a set of deferred messages
+//
+// When a queue or subscription client receives a message that it is willing to process, but for which processing is
+// not currently possible due to special circumstances inside of the application, it has the option of "deferring"
+// retrieval of the message to a later point. The message remains in the queue or subscription, but it is set aside.
+//
+// Deferral is a feature specifically created for workflow processing scenarios. Workflow frameworks may require certain
+// operations to be processed in a particular order, and may have to postpone processing of some received messages
+// until prescribed prior work that is informed by other messages has been completed.
+//
+// A simple illustrative example is an order processing sequence in which a payment notification from an external
+// payment provider appears in a system before the matching purchase order has been propagated from the store front
+// to the fulfillment system. In that case, the fulfillment system might defer processing the payment notification
+// until there is an order with which to associate it. In rendezvous scenarios, where messages from different sources
+// drive a workflow forward, the real-time execution order may indeed be correct, but the messages reflecting the
+// outcomes may arrive out of order.
+//
+// Ultimately, deferral aids in reordering messages from the arrival order into an order in which they can be
+// processed, while leaving those messages safely in the message store for which processing needs to be postponed.
+func (ss *SubscriptionSession) ReceiveDeferred(ctx context.Context, handler Handler, mode ReceiveMode, sequenceNumbers ...int64) error {
+ ctx, span := startConsumerSpanFromContext(ctx, "sb.Queue.ReceiveDeferred")
+ defer span.End()
+
+ if err := ss.ensureRPCClient(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ messages, err := ss.rpcClient.ReceiveDeferred(ctx, mode, sequenceNumbers...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ for _, msg := range messages {
+ if err := handler.Handle(ctx, msg); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+ return nil
+}
+
+// Close the underlying connection to Service Bus
+func (ss *SubscriptionSession) Close(ctx context.Context) error {
+ ctx, span := ss.startSpanFromContext(ctx, "sb.SubscriptionSession.Close")
+ defer span.End()
+
+ var lastErr error
+ if ss.receiver != nil {
+ if err := ss.receiver.Close(ctx); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ if ss.rpcClient != nil {
+ if err := ss.rpcClient.Close(); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ }
+
+ return lastErr
+}
+
+func (ss *SubscriptionSession) ensureReceiver(ctx context.Context) error {
+ ctx, span := ss.startSpanFromContext(ctx, "sb.SubscriptionSession.ensureReceiver")
+ defer span.End()
+
+ ss.builderMu.Lock()
+ defer ss.builderMu.Unlock()
+
+ r, err := ss.builder.NewReceiver(ctx, ReceiverWithSession(ss.sessionID))
+ if err != nil {
+ return err
+ }
+
+ ss.receiver = r
+ return nil
+}
+
+// SessionID is the identifier for the Service Bus session
+func (ss *SubscriptionSession) SessionID() *string {
+ return ss.sessionID
+}
+
+// ManagementPath provides an addressable path to the Entity management endpoint
+func (ss *SubscriptionSession) ManagementPath() string {
+ return ss.builder.ManagementPath()
+}
+
+func (ss *SubscriptionSession) ensureRPCClient(ctx context.Context) error {
+ ctx, span := ss.startSpanFromContext(ctx, "sb.SubscriptionSession.ensureRpcConn")
+ defer span.End()
+
+ ss.rpcClientMu.Lock()
+ defer ss.rpcClientMu.Unlock()
+
+ if ss.rpcClient != nil {
+ return nil
+ }
+
+ client, err := newRPCClient(ctx, ss.builder, rpcClientWithSession(ss.sessionID))
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ ss.rpcClient = client
+ return nil
+}
+
+// NewTopicSession creates a new session receiver to receive from a Service Bus topic.
+//
+// Microsoft Azure Service Bus sessions enable joint and ordered handling of unbounded sequences of related messages.
+// To realize a FIFO guarantee in Service Bus, use Sessions. Service Bus is not prescriptive about the nature of the
+// relationship between the messages, and also does not define a particular model for determining where a message
+// sequence starts or ends.
+func NewTopicSession(builder SenderBuilder, sessionID *string) *TopicSession {
+ return &TopicSession{
+ sessionIdentifiable: sessionIdentifiable{
+ sessionID: sessionID,
+ },
+ builder: builder,
+ }
+}
+
+// Send the message to the queue within a session
+func (ts *TopicSession) Send(ctx context.Context, msg *Message) error {
+ ctx, span := ts.startSpanFromContext(ctx, "sb.TopicSession.Send")
+ defer span.End()
+
+ if err := ts.ensureSender(ctx); err != nil {
+ return err
+ }
+
+ if msg.SessionID == nil {
+ msg.SessionID = ts.sessionID
+ }
+ return ts.sender.Send(ctx, msg)
+}
+
+// Close the underlying connection to Service Bus
+func (ts *TopicSession) Close(ctx context.Context) error {
+ ctx, span := ts.startSpanFromContext(ctx, "sb.TopicSession.Close")
+ defer span.End()
+
+ if ts.sender != nil {
+ if err := ts.sender.Close(ctx); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+ return nil
+}
+
+// SessionID is the identifier for the Service Bus session
+func (ts *TopicSession) SessionID() *string {
+ return ts.sessionID
+}
+
+func (ts *TopicSession) ensureSender(ctx context.Context) error {
+ ctx, span := ts.startSpanFromContext(ctx, "sb.TopicSession.ensureSender")
+ defer span.End()
+
+ ts.builderMu.Lock()
+ defer ts.builderMu.Unlock()
+
+ s, err := ts.builder.NewSender(ctx, SenderWithSession(ts.sessionID))
+ if err != nil {
+ return err
+ }
+
+ ts.sender = s
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/subscription.go b/vendor/github.com/Azure/azure-service-bus-go/subscription.go
new file mode 100644
index 00000000..25b3e4dc
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/subscription.go
@@ -0,0 +1,253 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "strings"
+ "sync"
+
+ "github.com/devigned/tab"
+)
+
+type (
+ // Subscription represents a Service Bus Subscription entity which are used to receive topic messages. A topic
+ // subscription resembles a virtual queue that receives copies of the messages that are sent to the topic.
+ //Messages are received from a subscription identically to the way they are received from a queue.
+ Subscription struct {
+ *receivingEntity
+ Topic *Topic
+ receiver *Receiver
+ receiverMu sync.Mutex
+ receiveMode ReceiveMode
+ requiredSessionID *string
+ prefetchCount *uint32
+ }
+
+ // SubscriptionOption configures the Subscription Azure Service Bus client
+ SubscriptionOption func(*Subscription) error
+)
+
+// SubscriptionWithReceiveAndDelete configures a subscription to pop and delete messages off of the queue upon receiving the message.
+// This differs from the default, PeekLock, where PeekLock receives a message, locks it for a period of time, then sends
+// a disposition to the broker when the message has been processed.
+func SubscriptionWithReceiveAndDelete() SubscriptionOption {
+ return func(s *Subscription) error {
+ s.receiveMode = ReceiveAndDeleteMode
+ return nil
+ }
+}
+
+// SubscriptionWithPrefetchCount configures the subscription to attempt to fetch the number of messages specified by the
+// prefetch count at one time.
+//
+// The default is 1 message at a time.
+//
+// Caution: Using PeekLock, messages have a set lock timeout, which can be renewed. By setting a high prefetch count, a
+// local queue of messages could build up and cause message locks to expire before the message lands in the handler. If
+// this happens, the message disposition will fail and will be re-queued and processed again.
+func SubscriptionWithPrefetchCount(prefetch uint32) SubscriptionOption {
+ return func(q *Subscription) error {
+ q.prefetchCount = &prefetch
+ return nil
+ }
+}
+
+// NewSubscription creates a new Topic Subscription client
+func (t *Topic) NewSubscription(name string, opts ...SubscriptionOption) (*Subscription, error) {
+ entity := newEntity(name, subscriptionManagementPath(t.Name, name), t.namespace)
+ sub := &Subscription{
+ receivingEntity: newReceivingEntity(entity),
+ Topic: t,
+ }
+
+ for i := range opts {
+ if err := opts[i](sub); err != nil {
+ return nil, err
+ }
+ }
+ return sub, nil
+}
+
+// ReceiveOne will listen to receive a single message. ReceiveOne will only wait as long as the context allows.
+//
+// Handler must call a disposition action such as Complete, Abandon, Deadletter on the message. If the messages does not
+// have a disposition set, the Queue's DefaultDisposition will be used.
+func (s *Subscription) ReceiveOne(ctx context.Context, handler Handler) error {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.ReceiveOne")
+ defer span.End()
+
+ if err := s.ensureReceiver(ctx); err != nil {
+ return err
+ }
+
+ return s.receiver.ReceiveOne(ctx, handler)
+}
+
+// Receive subscribes for messages sent to the Subscription
+//
+// Handler must call a disposition action such as Complete, Abandon, Deadletter on the message. If the messages does not
+// have a disposition set, the Queue's DefaultDisposition will be used.
+//
+// If the handler returns an error, the receive loop will be terminated.
+func (s *Subscription) Receive(ctx context.Context, handler Handler) error {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.Receive")
+ defer span.End()
+
+ if err := s.ensureReceiver(ctx); err != nil {
+ return err
+ }
+ handle := s.receiver.Listen(ctx, handler)
+ <-handle.Done()
+ return handle.Err()
+}
+
+// NewSession will create a new session based receiver for the subscription
+//
+// Microsoft Azure Service Bus sessions enable joint and ordered handling of unbounded sequences of related messages.
+// To realize a FIFO guarantee in Service Bus, use Sessions. Service Bus is not prescriptive about the nature of the
+// relationship between the messages, and also does not define a particular model for determining where a message
+// sequence starts or ends.
+func (s *Subscription) NewSession(sessionID *string) *SubscriptionSession {
+ return NewSubscriptionSession(s, sessionID)
+}
+
+// NewReceiver will create a new Receiver for receiving messages off of the queue
+func (s *Subscription) NewReceiver(ctx context.Context, opts ...ReceiverOption) (*Receiver, error) {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.NewReceiver")
+ defer span.End()
+
+ opts = append(opts, ReceiverWithReceiveMode(s.receiveMode))
+
+ if s.prefetchCount != nil {
+ opts = append(opts, ReceiverWithPrefetchCount(*s.prefetchCount))
+ }
+
+ return s.namespace.NewReceiver(ctx, s.Topic.Name+"/Subscriptions/"+s.Name, opts...)
+}
+
+// NewDeadLetter creates an entity that represents the dead letter sub queue of the queue
+//
+// Azure Service Bus queues and topic subscriptions provide a secondary sub-queue, called a dead-letter queue
+// (DLQ). The dead-letter queue does not need to be explicitly created and cannot be deleted or otherwise managed
+// independent of the main entity.
+//
+// The purpose of the dead-letter queue is to hold messages that cannot be delivered to any receiver, or messages
+// that could not be processed. Messages can then be removed from the DLQ and inspected. An application might, with
+// help of an operator, correct issues and resubmit the message, log the fact that there was an error, and take
+// corrective action.
+//
+// From an API and protocol perspective, the DLQ is mostly similar to any other queue, except that messages can only
+// be submitted via the dead-letter operation of the parent entity. In addition, time-to-live is not observed, and
+// you can't dead-letter a message from a DLQ. The dead-letter queue fully supports peek-lock delivery and
+// transactional operations.
+//
+// Note that there is no automatic cleanup of the DLQ. Messages remain in the DLQ until you explicitly retrieve
+// them from the DLQ and call Complete() on the dead-letter message.
+func (s *Subscription) NewDeadLetter() *DeadLetter {
+ return NewDeadLetter(s)
+}
+
+// NewDeadLetterReceiver builds a receiver for the Subscriptions's dead letter queue
+func (s *Subscription) NewDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error) {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.NewDeadLetterReceiver")
+ defer span.End()
+
+ deadLetterEntityPath := strings.Join([]string{s.Topic.Name, "Subscriptions", s.Name, DeadLetterQueueName}, "/")
+ return s.namespace.NewReceiver(ctx, deadLetterEntityPath, opts...)
+}
+
+// NewTransferDeadLetter creates an entity that represents the transfer dead letter sub queue of the subscription
+//
+// Messages will be sent to the transfer dead-letter queue under the following conditions:
+// - A message passes through more than 3 queues or topics that are chained together.
+// - The destination queue or topic is disabled or deleted.
+// - The destination queue or topic exceeds the maximum entity size.
+func (s *Subscription) NewTransferDeadLetter() *TransferDeadLetter {
+ return NewTransferDeadLetter(s)
+}
+
+// NewTransferDeadLetterReceiver builds a receiver for the Queue's transfer dead letter queue
+//
+// Messages will be sent to the transfer dead-letter queue under the following conditions:
+// - A message passes through more than 3 queues or topics that are chained together.
+// - The destination queue or topic is disabled or deleted.
+// - The destination queue or topic exceeds the maximum entity size.
+func (s *Subscription) NewTransferDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error) {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.NewTransferDeadLetterReceiver")
+ defer span.End()
+
+ transferDeadLetterEntityPath := strings.Join([]string{s.Topic.Name, "subscriptions", s.Name, TransferDeadLetterQueueName}, "/")
+ return s.namespace.NewReceiver(ctx, transferDeadLetterEntityPath, opts...)
+}
+
+// Close the underlying connection to Service Bus
+func (s *Subscription) Close(ctx context.Context) error {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.Close")
+ defer span.End()
+
+ var lastErr error
+ if s.receiver != nil {
+ if err := s.receiver.Close(ctx); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ s.receiver = nil
+ }
+
+ if s.rpcClient != nil {
+ if err := s.rpcClient.Close(); err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ lastErr = err
+ }
+ s.rpcClient = nil
+ }
+
+ return lastErr
+}
+
+func (s *Subscription) ensureReceiver(ctx context.Context, opts ...ReceiverOption) error {
+ ctx, span := s.startSpanFromContext(ctx, "sb.Subscription.ensureReceiver")
+ defer span.End()
+
+ s.receiverMu.Lock()
+ defer s.receiverMu.Unlock()
+
+ // if a receiver is already in established, just return
+ if s.receiver != nil {
+ return nil
+ }
+
+ receiver, err := s.NewReceiver(ctx, opts...)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ s.receiver = receiver
+ return nil
+}
+
+func subscriptionManagementPath(topicName, subscriptionName string) string {
+ return strings.Join([]string{topicName, "subscriptions", subscriptionName, "$management"}, "/")
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/subscription_manager.go b/vendor/github.com/Azure/azure-service-bus-go/subscription_manager.go
new file mode 100644
index 00000000..70b74c0c
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/subscription_manager.go
@@ -0,0 +1,564 @@
+package servicebus
+
+import (
+ "context"
+ "encoding/xml"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "strings"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/devigned/tab"
+
+ "github.com/Azure/azure-service-bus-go/atom"
+)
+
+type (
+ // SubscriptionManager provides CRUD functionality for Service Bus Subscription
+ SubscriptionManager struct {
+ *entityManager
+ Topic *Topic
+ }
+
+ // FilterDescriber can transform itself into a FilterDescription
+ FilterDescriber interface {
+ ToFilterDescription() FilterDescription
+ }
+
+ // ActionDescriber can transform itself into a ActionDescription
+ ActionDescriber interface {
+ ToActionDescription() ActionDescription
+ }
+
+ // RuleDescription is the content type for Subscription Rule management requests
+ RuleDescription struct {
+ XMLName xml.Name `xml:"RuleDescription"`
+ BaseEntityDescription
+ CreatedAt *date.Time `xml:"CreatedAt,omitempty"`
+ Filter FilterDescription `xml:"Filter"`
+ Action *ActionDescription `xml:"Action,omitempty"`
+ }
+
+ // FilterDescription describes a filter which can be applied to a subscription to filter messages from the topic.
+ //
+ // Subscribers can define which messages they want to receive from a topic. These messages are specified in the
+ // form of one or more named subscription rules. Each rule consists of a condition that selects particular messages
+ // and an action that annotates the selected message. For each matching rule condition, the subscription produces a
+ // copy of the message, which may be differently annotated for each matching rule.
+ //
+ // Each newly created topic subscription has an initial default subscription rule. If you don't explicitly specify a
+ // filter condition for the rule, the applied filter is the true filter that enables all messages to be selected
+ // into the subscription. The default rule has no associated annotation action.
+ FilterDescription struct {
+ XMLName xml.Name `xml:"Filter"`
+ CorrelationFilter
+ Type string `xml:"http://www.w3.org/2001/XMLSchema-instance type,attr"`
+ SQLExpression *string `xml:"SqlExpression,omitempty"`
+ CompatibilityLevel int `xml:"CompatibilityLevel,omitempty"`
+ }
+
+ // ActionDescription describes an action upon a message that matches a filter
+ //
+ // With SQL filter conditions, you can define an action that can annotate the message by adding, removing, or
+ // replacing properties and their values. The action uses a SQL-like expression that loosely leans on the SQL
+ // UPDATE statement syntax. The action is performed on the message after it has been matched and before the message
+ // is selected into the subscription. The changes to the message properties are private to the message copied into
+ // the subscription.
+ ActionDescription struct {
+ Type string `xml:"http://www.w3.org/2001/XMLSchema-instance type,attr"`
+ SQLExpression string `xml:"SqlExpression"`
+ RequiresPreprocessing bool `xml:"RequiresPreprocessing"`
+ CompatibilityLevel int `xml:"CompatibilityLevel,omitempty"`
+ }
+
+ // RuleEntity is the Azure Service Bus description of a Subscription Rule for management activities
+ RuleEntity struct {
+ *RuleDescription
+ *Entity
+ }
+
+ // ruleContent is a specialized Subscription body for an Atom entry
+ ruleContent struct {
+ XMLName xml.Name `xml:"content"`
+ Type string `xml:"type,attr"`
+ RuleDescription RuleDescription `xml:"RuleDescription"`
+ }
+
+ ruleEntry struct {
+ *atom.Entry
+ Content *ruleContent `xml:"content"`
+ }
+
+ ruleFeed struct {
+ *atom.Feed
+ Entries []ruleEntry `xml:"entry"`
+ }
+
+ // SubscriptionDescription is the content type for Subscription management requests
+ SubscriptionDescription struct {
+ XMLName xml.Name `xml:"SubscriptionDescription"`
+ BaseEntityDescription
+ LockDuration *string `xml:"LockDuration,omitempty"` // LockDuration - ISO 8601 timespan duration of a peek-lock; that is, the amount of time that the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default value is 1 minute.
+ RequiresSession *bool `xml:"RequiresSession,omitempty"`
+ DefaultMessageTimeToLive *string `xml:"DefaultMessageTimeToLive,omitempty"` // DefaultMessageTimeToLive - ISO 8601 default message timespan to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
+ DeadLetteringOnMessageExpiration *bool `xml:"DeadLetteringOnMessageExpiration,omitempty"` // DeadLetteringOnMessageExpiration - A value that indicates whether this queue has dead letter support when a message expires.
+ DeadLetteringOnFilterEvaluationExceptions *bool `xml:"DeadLetteringOnFilterEvaluationExceptions,omitempty"`
+ MessageCount *int64 `xml:"MessageCount,omitempty"` // MessageCount - The number of messages in the queue.
+ MaxDeliveryCount *int32 `xml:"MaxDeliveryCount,omitempty"` // MaxDeliveryCount - The maximum delivery count. A message is automatically deadlettered after this number of deliveries. default value is 10.
+ EnableBatchedOperations *bool `xml:"EnableBatchedOperations,omitempty"` // EnableBatchedOperations - Value that indicates whether server-side batched operations are enabled.
+ Status *EntityStatus `xml:"Status,omitempty"`
+ CreatedAt *date.Time `xml:"CreatedAt,omitempty"`
+ UpdatedAt *date.Time `xml:"UpdatedAt,omitempty"`
+ AccessedAt *date.Time `xml:"AccessedAt,omitempty"`
+ AutoDeleteOnIdle *string `xml:"AutoDeleteOnIdle,omitempty"`
+ ForwardTo *string `xml:"ForwardTo,omitempty"` // ForwardTo - absolute URI of the entity to forward messages
+ ForwardDeadLetteredMessagesTo *string `xml:"ForwardDeadLetteredMessagesTo,omitempty"` // ForwardDeadLetteredMessagesTo - absolute URI of the entity to forward dead letter messages
+ CountDetails *CountDetails `xml:"CountDetails,omitempty"`
+ }
+
+ // SubscriptionEntity is the Azure Service Bus description of a topic Subscription for management activities
+ SubscriptionEntity struct {
+ *SubscriptionDescription
+ *Entity
+ }
+
+ // subscriptionFeed is a specialized feed containing Topic Subscriptions
+ subscriptionFeed struct {
+ *atom.Feed
+ Entries []subscriptionEntry `xml:"entry"`
+ }
+
+ // subscriptionEntryContent is a specialized Topic feed Subscription
+ subscriptionEntry struct {
+ *atom.Entry
+ Content *subscriptionContent `xml:"content"`
+ }
+
+ // subscriptionContent is a specialized Subscription body for an Atom entry
+ subscriptionContent struct {
+ XMLName xml.Name `xml:"content"`
+ Type string `xml:"type,attr"`
+ SubscriptionDescription SubscriptionDescription `xml:"SubscriptionDescription"`
+ }
+
+ // SubscriptionManagementOption represents named options for assisting Subscription creation
+ SubscriptionManagementOption func(*SubscriptionDescription) error
+)
+
+// NewSubscriptionManager creates a new SubscriptionManager for a Service Bus Topic
+func (t *Topic) NewSubscriptionManager() *SubscriptionManager {
+ return &SubscriptionManager{
+ entityManager: newEntityManager(t.namespace.getHTTPSHostURI(), t.namespace.TokenProvider),
+ Topic: t,
+ }
+}
+
+// NewSubscriptionManager creates a new SubscriptionManger for a Service Bus Namespace
+func (ns *Namespace) NewSubscriptionManager(topicName string) (*SubscriptionManager, error) {
+ t, err := ns.NewTopic(topicName)
+ if err != nil {
+ return nil, err
+ }
+ return &SubscriptionManager{
+ entityManager: newEntityManager(t.namespace.getHTTPSHostURI(), t.namespace.TokenProvider),
+ Topic: t,
+ }, nil
+}
+
+// Delete deletes a Service Bus Topic entity by name
+func (sm *SubscriptionManager) Delete(ctx context.Context, name string) error {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.Delete")
+ defer span.End()
+
+ res, err := sm.entityManager.Delete(ctx, sm.getResourceURI(name))
+ defer closeRes(ctx, res)
+
+ return err
+}
+
+// Put creates or updates a Service Bus Topic
+func (sm *SubscriptionManager) Put(ctx context.Context, name string, opts ...SubscriptionManagementOption) (*SubscriptionEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.Put")
+ defer span.End()
+
+ sd := new(SubscriptionDescription)
+ for _, opt := range opts {
+ if err := opt(sd); err != nil {
+ return nil, err
+ }
+ }
+
+ sd.ServiceBusSchema = to.StringPtr(serviceBusSchema)
+
+ qe := &subscriptionEntry{
+ Entry: &atom.Entry{
+ AtomSchema: atomSchema,
+ },
+ Content: &subscriptionContent{
+ Type: applicationXML,
+ SubscriptionDescription: *sd,
+ },
+ }
+
+ var mw []MiddlewareFunc
+ if sd.ForwardTo != nil {
+ mw = append(mw, addSupplementalAuthorization(*sd.ForwardTo, sm.TokenProvider()))
+ }
+
+ if sd.ForwardDeadLetteredMessagesTo != nil {
+ mw = append(mw, addDeadLetterSupplementalAuthorization(*sd.ForwardDeadLetteredMessagesTo, sm.TokenProvider()))
+ }
+
+ reqBytes, err := xml.Marshal(qe)
+ if err != nil {
+ return nil, err
+ }
+
+ reqBytes = xmlDoc(reqBytes)
+ res, err := sm.entityManager.Put(ctx, sm.getResourceURI(name), reqBytes, mw...)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var entry subscriptionEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+ return subscriptionEntryToEntity(&entry), nil
+}
+
+// List fetches all of the Topics for a Service Bus Namespace
+func (sm *SubscriptionManager) List(ctx context.Context) ([]*SubscriptionEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.List")
+ defer span.End()
+
+ res, err := sm.entityManager.Get(ctx, "/"+sm.Topic.Name+"/subscriptions")
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var feed subscriptionFeed
+ err = xml.Unmarshal(b, &feed)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+
+ subs := make([]*SubscriptionEntity, len(feed.Entries))
+ for idx, entry := range feed.Entries {
+ subs[idx] = subscriptionEntryToEntity(&entry)
+ }
+ return subs, nil
+}
+
+// Get fetches a Service Bus Topic entity by name
+func (sm *SubscriptionManager) Get(ctx context.Context, name string) (*SubscriptionEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.Get")
+ defer span.End()
+
+ res, err := sm.entityManager.Get(ctx, sm.getResourceURI(name))
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if res.StatusCode == http.StatusNotFound {
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var entry subscriptionEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ if isEmptyFeed(b) {
+ // seems the only way to catch 404 is if the feed is empty. If no subscriptions exist, the GET returns 200
+ // and an empty feed.
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+ return nil, formatManagementError(b)
+ }
+ return subscriptionEntryToEntity(&entry), nil
+}
+
+// ListRules returns the slice of subscription filter rules
+//
+// By default when the subscription is created, there exists a single "true" filter which matches all messages.
+func (sm *SubscriptionManager) ListRules(ctx context.Context, subscriptionName string) ([]*RuleEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.ListRules")
+ defer span.End()
+
+ res, err := sm.entityManager.Get(ctx, sm.getRulesResourceURI(subscriptionName))
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ return nil, err
+ }
+
+ if res.StatusCode == http.StatusNotFound {
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var feed ruleFeed
+ err = xml.Unmarshal(b, &feed)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+
+ rules := make([]*RuleEntity, len(feed.Entries))
+ for idx, entry := range feed.Entries {
+ rules[idx] = ruleEntryToEntity(&entry)
+ }
+ return rules, nil
+}
+
+// PutRuleWithAction creates a new Subscription rule to filter messages from the topic and then perform an action
+func (sm *SubscriptionManager) PutRuleWithAction(ctx context.Context, subscriptionName, ruleName string, filter FilterDescriber, action ActionDescriber) (*RuleEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.PutRuleWithAction")
+ defer span.End()
+
+ ad := action.ToActionDescription()
+ rd := &RuleDescription{
+ BaseEntityDescription: BaseEntityDescription{
+ ServiceBusSchema: to.StringPtr(serviceBusSchema),
+ InstanceMetadataSchema: to.StringPtr(schemaInstance),
+ },
+ Filter: filter.ToFilterDescription(),
+ Action: &ad,
+ }
+
+ return sm.putRule(ctx, subscriptionName, ruleName, rd)
+}
+
+// PutRule creates a new Subscription rule to filter messages from the topic
+func (sm *SubscriptionManager) PutRule(ctx context.Context, subscriptionName, ruleName string, filter FilterDescriber) (*RuleEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.PutRule")
+ defer span.End()
+
+ rd := &RuleDescription{
+ BaseEntityDescription: BaseEntityDescription{
+ ServiceBusSchema: to.StringPtr(serviceBusSchema),
+ InstanceMetadataSchema: to.StringPtr(schemaInstance),
+ },
+ Filter: filter.ToFilterDescription(),
+ }
+
+ return sm.putRule(ctx, subscriptionName, ruleName, rd)
+}
+
+func (sm *SubscriptionManager) putRule(ctx context.Context, subscriptionName, ruleName string, rd *RuleDescription) (*RuleEntity, error) {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.putRule")
+ defer span.End()
+
+ re := &ruleEntry{
+ Entry: &atom.Entry{
+ AtomSchema: atomSchema,
+ },
+ Content: &ruleContent{
+ Type: applicationXML,
+ RuleDescription: *rd,
+ },
+ }
+
+ reqBytes, err := xml.Marshal(re)
+ if err != nil {
+ return nil, err
+ }
+
+ // TODO: fix the unmarshal / marshal of xml with this attribute or ask the service to fix it. This is sad, but works.
+ str := string(reqBytes)
+ str = strings.Replace(str, `xmlns:XMLSchema-instance="`+schemaInstance+`" XMLSchema-instance:type`, "i:type", -1)
+
+ reqBytes = xmlDoc([]byte(str))
+ res, err := sm.entityManager.Put(ctx, sm.getRuleResourceURI(subscriptionName, ruleName), reqBytes)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var entry ruleEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+ return ruleEntryToEntity(&entry), nil
+}
+
+// DeleteRule will delete a rule on the subscription
+func (sm *SubscriptionManager) DeleteRule(ctx context.Context, subscriptionName, ruleName string) error {
+ ctx, span := sm.startSpanFromContext(ctx, "sb.SubscriptionManager.DeleteRule")
+ defer span.End()
+
+ res, err := sm.entityManager.Delete(ctx, sm.getRuleResourceURI(subscriptionName, ruleName))
+ defer closeRes(ctx, res)
+
+ return err
+}
+
+func ruleEntryToEntity(entry *ruleEntry) *RuleEntity {
+ return &RuleEntity{
+ RuleDescription: &entry.Content.RuleDescription,
+ Entity: &Entity{
+ Name: entry.Title,
+ ID: entry.ID,
+ },
+ }
+}
+
+func subscriptionEntryToEntity(entry *subscriptionEntry) *SubscriptionEntity {
+ return &SubscriptionEntity{
+ SubscriptionDescription: &entry.Content.SubscriptionDescription,
+ Entity: &Entity{
+ Name: entry.Title,
+ ID: entry.ID,
+ },
+ }
+}
+
+func (sm *SubscriptionManager) getResourceURI(name string) string {
+ return "/" + sm.Topic.Name + "/subscriptions/" + name
+}
+
+func (sm *SubscriptionManager) getRulesResourceURI(subscriptionName string) string {
+ return sm.getResourceURI(subscriptionName) + "/rules"
+}
+
+func (sm *SubscriptionManager) getRuleResourceURI(subscriptionName, ruleName string) string {
+ return sm.getResourceURI(subscriptionName) + "/rules/" + ruleName
+}
+
+// SubscriptionWithBatchedOperations configures the subscription to batch server-side operations.
+func SubscriptionWithBatchedOperations() SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ s.EnableBatchedOperations = ptrBool(true)
+ return nil
+ }
+}
+
+// SubscriptionWithForwardDeadLetteredMessagesTo configures the queue to automatically forward dead letter messages to
+// the specified target entity.
+//
+// The ability to forward dead letter messages to a target requires the connection have management authorization. If
+// the connection string or Azure Active Directory identity used does not have management authorization, an unauthorized
+// error will be returned on the PUT.
+func SubscriptionWithForwardDeadLetteredMessagesTo(target Targetable) SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ uri := target.TargetURI()
+ s.ForwardDeadLetteredMessagesTo = &uri
+ return nil
+ }
+}
+
+// SubscriptionWithAutoForward configures the queue to automatically forward messages to the specified entity path
+//
+// The ability to AutoForward to a target requires the connection have management authorization. If the connection
+// string or Azure Active Directory identity used does not have management authorization, an unauthorized error will be
+// returned on the PUT.
+func SubscriptionWithAutoForward(target Targetable) SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ uri := target.TargetURI()
+ s.ForwardTo = &uri
+ return nil
+ }
+}
+
+// SubscriptionWithLockDuration configures the subscription to have a duration of a peek-lock; that is, the amount of
+// time that the message is locked for other receivers. The maximum value for LockDuration is 5 minutes; the default
+// value is 1 minute.
+func SubscriptionWithLockDuration(window *time.Duration) SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ if window == nil {
+ duration := time.Duration(1 * time.Minute)
+ window = &duration
+ }
+ s.LockDuration = ptrString(durationTo8601Seconds(*window))
+ return nil
+ }
+}
+
+// SubscriptionWithRequiredSessions will ensure the subscription requires senders and receivers to have sessionIDs
+func SubscriptionWithRequiredSessions() SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ s.RequiresSession = ptrBool(true)
+ return nil
+ }
+}
+
+// SubscriptionWithDeadLetteringOnMessageExpiration will ensure the Subscription sends expired messages to the dead
+// letter queue
+func SubscriptionWithDeadLetteringOnMessageExpiration() SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ s.DeadLetteringOnMessageExpiration = ptrBool(true)
+ return nil
+ }
+}
+
+// SubscriptionWithAutoDeleteOnIdle configures the subscription to automatically delete after the specified idle
+// interval. The minimum duration is 5 minutes.
+func SubscriptionWithAutoDeleteOnIdle(window *time.Duration) SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ if window != nil {
+ if window.Minutes() < 5 {
+ return errors.New("window must be greater than 5 minutes")
+ }
+ s.AutoDeleteOnIdle = ptrString(durationTo8601Seconds(*window))
+ }
+ return nil
+ }
+}
+
+// SubscriptionWithMessageTimeToLive configures the subscription to set a time to live on messages. This is the duration
+// after which the message expires, starting from when the message is sent to Service Bus. This is the default value
+// used when TimeToLive is not set on a message itself. If nil, defaults to 14 days.
+func SubscriptionWithMessageTimeToLive(window *time.Duration) SubscriptionManagementOption {
+ return func(s *SubscriptionDescription) error {
+ if window == nil {
+ duration := time.Duration(14 * 24 * time.Hour)
+ window = &duration
+ }
+ s.DefaultMessageTimeToLive = ptrString(durationTo8601Seconds(*window))
+ return nil
+ }
+}
+
+func closeRes(ctx context.Context, res *http.Response) {
+ if res == nil {
+ return
+ }
+
+ if err := res.Body.Close(); err != nil {
+ tab.For(ctx).Error(err)
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/topic.go b/vendor/github.com/Azure/azure-service-bus-go/topic.go
new file mode 100644
index 00000000..82e023be
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/topic.go
@@ -0,0 +1,221 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "encoding/xml"
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/Azure/azure-amqp-common-go/v2/uuid"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/devigned/tab"
+)
+
+type (
+ // Topic in contrast to queues, in which each message is processed by a single consumer, topics and subscriptions
+ // provide a one-to-many form of communication, in a publish/subscribe pattern. Useful for scaling to very large
+ // numbers of recipients, each published message is made available to each subscription registered with the topic.
+ // Messages are sent to a topic and delivered to one or more associated subscriptions, depending on filter rules
+ // that can be set on a per-subscription basis. The subscriptions can use additional filters to restrict the
+ // messages that they want to receive. Messages are sent to a topic in the same way they are sent to a queue,
+ // but messages are not received from the topic directly. Instead, they are received from subscriptions. A topic
+ // subscription resembles a virtual queue that receives copies of the messages that are sent to the topic.
+ // Messages are received from a subscription identically to the way they are received from a queue.
+ Topic struct {
+ *sendingEntity
+ sender *Sender
+ senderMu sync.Mutex
+ }
+
+ // TopicDescription is the content type for Topic management requests
+ TopicDescription struct {
+ XMLName xml.Name `xml:"TopicDescription"`
+ BaseEntityDescription
+ DefaultMessageTimeToLive *string `xml:"DefaultMessageTimeToLive,omitempty"` // DefaultMessageTimeToLive - ISO 8601 default message time span to live value. This is the duration after which the message expires, starting from when the message is sent to Service Bus. This is the default value used when TimeToLive is not set on a message itself.
+ MaxSizeInMegabytes *int32 `xml:"MaxSizeInMegabytes,omitempty"` // MaxSizeInMegabytes - The maximum size of the queue in megabytes, which is the size of memory allocated for the queue. Default is 1024.
+ RequiresDuplicateDetection *bool `xml:"RequiresDuplicateDetection,omitempty"` // RequiresDuplicateDetection - A value indicating if this queue requires duplicate detection.
+ DuplicateDetectionHistoryTimeWindow *string `xml:"DuplicateDetectionHistoryTimeWindow,omitempty"` // DuplicateDetectionHistoryTimeWindow - ISO 8601 timeSpan structure that defines the duration of the duplicate detection history. The default value is 10 minutes.
+ EnableBatchedOperations *bool `xml:"EnableBatchedOperations,omitempty"` // EnableBatchedOperations - Value that indicates whether server-side batched operations are enabled.
+ SizeInBytes *int64 `xml:"SizeInBytes,omitempty"` // SizeInBytes - The size of the queue, in bytes.
+ FilteringMessagesBeforePublishing *bool `xml:"FilteringMessagesBeforePublishing,omitempty"`
+ IsAnonymousAccessible *bool `xml:"IsAnonymousAccessible,omitempty"`
+ Status *EntityStatus `xml:"Status,omitempty"`
+ CreatedAt *date.Time `xml:"CreatedAt,omitempty"`
+ UpdatedAt *date.Time `xml:"UpdatedAt,omitempty"`
+ SupportOrdering *bool `xml:"SupportOrdering,omitempty"`
+ AutoDeleteOnIdle *string `xml:"AutoDeleteOnIdle,omitempty"`
+ EnablePartitioning *bool `xml:"EnablePartitioning,omitempty"`
+ EnableSubscriptionPartitioning *bool `xml:"EnableSubscriptionPartitioning,omitempty"`
+ EnableExpress *bool `xml:"EnableExpress,omitempty"`
+ CountDetails *CountDetails `xml:"CountDetails,omitempty"`
+ }
+
+ // TopicOption represents named options for assisting Topic message handling
+ TopicOption func(*Topic) error
+)
+
+// NewTopic creates a new Topic Sender
+func (ns *Namespace) NewTopic(name string, opts ...TopicOption) (*Topic, error) {
+ topic := &Topic{
+ sendingEntity: newSendingEntity(newEntity(name, topicManagementPath(name), ns)),
+ }
+
+ for i := range opts {
+ if err := opts[i](topic); err != nil {
+ return nil, err
+ }
+ }
+
+ return topic, nil
+}
+
+// Send sends messages to the Topic
+func (t *Topic) Send(ctx context.Context, event *Message, opts ...SendOption) error {
+ ctx, span := t.startSpanFromContext(ctx, "sb.Topic.Send")
+ defer span.End()
+
+ err := t.ensureSender(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ return t.sender.Send(ctx, event, opts...)
+}
+
+// SendBatch sends a batch of messages to the Topic
+func (t *Topic) SendBatch(ctx context.Context, iterator BatchIterator) error {
+ ctx, span := t.startSpanFromContext(ctx, "sb.Topic.SendBatch")
+ defer span.End()
+
+ err := t.ensureSender(ctx)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ for !iterator.Done() {
+ id, err := uuid.NewV4()
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ batch, err := iterator.Next(id.String(), &BatchOptions{
+ SessionID: t.sender.sessionID,
+ })
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+
+ if err := t.sender.trySend(ctx, batch); err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// NewSession will create a new session based sender for the topic
+//
+// Microsoft Azure Service Bus sessions enable joint and ordered handling of unbounded sequences of related messages.
+// To realize a FIFO guarantee in Service Bus, use Sessions. Service Bus is not prescriptive about the nature of the
+// relationship between the messages, and also does not define a particular model for determining where a message
+// sequence starts or ends.
+func (t *Topic) NewSession(sessionID *string) *TopicSession {
+ return NewTopicSession(t, sessionID)
+}
+
+// NewSender will create a new Sender for sending messages to the queue
+func (t *Topic) NewSender(ctx context.Context, opts ...SenderOption) (*Sender, error) {
+ return t.namespace.NewSender(ctx, t.Name)
+}
+
+// Close the underlying connection to Service Bus
+func (t *Topic) Close(ctx context.Context) error {
+ ctx, span := t.startSpanFromContext(ctx, "sb.Topic.Close")
+ defer span.End()
+
+ if t.sender != nil {
+ err := t.sender.Close(ctx)
+ t.sender = nil
+ if err != nil && !isConnectionClosed(err) {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ }
+
+ return nil
+}
+
+// NewTransferDeadLetter creates an entity that represents the transfer dead letter sub queue of the topic
+//
+// Messages will be sent to the transfer dead-letter queue under the following conditions:
+// - A message passes through more than 3 queues or topics that are chained together.
+// - The destination queue or topic is disabled or deleted.
+// - The destination queue or topic exceeds the maximum entity size.
+func (t *Topic) NewTransferDeadLetter() *TransferDeadLetter {
+ return NewTransferDeadLetter(t)
+}
+
+// NewTransferDeadLetterReceiver builds a receiver for the Queue's transfer dead letter queue
+//
+// Messages will be sent to the transfer dead-letter queue under the following conditions:
+// - A message passes through more than 3 queues or topics that are chained together.
+// - The destination queue or topic is disabled or deleted.
+// - The destination queue or topic exceeds the maximum entity size.
+func (t *Topic) NewTransferDeadLetterReceiver(ctx context.Context, opts ...ReceiverOption) (ReceiveOner, error) {
+ ctx, span := t.startSpanFromContext(ctx, "sb.Topic.NewTransferDeadLetterReceiver")
+ defer span.End()
+
+ transferDeadLetterEntityPath := strings.Join([]string{t.Name, TransferDeadLetterQueueName}, "/")
+ return t.namespace.NewReceiver(ctx, transferDeadLetterEntityPath, opts...)
+}
+
+func topicManagementPath(name string) string {
+ return fmt.Sprintf("%s/$management", name)
+}
+
+func (t *Topic) ensureSender(ctx context.Context) error {
+ ctx, span := t.startSpanFromContext(ctx, "sb.Topic.ensureSender")
+ defer span.End()
+
+ t.senderMu.Lock()
+ defer t.senderMu.Unlock()
+
+ if t.sender != nil {
+ return nil
+ }
+
+ s, err := t.namespace.NewSender(ctx, t.Name)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return err
+ }
+ t.sender = s
+ return nil
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/topic_manager.go b/vendor/github.com/Azure/azure-service-bus-go/topic_manager.go
new file mode 100644
index 00000000..3f97325d
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/topic_manager.go
@@ -0,0 +1,285 @@
+package servicebus
+
+import (
+ "context"
+ "encoding/xml"
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/to"
+ "github.com/devigned/tab"
+
+ "github.com/Azure/azure-service-bus-go/atom"
+)
+
+type (
+ // TopicManager provides CRUD functionality for Service Bus Topics
+ TopicManager struct {
+ *entityManager
+ }
+
+ // TopicEntity is the Azure Service Bus description of a Topic for management activities
+ TopicEntity struct {
+ *TopicDescription
+ *Entity
+ }
+
+ // topicEntry is a specialized Topic feed entry
+ topicEntry struct {
+ *atom.Entry
+ Content *topicContent `xml:"content"`
+ }
+
+ // topicContent is a specialized Topic body for an Atom entry
+ topicContent struct {
+ XMLName xml.Name `xml:"content"`
+ Type string `xml:"type,attr"`
+ TopicDescription TopicDescription `xml:"TopicDescription"`
+ }
+
+ // topicFeed is a specialized feed containing Topic Entries
+ topicFeed struct {
+ *atom.Feed
+ Entries []topicEntry `xml:"entry"`
+ }
+
+ // TopicManagementOption represents named options for assisting Topic creation
+ TopicManagementOption func(*TopicDescription) error
+)
+
+// NewTopicManager creates a new TopicManager for a Service Bus Namespace
+func (ns *Namespace) NewTopicManager() *TopicManager {
+ return &TopicManager{
+ entityManager: newEntityManager(ns.getHTTPSHostURI(), ns.TokenProvider),
+ }
+}
+
+// Delete deletes a Service Bus Topic entity by name
+func (tm *TopicManager) Delete(ctx context.Context, name string) error {
+ ctx, span := tm.startSpanFromContext(ctx, "sb.TopicManager.Delete")
+ defer span.End()
+
+ res, err := tm.entityManager.Delete(ctx, "/"+name)
+ defer closeRes(ctx, res)
+
+ return err
+}
+
+// Put creates or updates a Service Bus Topic
+func (tm *TopicManager) Put(ctx context.Context, name string, opts ...TopicManagementOption) (*TopicEntity, error) {
+ ctx, span := tm.startSpanFromContext(ctx, "sb.TopicManager.Put")
+ defer span.End()
+
+ td := new(TopicDescription)
+ for _, opt := range opts {
+ if err := opt(td); err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+ }
+
+ td.ServiceBusSchema = to.StringPtr(serviceBusSchema)
+
+ qe := &topicEntry{
+ Entry: &atom.Entry{
+ AtomSchema: atomSchema,
+ },
+ Content: &topicContent{
+ Type: applicationXML,
+ TopicDescription: *td,
+ },
+ }
+
+ reqBytes, err := xml.Marshal(qe)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ reqBytes = xmlDoc(reqBytes)
+ res, err := tm.entityManager.Put(ctx, "/"+name, reqBytes)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var entry topicEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+ return topicEntryToEntity(&entry), nil
+}
+
+// List fetches all of the Topics for a Service Bus Namespace
+func (tm *TopicManager) List(ctx context.Context) ([]*TopicEntity, error) {
+ ctx, span := tm.startSpanFromContext(ctx, "sb.TopicManager.List")
+ defer span.End()
+
+ res, err := tm.entityManager.Get(ctx, `/$Resources/Topics`)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var feed topicFeed
+ err = xml.Unmarshal(b, &feed)
+ if err != nil {
+ return nil, formatManagementError(b)
+ }
+
+ topics := make([]*TopicEntity, len(feed.Entries))
+ for idx, entry := range feed.Entries {
+ topics[idx] = topicEntryToEntity(&entry)
+ }
+ return topics, nil
+}
+
+// Get fetches a Service Bus Topic entity by name
+func (tm *TopicManager) Get(ctx context.Context, name string) (*TopicEntity, error) {
+ ctx, span := tm.startSpanFromContext(ctx, "sb.TopicManager.Get")
+ defer span.End()
+
+ res, err := tm.entityManager.Get(ctx, name)
+ defer closeRes(ctx, res)
+
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ if res.StatusCode == http.StatusNotFound {
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+
+ b, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ tab.For(ctx).Error(err)
+ return nil, err
+ }
+
+ var entry topicEntry
+ err = xml.Unmarshal(b, &entry)
+ if err != nil {
+ if isEmptyFeed(b) {
+ return nil, ErrNotFound{EntityPath: res.Request.URL.Path}
+ }
+ return nil, formatManagementError(b)
+ }
+ return topicEntryToEntity(&entry), nil
+}
+
+func topicEntryToEntity(entry *topicEntry) *TopicEntity {
+ return &TopicEntity{
+ TopicDescription: &entry.Content.TopicDescription,
+ Entity: &Entity{
+ Name: entry.Title,
+ ID: entry.ID,
+ },
+ }
+}
+
+// TopicWithMaxSizeInMegabytes configures the maximum size of the topic in megabytes (1 * 1024 - 5 * 1024), which is the size of
+// the memory allocated for the topic. Default is 1 MB (1 * 1024).
+//
+// size must be between 1024 and 5 * 1024 for the Standard sku and up to 80 * 1024 for Premium sku
+func TopicWithMaxSizeInMegabytes(size int) TopicManagementOption {
+ return func(t *TopicDescription) error {
+ if size < 1024 || size > 80*1024 {
+ return errors.New("TopicWithMaxSizeInMegabytes: must be between 1024 and 5 * 1024 for the Standard sku and up to 80 * 1024 for Premium sku")
+ }
+ size32 := int32(size)
+ t.MaxSizeInMegabytes = &size32
+ return nil
+ }
+}
+
+// TopicWithPartitioning configures the topic to be partitioned across multiple message brokers.
+func TopicWithPartitioning() TopicManagementOption {
+ return func(t *TopicDescription) error {
+ t.EnablePartitioning = ptrBool(true)
+ return nil
+ }
+}
+
+// TopicWithOrdering configures the topic to support ordering of messages.
+func TopicWithOrdering() TopicManagementOption {
+ return func(t *TopicDescription) error {
+ t.SupportOrdering = ptrBool(true)
+ return nil
+ }
+}
+
+// TopicWithDuplicateDetection configures the topic to detect duplicates for a given time window. If window
+// is not specified, then it uses the default of 10 minutes.
+func TopicWithDuplicateDetection(window *time.Duration) TopicManagementOption {
+ return func(t *TopicDescription) error {
+ t.RequiresDuplicateDetection = ptrBool(true)
+ if window != nil {
+ t.DuplicateDetectionHistoryTimeWindow = ptrString(durationTo8601Seconds(*window))
+ }
+ return nil
+ }
+}
+
+// TopicWithExpress configures the topic to hold a message in memory temporarily before writing it to persistent storage.
+func TopicWithExpress() TopicManagementOption {
+ return func(t *TopicDescription) error {
+ t.EnableExpress = ptrBool(true)
+ return nil
+ }
+}
+
+// TopicWithBatchedOperations configures the topic to batch server-side operations.
+func TopicWithBatchedOperations() TopicManagementOption {
+ return func(t *TopicDescription) error {
+ t.EnableBatchedOperations = ptrBool(true)
+ return nil
+ }
+}
+
+// TopicWithAutoDeleteOnIdle configures the topic to automatically delete after the specified idle interval. The
+// minimum duration is 5 minutes.
+func TopicWithAutoDeleteOnIdle(window *time.Duration) TopicManagementOption {
+ return func(t *TopicDescription) error {
+ if window != nil {
+ if window.Minutes() < 5 {
+ return errors.New("TopicWithAutoDeleteOnIdle: window must be greater than 5 minutes")
+ }
+ t.AutoDeleteOnIdle = ptrString(durationTo8601Seconds(*window))
+ }
+ return nil
+ }
+}
+
+// TopicWithMessageTimeToLive configures the topic to set a time to live on messages. This is the duration after which
+// the message expires, starting from when the message is sent to Service Bus. This is the default value used when
+// TimeToLive is not set on a message itself. If nil, defaults to 14 days.
+func TopicWithMessageTimeToLive(window *time.Duration) TopicManagementOption {
+ return func(t *TopicDescription) error {
+ if window == nil {
+ duration := time.Duration(14 * 24 * time.Hour)
+ window = &duration
+ }
+ t.DefaultMessageTimeToLive = ptrString(durationTo8601Seconds(*window))
+ return nil
+ }
+}
diff --git a/vendor/github.com/Azure/azure-service-bus-go/tracing.go b/vendor/github.com/Azure/azure-service-bus-go/tracing.go
new file mode 100644
index 00000000..b653742e
--- /dev/null
+++ b/vendor/github.com/Azure/azure-service-bus-go/tracing.go
@@ -0,0 +1,129 @@
+package servicebus
+
+// MIT License
+//
+// Copyright (c) Microsoft Corporation. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE
+
+import (
+ "context"
+ "net/http"
+ "os"
+
+ "github.com/devigned/tab"
+)
+
+func (ns *Namespace) startSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ return ctx, span
+}
+
+func (m *Message) startSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ attrs := []tab.Attribute{tab.StringAttribute("amqp.message.id", m.ID)}
+ if m.SessionID != nil {
+ attrs = append(attrs, tab.StringAttribute("amqp.session.id", *m.SessionID))
+ }
+ if m.GroupSequence != nil {
+ attrs = append(attrs, tab.Int64Attribute("amqp.sequence_number", int64(*m.GroupSequence)))
+ }
+ return ctx, span
+}
+
+func (em *entityManager) startSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ span.AddAttributes(tab.StringAttribute("span.kind", "client"))
+ return ctx, span
+}
+
+func applyRequestInfo(span tab.Spanner, req *http.Request) {
+ span.AddAttributes(
+ tab.StringAttribute("http.url", req.URL.String()),
+ tab.StringAttribute("http.method", req.Method),
+ )
+}
+
+func applyResponseInfo(span tab.Spanner, res *http.Response) {
+ if res != nil {
+ span.AddAttributes(tab.Int64Attribute("http.status_code", int64(res.StatusCode)))
+ }
+}
+
+func (e *entity) startSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ span.AddAttributes(tab.StringAttribute("message_bus.destination", e.ManagementPath()))
+ return ctx, span
+}
+
+func (si sessionIdentifiable) startSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ return ctx, span
+}
+
+func (s *Sender) startProducerSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ span.AddAttributes(
+ tab.StringAttribute("span.kind", "producer"),
+ tab.StringAttribute("message_bus.destination", s.getFullIdentifier()),
+ )
+ return ctx, span
+}
+
+func (r *Receiver) startConsumerSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := startConsumerSpanFromContext(ctx, operationName)
+ span.AddAttributes(tab.StringAttribute("message_bus.destination", r.entityPath))
+ return ctx, span
+}
+
+func (r *rpcClient) startSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := startConsumerSpanFromContext(ctx, operationName)
+ span.AddAttributes(tab.StringAttribute("message_bus.destination", r.ec.ManagementPath()))
+ return ctx, span
+}
+
+func startConsumerSpanFromContext(ctx context.Context, operationName string) (context.Context, tab.Spanner) {
+ ctx, span := tab.StartSpan(ctx, operationName)
+ applyComponentInfo(span)
+ span.AddAttributes(tab.StringAttribute("span.kind", "consumer"))
+ return ctx, span
+}
+
+func applyComponentInfo(span tab.Spanner) {
+ span.AddAttributes(
+ tab.StringAttribute("component", "github.com/Azure/azure-service-bus-go"),
+ tab.StringAttribute("version", Version),
+ )
+ applyNetworkInfo(span)
+}
+
+func applyNetworkInfo(span tab.Spanner) {
+ hostname, err := os.Hostname()
+ if err == nil {
+ span.AddAttributes(
+ tab.StringAttribute("peer.hostname", hostname),
+ )
+ }
+}
diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE
deleted file mode 100644
index e3d9a64d..00000000
--- a/vendor/github.com/Azure/go-ansiterm/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Microsoft Corporation
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md
deleted file mode 100644
index 261c041e..00000000
--- a/vendor/github.com/Azure/go-ansiterm/README.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# go-ansiterm
-
-This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent.
-
-For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position.
-
-The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go).
-
-See parser_test.go for examples exercising the state machine and generating appropriate function calls.
-
------
-This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go
deleted file mode 100644
index 96504a33..00000000
--- a/vendor/github.com/Azure/go-ansiterm/constants.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package ansiterm
-
-const LogEnv = "DEBUG_TERMINAL"
-
-// ANSI constants
-// References:
-// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm
-// -- http://man7.org/linux/man-pages/man4/console_codes.4.html
-// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
-// -- http://en.wikipedia.org/wiki/ANSI_escape_code
-// -- http://vt100.net/emu/dec_ansi_parser
-// -- http://vt100.net/emu/vt500_parser.svg
-// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html
-// -- http://www.inwap.com/pdp10/ansicode.txt
-const (
- // ECMA-48 Set Graphics Rendition
- // Note:
- // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved
- // -- Fonts could possibly be supported via SetCurrentConsoleFontEx
- // -- Windows does not expose the per-window cursor (i.e., caret) blink times
- ANSI_SGR_RESET = 0
- ANSI_SGR_BOLD = 1
- ANSI_SGR_DIM = 2
- _ANSI_SGR_ITALIC = 3
- ANSI_SGR_UNDERLINE = 4
- _ANSI_SGR_BLINKSLOW = 5
- _ANSI_SGR_BLINKFAST = 6
- ANSI_SGR_REVERSE = 7
- _ANSI_SGR_INVISIBLE = 8
- _ANSI_SGR_LINETHROUGH = 9
- _ANSI_SGR_FONT_00 = 10
- _ANSI_SGR_FONT_01 = 11
- _ANSI_SGR_FONT_02 = 12
- _ANSI_SGR_FONT_03 = 13
- _ANSI_SGR_FONT_04 = 14
- _ANSI_SGR_FONT_05 = 15
- _ANSI_SGR_FONT_06 = 16
- _ANSI_SGR_FONT_07 = 17
- _ANSI_SGR_FONT_08 = 18
- _ANSI_SGR_FONT_09 = 19
- _ANSI_SGR_FONT_10 = 20
- _ANSI_SGR_DOUBLEUNDERLINE = 21
- ANSI_SGR_BOLD_DIM_OFF = 22
- _ANSI_SGR_ITALIC_OFF = 23
- ANSI_SGR_UNDERLINE_OFF = 24
- _ANSI_SGR_BLINK_OFF = 25
- _ANSI_SGR_RESERVED_00 = 26
- ANSI_SGR_REVERSE_OFF = 27
- _ANSI_SGR_INVISIBLE_OFF = 28
- _ANSI_SGR_LINETHROUGH_OFF = 29
- ANSI_SGR_FOREGROUND_BLACK = 30
- ANSI_SGR_FOREGROUND_RED = 31
- ANSI_SGR_FOREGROUND_GREEN = 32
- ANSI_SGR_FOREGROUND_YELLOW = 33
- ANSI_SGR_FOREGROUND_BLUE = 34
- ANSI_SGR_FOREGROUND_MAGENTA = 35
- ANSI_SGR_FOREGROUND_CYAN = 36
- ANSI_SGR_FOREGROUND_WHITE = 37
- _ANSI_SGR_RESERVED_01 = 38
- ANSI_SGR_FOREGROUND_DEFAULT = 39
- ANSI_SGR_BACKGROUND_BLACK = 40
- ANSI_SGR_BACKGROUND_RED = 41
- ANSI_SGR_BACKGROUND_GREEN = 42
- ANSI_SGR_BACKGROUND_YELLOW = 43
- ANSI_SGR_BACKGROUND_BLUE = 44
- ANSI_SGR_BACKGROUND_MAGENTA = 45
- ANSI_SGR_BACKGROUND_CYAN = 46
- ANSI_SGR_BACKGROUND_WHITE = 47
- _ANSI_SGR_RESERVED_02 = 48
- ANSI_SGR_BACKGROUND_DEFAULT = 49
- // 50 - 65: Unsupported
-
- ANSI_MAX_CMD_LENGTH = 4096
-
- MAX_INPUT_EVENTS = 128
- DEFAULT_WIDTH = 80
- DEFAULT_HEIGHT = 24
-
- ANSI_BEL = 0x07
- ANSI_BACKSPACE = 0x08
- ANSI_TAB = 0x09
- ANSI_LINE_FEED = 0x0A
- ANSI_VERTICAL_TAB = 0x0B
- ANSI_FORM_FEED = 0x0C
- ANSI_CARRIAGE_RETURN = 0x0D
- ANSI_ESCAPE_PRIMARY = 0x1B
- ANSI_ESCAPE_SECONDARY = 0x5B
- ANSI_OSC_STRING_ENTRY = 0x5D
- ANSI_COMMAND_FIRST = 0x40
- ANSI_COMMAND_LAST = 0x7E
- DCS_ENTRY = 0x90
- CSI_ENTRY = 0x9B
- OSC_STRING = 0x9D
- ANSI_PARAMETER_SEP = ";"
- ANSI_CMD_G0 = '('
- ANSI_CMD_G1 = ')'
- ANSI_CMD_G2 = '*'
- ANSI_CMD_G3 = '+'
- ANSI_CMD_DECPNM = '>'
- ANSI_CMD_DECPAM = '='
- ANSI_CMD_OSC = ']'
- ANSI_CMD_STR_TERM = '\\'
-
- KEY_CONTROL_PARAM_2 = ";2"
- KEY_CONTROL_PARAM_3 = ";3"
- KEY_CONTROL_PARAM_4 = ";4"
- KEY_CONTROL_PARAM_5 = ";5"
- KEY_CONTROL_PARAM_6 = ";6"
- KEY_CONTROL_PARAM_7 = ";7"
- KEY_CONTROL_PARAM_8 = ";8"
- KEY_ESC_CSI = "\x1B["
- KEY_ESC_N = "\x1BN"
- KEY_ESC_O = "\x1BO"
-
- FILL_CHARACTER = ' '
-)
-
-func getByteRange(start byte, end byte) []byte {
- bytes := make([]byte, 0, 32)
- for i := start; i <= end; i++ {
- bytes = append(bytes, byte(i))
- }
-
- return bytes
-}
-
-var toGroundBytes = getToGroundBytes()
-var executors = getExecuteBytes()
-
-// SPACE 20+A0 hex Always and everywhere a blank space
-// Intermediate 20-2F hex !"#$%&'()*+,-./
-var intermeds = getByteRange(0x20, 0x2F)
-
-// Parameters 30-3F hex 0123456789:;<=>?
-// CSI Parameters 30-39, 3B hex 0123456789;
-var csiParams = getByteRange(0x30, 0x3F)
-
-var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...)
-
-// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_
-var upperCase = getByteRange(0x40, 0x5F)
-
-// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~
-var lowerCase = getByteRange(0x60, 0x7E)
-
-// Alphabetics 40-7E hex (all of upper and lower case)
-var alphabetics = append(upperCase, lowerCase...)
-
-var printables = getByteRange(0x20, 0x7F)
-
-var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E)
-var escapeToGroundBytes = getEscapeToGroundBytes()
-
-// See http://www.vt100.net/emu/vt500_parser.png for description of the complex
-// byte ranges below
-
-func getEscapeToGroundBytes() []byte {
- escapeToGroundBytes := getByteRange(0x30, 0x4F)
- escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...)
- escapeToGroundBytes = append(escapeToGroundBytes, 0x59)
- escapeToGroundBytes = append(escapeToGroundBytes, 0x5A)
- escapeToGroundBytes = append(escapeToGroundBytes, 0x5C)
- escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...)
- return escapeToGroundBytes
-}
-
-func getExecuteBytes() []byte {
- executeBytes := getByteRange(0x00, 0x17)
- executeBytes = append(executeBytes, 0x19)
- executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...)
- return executeBytes
-}
-
-func getToGroundBytes() []byte {
- groundBytes := []byte{0x18}
- groundBytes = append(groundBytes, 0x1A)
- groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...)
- groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...)
- groundBytes = append(groundBytes, 0x99)
- groundBytes = append(groundBytes, 0x9A)
- groundBytes = append(groundBytes, 0x9C)
- return groundBytes
-}
-
-// Delete 7F hex Always and everywhere ignored
-// C1 Control 80-9F hex 32 additional control characters
-// G1 Displayable A1-FE hex 94 additional displayable characters
-// Special A0+FF hex Same as SPACE and DELETE
diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go
deleted file mode 100644
index 8d66e777..00000000
--- a/vendor/github.com/Azure/go-ansiterm/context.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package ansiterm
-
-type ansiContext struct {
- currentChar byte
- paramBuffer []byte
- interBuffer []byte
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
deleted file mode 100644
index bcbe00d0..00000000
--- a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package ansiterm
-
-type csiEntryState struct {
- baseState
-}
-
-func (csiState csiEntryState) Handle(b byte) (s state, e error) {
- csiState.parser.logf("CsiEntry::Handle %#x", b)
-
- nextState, err := csiState.baseState.Handle(b)
- if nextState != nil || err != nil {
- return nextState, err
- }
-
- switch {
- case sliceContains(alphabetics, b):
- return csiState.parser.ground, nil
- case sliceContains(csiCollectables, b):
- return csiState.parser.csiParam, nil
- case sliceContains(executors, b):
- return csiState, csiState.parser.execute()
- }
-
- return csiState, nil
-}
-
-func (csiState csiEntryState) Transition(s state) error {
- csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name())
- csiState.baseState.Transition(s)
-
- switch s {
- case csiState.parser.ground:
- return csiState.parser.csiDispatch()
- case csiState.parser.csiParam:
- switch {
- case sliceContains(csiParams, csiState.parser.context.currentChar):
- csiState.parser.collectParam()
- case sliceContains(intermeds, csiState.parser.context.currentChar):
- csiState.parser.collectInter()
- }
- }
-
- return nil
-}
-
-func (csiState csiEntryState) Enter() error {
- csiState.parser.clear()
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
deleted file mode 100644
index 7ed5e01c..00000000
--- a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package ansiterm
-
-type csiParamState struct {
- baseState
-}
-
-func (csiState csiParamState) Handle(b byte) (s state, e error) {
- csiState.parser.logf("CsiParam::Handle %#x", b)
-
- nextState, err := csiState.baseState.Handle(b)
- if nextState != nil || err != nil {
- return nextState, err
- }
-
- switch {
- case sliceContains(alphabetics, b):
- return csiState.parser.ground, nil
- case sliceContains(csiCollectables, b):
- csiState.parser.collectParam()
- return csiState, nil
- case sliceContains(executors, b):
- return csiState, csiState.parser.execute()
- }
-
- return csiState, nil
-}
-
-func (csiState csiParamState) Transition(s state) error {
- csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name())
- csiState.baseState.Transition(s)
-
- switch s {
- case csiState.parser.ground:
- return csiState.parser.csiDispatch()
- }
-
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
deleted file mode 100644
index 1c719db9..00000000
--- a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package ansiterm
-
-type escapeIntermediateState struct {
- baseState
-}
-
-func (escState escapeIntermediateState) Handle(b byte) (s state, e error) {
- escState.parser.logf("escapeIntermediateState::Handle %#x", b)
- nextState, err := escState.baseState.Handle(b)
- if nextState != nil || err != nil {
- return nextState, err
- }
-
- switch {
- case sliceContains(intermeds, b):
- return escState, escState.parser.collectInter()
- case sliceContains(executors, b):
- return escState, escState.parser.execute()
- case sliceContains(escapeIntermediateToGroundBytes, b):
- return escState.parser.ground, nil
- }
-
- return escState, nil
-}
-
-func (escState escapeIntermediateState) Transition(s state) error {
- escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name())
- escState.baseState.Transition(s)
-
- switch s {
- case escState.parser.ground:
- return escState.parser.escDispatch()
- }
-
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go
deleted file mode 100644
index 6390abd2..00000000
--- a/vendor/github.com/Azure/go-ansiterm/escape_state.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package ansiterm
-
-type escapeState struct {
- baseState
-}
-
-func (escState escapeState) Handle(b byte) (s state, e error) {
- escState.parser.logf("escapeState::Handle %#x", b)
- nextState, err := escState.baseState.Handle(b)
- if nextState != nil || err != nil {
- return nextState, err
- }
-
- switch {
- case b == ANSI_ESCAPE_SECONDARY:
- return escState.parser.csiEntry, nil
- case b == ANSI_OSC_STRING_ENTRY:
- return escState.parser.oscString, nil
- case sliceContains(executors, b):
- return escState, escState.parser.execute()
- case sliceContains(escapeToGroundBytes, b):
- return escState.parser.ground, nil
- case sliceContains(intermeds, b):
- return escState.parser.escapeIntermediate, nil
- }
-
- return escState, nil
-}
-
-func (escState escapeState) Transition(s state) error {
- escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name())
- escState.baseState.Transition(s)
-
- switch s {
- case escState.parser.ground:
- return escState.parser.escDispatch()
- case escState.parser.escapeIntermediate:
- return escState.parser.collectInter()
- }
-
- return nil
-}
-
-func (escState escapeState) Enter() error {
- escState.parser.clear()
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go
deleted file mode 100644
index 98087b38..00000000
--- a/vendor/github.com/Azure/go-ansiterm/event_handler.go
+++ /dev/null
@@ -1,90 +0,0 @@
-package ansiterm
-
-type AnsiEventHandler interface {
- // Print
- Print(b byte) error
-
- // Execute C0 commands
- Execute(b byte) error
-
- // CUrsor Up
- CUU(int) error
-
- // CUrsor Down
- CUD(int) error
-
- // CUrsor Forward
- CUF(int) error
-
- // CUrsor Backward
- CUB(int) error
-
- // Cursor to Next Line
- CNL(int) error
-
- // Cursor to Previous Line
- CPL(int) error
-
- // Cursor Horizontal position Absolute
- CHA(int) error
-
- // Vertical line Position Absolute
- VPA(int) error
-
- // CUrsor Position
- CUP(int, int) error
-
- // Horizontal and Vertical Position (depends on PUM)
- HVP(int, int) error
-
- // Text Cursor Enable Mode
- DECTCEM(bool) error
-
- // Origin Mode
- DECOM(bool) error
-
- // 132 Column Mode
- DECCOLM(bool) error
-
- // Erase in Display
- ED(int) error
-
- // Erase in Line
- EL(int) error
-
- // Insert Line
- IL(int) error
-
- // Delete Line
- DL(int) error
-
- // Insert Character
- ICH(int) error
-
- // Delete Character
- DCH(int) error
-
- // Set Graphics Rendition
- SGR([]int) error
-
- // Pan Down
- SU(int) error
-
- // Pan Up
- SD(int) error
-
- // Device Attributes
- DA([]string) error
-
- // Set Top and Bottom Margins
- DECSTBM(int, int) error
-
- // Index
- IND() error
-
- // Reverse Index
- RI() error
-
- // Flush updates from previous commands
- Flush() error
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go
deleted file mode 100644
index 52451e94..00000000
--- a/vendor/github.com/Azure/go-ansiterm/ground_state.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package ansiterm
-
-type groundState struct {
- baseState
-}
-
-func (gs groundState) Handle(b byte) (s state, e error) {
- gs.parser.context.currentChar = b
-
- nextState, err := gs.baseState.Handle(b)
- if nextState != nil || err != nil {
- return nextState, err
- }
-
- switch {
- case sliceContains(printables, b):
- return gs, gs.parser.print()
-
- case sliceContains(executors, b):
- return gs, gs.parser.execute()
- }
-
- return gs, nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
deleted file mode 100644
index 593b10ab..00000000
--- a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package ansiterm
-
-type oscStringState struct {
- baseState
-}
-
-func (oscState oscStringState) Handle(b byte) (s state, e error) {
- oscState.parser.logf("OscString::Handle %#x", b)
- nextState, err := oscState.baseState.Handle(b)
- if nextState != nil || err != nil {
- return nextState, err
- }
-
- switch {
- case isOscStringTerminator(b):
- return oscState.parser.ground, nil
- }
-
- return oscState, nil
-}
-
-// See below for OSC string terminators for linux
-// http://man7.org/linux/man-pages/man4/console_codes.4.html
-func isOscStringTerminator(b byte) bool {
-
- if b == ANSI_BEL || b == 0x5C {
- return true
- }
-
- return false
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go
deleted file mode 100644
index 03cec7ad..00000000
--- a/vendor/github.com/Azure/go-ansiterm/parser.go
+++ /dev/null
@@ -1,151 +0,0 @@
-package ansiterm
-
-import (
- "errors"
- "log"
- "os"
-)
-
-type AnsiParser struct {
- currState state
- eventHandler AnsiEventHandler
- context *ansiContext
- csiEntry state
- csiParam state
- dcsEntry state
- escape state
- escapeIntermediate state
- error state
- ground state
- oscString state
- stateMap []state
-
- logf func(string, ...interface{})
-}
-
-type Option func(*AnsiParser)
-
-func WithLogf(f func(string, ...interface{})) Option {
- return func(ap *AnsiParser) {
- ap.logf = f
- }
-}
-
-func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser {
- ap := &AnsiParser{
- eventHandler: evtHandler,
- context: &ansiContext{},
- }
- for _, o := range opts {
- o(ap)
- }
-
- if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" {
- logFile, _ := os.Create("ansiParser.log")
- logger := log.New(logFile, "", log.LstdFlags)
- if ap.logf != nil {
- l := ap.logf
- ap.logf = func(s string, v ...interface{}) {
- l(s, v...)
- logger.Printf(s, v...)
- }
- } else {
- ap.logf = logger.Printf
- }
- }
-
- if ap.logf == nil {
- ap.logf = func(string, ...interface{}) {}
- }
-
- ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}}
- ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}}
- ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}}
- ap.escape = escapeState{baseState{name: "Escape", parser: ap}}
- ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}}
- ap.error = errorState{baseState{name: "Error", parser: ap}}
- ap.ground = groundState{baseState{name: "Ground", parser: ap}}
- ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}}
-
- ap.stateMap = []state{
- ap.csiEntry,
- ap.csiParam,
- ap.dcsEntry,
- ap.escape,
- ap.escapeIntermediate,
- ap.error,
- ap.ground,
- ap.oscString,
- }
-
- ap.currState = getState(initialState, ap.stateMap)
-
- ap.logf("CreateParser: parser %p", ap)
- return ap
-}
-
-func getState(name string, states []state) state {
- for _, el := range states {
- if el.Name() == name {
- return el
- }
- }
-
- return nil
-}
-
-func (ap *AnsiParser) Parse(bytes []byte) (int, error) {
- for i, b := range bytes {
- if err := ap.handle(b); err != nil {
- return i, err
- }
- }
-
- return len(bytes), ap.eventHandler.Flush()
-}
-
-func (ap *AnsiParser) handle(b byte) error {
- ap.context.currentChar = b
- newState, err := ap.currState.Handle(b)
- if err != nil {
- return err
- }
-
- if newState == nil {
- ap.logf("WARNING: newState is nil")
- return errors.New("New state of 'nil' is invalid.")
- }
-
- if newState != ap.currState {
- if err := ap.changeState(newState); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (ap *AnsiParser) changeState(newState state) error {
- ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name())
-
- // Exit old state
- if err := ap.currState.Exit(); err != nil {
- ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err)
- return err
- }
-
- // Perform transition action
- if err := ap.currState.Transition(newState); err != nil {
- ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err)
- return err
- }
-
- // Enter new state
- if err := newState.Enter(); err != nil {
- ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err)
- return err
- }
-
- ap.currState = newState
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
deleted file mode 100644
index de0a1f9c..00000000
--- a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package ansiterm
-
-import (
- "strconv"
-)
-
-func parseParams(bytes []byte) ([]string, error) {
- paramBuff := make([]byte, 0, 0)
- params := []string{}
-
- for _, v := range bytes {
- if v == ';' {
- if len(paramBuff) > 0 {
- // Completed parameter, append it to the list
- s := string(paramBuff)
- params = append(params, s)
- paramBuff = make([]byte, 0, 0)
- }
- } else {
- paramBuff = append(paramBuff, v)
- }
- }
-
- // Last parameter may not be terminated with ';'
- if len(paramBuff) > 0 {
- s := string(paramBuff)
- params = append(params, s)
- }
-
- return params, nil
-}
-
-func parseCmd(context ansiContext) (string, error) {
- return string(context.currentChar), nil
-}
-
-func getInt(params []string, dflt int) int {
- i := getInts(params, 1, dflt)[0]
- return i
-}
-
-func getInts(params []string, minCount int, dflt int) []int {
- ints := []int{}
-
- for _, v := range params {
- i, _ := strconv.Atoi(v)
- // Zero is mapped to the default value in VT100.
- if i == 0 {
- i = dflt
- }
- ints = append(ints, i)
- }
-
- if len(ints) < minCount {
- remaining := minCount - len(ints)
- for i := 0; i < remaining; i++ {
- ints = append(ints, dflt)
- }
- }
-
- return ints
-}
-
-func (ap *AnsiParser) modeDispatch(param string, set bool) error {
- switch param {
- case "?3":
- return ap.eventHandler.DECCOLM(set)
- case "?6":
- return ap.eventHandler.DECOM(set)
- case "?25":
- return ap.eventHandler.DECTCEM(set)
- }
- return nil
-}
-
-func (ap *AnsiParser) hDispatch(params []string) error {
- if len(params) == 1 {
- return ap.modeDispatch(params[0], true)
- }
-
- return nil
-}
-
-func (ap *AnsiParser) lDispatch(params []string) error {
- if len(params) == 1 {
- return ap.modeDispatch(params[0], false)
- }
-
- return nil
-}
-
-func getEraseParam(params []string) int {
- param := getInt(params, 0)
- if param < 0 || 3 < param {
- param = 0
- }
-
- return param
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go
deleted file mode 100644
index 0bb5e51e..00000000
--- a/vendor/github.com/Azure/go-ansiterm/parser_actions.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package ansiterm
-
-func (ap *AnsiParser) collectParam() error {
- currChar := ap.context.currentChar
- ap.logf("collectParam %#x", currChar)
- ap.context.paramBuffer = append(ap.context.paramBuffer, currChar)
- return nil
-}
-
-func (ap *AnsiParser) collectInter() error {
- currChar := ap.context.currentChar
- ap.logf("collectInter %#x", currChar)
- ap.context.paramBuffer = append(ap.context.interBuffer, currChar)
- return nil
-}
-
-func (ap *AnsiParser) escDispatch() error {
- cmd, _ := parseCmd(*ap.context)
- intermeds := ap.context.interBuffer
- ap.logf("escDispatch currentChar: %#x", ap.context.currentChar)
- ap.logf("escDispatch: %v(%v)", cmd, intermeds)
-
- switch cmd {
- case "D": // IND
- return ap.eventHandler.IND()
- case "E": // NEL, equivalent to CRLF
- err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN)
- if err == nil {
- err = ap.eventHandler.Execute(ANSI_LINE_FEED)
- }
- return err
- case "M": // RI
- return ap.eventHandler.RI()
- }
-
- return nil
-}
-
-func (ap *AnsiParser) csiDispatch() error {
- cmd, _ := parseCmd(*ap.context)
- params, _ := parseParams(ap.context.paramBuffer)
- ap.logf("Parsed params: %v with length: %d", params, len(params))
-
- ap.logf("csiDispatch: %v(%v)", cmd, params)
-
- switch cmd {
- case "@":
- return ap.eventHandler.ICH(getInt(params, 1))
- case "A":
- return ap.eventHandler.CUU(getInt(params, 1))
- case "B":
- return ap.eventHandler.CUD(getInt(params, 1))
- case "C":
- return ap.eventHandler.CUF(getInt(params, 1))
- case "D":
- return ap.eventHandler.CUB(getInt(params, 1))
- case "E":
- return ap.eventHandler.CNL(getInt(params, 1))
- case "F":
- return ap.eventHandler.CPL(getInt(params, 1))
- case "G":
- return ap.eventHandler.CHA(getInt(params, 1))
- case "H":
- ints := getInts(params, 2, 1)
- x, y := ints[0], ints[1]
- return ap.eventHandler.CUP(x, y)
- case "J":
- param := getEraseParam(params)
- return ap.eventHandler.ED(param)
- case "K":
- param := getEraseParam(params)
- return ap.eventHandler.EL(param)
- case "L":
- return ap.eventHandler.IL(getInt(params, 1))
- case "M":
- return ap.eventHandler.DL(getInt(params, 1))
- case "P":
- return ap.eventHandler.DCH(getInt(params, 1))
- case "S":
- return ap.eventHandler.SU(getInt(params, 1))
- case "T":
- return ap.eventHandler.SD(getInt(params, 1))
- case "c":
- return ap.eventHandler.DA(params)
- case "d":
- return ap.eventHandler.VPA(getInt(params, 1))
- case "f":
- ints := getInts(params, 2, 1)
- x, y := ints[0], ints[1]
- return ap.eventHandler.HVP(x, y)
- case "h":
- return ap.hDispatch(params)
- case "l":
- return ap.lDispatch(params)
- case "m":
- return ap.eventHandler.SGR(getInts(params, 1, 0))
- case "r":
- ints := getInts(params, 2, 1)
- top, bottom := ints[0], ints[1]
- return ap.eventHandler.DECSTBM(top, bottom)
- default:
- ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)
- return nil
- }
-
-}
-
-func (ap *AnsiParser) print() error {
- return ap.eventHandler.Print(ap.context.currentChar)
-}
-
-func (ap *AnsiParser) clear() error {
- ap.context = &ansiContext{}
- return nil
-}
-
-func (ap *AnsiParser) execute() error {
- return ap.eventHandler.Execute(ap.context.currentChar)
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go
deleted file mode 100644
index f2ea1fcd..00000000
--- a/vendor/github.com/Azure/go-ansiterm/states.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package ansiterm
-
-type stateID int
-
-type state interface {
- Enter() error
- Exit() error
- Handle(byte) (state, error)
- Name() string
- Transition(state) error
-}
-
-type baseState struct {
- name string
- parser *AnsiParser
-}
-
-func (base baseState) Enter() error {
- return nil
-}
-
-func (base baseState) Exit() error {
- return nil
-}
-
-func (base baseState) Handle(b byte) (s state, e error) {
-
- switch {
- case b == CSI_ENTRY:
- return base.parser.csiEntry, nil
- case b == DCS_ENTRY:
- return base.parser.dcsEntry, nil
- case b == ANSI_ESCAPE_PRIMARY:
- return base.parser.escape, nil
- case b == OSC_STRING:
- return base.parser.oscString, nil
- case sliceContains(toGroundBytes, b):
- return base.parser.ground, nil
- }
-
- return nil, nil
-}
-
-func (base baseState) Name() string {
- return base.name
-}
-
-func (base baseState) Transition(s state) error {
- if s == base.parser.ground {
- execBytes := []byte{0x18}
- execBytes = append(execBytes, 0x1A)
- execBytes = append(execBytes, getByteRange(0x80, 0x8F)...)
- execBytes = append(execBytes, getByteRange(0x91, 0x97)...)
- execBytes = append(execBytes, 0x99)
- execBytes = append(execBytes, 0x9A)
-
- if sliceContains(execBytes, base.parser.context.currentChar) {
- return base.parser.execute()
- }
- }
-
- return nil
-}
-
-type dcsEntryState struct {
- baseState
-}
-
-type errorState struct {
- baseState
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go
deleted file mode 100644
index 39211449..00000000
--- a/vendor/github.com/Azure/go-ansiterm/utilities.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package ansiterm
-
-import (
- "strconv"
-)
-
-func sliceContains(bytes []byte, b byte) bool {
- for _, v := range bytes {
- if v == b {
- return true
- }
- }
-
- return false
-}
-
-func convertBytesToInteger(bytes []byte) int {
- s := string(bytes)
- i, _ := strconv.Atoi(s)
- return i
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
deleted file mode 100644
index a6732797..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go
+++ /dev/null
@@ -1,182 +0,0 @@
-// +build windows
-
-package winterm
-
-import (
- "fmt"
- "os"
- "strconv"
- "strings"
- "syscall"
-
- "github.com/Azure/go-ansiterm"
-)
-
-// Windows keyboard constants
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx.
-const (
- VK_PRIOR = 0x21 // PAGE UP key
- VK_NEXT = 0x22 // PAGE DOWN key
- VK_END = 0x23 // END key
- VK_HOME = 0x24 // HOME key
- VK_LEFT = 0x25 // LEFT ARROW key
- VK_UP = 0x26 // UP ARROW key
- VK_RIGHT = 0x27 // RIGHT ARROW key
- VK_DOWN = 0x28 // DOWN ARROW key
- VK_SELECT = 0x29 // SELECT key
- VK_PRINT = 0x2A // PRINT key
- VK_EXECUTE = 0x2B // EXECUTE key
- VK_SNAPSHOT = 0x2C // PRINT SCREEN key
- VK_INSERT = 0x2D // INS key
- VK_DELETE = 0x2E // DEL key
- VK_HELP = 0x2F // HELP key
- VK_F1 = 0x70 // F1 key
- VK_F2 = 0x71 // F2 key
- VK_F3 = 0x72 // F3 key
- VK_F4 = 0x73 // F4 key
- VK_F5 = 0x74 // F5 key
- VK_F6 = 0x75 // F6 key
- VK_F7 = 0x76 // F7 key
- VK_F8 = 0x77 // F8 key
- VK_F9 = 0x78 // F9 key
- VK_F10 = 0x79 // F10 key
- VK_F11 = 0x7A // F11 key
- VK_F12 = 0x7B // F12 key
-
- RIGHT_ALT_PRESSED = 0x0001
- LEFT_ALT_PRESSED = 0x0002
- RIGHT_CTRL_PRESSED = 0x0004
- LEFT_CTRL_PRESSED = 0x0008
- SHIFT_PRESSED = 0x0010
- NUMLOCK_ON = 0x0020
- SCROLLLOCK_ON = 0x0040
- CAPSLOCK_ON = 0x0080
- ENHANCED_KEY = 0x0100
-)
-
-type ansiCommand struct {
- CommandBytes []byte
- Command string
- Parameters []string
- IsSpecial bool
-}
-
-func newAnsiCommand(command []byte) *ansiCommand {
-
- if isCharacterSelectionCmdChar(command[1]) {
- // Is Character Set Selection commands
- return &ansiCommand{
- CommandBytes: command,
- Command: string(command),
- IsSpecial: true,
- }
- }
-
- // last char is command character
- lastCharIndex := len(command) - 1
-
- ac := &ansiCommand{
- CommandBytes: command,
- Command: string(command[lastCharIndex]),
- IsSpecial: false,
- }
-
- // more than a single escape
- if lastCharIndex != 0 {
- start := 1
- // skip if double char escape sequence
- if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY {
- start++
- }
- // convert this to GetNextParam method
- ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP)
- }
-
- return ac
-}
-
-func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 {
- if index < 0 || index >= len(ac.Parameters) {
- return defaultValue
- }
-
- param, err := strconv.ParseInt(ac.Parameters[index], 10, 16)
- if err != nil {
- return defaultValue
- }
-
- return int16(param)
-}
-
-func (ac *ansiCommand) String() string {
- return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
- bytesToHex(ac.CommandBytes),
- ac.Command,
- strings.Join(ac.Parameters, "\",\""))
-}
-
-// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands.
-// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html.
-func isAnsiCommandChar(b byte) bool {
- switch {
- case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY:
- return true
- case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM:
- // non-CSI escape sequence terminator
- return true
- case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL:
- // String escape sequence terminator
- return true
- }
- return false
-}
-
-func isXtermOscSequence(command []byte, current byte) bool {
- return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL)
-}
-
-func isCharacterSelectionCmdChar(b byte) bool {
- return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3)
-}
-
-// bytesToHex converts a slice of bytes to a human-readable string.
-func bytesToHex(b []byte) string {
- hex := make([]string, len(b))
- for i, ch := range b {
- hex[i] = fmt.Sprintf("%X", ch)
- }
- return strings.Join(hex, "")
-}
-
-// ensureInRange adjusts the passed value, if necessary, to ensure it is within
-// the passed min / max range.
-func ensureInRange(n int16, min int16, max int16) int16 {
- if n < min {
- return min
- } else if n > max {
- return max
- } else {
- return n
- }
-}
-
-func GetStdFile(nFile int) (*os.File, uintptr) {
- var file *os.File
- switch nFile {
- case syscall.STD_INPUT_HANDLE:
- file = os.Stdin
- case syscall.STD_OUTPUT_HANDLE:
- file = os.Stdout
- case syscall.STD_ERROR_HANDLE:
- file = os.Stderr
- default:
- panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile))
- }
-
- fd, err := syscall.GetStdHandle(nFile)
- if err != nil {
- panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err))
- }
-
- return file, uintptr(fd)
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go
deleted file mode 100644
index 6055e33b..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/api.go
+++ /dev/null
@@ -1,327 +0,0 @@
-// +build windows
-
-package winterm
-
-import (
- "fmt"
- "syscall"
- "unsafe"
-)
-
-//===========================================================================================================
-// IMPORTANT NOTE:
-//
-// The methods below make extensive use of the "unsafe" package to obtain the required pointers.
-// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack
-// variables) the pointers reference *before* the API completes.
-//
-// As a result, in those cases, the code must hint that the variables remain in active by invoking the
-// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer
-// require unsafe pointers.
-//
-// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform
-// the garbage collector the variables remain in use if:
-//
-// -- The value is not a pointer (e.g., int32, struct)
-// -- The value is not referenced by the method after passing the pointer to Windows
-//
-// See http://golang.org/doc/go1.3.
-//===========================================================================================================
-
-var (
- kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
-
- getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo")
- setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo")
- setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition")
- setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode")
- getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
- setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize")
- scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA")
- setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
- setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo")
- writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW")
- readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW")
- waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject")
-)
-
-// Windows Console constants
-const (
- // Console modes
- // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
- ENABLE_PROCESSED_INPUT = 0x0001
- ENABLE_LINE_INPUT = 0x0002
- ENABLE_ECHO_INPUT = 0x0004
- ENABLE_WINDOW_INPUT = 0x0008
- ENABLE_MOUSE_INPUT = 0x0010
- ENABLE_INSERT_MODE = 0x0020
- ENABLE_QUICK_EDIT_MODE = 0x0040
- ENABLE_EXTENDED_FLAGS = 0x0080
- ENABLE_AUTO_POSITION = 0x0100
- ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200
-
- ENABLE_PROCESSED_OUTPUT = 0x0001
- ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002
- ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004
- DISABLE_NEWLINE_AUTO_RETURN = 0x0008
- ENABLE_LVB_GRID_WORLDWIDE = 0x0010
-
- // Character attributes
- // Note:
- // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
- // Clearing all foreground or background colors results in black; setting all creates white.
- // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
- FOREGROUND_BLUE uint16 = 0x0001
- FOREGROUND_GREEN uint16 = 0x0002
- FOREGROUND_RED uint16 = 0x0004
- FOREGROUND_INTENSITY uint16 = 0x0008
- FOREGROUND_MASK uint16 = 0x000F
-
- BACKGROUND_BLUE uint16 = 0x0010
- BACKGROUND_GREEN uint16 = 0x0020
- BACKGROUND_RED uint16 = 0x0040
- BACKGROUND_INTENSITY uint16 = 0x0080
- BACKGROUND_MASK uint16 = 0x00F0
-
- COMMON_LVB_MASK uint16 = 0xFF00
- COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000
- COMMON_LVB_UNDERSCORE uint16 = 0x8000
-
- // Input event types
- // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
- KEY_EVENT = 0x0001
- MOUSE_EVENT = 0x0002
- WINDOW_BUFFER_SIZE_EVENT = 0x0004
- MENU_EVENT = 0x0008
- FOCUS_EVENT = 0x0010
-
- // WaitForSingleObject return codes
- WAIT_ABANDONED = 0x00000080
- WAIT_FAILED = 0xFFFFFFFF
- WAIT_SIGNALED = 0x0000000
- WAIT_TIMEOUT = 0x00000102
-
- // WaitForSingleObject wait duration
- WAIT_INFINITE = 0xFFFFFFFF
- WAIT_ONE_SECOND = 1000
- WAIT_HALF_SECOND = 500
- WAIT_QUARTER_SECOND = 250
-)
-
-// Windows API Console types
-// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD)
-// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment
-type (
- CHAR_INFO struct {
- UnicodeChar uint16
- Attributes uint16
- }
-
- CONSOLE_CURSOR_INFO struct {
- Size uint32
- Visible int32
- }
-
- CONSOLE_SCREEN_BUFFER_INFO struct {
- Size COORD
- CursorPosition COORD
- Attributes uint16
- Window SMALL_RECT
- MaximumWindowSize COORD
- }
-
- COORD struct {
- X int16
- Y int16
- }
-
- SMALL_RECT struct {
- Left int16
- Top int16
- Right int16
- Bottom int16
- }
-
- // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest
- // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx.
- INPUT_RECORD struct {
- EventType uint16
- KeyEvent KEY_EVENT_RECORD
- }
-
- KEY_EVENT_RECORD struct {
- KeyDown int32
- RepeatCount uint16
- VirtualKeyCode uint16
- VirtualScanCode uint16
- UnicodeChar uint16
- ControlKeyState uint32
- }
-
- WINDOW_BUFFER_SIZE struct {
- Size COORD
- }
-)
-
-// boolToBOOL converts a Go bool into a Windows int32.
-func boolToBOOL(f bool) int32 {
- if f {
- return int32(1)
- } else {
- return int32(0)
- }
-}
-
-// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx.
-func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
- r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
- return checkError(r1, r2, err)
-}
-
-// SetConsoleCursorInfo sets the size and visiblity of the console cursor.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx.
-func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error {
- r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0)
- return checkError(r1, r2, err)
-}
-
-// SetConsoleCursorPosition location of the console cursor.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx.
-func SetConsoleCursorPosition(handle uintptr, coord COORD) error {
- r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord))
- use(coord)
- return checkError(r1, r2, err)
-}
-
-// GetConsoleMode gets the console mode for given file descriptor
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx.
-func GetConsoleMode(handle uintptr) (mode uint32, err error) {
- err = syscall.GetConsoleMode(syscall.Handle(handle), &mode)
- return mode, err
-}
-
-// SetConsoleMode sets the console mode for given file descriptor
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx.
-func SetConsoleMode(handle uintptr, mode uint32) error {
- r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0)
- use(mode)
- return checkError(r1, r2, err)
-}
-
-// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx.
-func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
- info := CONSOLE_SCREEN_BUFFER_INFO{}
- err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0))
- if err != nil {
- return nil, err
- }
- return &info, nil
-}
-
-func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error {
- r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char)))
- use(scrollRect)
- use(clipRect)
- use(destOrigin)
- use(char)
- return checkError(r1, r2, err)
-}
-
-// SetConsoleScreenBufferSize sets the size of the console screen buffer.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx.
-func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error {
- r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord))
- use(coord)
- return checkError(r1, r2, err)
-}
-
-// SetConsoleTextAttribute sets the attributes of characters written to the
-// console screen buffer by the WriteFile or WriteConsole function.
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
-func SetConsoleTextAttribute(handle uintptr, attribute uint16) error {
- r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0)
- use(attribute)
- return checkError(r1, r2, err)
-}
-
-// SetConsoleWindowInfo sets the size and position of the console screen buffer's window.
-// Note that the size and location must be within and no larger than the backing console screen buffer.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx.
-func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error {
- r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect)))
- use(isAbsolute)
- use(rect)
- return checkError(r1, r2, err)
-}
-
-// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx.
-func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error {
- r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion)))
- use(buffer)
- use(bufferSize)
- use(bufferCoord)
- return checkError(r1, r2, err)
-}
-
-// ReadConsoleInput reads (and removes) data from the console input buffer.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx.
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error {
- r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count)))
- use(buffer)
- return checkError(r1, r2, err)
-}
-
-// WaitForSingleObject waits for the passed handle to be signaled.
-// It returns true if the handle was signaled; false otherwise.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx.
-func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) {
- r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait)))
- switch r1 {
- case WAIT_ABANDONED, WAIT_TIMEOUT:
- return false, nil
- case WAIT_SIGNALED:
- return true, nil
- }
- use(msWait)
- return false, err
-}
-
-// String helpers
-func (info CONSOLE_SCREEN_BUFFER_INFO) String() string {
- return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize)
-}
-
-func (coord COORD) String() string {
- return fmt.Sprintf("%v,%v", coord.X, coord.Y)
-}
-
-func (rect SMALL_RECT) String() string {
- return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom)
-}
-
-// checkError evaluates the results of a Windows API call and returns the error if it failed.
-func checkError(r1, r2 uintptr, err error) error {
- // Windows APIs return non-zero to indicate success
- if r1 != 0 {
- return nil
- }
-
- // Return the error if provided, otherwise default to EINVAL
- if err != nil {
- return err
- }
- return syscall.EINVAL
-}
-
-// coordToPointer converts a COORD into a uintptr (by fooling the type system).
-func coordToPointer(c COORD) uintptr {
- // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass.
- return uintptr(*((*uint32)(unsafe.Pointer(&c))))
-}
-
-// use is a no-op, but the compiler cannot see that it is.
-// Calling use(p) ensures that p is kept live until that point.
-func use(p interface{}) {}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
deleted file mode 100644
index cbec8f72..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go
+++ /dev/null
@@ -1,100 +0,0 @@
-// +build windows
-
-package winterm
-
-import "github.com/Azure/go-ansiterm"
-
-const (
- FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
- BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
-)
-
-// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the
-// request represented by the passed ANSI mode.
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) {
- switch ansiMode {
-
- // Mode styles
- case ansiterm.ANSI_SGR_BOLD:
- windowsMode = windowsMode | FOREGROUND_INTENSITY
-
- case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF:
- windowsMode &^= FOREGROUND_INTENSITY
-
- case ansiterm.ANSI_SGR_UNDERLINE:
- windowsMode = windowsMode | COMMON_LVB_UNDERSCORE
-
- case ansiterm.ANSI_SGR_REVERSE:
- inverted = true
-
- case ansiterm.ANSI_SGR_REVERSE_OFF:
- inverted = false
-
- case ansiterm.ANSI_SGR_UNDERLINE_OFF:
- windowsMode &^= COMMON_LVB_UNDERSCORE
-
- // Foreground colors
- case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT:
- windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK)
-
- case ansiterm.ANSI_SGR_FOREGROUND_BLACK:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK)
-
- case ansiterm.ANSI_SGR_FOREGROUND_RED:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED
-
- case ansiterm.ANSI_SGR_FOREGROUND_GREEN:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN
-
- case ansiterm.ANSI_SGR_FOREGROUND_YELLOW:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN
-
- case ansiterm.ANSI_SGR_FOREGROUND_BLUE:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE
-
- case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE
-
- case ansiterm.ANSI_SGR_FOREGROUND_CYAN:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE
-
- case ansiterm.ANSI_SGR_FOREGROUND_WHITE:
- windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE
-
- // Background colors
- case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT:
- // Black with no intensity
- windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK)
-
- case ansiterm.ANSI_SGR_BACKGROUND_BLACK:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK)
-
- case ansiterm.ANSI_SGR_BACKGROUND_RED:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED
-
- case ansiterm.ANSI_SGR_BACKGROUND_GREEN:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN
-
- case ansiterm.ANSI_SGR_BACKGROUND_YELLOW:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN
-
- case ansiterm.ANSI_SGR_BACKGROUND_BLUE:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE
-
- case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE
-
- case ansiterm.ANSI_SGR_BACKGROUND_CYAN:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE
-
- case ansiterm.ANSI_SGR_BACKGROUND_WHITE:
- windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE
- }
-
- return windowsMode, inverted
-}
-
-// invertAttributes inverts the foreground and background colors of a Windows attributes value
-func invertAttributes(windowsMode uint16) uint16 {
- return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4)
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
deleted file mode 100644
index 3ee06ea7..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// +build windows
-
-package winterm
-
-const (
- horizontal = iota
- vertical
-)
-
-func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT {
- if h.originMode {
- sr := h.effectiveSr(info.Window)
- return SMALL_RECT{
- Top: sr.top,
- Bottom: sr.bottom,
- Left: 0,
- Right: info.Size.X - 1,
- }
- } else {
- return SMALL_RECT{
- Top: info.Window.Top,
- Bottom: info.Window.Bottom,
- Left: 0,
- Right: info.Size.X - 1,
- }
- }
-}
-
-// setCursorPosition sets the cursor to the specified position, bounded to the screen size
-func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error {
- position.X = ensureInRange(position.X, window.Left, window.Right)
- position.Y = ensureInRange(position.Y, window.Top, window.Bottom)
- err := SetConsoleCursorPosition(h.fd, position)
- if err != nil {
- return err
- }
- h.logf("Cursor position set: (%d, %d)", position.X, position.Y)
- return err
-}
-
-func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error {
- return h.moveCursor(vertical, param)
-}
-
-func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error {
- return h.moveCursor(horizontal, param)
-}
-
-func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- position := info.CursorPosition
- switch moveMode {
- case horizontal:
- position.X += int16(param)
- case vertical:
- position.Y += int16(param)
- }
-
- if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *windowsAnsiEventHandler) moveCursorLine(param int) error {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- position := info.CursorPosition
- position.X = 0
- position.Y += int16(param)
-
- if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- position := info.CursorPosition
- position.X = int16(param) - 1
-
- if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
deleted file mode 100644
index 244b5fa2..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
+++ /dev/null
@@ -1,84 +0,0 @@
-// +build windows
-
-package winterm
-
-import "github.com/Azure/go-ansiterm"
-
-func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error {
- // Ignore an invalid (negative area) request
- if toCoord.Y < fromCoord.Y {
- return nil
- }
-
- var err error
-
- var coordStart = COORD{}
- var coordEnd = COORD{}
-
- xCurrent, yCurrent := fromCoord.X, fromCoord.Y
- xEnd, yEnd := toCoord.X, toCoord.Y
-
- // Clear any partial initial line
- if xCurrent > 0 {
- coordStart.X, coordStart.Y = xCurrent, yCurrent
- coordEnd.X, coordEnd.Y = xEnd, yCurrent
-
- err = h.clearRect(attributes, coordStart, coordEnd)
- if err != nil {
- return err
- }
-
- xCurrent = 0
- yCurrent += 1
- }
-
- // Clear intervening rectangular section
- if yCurrent < yEnd {
- coordStart.X, coordStart.Y = xCurrent, yCurrent
- coordEnd.X, coordEnd.Y = xEnd, yEnd-1
-
- err = h.clearRect(attributes, coordStart, coordEnd)
- if err != nil {
- return err
- }
-
- xCurrent = 0
- yCurrent = yEnd
- }
-
- // Clear remaining partial ending line
- coordStart.X, coordStart.Y = xCurrent, yCurrent
- coordEnd.X, coordEnd.Y = xEnd, yEnd
-
- err = h.clearRect(attributes, coordStart, coordEnd)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error {
- region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X}
- width := toCoord.X - fromCoord.X + 1
- height := toCoord.Y - fromCoord.Y + 1
- size := uint32(width) * uint32(height)
-
- if size <= 0 {
- return nil
- }
-
- buffer := make([]CHAR_INFO, size)
-
- char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes}
- for i := 0; i < int(size); i++ {
- buffer[i] = char
- }
-
- err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion)
- if err != nil {
- return err
- }
-
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
deleted file mode 100644
index 2d27fa1d..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
+++ /dev/null
@@ -1,118 +0,0 @@
-// +build windows
-
-package winterm
-
-// effectiveSr gets the current effective scroll region in buffer coordinates
-func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion {
- top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom)
- bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom)
- if top >= bottom {
- top = window.Top
- bottom = window.Bottom
- }
- return scrollRegion{top: top, bottom: bottom}
-}
-
-func (h *windowsAnsiEventHandler) scrollUp(param int) error {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- sr := h.effectiveSr(info.Window)
- return h.scroll(param, sr, info)
-}
-
-func (h *windowsAnsiEventHandler) scrollDown(param int) error {
- return h.scrollUp(-param)
-}
-
-func (h *windowsAnsiEventHandler) deleteLines(param int) error {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- start := info.CursorPosition.Y
- sr := h.effectiveSr(info.Window)
- // Lines cannot be inserted or deleted outside the scrolling region.
- if start >= sr.top && start <= sr.bottom {
- sr.top = start
- return h.scroll(param, sr, info)
- } else {
- return nil
- }
-}
-
-func (h *windowsAnsiEventHandler) insertLines(param int) error {
- return h.deleteLines(-param)
-}
-
-// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates.
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error {
- h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom)
- h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom)
-
- // Copy from and clip to the scroll region (full buffer width)
- scrollRect := SMALL_RECT{
- Top: sr.top,
- Bottom: sr.bottom,
- Left: 0,
- Right: info.Size.X - 1,
- }
-
- // Origin to which area should be copied
- destOrigin := COORD{
- X: 0,
- Y: sr.top - int16(param),
- }
-
- char := CHAR_INFO{
- UnicodeChar: ' ',
- Attributes: h.attributes,
- }
-
- if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
- return err
- }
- return nil
-}
-
-func (h *windowsAnsiEventHandler) deleteCharacters(param int) error {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
- return h.scrollLine(param, info.CursorPosition, info)
-}
-
-func (h *windowsAnsiEventHandler) insertCharacters(param int) error {
- return h.deleteCharacters(-param)
-}
-
-// scrollLine scrolls a line horizontally starting at the provided position by a number of columns.
-func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error {
- // Copy from and clip to the scroll region (full buffer width)
- scrollRect := SMALL_RECT{
- Top: position.Y,
- Bottom: position.Y,
- Left: position.X,
- Right: info.Size.X - 1,
- }
-
- // Origin to which area should be copied
- destOrigin := COORD{
- X: position.X - int16(columns),
- Y: position.Y,
- }
-
- char := CHAR_INFO{
- UnicodeChar: ' ',
- Attributes: h.attributes,
- }
-
- if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil {
- return err
- }
- return nil
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
deleted file mode 100644
index afa7635d..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build windows
-
-package winterm
-
-// AddInRange increments a value by the passed quantity while ensuring the values
-// always remain within the supplied min / max range.
-func addInRange(n int16, increment int16, min int16, max int16) int16 {
- return ensureInRange(n+increment, min, max)
-}
diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
deleted file mode 100644
index 2d40fb75..00000000
--- a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
+++ /dev/null
@@ -1,743 +0,0 @@
-// +build windows
-
-package winterm
-
-import (
- "bytes"
- "log"
- "os"
- "strconv"
-
- "github.com/Azure/go-ansiterm"
-)
-
-type windowsAnsiEventHandler struct {
- fd uintptr
- file *os.File
- infoReset *CONSOLE_SCREEN_BUFFER_INFO
- sr scrollRegion
- buffer bytes.Buffer
- attributes uint16
- inverted bool
- wrapNext bool
- drewMarginByte bool
- originMode bool
- marginByte byte
- curInfo *CONSOLE_SCREEN_BUFFER_INFO
- curPos COORD
- logf func(string, ...interface{})
-}
-
-type Option func(*windowsAnsiEventHandler)
-
-func WithLogf(f func(string, ...interface{})) Option {
- return func(w *windowsAnsiEventHandler) {
- w.logf = f
- }
-}
-
-func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler {
- infoReset, err := GetConsoleScreenBufferInfo(fd)
- if err != nil {
- return nil
- }
-
- h := &windowsAnsiEventHandler{
- fd: fd,
- file: file,
- infoReset: infoReset,
- attributes: infoReset.Attributes,
- }
- for _, o := range opts {
- o(h)
- }
-
- if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" {
- logFile, _ := os.Create("winEventHandler.log")
- logger := log.New(logFile, "", log.LstdFlags)
- if h.logf != nil {
- l := h.logf
- h.logf = func(s string, v ...interface{}) {
- l(s, v...)
- logger.Printf(s, v...)
- }
- } else {
- h.logf = logger.Printf
- }
- }
-
- if h.logf == nil {
- h.logf = func(string, ...interface{}) {}
- }
-
- return h
-}
-
-type scrollRegion struct {
- top int16
- bottom int16
-}
-
-// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the
-// current cursor position and scroll region settings, in which case it returns
-// true. If no special handling is necessary, then it does nothing and returns
-// false.
-//
-// In the false case, the caller should ensure that a carriage return
-// and line feed are inserted or that the text is otherwise wrapped.
-func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) {
- if h.wrapNext {
- if err := h.Flush(); err != nil {
- return false, err
- }
- h.clearWrap()
- }
- pos, info, err := h.getCurrentInfo()
- if err != nil {
- return false, err
- }
- sr := h.effectiveSr(info.Window)
- if pos.Y == sr.bottom {
- // Scrolling is necessary. Let Windows automatically scroll if the scrolling region
- // is the full window.
- if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom {
- if includeCR {
- pos.X = 0
- h.updatePos(pos)
- }
- return false, nil
- }
-
- // A custom scroll region is active. Scroll the window manually to simulate
- // the LF.
- if err := h.Flush(); err != nil {
- return false, err
- }
- h.logf("Simulating LF inside scroll region")
- if err := h.scrollUp(1); err != nil {
- return false, err
- }
- if includeCR {
- pos.X = 0
- if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
- return false, err
- }
- }
- return true, nil
-
- } else if pos.Y < info.Window.Bottom {
- // Let Windows handle the LF.
- pos.Y++
- if includeCR {
- pos.X = 0
- }
- h.updatePos(pos)
- return false, nil
- } else {
- // The cursor is at the bottom of the screen but outside the scroll
- // region. Skip the LF.
- h.logf("Simulating LF outside scroll region")
- if includeCR {
- if err := h.Flush(); err != nil {
- return false, err
- }
- pos.X = 0
- if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
- return false, err
- }
- }
- return true, nil
- }
-}
-
-// executeLF executes a LF without a CR.
-func (h *windowsAnsiEventHandler) executeLF() error {
- handled, err := h.simulateLF(false)
- if err != nil {
- return err
- }
- if !handled {
- // Windows LF will reset the cursor column position. Write the LF
- // and restore the cursor position.
- pos, _, err := h.getCurrentInfo()
- if err != nil {
- return err
- }
- h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
- if pos.X != 0 {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("Resetting cursor position for LF without CR")
- if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (h *windowsAnsiEventHandler) Print(b byte) error {
- if h.wrapNext {
- h.buffer.WriteByte(h.marginByte)
- h.clearWrap()
- if _, err := h.simulateLF(true); err != nil {
- return err
- }
- }
- pos, info, err := h.getCurrentInfo()
- if err != nil {
- return err
- }
- if pos.X == info.Size.X-1 {
- h.wrapNext = true
- h.marginByte = b
- } else {
- pos.X++
- h.updatePos(pos)
- h.buffer.WriteByte(b)
- }
- return nil
-}
-
-func (h *windowsAnsiEventHandler) Execute(b byte) error {
- switch b {
- case ansiterm.ANSI_TAB:
- h.logf("Execute(TAB)")
- // Move to the next tab stop, but preserve auto-wrap if already set.
- if !h.wrapNext {
- pos, info, err := h.getCurrentInfo()
- if err != nil {
- return err
- }
- pos.X = (pos.X + 8) - pos.X%8
- if pos.X >= info.Size.X {
- pos.X = info.Size.X - 1
- }
- if err := h.Flush(); err != nil {
- return err
- }
- if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
- return err
- }
- }
- return nil
-
- case ansiterm.ANSI_BEL:
- h.buffer.WriteByte(ansiterm.ANSI_BEL)
- return nil
-
- case ansiterm.ANSI_BACKSPACE:
- if h.wrapNext {
- if err := h.Flush(); err != nil {
- return err
- }
- h.clearWrap()
- }
- pos, _, err := h.getCurrentInfo()
- if err != nil {
- return err
- }
- if pos.X > 0 {
- pos.X--
- h.updatePos(pos)
- h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE)
- }
- return nil
-
- case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED:
- // Treat as true LF.
- return h.executeLF()
-
- case ansiterm.ANSI_LINE_FEED:
- // Simulate a CR and LF for now since there is no way in go-ansiterm
- // to tell if the LF should include CR (and more things break when it's
- // missing than when it's incorrectly added).
- handled, err := h.simulateLF(true)
- if handled || err != nil {
- return err
- }
- return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED)
-
- case ansiterm.ANSI_CARRIAGE_RETURN:
- if h.wrapNext {
- if err := h.Flush(); err != nil {
- return err
- }
- h.clearWrap()
- }
- pos, _, err := h.getCurrentInfo()
- if err != nil {
- return err
- }
- if pos.X != 0 {
- pos.X = 0
- h.updatePos(pos)
- h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN)
- }
- return nil
-
- default:
- return nil
- }
-}
-
-func (h *windowsAnsiEventHandler) CUU(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CUU: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorVertical(-param)
-}
-
-func (h *windowsAnsiEventHandler) CUD(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CUD: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorVertical(param)
-}
-
-func (h *windowsAnsiEventHandler) CUF(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CUF: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorHorizontal(param)
-}
-
-func (h *windowsAnsiEventHandler) CUB(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CUB: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorHorizontal(-param)
-}
-
-func (h *windowsAnsiEventHandler) CNL(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CNL: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorLine(param)
-}
-
-func (h *windowsAnsiEventHandler) CPL(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CPL: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorLine(-param)
-}
-
-func (h *windowsAnsiEventHandler) CHA(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CHA: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.moveCursorColumn(param)
-}
-
-func (h *windowsAnsiEventHandler) VPA(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("VPA: [[%d]]", param)
- h.clearWrap()
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
- window := h.getCursorWindow(info)
- position := info.CursorPosition
- position.Y = window.Top + int16(param) - 1
- return h.setCursorPosition(position, window)
-}
-
-func (h *windowsAnsiEventHandler) CUP(row int, col int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("CUP: [[%d %d]]", row, col)
- h.clearWrap()
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- window := h.getCursorWindow(info)
- position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1}
- return h.setCursorPosition(position, window)
-}
-
-func (h *windowsAnsiEventHandler) HVP(row int, col int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("HVP: [[%d %d]]", row, col)
- h.clearWrap()
- return h.CUP(row, col)
-}
-
-func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)})
- h.clearWrap()
- return nil
-}
-
-func (h *windowsAnsiEventHandler) DECOM(enable bool) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)})
- h.clearWrap()
- h.originMode = enable
- return h.CUP(1, 1)
-}
-
-func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)})
- h.clearWrap()
- if err := h.ED(2); err != nil {
- return err
- }
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
- targetWidth := int16(80)
- if use132 {
- targetWidth = 132
- }
- if info.Size.X < targetWidth {
- if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
- h.logf("set buffer failed: %v", err)
- return err
- }
- }
- window := info.Window
- window.Left = 0
- window.Right = targetWidth - 1
- if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
- h.logf("set window failed: %v", err)
- return err
- }
- if info.Size.X > targetWidth {
- if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil {
- h.logf("set buffer failed: %v", err)
- return err
- }
- }
- return SetConsoleCursorPosition(h.fd, COORD{0, 0})
-}
-
-func (h *windowsAnsiEventHandler) ED(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("ED: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
-
- // [J -- Erases from the cursor to the end of the screen, including the cursor position.
- // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position.
- // [2J -- Erases the complete display. The cursor does not move.
- // Notes:
- // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles
-
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- var start COORD
- var end COORD
-
- switch param {
- case 0:
- start = info.CursorPosition
- end = COORD{info.Size.X - 1, info.Size.Y - 1}
-
- case 1:
- start = COORD{0, 0}
- end = info.CursorPosition
-
- case 2:
- start = COORD{0, 0}
- end = COORD{info.Size.X - 1, info.Size.Y - 1}
- }
-
- err = h.clearRange(h.attributes, start, end)
- if err != nil {
- return err
- }
-
- // If the whole buffer was cleared, move the window to the top while preserving
- // the window-relative cursor position.
- if param == 2 {
- pos := info.CursorPosition
- window := info.Window
- pos.Y -= window.Top
- window.Bottom -= window.Top
- window.Top = 0
- if err := SetConsoleCursorPosition(h.fd, pos); err != nil {
- return err
- }
- if err := SetConsoleWindowInfo(h.fd, true, window); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (h *windowsAnsiEventHandler) EL(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("EL: [%v]", strconv.Itoa(param))
- h.clearWrap()
-
- // [K -- Erases from the cursor to the end of the line, including the cursor position.
- // [1K -- Erases from the beginning of the line to the cursor, including the cursor position.
- // [2K -- Erases the complete line.
-
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- var start COORD
- var end COORD
-
- switch param {
- case 0:
- start = info.CursorPosition
- end = COORD{info.Size.X, info.CursorPosition.Y}
-
- case 1:
- start = COORD{0, info.CursorPosition.Y}
- end = info.CursorPosition
-
- case 2:
- start = COORD{0, info.CursorPosition.Y}
- end = COORD{info.Size.X, info.CursorPosition.Y}
- }
-
- err = h.clearRange(h.attributes, start, end)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *windowsAnsiEventHandler) IL(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("IL: [%v]", strconv.Itoa(param))
- h.clearWrap()
- return h.insertLines(param)
-}
-
-func (h *windowsAnsiEventHandler) DL(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("DL: [%v]", strconv.Itoa(param))
- h.clearWrap()
- return h.deleteLines(param)
-}
-
-func (h *windowsAnsiEventHandler) ICH(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("ICH: [%v]", strconv.Itoa(param))
- h.clearWrap()
- return h.insertCharacters(param)
-}
-
-func (h *windowsAnsiEventHandler) DCH(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("DCH: [%v]", strconv.Itoa(param))
- h.clearWrap()
- return h.deleteCharacters(param)
-}
-
-func (h *windowsAnsiEventHandler) SGR(params []int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- strings := []string{}
- for _, v := range params {
- strings = append(strings, strconv.Itoa(v))
- }
-
- h.logf("SGR: [%v]", strings)
-
- if len(params) <= 0 {
- h.attributes = h.infoReset.Attributes
- h.inverted = false
- } else {
- for _, attr := range params {
-
- if attr == ansiterm.ANSI_SGR_RESET {
- h.attributes = h.infoReset.Attributes
- h.inverted = false
- continue
- }
-
- h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr))
- }
- }
-
- attributes := h.attributes
- if h.inverted {
- attributes = invertAttributes(attributes)
- }
- err := SetConsoleTextAttribute(h.fd, attributes)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (h *windowsAnsiEventHandler) SU(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("SU: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.scrollUp(param)
-}
-
-func (h *windowsAnsiEventHandler) SD(param int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("SD: [%v]", []string{strconv.Itoa(param)})
- h.clearWrap()
- return h.scrollDown(param)
-}
-
-func (h *windowsAnsiEventHandler) DA(params []string) error {
- h.logf("DA: [%v]", params)
- // DA cannot be implemented because it must send data on the VT100 input stream,
- // which is not available to go-ansiterm.
- return nil
-}
-
-func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("DECSTBM: [%d, %d]", top, bottom)
-
- // Windows is 0 indexed, Linux is 1 indexed
- h.sr.top = int16(top - 1)
- h.sr.bottom = int16(bottom - 1)
-
- // This command also moves the cursor to the origin.
- h.clearWrap()
- return h.CUP(1, 1)
-}
-
-func (h *windowsAnsiEventHandler) RI() error {
- if err := h.Flush(); err != nil {
- return err
- }
- h.logf("RI: []")
- h.clearWrap()
-
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- sr := h.effectiveSr(info.Window)
- if info.CursorPosition.Y == sr.top {
- return h.scrollDown(1)
- }
-
- return h.moveCursorVertical(-1)
-}
-
-func (h *windowsAnsiEventHandler) IND() error {
- h.logf("IND: []")
- return h.executeLF()
-}
-
-func (h *windowsAnsiEventHandler) Flush() error {
- h.curInfo = nil
- if h.buffer.Len() > 0 {
- h.logf("Flush: [%s]", h.buffer.Bytes())
- if _, err := h.buffer.WriteTo(h.file); err != nil {
- return err
- }
- }
-
- if h.wrapNext && !h.drewMarginByte {
- h.logf("Flush: drawing margin byte '%c'", h.marginByte)
-
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return err
- }
-
- charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
- size := COORD{1, 1}
- position := COORD{0, 0}
- region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
- if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil {
- return err
- }
- h.drewMarginByte = true
- }
- return nil
-}
-
-// cacheConsoleInfo ensures that the current console screen information has been queried
-// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos.
-func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
- if h.curInfo == nil {
- info, err := GetConsoleScreenBufferInfo(h.fd)
- if err != nil {
- return COORD{}, nil, err
- }
- h.curInfo = info
- h.curPos = info.CursorPosition
- }
- return h.curPos, h.curInfo, nil
-}
-
-func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
- if h.curInfo == nil {
- panic("failed to call getCurrentInfo before calling updatePos")
- }
- h.curPos = pos
-}
-
-// clearWrap clears the state where the cursor is in the margin
-// waiting for the next character before wrapping the line. This must
-// be done before most operations that act on the cursor.
-func (h *windowsAnsiEventHandler) clearWrap() {
- h.wrapNext = false
- h.drewMarginByte = false
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
index bee5e61d..8c83a917 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go
@@ -19,10 +19,6 @@ import (
"net/url"
)
-const (
- activeDirectoryAPIVersion = "1.0"
-)
-
// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {
@@ -46,11 +42,25 @@ func validateStringParam(param, name string) error {
// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
+ apiVer := "1.0"
+ return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
+}
+
+// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
+// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
+func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
return nil, err
}
+ api := ""
// it's legal for tenantID to be empty so don't validate it
- const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
+ if apiVersion != nil {
+ if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
+ return nil, err
+ }
+ api = fmt.Sprintf("?api-version=%s", *apiVersion)
+ }
+ const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
u, err := url.Parse(activeDirectoryEndpoint)
if err != nil {
return nil, err
@@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
if err != nil {
return nil, err
}
- authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
+ authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
if err != nil {
return nil, err
}
- tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
+ tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
if err != nil {
return nil, err
}
- deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
+ deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
index 0e5ad14d..834401e0 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go
@@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
-// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
index 32aea838..effa87ab 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go
@@ -29,13 +29,12 @@ import (
"net"
"net/http"
"net/url"
- "strconv"
"strings"
"sync"
"time"
"github.com/Azure/go-autorest/autorest/date"
- "github.com/Azure/go-autorest/version"
+ "github.com/Azure/go-autorest/tracing"
"github.com/dgrijalva/jwt-go"
)
@@ -97,18 +96,27 @@ type RefresherWithContext interface {
type TokenRefreshCallback func(Token) error
// Token encapsulates the access token used to authorize Azure requests.
+// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
type Token struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
- ExpiresIn string `json:"expires_in"`
- ExpiresOn string `json:"expires_on"`
- NotBefore string `json:"not_before"`
+ ExpiresIn json.Number `json:"expires_in"`
+ ExpiresOn json.Number `json:"expires_on"`
+ NotBefore json.Number `json:"not_before"`
Resource string `json:"resource"`
Type string `json:"token_type"`
}
+func newToken() Token {
+ return Token{
+ ExpiresIn: "0",
+ ExpiresOn: "0",
+ NotBefore: "0",
+ }
+}
+
// IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool {
return t == Token{}
@@ -116,12 +124,12 @@ func (t Token) IsZero() bool {
// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
- s, err := strconv.Atoi(t.ExpiresOn)
+ s, err := t.ExpiresOn.Float64()
if err != nil {
s = -3600
}
- expiration := date.NewUnixTimeFromSeconds(float64(s))
+ expiration := date.NewUnixTimeFromSeconds(s)
return time.Time(expiration).UTC()
}
@@ -218,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo
token := jwt.New(jwt.SigningMethodRS256)
token.Header["x5t"] = thumbprint
+ x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
+ token.Header["x5c"] = x5c
token.Claims = jwt.MapClaims{
"aud": spt.inner.OauthConfig.TokenEndpoint.String(),
"iss": spt.inner.ClientID,
@@ -375,8 +385,13 @@ func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error {
if err != nil {
return err
}
- spt.refreshLock = &sync.RWMutex{}
- spt.sender = &http.Client{}
+ // Don't override the refreshLock or the sender if those have been already set.
+ if spt.refreshLock == nil {
+ spt.refreshLock = &sync.RWMutex{}
+ }
+ if spt.sender == nil {
+ spt.sender = &http.Client{Transport: tracing.Transport}
+ }
return nil
}
@@ -414,6 +429,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
}
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
+ Token: newToken(),
OauthConfig: oauthConfig,
Secret: secret,
ClientID: id,
@@ -422,7 +438,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
- sender: &http.Client{},
+ sender: &http.Client{Transport: tracing.Transport},
refreshCallbacks: callbacks,
}
return spt, nil
@@ -653,6 +669,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
spt := &ServicePrincipalToken{
inner: servicePrincipalToken{
+ Token: newToken(),
OauthConfig: OAuthConfig{
TokenEndpoint: *msiEndpointURL,
},
@@ -662,7 +679,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI
RefreshWithin: defaultRefresh,
},
refreshLock: &sync.RWMutex{},
- sender: &http.Client{},
+ sender: &http.Client{Transport: tracing.Transport},
refreshCallbacks: callbacks,
MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
}
@@ -779,7 +796,7 @@ func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource
if err != nil {
return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
}
- req.Header.Add("User-Agent", version.UserAgent())
+ req.Header.Add("User-Agent", UserAgent())
req = req.WithContext(ctx)
if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
v := url.Values{}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/version.go b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go
new file mode 100644
index 00000000..c867b348
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/adal/version.go
@@ -0,0 +1,45 @@
+package adal
+
+import (
+ "fmt"
+ "runtime"
+)
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+const number = "v1.0.0"
+
+var (
+ ua = fmt.Sprintf("Go/%s (%s-%s) go-autorest/adal/%s",
+ runtime.Version(),
+ runtime.GOARCH,
+ runtime.GOOS,
+ number,
+ )
+)
+
+// UserAgent returns a string containing the Go version, system architecture and OS, and the adal version.
+func UserAgent() string {
+ return ua
+}
+
+// AddToUserAgent adds an extension to the current user agent
+func AddToUserAgent(extension string) error {
+ if extension != "" {
+ ua = fmt.Sprintf("%s %s", ua, extension)
+ return nil
+ }
+ return fmt.Errorf("Extension was empty, User Agent remained as '%s'", ua)
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
index 77eff45b..2e24b4b3 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/authorization.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go
@@ -15,12 +15,14 @@ package autorest
// limitations under the License.
import (
+ "encoding/base64"
"fmt"
"net/http"
"net/url"
"strings"
"github.com/Azure/go-autorest/autorest/adal"
+ "github.com/Azure/go-autorest/tracing"
)
const (
@@ -30,6 +32,8 @@ const (
apiKeyAuthorizerHeader = "Ocp-Apim-Subscription-Key"
bingAPISdkHeader = "X-BingApis-SDK-Client"
golangBingAPISdkHeaderValue = "Go-SDK"
+ authorization = "Authorization"
+ basic = "Basic"
)
// Authorizer is the interface that provides a PrepareDecorator used to supply request
@@ -68,7 +72,7 @@ func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[str
return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
}
-// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Paramaters
+// WithAuthorization returns a PrepareDecorator that adds an HTTP headers and Query Parameters.
func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
return func(p Preparer) Preparer {
return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
@@ -147,7 +151,7 @@ type BearerAuthorizerCallback struct {
// is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
if sender == nil {
- sender = &http.Client{}
+ sender = &http.Client{Transport: tracing.Transport}
}
return &BearerAuthorizerCallback{sender: sender, callback: callback}
}
@@ -257,3 +261,27 @@ func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator {
}
return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
}
+
+// BasicAuthorizer implements basic HTTP authorization by adding the Authorization HTTP header
+// with the value "Basic " where is a base64-encoded username:password tuple.
+type BasicAuthorizer struct {
+ userName string
+ password string
+}
+
+// NewBasicAuthorizer creates a new BasicAuthorizer with the specified username and password.
+func NewBasicAuthorizer(userName, password string) *BasicAuthorizer {
+ return &BasicAuthorizer{
+ userName: userName,
+ password: password,
+ }
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Basic " followed by the base64-encoded username:password tuple.
+func (ba *BasicAuthorizer) WithAuthorization() PrepareDecorator {
+ headers := make(map[string]interface{})
+ headers[authorization] = basic + " " + base64.StdEncoding.EncodeToString([]byte(fmt.Sprintf("%s:%s", ba.userName, ba.password)))
+
+ return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
index 9dd7a1d2..02d01196 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go
@@ -26,6 +26,7 @@ import (
"time"
"github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/tracing"
)
const (
@@ -44,24 +45,14 @@ var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.Stat
// Future provides a mechanism to access the status and results of an asynchronous request.
// Since futures are stateful they should be passed by value to avoid race conditions.
type Future struct {
- req *http.Request // legacy
- pt pollingTracker
-}
-
-// NewFuture returns a new Future object initialized with the specified request.
-// Deprecated: Please use NewFutureFromResponse instead.
-func NewFuture(req *http.Request) Future {
- return Future{req: req}
+ pt pollingTracker
}
// NewFutureFromResponse returns a new Future object initialized
// with the initial response from an asynchronous operation.
func NewFutureFromResponse(resp *http.Response) (Future, error) {
pt, err := createPollingTracker(resp)
- if err != nil {
- return Future{}, err
- }
- return Future{pt: pt}, nil
+ return Future{pt: pt}, err
}
// Response returns the last HTTP response.
@@ -88,29 +79,25 @@ func (f Future) PollingMethod() PollingMethodType {
return f.pt.pollingMethod()
}
-// Done queries the service to see if the operation has completed.
-func (f *Future) Done(sender autorest.Sender) (bool, error) {
- // support for legacy Future implementation
- if f.req != nil {
- resp, err := sender.Do(f.req)
- if err != nil {
- return false, err
- }
- pt, err := createPollingTracker(resp)
- if err != nil {
- return false, err
+// DoneWithContext queries the service to see if the operation has completed.
+func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
+ ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.DoneWithContext")
+ defer func() {
+ sc := -1
+ resp := f.Response()
+ if resp != nil {
+ sc = resp.StatusCode
}
- f.pt = pt
- f.req = nil
- }
- // end legacy
+ tracing.EndSpan(ctx, sc, err)
+ }()
+
if f.pt == nil {
return false, autorest.NewError("Future", "Done", "future is not initialized")
}
if f.pt.hasTerminated() {
return true, f.pt.pollingError()
}
- if err := f.pt.pollForStatus(sender); err != nil {
+ if err := f.pt.pollForStatus(ctx, sender); err != nil {
return false, err
}
if err := f.pt.checkForErrors(); err != nil {
@@ -154,24 +141,35 @@ func (f Future) GetPollingDelay() (time.Duration, bool) {
return d, true
}
-// WaitForCompletion will return when one of the following conditions is met: the long
-// running operation has completed, the provided context is cancelled, or the client's
-// polling duration has been exceeded. It will retry failed polling attempts based on
-// the retry value defined in the client up to the maximum retry attempts.
-// Deprecated: Please use WaitForCompletionRef() instead.
-func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error {
- return f.WaitForCompletionRef(ctx, client)
-}
-
// WaitForCompletionRef will return when one of the following conditions is met: the long
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
-func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error {
- ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
- defer cancel()
- done, err := f.Done(client)
- for attempts := 0; !done; done, err = f.Done(client) {
+// If no deadline is specified in the context then the client.PollingDuration will be
+// used to determine if a default deadline should be used.
+// If PollingDuration is greater than zero the value will be used as the context's timeout.
+// If PollingDuration is zero then no default deadline will be used.
+func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
+ ctx = tracing.StartSpan(ctx, "github.com/Azure/go-autorest/autorest/azure/async.WaitForCompletionRef")
+ defer func() {
+ sc := -1
+ resp := f.Response()
+ if resp != nil {
+ sc = resp.StatusCode
+ }
+ tracing.EndSpan(ctx, sc, err)
+ }()
+ cancelCtx := ctx
+ // if the provided context already has a deadline don't override it
+ _, hasDeadline := ctx.Deadline()
+ if d := client.PollingDuration; !hasDeadline && d != 0 {
+ var cancel context.CancelFunc
+ cancelCtx, cancel = context.WithTimeout(ctx, d)
+ defer cancel()
+ }
+
+ done, err := f.DoneWithContext(ctx, client)
+ for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
if attempts >= client.RetryAttempts {
return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
}
@@ -195,12 +193,12 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
attempts++
}
// wait until the delay elapses or the context is cancelled
- delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
+ delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
if !delayElapsed {
- return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
+ return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
}
}
- return err
+ return
}
// MarshalJSON implements the json.Marshaler interface.
@@ -285,7 +283,7 @@ type pollingTracker interface {
initializeState() error
// makes an HTTP request to check the status of the LRO
- pollForStatus(sender autorest.Sender) error
+ pollForStatus(ctx context.Context, sender autorest.Sender) error
// updates internal tracker state, call this after each call to pollForStatus
updatePollingState(provStateApl bool) error
@@ -399,6 +397,10 @@ func (pt *pollingTrackerBase) updateRawBody() error {
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
}
+ // observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
+ if len(b) == 0 {
+ return nil
+ }
// put the body back so it's available to other callers
pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
if err = json.Unmarshal(b, &pt.rawBody); err != nil {
@@ -408,15 +410,13 @@ func (pt *pollingTrackerBase) updateRawBody() error {
return nil
}
-func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
+func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
}
- // attach the context from the original request if available (it will be absent for deserialized futures)
- if pt.resp != nil {
- req = req.WithContext(pt.resp.Request.Context())
- }
+
+ req = req.WithContext(ctx)
pt.resp, err = sender.Do(req)
if err != nil {
return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
@@ -445,7 +445,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
re := respErr{}
defer pt.resp.Body.Close()
var b []byte
- if b, err = ioutil.ReadAll(pt.resp.Body); err != nil {
+ if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
goto Default
}
if err = json.Unmarshal(b, &re); err != nil {
@@ -663,7 +663,7 @@ func (pt *pollingTrackerPatch) updatePollingMethod() error {
}
}
// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
- // note the absense of the "final GET" mechanism for PATCH
+ // note the absence of the "final GET" mechanism for PATCH
if pt.resp.StatusCode == http.StatusAccepted {
ao, err := getURLFromAsyncOpHeader(pt.resp)
if err != nil {
@@ -794,8 +794,6 @@ func (pt *pollingTrackerPut) updatePollingMethod() error {
pt.URI = lh
pt.Pm = PollingLocation
}
- // when both headers are returned we use the value in the Location header for the final GET
- pt.FinalGetURI = lh
}
// make sure a polling URL was found
if pt.URI == "" {
@@ -885,43 +883,6 @@ func isValidURL(s string) bool {
return err == nil && u.IsAbs()
}
-// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure
-// long-running operation. It will delay between requests for the duration specified in the
-// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via
-// the context associated with the http.Request.
-// Deprecated: Prefer using Futures to allow for non-blocking async operations.
-func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator {
- return func(s autorest.Sender) autorest.Sender {
- return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
- resp, err := s.Do(r)
- if err != nil {
- return resp, err
- }
- if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) {
- return resp, nil
- }
- future, err := NewFutureFromResponse(resp)
- if err != nil {
- return resp, err
- }
- // retry until either the LRO completes or we receive an error
- var done bool
- for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) {
- // check for Retry-After delay, if not present use the specified polling delay
- if pd, ok := future.GetPollingDelay(); ok {
- delay = pd
- }
- // wait until the delay elapses or the context is cancelled
- if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed {
- return future.Response(),
- autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled")
- }
- }
- return future.Response(), err
- })
- }
-}
-
// PollingMethodType defines a type used for enumerating polling mechanisms.
type PollingMethodType string
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
index a14b8790..20855d4a 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go
@@ -31,26 +31,42 @@ import (
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/adal"
"github.com/Azure/go-autorest/autorest/azure"
+ "github.com/Azure/go-autorest/autorest/azure/cli"
"github.com/dimchansky/utfbom"
"golang.org/x/crypto/pkcs12"
)
+// The possible keys in the Values map.
+const (
+ SubscriptionID = "AZURE_SUBSCRIPTION_ID"
+ TenantID = "AZURE_TENANT_ID"
+ ClientID = "AZURE_CLIENT_ID"
+ ClientSecret = "AZURE_CLIENT_SECRET"
+ CertificatePath = "AZURE_CERTIFICATE_PATH"
+ CertificatePassword = "AZURE_CERTIFICATE_PASSWORD"
+ Username = "AZURE_USERNAME"
+ Password = "AZURE_PASSWORD"
+ EnvironmentName = "AZURE_ENVIRONMENT"
+ Resource = "AZURE_AD_RESOURCE"
+ ActiveDirectoryEndpoint = "ActiveDirectoryEndpoint"
+ ResourceManagerEndpoint = "ResourceManagerEndpoint"
+ GraphResourceID = "GraphResourceID"
+ SQLManagementEndpoint = "SQLManagementEndpoint"
+ GalleryEndpoint = "GalleryEndpoint"
+ ManagementEndpoint = "ManagementEndpoint"
+)
+
// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order:
// 1. Client credentials
// 2. Client certificate
// 3. Username password
// 4. MSI
func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
- settings, err := getAuthenticationSettings()
+ settings, err := GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
-
- if settings.resource == "" {
- settings.resource = settings.environment.ResourceManagerEndpoint
- }
-
- return settings.getAuthorizer()
+ return settings.GetAuthorizer()
}
// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order:
@@ -59,134 +75,364 @@ func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) {
// 3. Username password
// 4. MSI
func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) {
- settings, err := getAuthenticationSettings()
+ settings, err := GetSettingsFromEnvironment()
if err != nil {
return nil, err
}
- settings.resource = resource
- return settings.getAuthorizer()
-}
-
-type settings struct {
- tenantID string
- clientID string
- clientSecret string
- certificatePath string
- certificatePassword string
- username string
- password string
- envName string
- resource string
- environment azure.Environment
-}
-
-func getAuthenticationSettings() (s settings, err error) {
- s = settings{
- tenantID: os.Getenv("AZURE_TENANT_ID"),
- clientID: os.Getenv("AZURE_CLIENT_ID"),
- clientSecret: os.Getenv("AZURE_CLIENT_SECRET"),
- certificatePath: os.Getenv("AZURE_CERTIFICATE_PATH"),
- certificatePassword: os.Getenv("AZURE_CERTIFICATE_PASSWORD"),
- username: os.Getenv("AZURE_USERNAME"),
- password: os.Getenv("AZURE_PASSWORD"),
- envName: os.Getenv("AZURE_ENVIRONMENT"),
- resource: os.Getenv("AZURE_AD_RESOURCE"),
- }
-
- if s.envName == "" {
- s.environment = azure.PublicCloud
+ settings.Values[Resource] = resource
+ return settings.GetAuthorizer()
+}
+
+// EnvironmentSettings contains the available authentication settings.
+type EnvironmentSettings struct {
+ Values map[string]string
+ Environment azure.Environment
+}
+
+// GetSettingsFromEnvironment returns the available authentication settings from the environment.
+func GetSettingsFromEnvironment() (s EnvironmentSettings, err error) {
+ s = EnvironmentSettings{
+ Values: map[string]string{},
+ }
+ s.setValue(SubscriptionID)
+ s.setValue(TenantID)
+ s.setValue(ClientID)
+ s.setValue(ClientSecret)
+ s.setValue(CertificatePath)
+ s.setValue(CertificatePassword)
+ s.setValue(Username)
+ s.setValue(Password)
+ s.setValue(EnvironmentName)
+ s.setValue(Resource)
+ if v := s.Values[EnvironmentName]; v == "" {
+ s.Environment = azure.PublicCloud
} else {
- s.environment, err = azure.EnvironmentFromName(s.envName)
+ s.Environment, err = azure.EnvironmentFromName(v)
+ }
+ if s.Values[Resource] == "" {
+ s.Values[Resource] = s.Environment.ResourceManagerEndpoint
}
return
}
-func (settings settings) getAuthorizer() (autorest.Authorizer, error) {
+// GetSubscriptionID returns the available subscription ID or an empty string.
+func (settings EnvironmentSettings) GetSubscriptionID() string {
+ return settings.Values[SubscriptionID]
+}
+
+// adds the specified environment variable value to the Values map if it exists
+func (settings EnvironmentSettings) setValue(key string) {
+ if v := os.Getenv(key); v != "" {
+ settings.Values[key] = v
+ }
+}
+
+// helper to return client and tenant IDs
+func (settings EnvironmentSettings) getClientAndTenant() (string, string) {
+ clientID := settings.Values[ClientID]
+ tenantID := settings.Values[TenantID]
+ return clientID, tenantID
+}
+
+// GetClientCredentials creates a config object from the available client credentials.
+// An error is returned if no client credentials are available.
+func (settings EnvironmentSettings) GetClientCredentials() (ClientCredentialsConfig, error) {
+ secret := settings.Values[ClientSecret]
+ if secret == "" {
+ return ClientCredentialsConfig{}, errors.New("missing client secret")
+ }
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewClientCredentialsConfig(clientID, secret, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config, nil
+}
+
+// GetClientCertificate creates a config object from the available certificate credentials.
+// An error is returned if no certificate credentials are available.
+func (settings EnvironmentSettings) GetClientCertificate() (ClientCertificateConfig, error) {
+ certPath := settings.Values[CertificatePath]
+ if certPath == "" {
+ return ClientCertificateConfig{}, errors.New("missing certificate path")
+ }
+ certPwd := settings.Values[CertificatePassword]
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewClientCertificateConfig(certPath, certPwd, clientID, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config, nil
+}
+
+// GetUsernamePassword creates a config object from the available username/password credentials.
+// An error is returned if no username/password credentials are available.
+func (settings EnvironmentSettings) GetUsernamePassword() (UsernamePasswordConfig, error) {
+ username := settings.Values[Username]
+ password := settings.Values[Password]
+ if username == "" || password == "" {
+ return UsernamePasswordConfig{}, errors.New("missing username/password")
+ }
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewUsernamePasswordConfig(username, password, clientID, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config, nil
+}
+
+// GetMSI creates a MSI config object from the available client ID.
+func (settings EnvironmentSettings) GetMSI() MSIConfig {
+ config := NewMSIConfig()
+ config.Resource = settings.Values[Resource]
+ config.ClientID = settings.Values[ClientID]
+ return config
+}
+
+// GetDeviceFlow creates a device-flow config object from the available client and tenant IDs.
+func (settings EnvironmentSettings) GetDeviceFlow() DeviceFlowConfig {
+ clientID, tenantID := settings.getClientAndTenant()
+ config := NewDeviceFlowConfig(clientID, tenantID)
+ config.AADEndpoint = settings.Environment.ActiveDirectoryEndpoint
+ config.Resource = settings.Values[Resource]
+ return config
+}
+
+// GetAuthorizer creates an Authorizer configured from environment variables in the order:
+// 1. Client credentials
+// 2. Client certificate
+// 3. Username password
+// 4. MSI
+func (settings EnvironmentSettings) GetAuthorizer() (autorest.Authorizer, error) {
//1.Client Credentials
- if settings.clientSecret != "" {
- config := NewClientCredentialsConfig(settings.clientID, settings.clientSecret, settings.tenantID)
- config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint
- config.Resource = settings.resource
- return config.Authorizer()
+ if c, e := settings.GetClientCredentials(); e == nil {
+ return c.Authorizer()
}
//2. Client Certificate
- if settings.certificatePath != "" {
- config := NewClientCertificateConfig(settings.certificatePath, settings.certificatePassword, settings.clientID, settings.tenantID)
- config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint
- config.Resource = settings.resource
- return config.Authorizer()
+ if c, e := settings.GetClientCertificate(); e == nil {
+ return c.Authorizer()
}
//3. Username Password
- if settings.username != "" && settings.password != "" {
- config := NewUsernamePasswordConfig(settings.username, settings.password, settings.clientID, settings.tenantID)
- config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint
- config.Resource = settings.resource
- return config.Authorizer()
+ if c, e := settings.GetUsernamePassword(); e == nil {
+ return c.Authorizer()
}
// 4. MSI
- config := NewMSIConfig()
- config.Resource = settings.resource
- config.ClientID = settings.clientID
- return config.Authorizer()
+ return settings.GetMSI().Authorizer()
}
-// NewAuthorizerFromFile creates an Authorizer configured from a configuration file.
+// NewAuthorizerFromFile creates an Authorizer configured from a configuration file in the following order.
+// 1. Client credentials
+// 2. Client certificate
func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) {
+ settings, err := GetSettingsFromFile()
+ if err != nil {
+ return nil, err
+ }
+ if a, err := settings.ClientCredentialsAuthorizer(baseURI); err == nil {
+ return a, err
+ }
+ if a, err := settings.ClientCertificateAuthorizer(baseURI); err == nil {
+ return a, err
+ }
+ return nil, errors.New("auth file missing client and certificate credentials")
+}
+
+// NewAuthorizerFromFileWithResource creates an Authorizer configured from a configuration file in the following order.
+// 1. Client credentials
+// 2. Client certificate
+func NewAuthorizerFromFileWithResource(resource string) (autorest.Authorizer, error) {
+ s, err := GetSettingsFromFile()
+ if err != nil {
+ return nil, err
+ }
+ if a, err := s.ClientCredentialsAuthorizerWithResource(resource); err == nil {
+ return a, err
+ }
+ if a, err := s.ClientCertificateAuthorizerWithResource(resource); err == nil {
+ return a, err
+ }
+ return nil, errors.New("auth file missing client and certificate credentials")
+}
+
+// NewAuthorizerFromCLI creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
+func NewAuthorizerFromCLI() (autorest.Authorizer, error) {
+ settings, err := GetSettingsFromEnvironment()
+ if err != nil {
+ return nil, err
+ }
+
+ if settings.Values[Resource] == "" {
+ settings.Values[Resource] = settings.Environment.ResourceManagerEndpoint
+ }
+
+ return NewAuthorizerFromCLIWithResource(settings.Values[Resource])
+}
+
+// NewAuthorizerFromCLIWithResource creates an Authorizer configured from Azure CLI 2.0 for local development scenarios.
+func NewAuthorizerFromCLIWithResource(resource string) (autorest.Authorizer, error) {
+ token, err := cli.GetTokenFromCLI(resource)
+ if err != nil {
+ return nil, err
+ }
+
+ adalToken, err := token.ToADALToken()
+ if err != nil {
+ return nil, err
+ }
+
+ return autorest.NewBearerAuthorizer(&adalToken), nil
+}
+
+// GetSettingsFromFile returns the available authentication settings from an Azure CLI authentication file.
+func GetSettingsFromFile() (FileSettings, error) {
+ s := FileSettings{}
fileLocation := os.Getenv("AZURE_AUTH_LOCATION")
if fileLocation == "" {
- return nil, errors.New("auth file not found. Environment variable AZURE_AUTH_LOCATION is not set")
+ return s, errors.New("environment variable AZURE_AUTH_LOCATION is not set")
}
contents, err := ioutil.ReadFile(fileLocation)
if err != nil {
- return nil, err
+ return s, err
}
// Auth file might be encoded
decoded, err := decode(contents)
+ if err != nil {
+ return s, err
+ }
+
+ authFile := map[string]interface{}{}
+ err = json.Unmarshal(decoded, &authFile)
+ if err != nil {
+ return s, err
+ }
+
+ s.Values = map[string]string{}
+ s.setKeyValue(ClientID, authFile["clientId"])
+ s.setKeyValue(ClientSecret, authFile["clientSecret"])
+ s.setKeyValue(CertificatePath, authFile["clientCertificate"])
+ s.setKeyValue(CertificatePassword, authFile["clientCertificatePassword"])
+ s.setKeyValue(SubscriptionID, authFile["subscriptionId"])
+ s.setKeyValue(TenantID, authFile["tenantId"])
+ s.setKeyValue(ActiveDirectoryEndpoint, authFile["activeDirectoryEndpointUrl"])
+ s.setKeyValue(ResourceManagerEndpoint, authFile["resourceManagerEndpointUrl"])
+ s.setKeyValue(GraphResourceID, authFile["activeDirectoryGraphResourceId"])
+ s.setKeyValue(SQLManagementEndpoint, authFile["sqlManagementEndpointUrl"])
+ s.setKeyValue(GalleryEndpoint, authFile["galleryEndpointUrl"])
+ s.setKeyValue(ManagementEndpoint, authFile["managementEndpointUrl"])
+ return s, nil
+}
+
+// FileSettings contains the available authentication settings.
+type FileSettings struct {
+ Values map[string]string
+}
+
+// GetSubscriptionID returns the available subscription ID or an empty string.
+func (settings FileSettings) GetSubscriptionID() string {
+ return settings.Values[SubscriptionID]
+}
+
+// adds the specified value to the Values map if it isn't nil
+func (settings FileSettings) setKeyValue(key string, val interface{}) {
+ if val != nil {
+ settings.Values[key] = val.(string)
+ }
+}
+
+// returns the specified AAD endpoint or the public cloud endpoint if unspecified
+func (settings FileSettings) getAADEndpoint() string {
+ if v, ok := settings.Values[ActiveDirectoryEndpoint]; ok {
+ return v
+ }
+ return azure.PublicCloud.ActiveDirectoryEndpoint
+}
+
+// ServicePrincipalTokenFromClientCredentials creates a ServicePrincipalToken from the available client credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentials(baseURI string) (*adal.ServicePrincipalToken, error) {
+ resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
+ return settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
+}
- file := file{}
- err = json.Unmarshal(decoded, &file)
+// ClientCredentialsAuthorizer creates an authorizer from the available client credentials.
+func (settings FileSettings) ClientCredentialsAuthorizer(baseURI string) (autorest.Authorizer, error) {
+ resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
+ return settings.ClientCredentialsAuthorizerWithResource(resource)
+}
- resource, err := getResourceForToken(file, baseURI)
+// ServicePrincipalTokenFromClientCredentialsWithResource creates a ServicePrincipalToken
+// from the available client credentials and the specified resource.
+func (settings FileSettings) ServicePrincipalTokenFromClientCredentialsWithResource(resource string) (*adal.ServicePrincipalToken, error) {
+ if _, ok := settings.Values[ClientSecret]; !ok {
+ return nil, errors.New("missing client secret")
+ }
+ config, err := adal.NewOAuthConfig(settings.getAADEndpoint(), settings.Values[TenantID])
if err != nil {
return nil, err
}
+ return adal.NewServicePrincipalToken(*config, settings.Values[ClientID], settings.Values[ClientSecret], resource)
+}
- config, err := adal.NewOAuthConfig(file.ActiveDirectoryEndpoint, file.TenantID)
+func (settings FileSettings) clientCertificateConfigWithResource(resource string) (ClientCertificateConfig, error) {
+ if _, ok := settings.Values[CertificatePath]; !ok {
+ return ClientCertificateConfig{}, errors.New("missing certificate path")
+ }
+ cfg := NewClientCertificateConfig(settings.Values[CertificatePath], settings.Values[CertificatePassword], settings.Values[ClientID], settings.Values[TenantID])
+ cfg.AADEndpoint = settings.getAADEndpoint()
+ cfg.Resource = resource
+ return cfg, nil
+}
+
+// ClientCredentialsAuthorizerWithResource creates an authorizer from the available client credentials and the specified resource.
+func (settings FileSettings) ClientCredentialsAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+ spToken, err := settings.ServicePrincipalTokenFromClientCredentialsWithResource(resource)
if err != nil {
return nil, err
}
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
- spToken, err := adal.NewServicePrincipalToken(*config, file.ClientID, file.ClientSecret, resource)
+// ServicePrincipalTokenFromClientCertificate creates a ServicePrincipalToken from the available certificate credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCertificate(baseURI string) (*adal.ServicePrincipalToken, error) {
+ resource, err := settings.getResourceForToken(baseURI)
if err != nil {
return nil, err
}
+ return settings.ServicePrincipalTokenFromClientCertificateWithResource(resource)
+}
- return autorest.NewBearerAuthorizer(spToken), nil
+// ClientCertificateAuthorizer creates an authorizer from the available certificate credentials.
+func (settings FileSettings) ClientCertificateAuthorizer(baseURI string) (autorest.Authorizer, error) {
+ resource, err := settings.getResourceForToken(baseURI)
+ if err != nil {
+ return nil, err
+ }
+ return settings.ClientCertificateAuthorizerWithResource(resource)
}
-// File represents the authentication file
-type file struct {
- ClientID string `json:"clientId,omitempty"`
- ClientSecret string `json:"clientSecret,omitempty"`
- SubscriptionID string `json:"subscriptionId,omitempty"`
- TenantID string `json:"tenantId,omitempty"`
- ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"`
- ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"`
- GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"`
- SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"`
- GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"`
- ManagementEndpoint string `json:"managementEndpointUrl,omitempty"`
+// ServicePrincipalTokenFromClientCertificateWithResource creates a ServicePrincipalToken from the available certificate credentials.
+func (settings FileSettings) ServicePrincipalTokenFromClientCertificateWithResource(resource string) (*adal.ServicePrincipalToken, error) {
+ cfg, err := settings.clientCertificateConfigWithResource(resource)
+ if err != nil {
+ return nil, err
+ }
+ return cfg.ServicePrincipalToken()
+}
+
+// ClientCertificateAuthorizerWithResource creates an authorizer from the available certificate credentials and the specified resource.
+func (settings FileSettings) ClientCertificateAuthorizerWithResource(resource string) (autorest.Authorizer, error) {
+ cfg, err := settings.clientCertificateConfigWithResource(resource)
+ if err != nil {
+ return nil, err
+ }
+ return cfg.Authorizer()
}
func decode(b []byte) ([]byte, error) {
@@ -211,7 +457,7 @@ func decode(b []byte) ([]byte, error) {
return ioutil.ReadAll(reader)
}
-func getResourceForToken(f file, baseURI string) (string, error) {
+func (settings FileSettings) getResourceForToken(baseURI string) (string, error) {
// Compare dafault base URI from the SDK to the endpoints from the public cloud
// Base URI and token resource are the same string. This func finds the authentication
// file field that matches the SDK base URI. The SDK defines the public cloud
@@ -221,15 +467,15 @@ func getResourceForToken(f file, baseURI string) (string, error) {
}
switch baseURI {
case azure.PublicCloud.ServiceManagementEndpoint:
- return f.ManagementEndpoint, nil
+ return settings.Values[ManagementEndpoint], nil
case azure.PublicCloud.ResourceManagerEndpoint:
- return f.ResourceManagerEndpoint, nil
+ return settings.Values[ResourceManagerEndpoint], nil
case azure.PublicCloud.ActiveDirectoryEndpoint:
- return f.ActiveDirectoryEndpoint, nil
+ return settings.Values[ActiveDirectoryEndpoint], nil
case azure.PublicCloud.GalleryEndpoint:
- return f.GalleryEndpoint, nil
+ return settings.Values[GalleryEndpoint], nil
case azure.PublicCloud.GraphEndpoint:
- return f.GraphResourceID, nil
+ return settings.Values[GraphResourceID], nil
}
return "", fmt.Errorf("auth: base URI not found in endpoints")
}
@@ -304,18 +550,21 @@ type ClientCredentialsConfig struct {
Resource string
}
-// Authorizer gets the authorizer from client credentials.
-func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
+// ServicePrincipalToken creates a ServicePrincipalToken from client credentials.
+func (ccc ClientCredentialsConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
if err != nil {
return nil, err
}
+ return adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+}
- spToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource)
+// Authorizer gets the authorizer from client credentials.
+func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := ccc.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err)
}
-
return autorest.NewBearerAuthorizer(spToken), nil
}
@@ -329,26 +578,29 @@ type ClientCertificateConfig struct {
Resource string
}
-// Authorizer gets an authorizer object from client certificate.
-func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
+// ServicePrincipalToken creates a ServicePrincipalToken from client certificate.
+func (ccc ClientCertificateConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID)
-
+ if err != nil {
+ return nil, err
+ }
certData, err := ioutil.ReadFile(ccc.CertificatePath)
if err != nil {
return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err)
}
-
certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword)
if err != nil {
return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err)
}
+ return adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
+}
- spToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource)
-
+// Authorizer gets an authorizer object from client certificate.
+func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := ccc.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err)
}
-
return autorest.NewBearerAuthorizer(spToken), nil
}
@@ -362,26 +614,30 @@ type DeviceFlowConfig struct {
// Authorizer gets the authorizer from device flow.
func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) {
- oauthClient := &autorest.Client{}
+ spToken, err := dfc.ServicePrincipalToken()
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
+ }
+ return autorest.NewBearerAuthorizer(spToken), nil
+}
+
+// ServicePrincipalToken gets the service principal token from device flow.
+func (dfc DeviceFlowConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID)
+ if err != nil {
+ return nil, err
+ }
+ oauthClient := &autorest.Client{}
deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource)
if err != nil {
return nil, fmt.Errorf("failed to start device auth flow: %s", err)
}
-
log.Println(*deviceCode.Message)
-
token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
if err != nil {
return nil, fmt.Errorf("failed to finish device auth flow: %s", err)
}
-
- spToken, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
- if err != nil {
- return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err)
- }
-
- return autorest.NewBearerAuthorizer(spToken), nil
+ return adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token)
}
func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) {
@@ -408,17 +664,21 @@ type UsernamePasswordConfig struct {
Resource string
}
-// Authorizer gets the authorizer from a username and a password.
-func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {
-
+// ServicePrincipalToken creates a ServicePrincipalToken from username and password.
+func (ups UsernamePasswordConfig) ServicePrincipalToken() (*adal.ServicePrincipalToken, error) {
oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID)
+ if err != nil {
+ return nil, err
+ }
+ return adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
+}
- spToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource)
-
+// Authorizer gets the authorizer from a username and a password.
+func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) {
+ spToken, err := ups.ServicePrincipalToken()
if err != nil {
return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err)
}
-
return autorest.NewBearerAuthorizer(spToken), nil
}
@@ -435,9 +695,17 @@ func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) {
return nil, err
}
- spToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
- if err != nil {
- return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
+ var spToken *adal.ServicePrincipalToken
+ if mc.ClientID == "" {
+ spToken, err = adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err)
+ }
+ } else {
+ spToken, err = adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, mc.Resource, mc.ClientID)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get oauth token from MSI for user assigned identity: %v", err)
+ }
}
return autorest.NewBearerAuthorizer(spToken), nil
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
new file mode 100644
index 00000000..a336b958
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go
@@ -0,0 +1,79 @@
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/dimchansky/utfbom"
+ "github.com/mitchellh/go-homedir"
+)
+
+// Profile represents a Profile from the Azure CLI
+type Profile struct {
+ InstallationID string `json:"installationId"`
+ Subscriptions []Subscription `json:"subscriptions"`
+}
+
+// Subscription represents a Subscription from the Azure CLI
+type Subscription struct {
+ EnvironmentName string `json:"environmentName"`
+ ID string `json:"id"`
+ IsDefault bool `json:"isDefault"`
+ Name string `json:"name"`
+ State string `json:"state"`
+ TenantID string `json:"tenantId"`
+ User *User `json:"user"`
+}
+
+// User represents a User from the Azure CLI
+type User struct {
+ Name string `json:"name"`
+ Type string `json:"type"`
+}
+
+const azureProfileJSON = "azureProfile.json"
+
+// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI
+func ProfilePath() (string, error) {
+ if cfgDir := os.Getenv("AZURE_CONFIG_DIR"); cfgDir != "" {
+ return filepath.Join(cfgDir, azureProfileJSON), nil
+ }
+ return homedir.Expand("~/.azure/" + azureProfileJSON)
+}
+
+// LoadProfile restores a Profile object from a file located at 'path'.
+func LoadProfile(path string) (result Profile, err error) {
+ var contents []byte
+ contents, err = ioutil.ReadFile(path)
+ if err != nil {
+ err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+ return
+ }
+ reader := utfbom.SkipOnly(bytes.NewReader(contents))
+
+ dec := json.NewDecoder(reader)
+ if err = dec.Decode(&result); err != nil {
+ err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err)
+ return
+ }
+
+ return
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
new file mode 100644
index 00000000..810075ba
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go
@@ -0,0 +1,170 @@
+package cli
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strconv"
+ "time"
+
+ "github.com/Azure/go-autorest/autorest/adal"
+ "github.com/Azure/go-autorest/autorest/date"
+ "github.com/mitchellh/go-homedir"
+)
+
+// Token represents an AccessToken from the Azure CLI
+type Token struct {
+ AccessToken string `json:"accessToken"`
+ Authority string `json:"_authority"`
+ ClientID string `json:"_clientId"`
+ ExpiresOn string `json:"expiresOn"`
+ IdentityProvider string `json:"identityProvider"`
+ IsMRRT bool `json:"isMRRT"`
+ RefreshToken string `json:"refreshToken"`
+ Resource string `json:"resource"`
+ TokenType string `json:"tokenType"`
+ UserID string `json:"userId"`
+}
+
+// ToADALToken converts an Azure CLI `Token`` to an `adal.Token``
+func (t Token) ToADALToken() (converted adal.Token, err error) {
+ tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn)
+ if err != nil {
+ err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err)
+ return
+ }
+
+ difference := tokenExpirationDate.Sub(date.UnixEpoch())
+
+ converted = adal.Token{
+ AccessToken: t.AccessToken,
+ Type: t.TokenType,
+ ExpiresIn: "3600",
+ ExpiresOn: json.Number(strconv.Itoa(int(difference.Seconds()))),
+ RefreshToken: t.RefreshToken,
+ Resource: t.Resource,
+ }
+ return
+}
+
+// AccessTokensPath returns the path where access tokens are stored from the Azure CLI
+// TODO(#199): add unit test.
+func AccessTokensPath() (string, error) {
+ // Azure-CLI allows user to customize the path of access tokens thorugh environment variable.
+ var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE")
+ var err error
+
+ // Fallback logic to default path on non-cloud-shell environment.
+ // TODO(#200): remove the dependency on hard-coding path.
+ if accessTokenPath == "" {
+ accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json")
+ }
+
+ return accessTokenPath, err
+}
+
+// ParseExpirationDate parses either a Azure CLI or CloudShell date into a time object
+func ParseExpirationDate(input string) (*time.Time, error) {
+ // CloudShell (and potentially the Azure CLI in future)
+ expirationDate, cloudShellErr := time.Parse(time.RFC3339, input)
+ if cloudShellErr != nil {
+ // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone)
+ const cliFormat = "2006-01-02 15:04:05.999999"
+ expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local)
+ if cliErr == nil {
+ return &expirationDate, nil
+ }
+
+ return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr)
+ }
+
+ return &expirationDate, nil
+}
+
+// LoadTokens restores a set of Token objects from a file located at 'path'.
+func LoadTokens(path string) ([]Token, error) {
+ file, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err)
+ }
+ defer file.Close()
+
+ var tokens []Token
+
+ dec := json.NewDecoder(file)
+ if err = dec.Decode(&tokens); err != nil {
+ return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err)
+ }
+
+ return tokens, nil
+}
+
+// GetTokenFromCLI gets a token using Azure CLI 2.0 for local development scenarios.
+func GetTokenFromCLI(resource string) (*Token, error) {
+ // This is the path that a developer can set to tell this class what the install path for Azure CLI is.
+ const azureCLIPath = "AzureCLIPath"
+
+ // The default install paths are used to find Azure CLI. This is for security, so that any path in the calling program's Path environment is not used to execute Azure CLI.
+ azureCLIDefaultPathWindows := fmt.Sprintf("%s\\Microsoft SDKs\\Azure\\CLI2\\wbin; %s\\Microsoft SDKs\\Azure\\CLI2\\wbin", os.Getenv("ProgramFiles(x86)"), os.Getenv("ProgramFiles"))
+
+ // Default path for non-Windows.
+ const azureCLIDefaultPath = "/bin:/sbin:/usr/bin:/usr/local/bin"
+
+ // Validate resource, since it gets sent as a command line argument to Azure CLI
+ const invalidResourceErrorTemplate = "Resource %s is not in expected format. Only alphanumeric characters, [dot], [colon], [hyphen], and [forward slash] are allowed."
+ match, err := regexp.MatchString("^[0-9a-zA-Z-.:/]+$", resource)
+ if err != nil {
+ return nil, err
+ }
+ if !match {
+ return nil, fmt.Errorf(invalidResourceErrorTemplate, resource)
+ }
+
+ // Execute Azure CLI to get token
+ var cliCmd *exec.Cmd
+ if runtime.GOOS == "windows" {
+ cliCmd = exec.Command(fmt.Sprintf("%s\\system32\\cmd.exe", os.Getenv("windir")))
+ cliCmd.Env = os.Environ()
+ cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s;%s", os.Getenv(azureCLIPath), azureCLIDefaultPathWindows))
+ cliCmd.Args = append(cliCmd.Args, "/c", "az")
+ } else {
+ cliCmd = exec.Command("az")
+ cliCmd.Env = os.Environ()
+ cliCmd.Env = append(cliCmd.Env, fmt.Sprintf("PATH=%s:%s", os.Getenv(azureCLIPath), azureCLIDefaultPath))
+ }
+ cliCmd.Args = append(cliCmd.Args, "account", "get-access-token", "-o", "json", "--resource", resource)
+
+ var stderr bytes.Buffer
+ cliCmd.Stderr = &stderr
+
+ output, err := cliCmd.Output()
+ if err != nil {
+ return nil, fmt.Errorf("Invoking Azure CLI failed with the following error: %s", stderr.String())
+ }
+
+ tokenResponse := Token{}
+ err = json.Unmarshal(output, &tokenResponse)
+ if err != nil {
+ return nil, err
+ }
+
+ return &tokenResponse, err
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
index 7e41f7fd..cdde4141 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go
@@ -22,9 +22,14 @@ import (
"strings"
)
-// EnvironmentFilepathName captures the name of the environment variable containing the path to the file
-// to be used while populating the Azure Environment.
-const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
+const (
+ // EnvironmentFilepathName captures the name of the environment variable containing the path to the file
+ // to be used while populating the Azure Environment.
+ EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH"
+
+ // NotAvailable is used for endpoints and resource IDs that are not available for a given cloud.
+ NotAvailable = "N/A"
+)
var environments = map[string]Environment{
"AZURECHINACLOUD": ChinaCloud,
@@ -33,28 +38,39 @@ var environments = map[string]Environment{
"AZUREUSGOVERNMENTCLOUD": USGovernmentCloud,
}
+// ResourceIdentifier contains a set of Azure resource IDs.
+type ResourceIdentifier struct {
+ Graph string `json:"graph"`
+ KeyVault string `json:"keyVault"`
+ Datalake string `json:"datalake"`
+ Batch string `json:"batch"`
+ OperationalInsights string `json:"operationalInsights"`
+}
+
// Environment represents a set of endpoints for each of Azure's Clouds.
type Environment struct {
- Name string `json:"name"`
- ManagementPortalURL string `json:"managementPortalURL"`
- PublishSettingsURL string `json:"publishSettingsURL"`
- ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
- ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
- ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
- GalleryEndpoint string `json:"galleryEndpoint"`
- KeyVaultEndpoint string `json:"keyVaultEndpoint"`
- GraphEndpoint string `json:"graphEndpoint"`
- ServiceBusEndpoint string `json:"serviceBusEndpoint"`
- BatchManagementEndpoint string `json:"batchManagementEndpoint"`
- StorageEndpointSuffix string `json:"storageEndpointSuffix"`
- SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
- TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
- KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
- ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
- ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
- ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
- ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
- TokenAudience string `json:"tokenAudience"`
+ Name string `json:"name"`
+ ManagementPortalURL string `json:"managementPortalURL"`
+ PublishSettingsURL string `json:"publishSettingsURL"`
+ ServiceManagementEndpoint string `json:"serviceManagementEndpoint"`
+ ResourceManagerEndpoint string `json:"resourceManagerEndpoint"`
+ ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"`
+ GalleryEndpoint string `json:"galleryEndpoint"`
+ KeyVaultEndpoint string `json:"keyVaultEndpoint"`
+ GraphEndpoint string `json:"graphEndpoint"`
+ ServiceBusEndpoint string `json:"serviceBusEndpoint"`
+ BatchManagementEndpoint string `json:"batchManagementEndpoint"`
+ StorageEndpointSuffix string `json:"storageEndpointSuffix"`
+ SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"`
+ TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"`
+ KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"`
+ ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"`
+ ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"`
+ ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"`
+ ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"`
+ CosmosDBDNSSuffix string `json:"cosmosDBDNSSuffix"`
+ TokenAudience string `json:"tokenAudience"`
+ ResourceIdentifiers ResourceIdentifier `json:"resourceIdentifiers"`
}
var (
@@ -79,7 +95,15 @@ var (
ServiceManagementVMDNSSuffix: "cloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.azure.com",
ContainerRegistryDNSSuffix: "azurecr.io",
+ CosmosDBDNSSuffix: "documents.azure.com",
TokenAudience: "https://management.azure.com/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.windows.net/",
+ KeyVault: "https://vault.azure.net",
+ Datalake: "https://datalake.azure.net/",
+ Batch: "https://batch.core.windows.net/",
+ OperationalInsights: "https://api.loganalytics.io",
+ },
}
// USGovernmentCloud is the cloud environment for the US Government
@@ -102,8 +126,16 @@ var (
ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net",
ServiceManagementVMDNSSuffix: "usgovcloudapp.net",
ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us",
- ContainerRegistryDNSSuffix: "azurecr.io",
+ ContainerRegistryDNSSuffix: "azurecr.us",
+ CosmosDBDNSSuffix: "documents.azure.us",
TokenAudience: "https://management.usgovcloudapi.net/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.windows.net/",
+ KeyVault: "https://vault.usgovcloudapi.net",
+ Datalake: NotAvailable,
+ Batch: "https://batch.core.usgovcloudapi.net/",
+ OperationalInsights: "https://api.loganalytics.us",
+ },
}
// ChinaCloud is the cloud environment operated in China
@@ -126,8 +158,16 @@ var (
ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn",
ServiceManagementVMDNSSuffix: "chinacloudapp.cn",
ResourceManagerVMDNSSuffix: "cloudapp.azure.cn",
- ContainerRegistryDNSSuffix: "azurecr.io",
+ ContainerRegistryDNSSuffix: "azurecr.cn",
+ CosmosDBDNSSuffix: "documents.azure.cn",
TokenAudience: "https://management.chinacloudapi.cn/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.chinacloudapi.cn/",
+ KeyVault: "https://vault.azure.cn",
+ Datalake: NotAvailable,
+ Batch: "https://batch.chinacloudapi.cn/",
+ OperationalInsights: NotAvailable,
+ },
}
// GermanCloud is the cloud environment operated in Germany
@@ -150,8 +190,16 @@ var (
ServiceBusEndpointSuffix: "servicebus.cloudapi.de",
ServiceManagementVMDNSSuffix: "azurecloudapp.de",
ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de",
- ContainerRegistryDNSSuffix: "azurecr.io",
+ ContainerRegistryDNSSuffix: NotAvailable,
+ CosmosDBDNSSuffix: "documents.microsoftazure.de",
TokenAudience: "https://management.microsoftazure.de/",
+ ResourceIdentifiers: ResourceIdentifier{
+ Graph: "https://graph.cloudapi.de/",
+ KeyVault: "https://vault.microsoftazure.de",
+ Datalake: NotAvailable,
+ Batch: "https://batch.cloudapi.de/",
+ OperationalInsights: NotAvailable,
+ },
}
)
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
index bd34f0ed..86ce9f2b 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go
@@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
}
// poll for registered provisioning state
- now := time.Now()
- for err == nil && time.Since(now) < client.PollingDuration {
+ registrationStartTime := time.Now()
+ for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
// taken from the resources SDK
// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
preparer := autorest.CreatePreparer(
@@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
return originalReq.Context().Err()
}
}
- if !(time.Since(now) < client.PollingDuration) {
+ if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
return errors.New("polling for resource provider registration has exceeded the polling duration")
}
return err
diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go
index 5c558c83..cfc7ed75 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/client.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/client.go
@@ -16,6 +16,7 @@ package autorest
import (
"bytes"
+ "crypto/tls"
"fmt"
"io"
"io/ioutil"
@@ -26,7 +27,7 @@ import (
"time"
"github.com/Azure/go-autorest/logger"
- "github.com/Azure/go-autorest/version"
+ "github.com/Azure/go-autorest/tracing"
)
const (
@@ -147,6 +148,7 @@ type Client struct {
PollingDelay time.Duration
// PollingDuration sets the maximum polling time after which an error is returned.
+ // Setting this to zero will use the provided context to control the duration.
PollingDuration time.Duration
// RetryAttempts sets the default number of retry attempts for client.
@@ -168,14 +170,32 @@ type Client struct {
// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed
// string.
func NewClientWithUserAgent(ua string) Client {
+ return newClient(ua, tls.RenegotiateNever)
+}
+
+// ClientOptions contains various Client configuration options.
+type ClientOptions struct {
+ // UserAgent is an optional user-agent string to append to the default user agent.
+ UserAgent string
+
+ // Renegotiation is an optional setting to control client-side TLS renegotiation.
+ Renegotiation tls.RenegotiationSupport
+}
+
+// NewClientWithOptions returns an instance of a Client with the specified values.
+func NewClientWithOptions(options ClientOptions) Client {
+ return newClient(options.UserAgent, options.Renegotiation)
+}
+
+func newClient(ua string, renegotiation tls.RenegotiationSupport) Client {
c := Client{
PollingDelay: DefaultPollingDelay,
PollingDuration: DefaultPollingDuration,
RetryAttempts: DefaultRetryAttempts,
RetryDuration: DefaultRetryDuration,
- UserAgent: version.UserAgent(),
+ UserAgent: UserAgent(),
}
- c.Sender = c.sender()
+ c.Sender = c.sender(renegotiation)
c.AddToUserAgent(ua)
return c
}
@@ -219,18 +239,39 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
return true, v
},
})
- resp, err := SendWithSender(c.sender(), r)
+ resp, err := SendWithSender(c.sender(tls.RenegotiateNever), r)
logger.Instance.WriteResponse(resp, logger.Filter{})
Respond(resp, c.ByInspecting())
return resp, err
}
// sender returns the Sender to which to send requests.
-func (c Client) sender() Sender {
+func (c Client) sender(renengotiation tls.RenegotiationSupport) Sender {
if c.Sender == nil {
+ // Use behaviour compatible with DefaultTransport, but require TLS minimum version.
+ var defaultTransport = http.DefaultTransport.(*http.Transport)
+ transport := tracing.Transport
+ // for non-default values of TLS renegotiation create a new tracing transport.
+ // updating tracing.Transport affects all clients which is not what we want.
+ if renengotiation != tls.RenegotiateNever {
+ transport = tracing.NewTransport()
+ }
+ transport.Base = &http.Transport{
+ Proxy: defaultTransport.Proxy,
+ DialContext: defaultTransport.DialContext,
+ MaxIdleConns: defaultTransport.MaxIdleConns,
+ IdleConnTimeout: defaultTransport.IdleConnTimeout,
+ TLSHandshakeTimeout: defaultTransport.TLSHandshakeTimeout,
+ ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout,
+ TLSClientConfig: &tls.Config{
+ MinVersion: tls.VersionTLS12,
+ Renegotiation: renengotiation,
+ },
+ }
j, _ := cookiejar.New(nil)
- return &http.Client{Jar: j}
+ return &http.Client{Jar: j, Transport: transport}
}
+
return c.Sender
}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go
index cacbd815..6665d7c0 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/sender.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go
@@ -21,6 +21,8 @@ import (
"net/http"
"strconv"
"time"
+
+ "github.com/Azure/go-autorest/tracing"
)
// Sender is the interface that wraps the Do method to send HTTP requests.
@@ -38,7 +40,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
return sf(r)
}
-// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
+// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
@@ -68,7 +70,7 @@ func DecorateSender(s Sender, decorators ...SendDecorator) Sender {
//
// Send will not poll or retry requests.
func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) {
- return SendWithSender(&http.Client{}, r, decorators...)
+ return SendWithSender(&http.Client{Transport: tracing.Transport}, r, decorators...)
}
// SendWithSender sends the passed http.Request, through the provided Sender, returning the
@@ -216,8 +218,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
return SenderFunc(func(r *http.Request) (resp *http.Response, err error) {
rr := NewRetriableRequest(r)
// Increment to add the first call (attempts denotes number of retries)
- attempts++
- for attempt := 0; attempt < attempts; {
+ for attempt := 0; attempt < attempts+1; {
err = rr.Prepare()
if err != nil {
return resp, err
@@ -234,7 +235,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
}
delayed := DelayWithRetryAfter(resp, r.Context().Done())
if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
- return nil, r.Context().Err()
+ return resp, r.Context().Err()
}
// don't count a 429 against the number of attempts
// so that we continue to retry until it succeeds
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go
index bfddd90b..08cf11c1 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/utility.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go
@@ -157,7 +157,7 @@ func AsStringSlice(s interface{}) ([]string, error) {
}
// String method converts interface v to string. If interface is a list, it
-// joins list elements using the seperator. Note that only sep[0] will be used for
+// joins list elements using the separator. Note that only sep[0] will be used for
// joining if any separator is specified.
func String(v interface{}, sep ...string) string {
if len(sep) == 0 {
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
index ae987f8f..65899b69 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go
@@ -398,11 +398,3 @@ func toInt64(v interface{}) (int64, bool) {
}
return 0, false
}
-
-// NewErrorWithValidationError appends package type and method name in
-// validation error.
-//
-// Deprecated: Please use validation.NewError() instead.
-func NewErrorWithValidationError(err error, packageType, method string) error {
- return NewError(packageType, method, err.Error())
-}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go
index 3c645154..0c8d9d22 100644
--- a/vendor/github.com/Azure/go-autorest/autorest/version.go
+++ b/vendor/github.com/Azure/go-autorest/autorest/version.go
@@ -1,7 +1,5 @@
package autorest
-import "github.com/Azure/go-autorest/version"
-
// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,7 +14,28 @@ import "github.com/Azure/go-autorest/version"
// See the License for the specific language governing permissions and
// limitations under the License.
+import (
+ "fmt"
+ "runtime"
+)
+
+const number = "v12.0.0"
+
+var (
+ userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
+ runtime.Version(),
+ runtime.GOARCH,
+ runtime.GOOS,
+ number,
+ )
+)
+
+// UserAgent returns a string containing the Go version, system architecture and OS, and the go-autorest version.
+func UserAgent() string {
+ return userAgent
+}
+
// Version returns the semantic version (see http://semver.org).
func Version() string {
- return version.Number
+ return number
}
diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go
index 756fd80c..da09f394 100644
--- a/vendor/github.com/Azure/go-autorest/logger/logger.go
+++ b/vendor/github.com/Azure/go-autorest/logger/logger.go
@@ -162,7 +162,7 @@ type Writer interface {
// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
// Custom filters can be specified to exclude URL, header, and/or body content from the log.
- // By default no respone content is excluded.
+ // By default no response content is excluded.
WriteResponse(resp *http.Response, filter Filter)
}
@@ -318,7 +318,7 @@ func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
// returns true if the provided body should be included in the log
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
ct := header.Get("Content-Type")
- return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1
+ return fl.logLevel >= LogDebug && body != nil && !strings.Contains(ct, "application/octet-stream")
}
// creates standard header for log entries, it contains a timestamp and the log level
diff --git a/vendor/github.com/Azure/go-autorest/tracing/tracing.go b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
new file mode 100644
index 00000000..28951c28
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/tracing/tracing.go
@@ -0,0 +1,195 @@
+package tracing
+
+// Copyright 2018 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+ "context"
+ "fmt"
+ "net/http"
+ "os"
+
+ "contrib.go.opencensus.io/exporter/ocagent"
+ "go.opencensus.io/plugin/ochttp"
+ "go.opencensus.io/plugin/ochttp/propagation/tracecontext"
+ "go.opencensus.io/stats/view"
+ "go.opencensus.io/trace"
+)
+
+var (
+ // Transport is the default tracing RoundTripper. The custom options setter will control
+ // if traces are being emitted or not.
+ Transport = NewTransport()
+
+ // enabled is the flag for marking if tracing is enabled.
+ enabled = false
+
+ // Sampler is the tracing sampler. If tracing is disabled it will never sample. Otherwise
+ // it will be using the parent sampler or the default.
+ sampler = trace.NeverSample()
+
+ // Views for metric instrumentation.
+ views = map[string]*view.View{}
+
+ // the trace exporter
+ traceExporter trace.Exporter
+)
+
+func init() {
+ enableFromEnv()
+}
+
+func enableFromEnv() {
+ _, ok := os.LookupEnv("AZURE_SDK_TRACING_ENABLED")
+ _, legacyOk := os.LookupEnv("AZURE_SDK_TRACING_ENABELD")
+ if ok || legacyOk {
+ agentEndpoint, ok := os.LookupEnv("OCAGENT_TRACE_EXPORTER_ENDPOINT")
+
+ if ok {
+ EnableWithAIForwarding(agentEndpoint)
+ } else {
+ Enable()
+ }
+ }
+}
+
+// NewTransport returns a new instance of a tracing-aware RoundTripper.
+func NewTransport() *ochttp.Transport {
+ return &ochttp.Transport{
+ Propagation: &tracecontext.HTTPFormat{},
+ GetStartOptions: getStartOptions,
+ }
+}
+
+// IsEnabled returns true if monitoring is enabled for the sdk.
+func IsEnabled() bool {
+ return enabled
+}
+
+// Enable will start instrumentation for metrics and traces.
+func Enable() error {
+ enabled = true
+ sampler = nil
+
+ err := initStats()
+ return err
+}
+
+// Disable will disable instrumentation for metrics and traces.
+func Disable() {
+ disableStats()
+ sampler = trace.NeverSample()
+ if traceExporter != nil {
+ trace.UnregisterExporter(traceExporter)
+ }
+ enabled = false
+}
+
+// EnableWithAIForwarding will start instrumentation and will connect to app insights forwarder
+// exporter making the metrics and traces available in app insights.
+func EnableWithAIForwarding(agentEndpoint string) (err error) {
+ err = Enable()
+ if err != nil {
+ return err
+ }
+
+ traceExporter, err := ocagent.NewExporter(ocagent.WithInsecure(), ocagent.WithAddress(agentEndpoint))
+ if err != nil {
+ return err
+ }
+ trace.RegisterExporter(traceExporter)
+ return
+}
+
+// getStartOptions is the custom options setter for the ochttp package.
+func getStartOptions(*http.Request) trace.StartOptions {
+ return trace.StartOptions{
+ Sampler: sampler,
+ }
+}
+
+// initStats registers the views for the http metrics
+func initStats() (err error) {
+ clientViews := []*view.View{
+ ochttp.ClientCompletedCount,
+ ochttp.ClientRoundtripLatencyDistribution,
+ ochttp.ClientReceivedBytesDistribution,
+ ochttp.ClientSentBytesDistribution,
+ }
+ for _, cv := range clientViews {
+ vn := fmt.Sprintf("Azure/go-autorest/tracing-%s", cv.Name)
+ views[vn] = cv.WithName(vn)
+ err = view.Register(views[vn])
+ if err != nil {
+ return err
+ }
+ }
+ return
+}
+
+// disableStats will unregister the previously registered metrics
+func disableStats() {
+ for _, v := range views {
+ view.Unregister(v)
+ }
+}
+
+// StartSpan starts a trace span
+func StartSpan(ctx context.Context, name string) context.Context {
+ ctx, _ = trace.StartSpan(ctx, name, trace.WithSampler(sampler))
+ return ctx
+}
+
+// EndSpan ends a previously started span stored in the context
+func EndSpan(ctx context.Context, httpStatusCode int, err error) {
+ span := trace.FromContext(ctx)
+
+ if span == nil {
+ return
+ }
+
+ if err != nil {
+ span.SetStatus(trace.Status{Message: err.Error(), Code: toTraceStatusCode(httpStatusCode)})
+ }
+ span.End()
+}
+
+// toTraceStatusCode converts HTTP Codes to OpenCensus codes as defined
+// at https://github.com/census-instrumentation/opencensus-specs/blob/master/trace/HTTP.md#status
+func toTraceStatusCode(httpStatusCode int) int32 {
+ switch {
+ case http.StatusOK <= httpStatusCode && httpStatusCode < http.StatusBadRequest:
+ return trace.StatusCodeOK
+ case httpStatusCode == http.StatusBadRequest:
+ return trace.StatusCodeInvalidArgument
+ case httpStatusCode == http.StatusUnauthorized: // 401 is actually unauthenticated.
+ return trace.StatusCodeUnauthenticated
+ case httpStatusCode == http.StatusForbidden:
+ return trace.StatusCodePermissionDenied
+ case httpStatusCode == http.StatusNotFound:
+ return trace.StatusCodeNotFound
+ case httpStatusCode == http.StatusTooManyRequests:
+ return trace.StatusCodeResourceExhausted
+ case httpStatusCode == 499:
+ return trace.StatusCodeCancelled
+ case httpStatusCode == http.StatusNotImplemented:
+ return trace.StatusCodeUnimplemented
+ case httpStatusCode == http.StatusServiceUnavailable:
+ return trace.StatusCodeUnavailable
+ case httpStatusCode == http.StatusGatewayTimeout:
+ return trace.StatusCodeDeadlineExceeded
+ default:
+ return trace.StatusCodeUnknown
+ }
+}
diff --git a/vendor/github.com/Azure/go-autorest/version/version.go b/vendor/github.com/Azure/go-autorest/version/version.go
deleted file mode 100644
index ad2d6099..00000000
--- a/vendor/github.com/Azure/go-autorest/version/version.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package version
-
-// Copyright 2017 Microsoft Corporation
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-import (
- "fmt"
- "runtime"
-)
-
-// Number contains the semantic version of this SDK.
-const Number = "v10.15.4"
-
-var (
- userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
- runtime.Version(),
- runtime.GOARCH,
- runtime.GOOS,
- Number,
- )
-)
-
-// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version.
-func UserAgent() string {
- return userAgent
-}
diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE b/vendor/github.com/NYTimes/gziphandler/LICENSE
deleted file mode 100644
index df6192d3..00000000
--- a/vendor/github.com/NYTimes/gziphandler/LICENSE
+++ /dev/null
@@ -1,201 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2016-2017 The New York Times Company
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/github.com/NYTimes/gziphandler/LICENSE.md b/vendor/github.com/NYTimes/gziphandler/LICENSE.md
new file mode 100644
index 00000000..b7e2ecb6
--- /dev/null
+++ b/vendor/github.com/NYTimes/gziphandler/LICENSE.md
@@ -0,0 +1,13 @@
+Copyright (c) 2015 The New York Times Company
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this library except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/github.com/NYTimes/gziphandler/gzip.go b/vendor/github.com/NYTimes/gziphandler/gzip.go
index f91dcfa1..ea6dba1e 100644
--- a/vendor/github.com/NYTimes/gziphandler/gzip.go
+++ b/vendor/github.com/NYTimes/gziphandler/gzip.go
@@ -28,10 +28,9 @@ const (
// The examples seem to indicate that it is.
DefaultQValue = 1.0
- // 1500 bytes is the MTU size for the internet since that is the largest size allowed at the network layer.
- // If you take a file that is 1300 bytes and compress it to 800 bytes, it’s still transmitted in that same 1500 byte packet regardless, so you’ve gained nothing.
- // That being the case, you should restrict the gzip compression to files with a size greater than a single packet, 1400 bytes (1.4KB) is a safe value.
- DefaultMinSize = 1400
+ // DefaultMinSize defines the minimum size to reach to enable compression.
+ // It's 512 bytes.
+ DefaultMinSize = 512
)
// gzipWriterPools stores a sync.Pool for each compression level for reuse of
@@ -81,16 +80,6 @@ type GzipResponseWriter struct {
minSize int // Specifed the minimum response size to gzip. If the response length is bigger than this value, it is compressed.
buf []byte // Holds the first part of the write before reaching the minSize or the end of the write.
-
- contentTypes []string // Only compress if the response is one of these content-types. All are accepted if empty.
-}
-
-type GzipResponseWriterWithCloseNotify struct {
- *GzipResponseWriter
-}
-
-func (w GzipResponseWriterWithCloseNotify) CloseNotify() <-chan bool {
- return w.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Write appends data to the gzip writer.
@@ -111,10 +100,8 @@ func (w *GzipResponseWriter) Write(b []byte) (int, error) {
// On the first write, w.buf changes from nil to a valid slice
w.buf = append(w.buf, b...)
- // If the global writes are bigger than the minSize and we're about to write
- // a response containing a content type we want to handle, enable
- // compression.
- if len(w.buf) >= w.minSize && handleContentType(w.contentTypes, w) && w.Header().Get(contentEncoding) == "" {
+ // If the global writes are bigger than the minSize, compression is enable.
+ if len(w.buf) >= w.minSize {
err := w.startGzip()
if err != nil {
return 0, err
@@ -143,7 +130,7 @@ func (w *GzipResponseWriter) startGzip() error {
// Initialize the GZIP response.
w.init()
- // Flush the buffer into the gzip response.
+ // Flush the buffer into the gzip reponse.
n, err := w.gw.Write(w.buf)
// This should never happen (per io.Writer docs), but if the write didn't
@@ -159,9 +146,7 @@ func (w *GzipResponseWriter) startGzip() error {
// WriteHeader just saves the response code until close or GZIP effective writes.
func (w *GzipResponseWriter) WriteHeader(code int) {
- if w.code == 0 {
- w.code = code
- }
+ w.code = code
}
// init graps a new gzip writer from the gzipWriterPool and writes the correct
@@ -201,16 +186,10 @@ func (w *GzipResponseWriter) Close() error {
// http.ResponseWriter if it is an http.Flusher. This makes GzipResponseWriter
// an http.Flusher.
func (w *GzipResponseWriter) Flush() {
- if w.gw == nil {
- // Only flush once startGzip has been called.
- //
- // Flush is thus a no-op until the written body
- // exceeds minSize.
- return
+ if w.gw != nil {
+ w.gw.Flush()
}
- w.gw.Flush()
-
if fw, ok := w.ResponseWriter.(http.Flusher); ok {
fw.Flush()
}
@@ -251,44 +230,27 @@ func NewGzipLevelHandler(level int) (func(http.Handler) http.Handler, error) {
// NewGzipLevelAndMinSize behave as NewGzipLevelHandler except it let the caller
// specify the minimum size before compression.
func NewGzipLevelAndMinSize(level, minSize int) (func(http.Handler) http.Handler, error) {
- return GzipHandlerWithOpts(CompressionLevel(level), MinSize(minSize))
-}
-
-func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error) {
- c := &config{
- level: gzip.DefaultCompression,
- minSize: DefaultMinSize,
+ if level != gzip.DefaultCompression && (level < gzip.BestSpeed || level > gzip.BestCompression) {
+ return nil, fmt.Errorf("invalid compression level requested: %d", level)
}
-
- for _, o := range opts {
- o(c)
- }
-
- if err := c.validate(); err != nil {
- return nil, err
+ if minSize < 0 {
+ return nil, fmt.Errorf("minimum size must be more than zero")
}
-
return func(h http.Handler) http.Handler {
- index := poolIndex(c.level)
+ index := poolIndex(level)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add(vary, acceptEncoding)
+
if acceptsGzip(r) {
gw := &GzipResponseWriter{
ResponseWriter: w,
index: index,
- minSize: c.minSize,
- contentTypes: c.contentTypes,
+ minSize: minSize,
}
defer gw.Close()
- if _, ok := w.(http.CloseNotifier); ok {
- gwcn := GzipResponseWriterWithCloseNotify{gw}
- h.ServeHTTP(gwcn, r)
- } else {
- h.ServeHTTP(gw, r)
- }
-
+ h.ServeHTTP(gw, r)
} else {
h.ServeHTTP(w, r)
}
@@ -296,48 +258,6 @@ func GzipHandlerWithOpts(opts ...option) (func(http.Handler) http.Handler, error
}, nil
}
-// Used for functional configuration.
-type config struct {
- minSize int
- level int
- contentTypes []string
-}
-
-func (c *config) validate() error {
- if c.level != gzip.DefaultCompression && (c.level < gzip.BestSpeed || c.level > gzip.BestCompression) {
- return fmt.Errorf("invalid compression level requested: %d", c.level)
- }
-
- if c.minSize < 0 {
- return fmt.Errorf("minimum size must be more than zero")
- }
-
- return nil
-}
-
-type option func(c *config)
-
-func MinSize(size int) option {
- return func(c *config) {
- c.minSize = size
- }
-}
-
-func CompressionLevel(level int) option {
- return func(c *config) {
- c.level = level
- }
-}
-
-func ContentTypes(types []string) option {
- return func(c *config) {
- c.contentTypes = []string{}
- for _, v := range types {
- c.contentTypes = append(c.contentTypes, strings.ToLower(v))
- }
- }
-}
-
// GzipHandler wraps an HTTP handler, to transparently gzip the response body if
// the client supports it (via the Accept-Encoding header). This will compress at
// the default compression level.
@@ -353,23 +273,6 @@ func acceptsGzip(r *http.Request) bool {
return acceptedEncodings["gzip"] > 0.0
}
-// returns true if we've been configured to compress the specific content type.
-func handleContentType(contentTypes []string, w http.ResponseWriter) bool {
- // If contentTypes is empty we handle all content types.
- if len(contentTypes) == 0 {
- return true
- }
-
- ct := strings.ToLower(w.Header().Get(contentType))
- for _, c := range contentTypes {
- if c == ct {
- return true
- }
- }
-
- return false
-}
-
// parseEncodings attempts to parse a list of codings, per RFC 2616, as might
// appear in an Accept-Encoding header. It returns a map of content-codings to
// quality values, and an error containing the errors encountered. It's probably
diff --git a/vendor/github.com/Sirupsen/logrus/.gitignore b/vendor/github.com/Sirupsen/logrus/.gitignore
deleted file mode 100644
index 6b7d7d1e..00000000
--- a/vendor/github.com/Sirupsen/logrus/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-logrus
-vendor
diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml
deleted file mode 100644
index 1f953beb..00000000
--- a/vendor/github.com/Sirupsen/logrus/.travis.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-language: go
-env:
- - GOMAXPROCS=4 GORACE=halt_on_error=1
-matrix:
- include:
- - go: 1.10.x
- install:
- - go get github.com/stretchr/testify/assert
- - go get golang.org/x/crypto/ssh/terminal
- - go get golang.org/x/sys/unix
- - go get golang.org/x/sys/windows
- script:
- - go test -race -v ./...
- - go: 1.11.x
- env: GO111MODULE=on
- install:
- - go mod download
- script:
- - go test -race -v ./...
- - go: 1.11.x
- env: GO111MODULE=off
- install:
- - go get github.com/stretchr/testify/assert
- - go get golang.org/x/crypto/ssh/terminal
- - go get golang.org/x/sys/unix
- - go get golang.org/x/sys/windows
- script:
- - go test -race -v ./...
- - go: 1.10.x
- install:
- - go get github.com/stretchr/testify/assert
- - go get golang.org/x/crypto/ssh/terminal
- - go get golang.org/x/sys/unix
- - go get golang.org/x/sys/windows
- script:
- - go test -race -v -tags appengine ./...
- - go: 1.11.x
- env: GO111MODULE=on
- install:
- - go mod download
- script:
- - go test -race -v -tags appengine ./...
- - go: 1.11.x
- env: GO111MODULE=off
- install:
- - go get github.com/stretchr/testify/assert
- - go get golang.org/x/crypto/ssh/terminal
- - go get golang.org/x/sys/unix
- - go get golang.org/x/sys/windows
- script:
- - go test -race -v -tags appengine ./...
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
deleted file mode 100644
index cb85d9f9..00000000
--- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
+++ /dev/null
@@ -1,165 +0,0 @@
-# 1.2.0
-This new release introduces:
- * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued
- * A new trace level named `Trace` whose level is below `Debug`
- * A configurable exit function to be called upon a Fatal trace
- * The `Level` object now implements `encoding.TextUnmarshaler` interface
-
-# 1.1.1
-This is a bug fix release.
- * fix the build break on Solaris
- * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized
-
-# 1.1.0
-This new release introduces:
- * several fixes:
- * a fix for a race condition on entry formatting
- * proper cleanup of previously used entries before putting them back in the pool
- * the extra new line at the end of message in text formatter has been removed
- * a new global public API to check if a level is activated: IsLevelEnabled
- * the following methods have been added to the Logger object
- * IsLevelEnabled
- * SetFormatter
- * SetOutput
- * ReplaceHooks
- * introduction of go module
- * an indent configuration for the json formatter
- * output colour support for windows
- * the field sort function is now configurable for text formatter
- * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater
-
-# 1.0.6
-
-This new release introduces:
- * a new api WithTime which allows to easily force the time of the log entry
- which is mostly useful for logger wrapper
- * a fix reverting the immutability of the entry given as parameter to the hooks
- a new configuration field of the json formatter in order to put all the fields
- in a nested dictionnary
- * a new SetOutput method in the Logger
- * a new configuration of the textformatter to configure the name of the default keys
- * a new configuration of the text formatter to disable the level truncation
-
-# 1.0.5
-
-* Fix hooks race (#707)
-* Fix panic deadlock (#695)
-
-# 1.0.4
-
-* Fix race when adding hooks (#612)
-* Fix terminal check in AppEngine (#635)
-
-# 1.0.3
-
-* Replace example files with testable examples
-
-# 1.0.2
-
-* bug: quote non-string values in text formatter (#583)
-* Make (*Logger) SetLevel a public method
-
-# 1.0.1
-
-* bug: fix escaping in text formatter (#575)
-
-# 1.0.0
-
-* Officially changed name to lower-case
-* bug: colors on Windows 10 (#541)
-* bug: fix race in accessing level (#512)
-
-# 0.11.5
-
-* feature: add writer and writerlevel to entry (#372)
-
-# 0.11.4
-
-* bug: fix undefined variable on solaris (#493)
-
-# 0.11.3
-
-* formatter: configure quoting of empty values (#484)
-* formatter: configure quoting character (default is `"`) (#484)
-* bug: fix not importing io correctly in non-linux environments (#481)
-
-# 0.11.2
-
-* bug: fix windows terminal detection (#476)
-
-# 0.11.1
-
-* bug: fix tty detection with custom out (#471)
-
-# 0.11.0
-
-* performance: Use bufferpool to allocate (#370)
-* terminal: terminal detection for app-engine (#343)
-* feature: exit handler (#375)
-
-# 0.10.0
-
-* feature: Add a test hook (#180)
-* feature: `ParseLevel` is now case-insensitive (#326)
-* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
-* performance: avoid re-allocations on `WithFields` (#335)
-
-# 0.9.0
-
-* logrus/text_formatter: don't emit empty msg
-* logrus/hooks/airbrake: move out of main repository
-* logrus/hooks/sentry: move out of main repository
-* logrus/hooks/papertrail: move out of main repository
-* logrus/hooks/bugsnag: move out of main repository
-* logrus/core: run tests with `-race`
-* logrus/core: detect TTY based on `stderr`
-* logrus/core: support `WithError` on logger
-* logrus/core: Solaris support
-
-# 0.8.7
-
-* logrus/core: fix possible race (#216)
-* logrus/doc: small typo fixes and doc improvements
-
-
-# 0.8.6
-
-* hooks/raven: allow passing an initialized client
-
-# 0.8.5
-
-* logrus/core: revert #208
-
-# 0.8.4
-
-* formatter/text: fix data race (#218)
-
-# 0.8.3
-
-* logrus/core: fix entry log level (#208)
-* logrus/core: improve performance of text formatter by 40%
-* logrus/core: expose `LevelHooks` type
-* logrus/core: add support for DragonflyBSD and NetBSD
-* formatter/text: print structs more verbosely
-
-# 0.8.2
-
-* logrus: fix more Fatal family functions
-
-# 0.8.1
-
-* logrus: fix not exiting on `Fatalf` and `Fatalln`
-
-# 0.8.0
-
-* logrus: defaults to stderr instead of stdout
-* hooks/sentry: add special field for `*http.Request`
-* formatter/text: ignore Windows for colors
-
-# 0.7.3
-
-* formatter/\*: allow configuration of timestamp layout
-
-# 0.7.2
-
-* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE
deleted file mode 100644
index f090cb42..00000000
--- a/vendor/github.com/Sirupsen/logrus/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Simon Eskildsen
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
deleted file mode 100644
index 093bb13f..00000000
--- a/vendor/github.com/Sirupsen/logrus/README.md
+++ /dev/null
@@ -1,493 +0,0 @@
-# Logrus
[](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
-
-Logrus is a structured logger for Go (golang), completely API compatible with
-the standard library logger.
-
-**Seeing weird case-sensitive problems?** It's in the past been possible to
-import Logrus as both upper- and lower-case. Due to the Go package environment,
-this caused issues in the community and we needed a standard. Some environments
-experienced problems with the upper-case variant, so the lower-case was decided.
-Everything using `logrus` will need to use the lower-case:
-`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
-
-To fix Glide, see [these
-comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
-For an in-depth explanation of the casing issue, see [this
-comment](https://github.com/sirupsen/logrus/issues/570#issuecomment-313933276).
-
-**Are you interested in assisting in maintaining Logrus?** Currently I have a
-lot of obligations, and I am unable to provide Logrus with the maintainership it
-needs. If you'd like to help, please reach out to me at `simon at author's
-username dot com`.
-
-Nicely color-coded in development (when a TTY is attached, otherwise just
-plain text):
-
-
-
-With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
-or Splunk:
-
-```json
-{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
-ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
-
-{"level":"warning","msg":"The group's number increased tremendously!",
-"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
-"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
-
-{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
-"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
-
-{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
-"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
-```
-
-With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
-attached, the output is compatible with the
-[logfmt](http://godoc.org/github.com/kr/logfmt) format:
-
-```text
-time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
-time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
-time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
-time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
-time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
-time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
-```
-To ensure this behaviour even if a TTY is attached, set your formatter as follows:
-
-```go
- log.SetFormatter(&log.TextFormatter{
- DisableColors: true,
- FullTimestamp: true,
- })
-```
-
-#### Logging Method Name
-
-If you wish to add the calling method as a field, instruct the logger via:
-```go
-log.SetReportCaller(true)
-```
-This adds the caller as 'method' like so:
-
-```json
-{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by",
-"time":"2014-03-10 19:57:38.562543129 -0400 EDT"}
-```
-
-```text
-time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin
-```
-Note that this does add measurable overhead - the cost will depend on the version of Go, but is
-between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your
-environment via benchmarks:
-```
-go test -bench=.*CallerTracing
-```
-
-
-#### Case-sensitivity
-
-The organization's name was changed to lower-case--and this will not be changed
-back. If you are getting import conflicts due to case sensitivity, please use
-the lower-case import: `github.com/sirupsen/logrus`.
-
-#### Example
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
-```go
-package main
-
-import (
- log "github.com/sirupsen/logrus"
-)
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- }).Info("A walrus appears")
-}
-```
-
-Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
-and you'll now have the flexibility of Logrus. You can customize it all you
-want:
-
-```go
-package main
-
-import (
- "os"
- log "github.com/sirupsen/logrus"
-)
-
-func init() {
- // Log as JSON instead of the default ASCII formatter.
- log.SetFormatter(&log.JSONFormatter{})
-
- // Output to stdout instead of the default stderr
- // Can be any io.Writer, see below for File example
- log.SetOutput(os.Stdout)
-
- // Only log the warning severity or above.
- log.SetLevel(log.WarnLevel)
-}
-
-func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 122,
- }).Warn("The group's number increased tremendously!")
-
- log.WithFields(log.Fields{
- "omg": true,
- "number": 100,
- }).Fatal("The ice breaks!")
-
- // A common pattern is to re-use fields between logging statements by re-using
- // the logrus.Entry returned from WithFields()
- contextLogger := log.WithFields(log.Fields{
- "common": "this is a common field",
- "other": "I also should be logged always",
- })
-
- contextLogger.Info("I'll be logged with common and other field")
- contextLogger.Info("Me too")
-}
-```
-
-For more advanced usage such as logging to multiple locations from the same
-application, you can also create an instance of the `logrus` Logger:
-
-```go
-package main
-
-import (
- "os"
- "github.com/sirupsen/logrus"
-)
-
-// Create a new instance of the logger. You can have any number of instances.
-var log = logrus.New()
-
-func main() {
- // The API for setting attributes is a little different than the package level
- // exported logger. See Godoc.
- log.Out = os.Stdout
-
- // You could set this to any `io.Writer` such as a file
- // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
- // if err == nil {
- // log.Out = file
- // } else {
- // log.Info("Failed to log to file, using default stderr")
- // }
-
- log.WithFields(logrus.Fields{
- "animal": "walrus",
- "size": 10,
- }).Info("A group of walrus emerges from the ocean")
-}
-```
-
-#### Fields
-
-Logrus encourages careful, structured logging through logging fields instead of
-long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
-to send event %s to topic %s with key %d")`, you should log the much more
-discoverable:
-
-```go
-log.WithFields(log.Fields{
- "event": event,
- "topic": topic,
- "key": key,
-}).Fatal("Failed to send event")
-```
-
-We've found this API forces you to think about logging in a way that produces
-much more useful logging messages. We've been in countless situations where just
-a single added field to a log statement that was already there would've saved us
-hours. The `WithFields` call is optional.
-
-In general, with Logrus using any of the `printf`-family functions should be
-seen as a hint you should add a field, however, you can still use the
-`printf`-family functions with Logrus.
-
-#### Default Fields
-
-Often it's helpful to have fields _always_ attached to log statements in an
-application or parts of one. For example, you may want to always log the
-`request_id` and `user_ip` in the context of a request. Instead of writing
-`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
-every line, you can create a `logrus.Entry` to pass around instead:
-
-```go
-requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
-requestLogger.Info("something happened on that request") # will log request_id and user_ip
-requestLogger.Warn("something not great happened")
-```
-
-#### Hooks
-
-You can add hooks for logging levels. For example to send errors to an exception
-tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
-multiple places simultaneously, e.g. syslog.
-
-Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
-`init`:
-
-```go
-import (
- log "github.com/sirupsen/logrus"
- "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
- logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
- "log/syslog"
-)
-
-func init() {
-
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(airbrake.NewHook(123, "xyz", "production"))
-
- hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
- if err != nil {
- log.Error("Unable to connect to local syslog daemon")
- } else {
- log.AddHook(hook)
- }
-}
-```
-Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-
-A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks)
-
-
-#### Level logging
-
-Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic.
-
-```go
-log.Trace("Something very low level.")
-log.Debug("Useful debugging information.")
-log.Info("Something noteworthy happened!")
-log.Warn("You should probably take a look at this.")
-log.Error("Something failed but I'm not quitting.")
-// Calls os.Exit(1) after logging
-log.Fatal("Bye.")
-// Calls panic() after logging
-log.Panic("I'm bailing.")
-```
-
-You can set the logging level on a `Logger`, then it will only log entries with
-that severity or anything above it:
-
-```go
-// Will log anything that is info or above (warn, error, fatal, panic). Default.
-log.SetLevel(log.InfoLevel)
-```
-
-It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
-environment if your application has that.
-
-#### Entries
-
-Besides the fields added with `WithField` or `WithFields` some fields are
-automatically added to all logging events:
-
-1. `time`. The timestamp when the entry was created.
-2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
- the `AddFields` call. E.g. `Failed to send event.`
-3. `level`. The logging level. E.g. `info`.
-
-#### Environments
-
-Logrus has no notion of environment.
-
-If you wish for hooks and formatters to only be used in specific environments,
-you should handle that yourself. For example, if your application has a global
-variable `Environment`, which is a string representation of the environment you
-could do:
-
-```go
-import (
- log "github.com/sirupsen/logrus"
-)
-
-init() {
- // do something here to set environment depending on an environment variable
- // or command-line flag
- if Environment == "production" {
- log.SetFormatter(&log.JSONFormatter{})
- } else {
- // The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(&log.TextFormatter{})
- }
-}
-```
-
-This configuration is how `logrus` was intended to be used, but JSON in
-production is mostly only useful if you do log aggregation with tools like
-Splunk or Logstash.
-
-#### Formatters
-
-The built-in logging formatters are:
-
-* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
- without colors.
- * *Note:* to force colored output when there is no TTY, set the `ForceColors`
- field to `true`. To force no colored output even if there is a TTY set the
- `DisableColors` field to `true`. For Windows, see
- [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
- * When colors are enabled, levels are truncated to 4 characters by default. To disable
- truncation set the `DisableLevelTruncation` field to `true`.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
-* `logrus.JSONFormatter`. Logs fields as JSON.
- * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
-
-Third party logging formatters:
-
-* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine.
-* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
-* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
-* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
-
-You can define your formatter by implementing the `Formatter` interface,
-requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
-`Fields` type (`map[string]interface{}`) with all your fields as well as the
-default ones (see Entries section above):
-
-```go
-type MyJSONFormatter struct {
-}
-
-log.SetFormatter(new(MyJSONFormatter))
-
-func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
- // Note this doesn't include Time, Level and Message which are available on
- // the Entry. Consult `godoc` on information about those fields or read the
- // source of the official loggers.
- serialized, err := json.Marshal(entry.Data)
- if err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
- }
- return append(serialized, '\n'), nil
-}
-```
-
-#### Logger as an `io.Writer`
-
-Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
-
-```go
-w := logger.Writer()
-defer w.Close()
-
-srv := http.Server{
- // create a stdlib log.Logger that writes to
- // logrus.Logger.
- ErrorLog: log.New(w, "", 0),
-}
-```
-
-Each line written to that writer will be printed the usual way, using formatters
-and hooks. The level for those entries is `info`.
-
-This means that we can override the standard library logger easily:
-
-```go
-logger := logrus.New()
-logger.Formatter = &logrus.JSONFormatter{}
-
-// Use logrus for standard log output
-// Note that `log` here references stdlib's log
-// Not logrus imported under the name `log`.
-log.SetOutput(logger.Writer())
-```
-
-#### Rotation
-
-Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotate(8)`) that can compress and delete old log
-entries. It should not be a feature of the application-level logger.
-
-#### Tools
-
-| Tool | Description |
-| ---- | ----------- |
-|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
-|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
-
-#### Testing
-
-Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
-
-* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
-* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
-
-```go
-import(
- "github.com/sirupsen/logrus"
- "github.com/sirupsen/logrus/hooks/test"
- "github.com/stretchr/testify/assert"
- "testing"
-)
-
-func TestSomething(t*testing.T){
- logger, hook := test.NewNullLogger()
- logger.Error("Helloerror")
-
- assert.Equal(t, 1, len(hook.Entries))
- assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
- assert.Equal(t, "Helloerror", hook.LastEntry().Message)
-
- hook.Reset()
- assert.Nil(t, hook.LastEntry())
-}
-```
-
-#### Fatal handlers
-
-Logrus can register one or more functions that will be called when any `fatal`
-level message is logged. The registered handlers will be executed before
-logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
-to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
-
-```
-...
-handler := func() {
- // gracefully shutdown something...
-}
-logrus.RegisterExitHandler(handler)
-...
-```
-
-#### Thread safety
-
-By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs.
-If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
-
-Situation when locking is not needed includes:
-
-* You have no hooks registered, or hooks calling is already thread-safe.
-
-* Writing to logger.Out is already thread-safe, for example:
-
- 1) logger.Out is protected by locks.
-
- 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
-
- (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
deleted file mode 100644
index 8af90637..00000000
--- a/vendor/github.com/Sirupsen/logrus/alt_exit.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package logrus
-
-// The following code was sourced and modified from the
-// https://github.com/tebeka/atexit package governed by the following license:
-//
-// Copyright (c) 2012 Miki Tebeka .
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy of
-// this software and associated documentation files (the "Software"), to deal in
-// the Software without restriction, including without limitation the rights to
-// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-// the Software, and to permit persons to whom the Software is furnished to do so,
-// subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in all
-// copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-import (
- "fmt"
- "os"
-)
-
-var handlers = []func(){}
-
-func runHandler(handler func()) {
- defer func() {
- if err := recover(); err != nil {
- fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
- }
- }()
-
- handler()
-}
-
-func runHandlers() {
- for _, handler := range handlers {
- runHandler(handler)
- }
-}
-
-// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
-func Exit(code int) {
- runHandlers()
- os.Exit(code)
-}
-
-// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
-// all handlers. The handlers will also be invoked when any Fatal log entry is
-// made.
-//
-// This method is useful when a caller wishes to use logrus to log a fatal
-// message but also needs to gracefully shutdown. An example usecase could be
-// closing database connections, or sending a alert that the application is
-// closing.
-func RegisterExitHandler(handler func()) {
- handlers = append(handlers, handler)
-}
diff --git a/vendor/github.com/Sirupsen/logrus/appveyor.yml b/vendor/github.com/Sirupsen/logrus/appveyor.yml
deleted file mode 100644
index 96c2ce15..00000000
--- a/vendor/github.com/Sirupsen/logrus/appveyor.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-version: "{build}"
-platform: x64
-clone_folder: c:\gopath\src\github.com\sirupsen\logrus
-environment:
- GOPATH: c:\gopath
-branches:
- only:
- - master
-install:
- - set PATH=%GOPATH%\bin;c:\go\bin;%PATH%
- - go version
-build_script:
- - go get -t
- - go test
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
deleted file mode 100644
index da67aba0..00000000
--- a/vendor/github.com/Sirupsen/logrus/doc.go
+++ /dev/null
@@ -1,26 +0,0 @@
-/*
-Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
-
-
-The simplest way to use Logrus is simply the package-level exported logger:
-
- package main
-
- import (
- log "github.com/sirupsen/logrus"
- )
-
- func main() {
- log.WithFields(log.Fields{
- "animal": "walrus",
- "number": 1,
- "size": 10,
- }).Info("A walrus appears")
- }
-
-Output:
- time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
-
-For a full guide visit https://github.com/sirupsen/logrus
-*/
-package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
deleted file mode 100644
index cc85d3aa..00000000
--- a/vendor/github.com/Sirupsen/logrus/entry.go
+++ /dev/null
@@ -1,408 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "fmt"
- "os"
- "reflect"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-var (
- bufferPool *sync.Pool
-
- // qualified package name, cached at first use
- logrusPackage string
-
- // Positions in the call stack when tracing to report the calling method
- minimumCallerDepth int
-
- // Used for caller information initialisation
- callerInitOnce sync.Once
-)
-
-const (
- maximumCallerDepth int = 25
- knownLogrusFrames int = 4
-)
-
-func init() {
- bufferPool = &sync.Pool{
- New: func() interface{} {
- return new(bytes.Buffer)
- },
- }
-
- // start at the bottom of the stack before the package-name cache is primed
- minimumCallerDepth = 1
-}
-
-// Defines the key when adding errors using WithError.
-var ErrorKey = "error"
-
-// An entry is the final or intermediate Logrus logging entry. It contains all
-// the fields passed with WithField{,s}. It's finally logged when Trace, Debug,
-// Info, Warn, Error, Fatal or Panic is called on it. These objects can be
-// reused and passed around as much as you wish to avoid field duplication.
-type Entry struct {
- Logger *Logger
-
- // Contains all the fields set by the user.
- Data Fields
-
- // Time at which the log entry was created
- Time time.Time
-
- // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic
- // This field will be set on entry firing and the value will be equal to the one in Logger struct field.
- Level Level
-
- // Calling method, with package name
- Caller *runtime.Frame
-
- // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic
- Message string
-
- // When formatter is called in entry.log(), a Buffer may be set to entry
- Buffer *bytes.Buffer
-
- // err may contain a field formatting error
- err string
-}
-
-func NewEntry(logger *Logger) *Entry {
- return &Entry{
- Logger: logger,
- // Default is three fields, plus one optional. Give a little extra room.
- Data: make(Fields, 6),
- }
-}
-
-// Returns the string representation from the reader and ultimately the
-// formatter.
-func (entry *Entry) String() (string, error) {
- serialized, err := entry.Logger.Formatter.Format(entry)
- if err != nil {
- return "", err
- }
- str := string(serialized)
- return str, nil
-}
-
-// Add an error as single field (using the key defined in ErrorKey) to the Entry.
-func (entry *Entry) WithError(err error) *Entry {
- return entry.WithField(ErrorKey, err)
-}
-
-// Add a single field to the Entry.
-func (entry *Entry) WithField(key string, value interface{}) *Entry {
- return entry.WithFields(Fields{key: value})
-}
-
-// Add a map of fields to the Entry.
-func (entry *Entry) WithFields(fields Fields) *Entry {
- data := make(Fields, len(entry.Data)+len(fields))
- for k, v := range entry.Data {
- data[k] = v
- }
- var field_err string
- for k, v := range fields {
- if t := reflect.TypeOf(v); t != nil && t.Kind() == reflect.Func {
- field_err = fmt.Sprintf("can not add field %q", k)
- if entry.err != "" {
- field_err = entry.err + ", " + field_err
- }
- } else {
- data[k] = v
- }
- }
- return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: field_err}
-}
-
-// Overrides the time of the Entry.
-func (entry *Entry) WithTime(t time.Time) *Entry {
- return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t}
-}
-
-// getPackageName reduces a fully qualified function name to the package name
-// There really ought to be to be a better way...
-func getPackageName(f string) string {
- for {
- lastPeriod := strings.LastIndex(f, ".")
- lastSlash := strings.LastIndex(f, "/")
- if lastPeriod > lastSlash {
- f = f[:lastPeriod]
- } else {
- break
- }
- }
-
- return f
-}
-
-// getCaller retrieves the name of the first non-logrus calling function
-func getCaller() *runtime.Frame {
- // Restrict the lookback frames to avoid runaway lookups
- pcs := make([]uintptr, maximumCallerDepth)
- depth := runtime.Callers(minimumCallerDepth, pcs)
- frames := runtime.CallersFrames(pcs[:depth])
-
- // cache this package's fully-qualified name
- callerInitOnce.Do(func() {
- logrusPackage = getPackageName(runtime.FuncForPC(pcs[0]).Name())
-
- // now that we have the cache, we can skip a minimum count of known-logrus functions
- // XXX this is dubious, the number of frames may vary store an entry in a logger interface
- minimumCallerDepth = knownLogrusFrames
- })
-
- for f, again := frames.Next(); again; f, again = frames.Next() {
- pkg := getPackageName(f.Function)
-
- // If the caller isn't part of this package, we're done
- if pkg != logrusPackage {
- return &f
- }
- }
-
- // if we got here, we failed to find the caller's context
- return nil
-}
-
-func (entry Entry) HasCaller() (has bool) {
- return entry.Logger != nil &&
- entry.Logger.ReportCaller &&
- entry.Caller != nil
-}
-
-// This function is not declared with a pointer value because otherwise
-// race conditions will occur when using multiple goroutines
-func (entry Entry) log(level Level, msg string) {
- var buffer *bytes.Buffer
-
- // Default to now, but allow users to override if they want.
- //
- // We don't have to worry about polluting future calls to Entry#log()
- // with this assignment because this function is declared with a
- // non-pointer receiver.
- if entry.Time.IsZero() {
- entry.Time = time.Now()
- }
-
- entry.Level = level
- entry.Message = msg
- if entry.Logger.ReportCaller {
- entry.Caller = getCaller()
- }
-
- entry.fireHooks()
-
- buffer = bufferPool.Get().(*bytes.Buffer)
- buffer.Reset()
- defer bufferPool.Put(buffer)
- entry.Buffer = buffer
-
- entry.write()
-
- entry.Buffer = nil
-
- // To avoid Entry#log() returning a value that only would make sense for
- // panic() to use in Entry#Panic(), we avoid the allocation by checking
- // directly here.
- if level <= PanicLevel {
- panic(&entry)
- }
-}
-
-func (entry *Entry) fireHooks() {
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
- err := entry.Logger.Hooks.Fire(entry.Level, entry)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
- }
-}
-
-func (entry *Entry) write() {
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
- serialized, err := entry.Logger.Formatter.Format(entry)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
- } else {
- _, err = entry.Logger.Out.Write(serialized)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
- }
- }
-}
-
-func (entry *Entry) Trace(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(TraceLevel) {
- entry.log(TraceLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Debug(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(DebugLevel) {
- entry.log(DebugLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Print(args ...interface{}) {
- entry.Info(args...)
-}
-
-func (entry *Entry) Info(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(InfoLevel) {
- entry.log(InfoLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Warn(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(WarnLevel) {
- entry.log(WarnLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Warning(args ...interface{}) {
- entry.Warn(args...)
-}
-
-func (entry *Entry) Error(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(ErrorLevel) {
- entry.log(ErrorLevel, fmt.Sprint(args...))
- }
-}
-
-func (entry *Entry) Fatal(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(FatalLevel) {
- entry.log(FatalLevel, fmt.Sprint(args...))
- }
- entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panic(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(PanicLevel) {
- entry.log(PanicLevel, fmt.Sprint(args...))
- }
- panic(fmt.Sprint(args...))
-}
-
-// Entry Printf family functions
-
-func (entry *Entry) Tracef(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(TraceLevel) {
- entry.Trace(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Debugf(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(DebugLevel) {
- entry.Debug(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Infof(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(InfoLevel) {
- entry.Info(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Printf(format string, args ...interface{}) {
- entry.Infof(format, args...)
-}
-
-func (entry *Entry) Warnf(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(WarnLevel) {
- entry.Warn(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Warningf(format string, args ...interface{}) {
- entry.Warnf(format, args...)
-}
-
-func (entry *Entry) Errorf(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(ErrorLevel) {
- entry.Error(fmt.Sprintf(format, args...))
- }
-}
-
-func (entry *Entry) Fatalf(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(FatalLevel) {
- entry.Fatal(fmt.Sprintf(format, args...))
- }
- entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panicf(format string, args ...interface{}) {
- if entry.Logger.IsLevelEnabled(PanicLevel) {
- entry.Panic(fmt.Sprintf(format, args...))
- }
-}
-
-// Entry Println family functions
-
-func (entry *Entry) Traceln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(TraceLevel) {
- entry.Trace(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Debugln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(DebugLevel) {
- entry.Debug(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Infoln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(InfoLevel) {
- entry.Info(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Println(args ...interface{}) {
- entry.Infoln(args...)
-}
-
-func (entry *Entry) Warnln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(WarnLevel) {
- entry.Warn(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Warningln(args ...interface{}) {
- entry.Warnln(args...)
-}
-
-func (entry *Entry) Errorln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(ErrorLevel) {
- entry.Error(entry.sprintlnn(args...))
- }
-}
-
-func (entry *Entry) Fatalln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(FatalLevel) {
- entry.Fatal(entry.sprintlnn(args...))
- }
- entry.Logger.Exit(1)
-}
-
-func (entry *Entry) Panicln(args ...interface{}) {
- if entry.Logger.IsLevelEnabled(PanicLevel) {
- entry.Panic(entry.sprintlnn(args...))
- }
-}
-
-// Sprintlnn => Sprint no newline. This is to get the behavior of how
-// fmt.Sprintln where spaces are always added between operands, regardless of
-// their type. Instead of vendoring the Sprintln implementation to spare a
-// string allocation, we do the simplest thing.
-func (entry *Entry) sprintlnn(args ...interface{}) string {
- msg := fmt.Sprintln(args...)
- return msg[:len(msg)-1]
-}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
deleted file mode 100644
index 7342613c..00000000
--- a/vendor/github.com/Sirupsen/logrus/exported.go
+++ /dev/null
@@ -1,219 +0,0 @@
-package logrus
-
-import (
- "io"
- "time"
-)
-
-var (
- // std is the name of the standard logger in stdlib `log`
- std = New()
-)
-
-func StandardLogger() *Logger {
- return std
-}
-
-// SetOutput sets the standard logger output.
-func SetOutput(out io.Writer) {
- std.SetOutput(out)
-}
-
-// SetFormatter sets the standard logger formatter.
-func SetFormatter(formatter Formatter) {
- std.SetFormatter(formatter)
-}
-
-// SetReportCaller sets whether the standard logger will include the calling
-// method as a field.
-func SetReportCaller(include bool) {
- std.SetReportCaller(include)
-}
-
-// SetLevel sets the standard logger level.
-func SetLevel(level Level) {
- std.SetLevel(level)
-}
-
-// GetLevel returns the standard logger level.
-func GetLevel() Level {
- return std.GetLevel()
-}
-
-// IsLevelEnabled checks if the log level of the standard logger is greater than the level param
-func IsLevelEnabled(level Level) bool {
- return std.IsLevelEnabled(level)
-}
-
-// AddHook adds a hook to the standard logger hooks.
-func AddHook(hook Hook) {
- std.AddHook(hook)
-}
-
-// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
-func WithError(err error) *Entry {
- return std.WithField(ErrorKey, err)
-}
-
-// WithField creates an entry from the standard logger and adds a field to
-// it. If you want multiple fields, use `WithFields`.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithField(key string, value interface{}) *Entry {
- return std.WithField(key, value)
-}
-
-// WithFields creates an entry from the standard logger and adds multiple
-// fields to it. This is simply a helper for `WithField`, invoking it
-// once for each field.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithFields(fields Fields) *Entry {
- return std.WithFields(fields)
-}
-
-// WithTime creats an entry from the standard logger and overrides the time of
-// logs generated with it.
-//
-// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
-// or Panic on the Entry it returns.
-func WithTime(t time.Time) *Entry {
- return std.WithTime(t)
-}
-
-// Trace logs a message at level Trace on the standard logger.
-func Trace(args ...interface{}) {
- std.Trace(args...)
-}
-
-// Debug logs a message at level Debug on the standard logger.
-func Debug(args ...interface{}) {
- std.Debug(args...)
-}
-
-// Print logs a message at level Info on the standard logger.
-func Print(args ...interface{}) {
- std.Print(args...)
-}
-
-// Info logs a message at level Info on the standard logger.
-func Info(args ...interface{}) {
- std.Info(args...)
-}
-
-// Warn logs a message at level Warn on the standard logger.
-func Warn(args ...interface{}) {
- std.Warn(args...)
-}
-
-// Warning logs a message at level Warn on the standard logger.
-func Warning(args ...interface{}) {
- std.Warning(args...)
-}
-
-// Error logs a message at level Error on the standard logger.
-func Error(args ...interface{}) {
- std.Error(args...)
-}
-
-// Panic logs a message at level Panic on the standard logger.
-func Panic(args ...interface{}) {
- std.Panic(args...)
-}
-
-// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
-func Fatal(args ...interface{}) {
- std.Fatal(args...)
-}
-
-// Tracef logs a message at level Trace on the standard logger.
-func Tracef(format string, args ...interface{}) {
- std.Tracef(format, args...)
-}
-
-// Debugf logs a message at level Debug on the standard logger.
-func Debugf(format string, args ...interface{}) {
- std.Debugf(format, args...)
-}
-
-// Printf logs a message at level Info on the standard logger.
-func Printf(format string, args ...interface{}) {
- std.Printf(format, args...)
-}
-
-// Infof logs a message at level Info on the standard logger.
-func Infof(format string, args ...interface{}) {
- std.Infof(format, args...)
-}
-
-// Warnf logs a message at level Warn on the standard logger.
-func Warnf(format string, args ...interface{}) {
- std.Warnf(format, args...)
-}
-
-// Warningf logs a message at level Warn on the standard logger.
-func Warningf(format string, args ...interface{}) {
- std.Warningf(format, args...)
-}
-
-// Errorf logs a message at level Error on the standard logger.
-func Errorf(format string, args ...interface{}) {
- std.Errorf(format, args...)
-}
-
-// Panicf logs a message at level Panic on the standard logger.
-func Panicf(format string, args ...interface{}) {
- std.Panicf(format, args...)
-}
-
-// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
-func Fatalf(format string, args ...interface{}) {
- std.Fatalf(format, args...)
-}
-
-// Traceln logs a message at level Trace on the standard logger.
-func Traceln(args ...interface{}) {
- std.Traceln(args...)
-}
-
-// Debugln logs a message at level Debug on the standard logger.
-func Debugln(args ...interface{}) {
- std.Debugln(args...)
-}
-
-// Println logs a message at level Info on the standard logger.
-func Println(args ...interface{}) {
- std.Println(args...)
-}
-
-// Infoln logs a message at level Info on the standard logger.
-func Infoln(args ...interface{}) {
- std.Infoln(args...)
-}
-
-// Warnln logs a message at level Warn on the standard logger.
-func Warnln(args ...interface{}) {
- std.Warnln(args...)
-}
-
-// Warningln logs a message at level Warn on the standard logger.
-func Warningln(args ...interface{}) {
- std.Warningln(args...)
-}
-
-// Errorln logs a message at level Error on the standard logger.
-func Errorln(args ...interface{}) {
- std.Errorln(args...)
-}
-
-// Panicln logs a message at level Panic on the standard logger.
-func Panicln(args ...interface{}) {
- std.Panicln(args...)
-}
-
-// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1.
-func Fatalln(args ...interface{}) {
- std.Fatalln(args...)
-}
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
deleted file mode 100644
index 40888377..00000000
--- a/vendor/github.com/Sirupsen/logrus/formatter.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package logrus
-
-import "time"
-
-// Default key names for the default fields
-const (
- defaultTimestampFormat = time.RFC3339
- FieldKeyMsg = "msg"
- FieldKeyLevel = "level"
- FieldKeyTime = "time"
- FieldKeyLogrusError = "logrus_error"
- FieldKeyFunc = "func"
- FieldKeyFile = "file"
-)
-
-// The Formatter interface is used to implement a custom Formatter. It takes an
-// `Entry`. It exposes all the fields, including the default ones:
-//
-// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
-// * `entry.Data["time"]`. The timestamp.
-// * `entry.Data["level"]. The level the entry was logged at.
-//
-// Any additional fields added with `WithField` or `WithFields` are also in
-// `entry.Data`. Format is expected to return an array of bytes which are then
-// logged to `logger.Out`.
-type Formatter interface {
- Format(*Entry) ([]byte, error)
-}
-
-// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when
-// dumping it. If this code wasn't there doing:
-//
-// logrus.WithField("level", 1).Info("hello")
-//
-// Would just silently drop the user provided level. Instead with this code
-// it'll logged as:
-//
-// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
-//
-// It's not exported because it's still using Data in an opinionated way. It's to
-// avoid code duplication between the two default formatters.
-func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) {
- timeKey := fieldMap.resolve(FieldKeyTime)
- if t, ok := data[timeKey]; ok {
- data["fields."+timeKey] = t
- delete(data, timeKey)
- }
-
- msgKey := fieldMap.resolve(FieldKeyMsg)
- if m, ok := data[msgKey]; ok {
- data["fields."+msgKey] = m
- delete(data, msgKey)
- }
-
- levelKey := fieldMap.resolve(FieldKeyLevel)
- if l, ok := data[levelKey]; ok {
- data["fields."+levelKey] = l
- delete(data, levelKey)
- }
-
- logrusErrKey := fieldMap.resolve(FieldKeyLogrusError)
- if l, ok := data[logrusErrKey]; ok {
- data["fields."+logrusErrKey] = l
- delete(data, logrusErrKey)
- }
-
- // If reportCaller is not set, 'func' will not conflict.
- if reportCaller {
- funcKey := fieldMap.resolve(FieldKeyFunc)
- if l, ok := data[funcKey]; ok {
- data["fields."+funcKey] = l
- }
- fileKey := fieldMap.resolve(FieldKeyFile)
- if l, ok := data[fileKey]; ok {
- data["fields."+fileKey] = l
- }
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/go.mod b/vendor/github.com/Sirupsen/logrus/go.mod
deleted file mode 100644
index 94574cc6..00000000
--- a/vendor/github.com/Sirupsen/logrus/go.mod
+++ /dev/null
@@ -1,11 +0,0 @@
-module github.com/sirupsen/logrus
-
-require (
- github.com/davecgh/go-spew v1.1.1 // indirect
- github.com/konsorten/go-windows-terminal-sequences v1.0.1
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/stretchr/objx v0.1.1 // indirect
- github.com/stretchr/testify v1.2.2
- golang.org/x/crypto v0.0.0-20180904163835-0709b304e793
- golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33
-)
diff --git a/vendor/github.com/Sirupsen/logrus/go.sum b/vendor/github.com/Sirupsen/logrus/go.sum
deleted file mode 100644
index 133d34ae..00000000
--- a/vendor/github.com/Sirupsen/logrus/go.sum
+++ /dev/null
@@ -1,15 +0,0 @@
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs=
-github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
-github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w=
-github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793 h1:u+LnwYTOOW7Ukr/fppxEb1Nwz0AtPflrblfvUudpo+I=
-golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8=
-golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
deleted file mode 100644
index 3f151cdc..00000000
--- a/vendor/github.com/Sirupsen/logrus/hooks.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package logrus
-
-// A hook to be fired when logging on the logging levels returned from
-// `Levels()` on your implementation of the interface. Note that this is not
-// fired in a goroutine or a channel with workers, you should handle such
-// functionality yourself if your call is non-blocking and you don't wish for
-// the logging calls for levels returned from `Levels()` to block.
-type Hook interface {
- Levels() []Level
- Fire(*Entry) error
-}
-
-// Internal type for storing the hooks on a logger instance.
-type LevelHooks map[Level][]Hook
-
-// Add a hook to an instance of logger. This is called with
-// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks LevelHooks) Add(hook Hook) {
- for _, level := range hook.Levels() {
- hooks[level] = append(hooks[level], hook)
- }
-}
-
-// Fire all the hooks for the passed level. Used by `entry.log` to fire
-// appropriate hooks for a log entry.
-func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
- for _, hook := range hooks[level] {
- if err := hook.Fire(entry); err != nil {
- return err
- }
- }
-
- return nil
-}
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
deleted file mode 100644
index 26057535..00000000
--- a/vendor/github.com/Sirupsen/logrus/json_formatter.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
-)
-
-type fieldKey string
-
-// FieldMap allows customization of the key names for default fields.
-type FieldMap map[fieldKey]string
-
-func (f FieldMap) resolve(key fieldKey) string {
- if k, ok := f[key]; ok {
- return k
- }
-
- return string(key)
-}
-
-// JSONFormatter formats logs into parsable json
-type JSONFormatter struct {
- // TimestampFormat sets the format used for marshaling timestamps.
- TimestampFormat string
-
- // DisableTimestamp allows disabling automatic timestamps in output
- DisableTimestamp bool
-
- // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key.
- DataKey string
-
- // FieldMap allows users to customize the names of keys for default fields.
- // As an example:
- // formatter := &JSONFormatter{
- // FieldMap: FieldMap{
- // FieldKeyTime: "@timestamp",
- // FieldKeyLevel: "@level",
- // FieldKeyMsg: "@message",
- // FieldKeyFunc: "@caller",
- // },
- // }
- FieldMap FieldMap
-
- // PrettyPrint will indent all json logs
- PrettyPrint bool
-}
-
-// Format renders a single log entry
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
- data := make(Fields, len(entry.Data)+4)
- for k, v := range entry.Data {
- switch v := v.(type) {
- case error:
- // Otherwise errors are ignored by `encoding/json`
- // https://github.com/sirupsen/logrus/issues/137
- data[k] = v.Error()
- default:
- data[k] = v
- }
- }
-
- if f.DataKey != "" {
- newData := make(Fields, 4)
- newData[f.DataKey] = data
- data = newData
- }
-
- prefixFieldClashes(data, f.FieldMap, entry.HasCaller())
-
- timestampFormat := f.TimestampFormat
- if timestampFormat == "" {
- timestampFormat = defaultTimestampFormat
- }
-
- if entry.err != "" {
- data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err
- }
- if !f.DisableTimestamp {
- data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
- }
- data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
- data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
- if entry.HasCaller() {
- data[f.FieldMap.resolve(FieldKeyFunc)] = entry.Caller.Function
- data[f.FieldMap.resolve(FieldKeyFile)] = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
- }
-
- var b *bytes.Buffer
- if entry.Buffer != nil {
- b = entry.Buffer
- } else {
- b = &bytes.Buffer{}
- }
-
- encoder := json.NewEncoder(b)
- if f.PrettyPrint {
- encoder.SetIndent("", " ")
- }
- if err := encoder.Encode(data); err != nil {
- return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
- }
-
- return b.Bytes(), nil
-}
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
deleted file mode 100644
index 5ceca0ea..00000000
--- a/vendor/github.com/Sirupsen/logrus/logger.go
+++ /dev/null
@@ -1,415 +0,0 @@
-package logrus
-
-import (
- "io"
- "os"
- "sync"
- "sync/atomic"
- "time"
-)
-
-type Logger struct {
- // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
- // file, or leave it default which is `os.Stderr`. You can also set this to
- // something more adventurous, such as logging to Kafka.
- Out io.Writer
- // Hooks for the logger instance. These allow firing events based on logging
- // levels and log entries. For example, to send errors to an error tracking
- // service, log to StatsD or dump the core on fatal errors.
- Hooks LevelHooks
- // All log entries pass through the formatter before logged to Out. The
- // included formatters are `TextFormatter` and `JSONFormatter` for which
- // TextFormatter is the default. In development (when a TTY is attached) it
- // logs with colors, but to a file it wouldn't. You can easily implement your
- // own that implements the `Formatter` interface, see the `README` or included
- // formatters for examples.
- Formatter Formatter
-
- // Flag for whether to log caller info (off by default)
- ReportCaller bool
-
- // The logging level the logger should log at. This is typically (and defaults
- // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
- // logged.
- Level Level
- // Used to sync writing to the log. Locking is enabled by Default
- mu MutexWrap
- // Reusable empty entry
- entryPool sync.Pool
- // Function to exit the application, defaults to `os.Exit()`
- ExitFunc exitFunc
-}
-
-type exitFunc func(int)
-
-type MutexWrap struct {
- lock sync.Mutex
- disabled bool
-}
-
-func (mw *MutexWrap) Lock() {
- if !mw.disabled {
- mw.lock.Lock()
- }
-}
-
-func (mw *MutexWrap) Unlock() {
- if !mw.disabled {
- mw.lock.Unlock()
- }
-}
-
-func (mw *MutexWrap) Disable() {
- mw.disabled = true
-}
-
-// Creates a new logger. Configuration should be set by changing `Formatter`,
-// `Out` and `Hooks` directly on the default logger instance. You can also just
-// instantiate your own:
-//
-// var log = &Logger{
-// Out: os.Stderr,
-// Formatter: new(JSONFormatter),
-// Hooks: make(LevelHooks),
-// Level: logrus.DebugLevel,
-// }
-//
-// It's recommended to make this a global instance called `log`.
-func New() *Logger {
- return &Logger{
- Out: os.Stderr,
- Formatter: new(TextFormatter),
- Hooks: make(LevelHooks),
- Level: InfoLevel,
- ExitFunc: os.Exit,
- ReportCaller: false,
- }
-}
-
-func (logger *Logger) newEntry() *Entry {
- entry, ok := logger.entryPool.Get().(*Entry)
- if ok {
- return entry
- }
- return NewEntry(logger)
-}
-
-func (logger *Logger) releaseEntry(entry *Entry) {
- entry.Data = map[string]interface{}{}
- logger.entryPool.Put(entry)
-}
-
-// Adds a field to the log entry, note that it doesn't log until you call
-// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry.
-// If you want multiple fields, use `WithFields`.
-func (logger *Logger) WithField(key string, value interface{}) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithField(key, value)
-}
-
-// Adds a struct of fields to the log entry. All it does is call `WithField` for
-// each `Field`.
-func (logger *Logger) WithFields(fields Fields) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithFields(fields)
-}
-
-// Add an error as single field to the log entry. All it does is call
-// `WithError` for the given `error`.
-func (logger *Logger) WithError(err error) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithError(err)
-}
-
-// Overrides the time of the log entry.
-func (logger *Logger) WithTime(t time.Time) *Entry {
- entry := logger.newEntry()
- defer logger.releaseEntry(entry)
- return entry.WithTime(t)
-}
-
-func (logger *Logger) Tracef(format string, args ...interface{}) {
- if logger.IsLevelEnabled(TraceLevel) {
- entry := logger.newEntry()
- entry.Tracef(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Debugf(format string, args ...interface{}) {
- if logger.IsLevelEnabled(DebugLevel) {
- entry := logger.newEntry()
- entry.Debugf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Infof(format string, args ...interface{}) {
- if logger.IsLevelEnabled(InfoLevel) {
- entry := logger.newEntry()
- entry.Infof(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Printf(format string, args ...interface{}) {
- entry := logger.newEntry()
- entry.Printf(format, args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnf(format string, args ...interface{}) {
- if logger.IsLevelEnabled(WarnLevel) {
- entry := logger.newEntry()
- entry.Warnf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Warningf(format string, args ...interface{}) {
- if logger.IsLevelEnabled(WarnLevel) {
- entry := logger.newEntry()
- entry.Warnf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Errorf(format string, args ...interface{}) {
- if logger.IsLevelEnabled(ErrorLevel) {
- entry := logger.newEntry()
- entry.Errorf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Fatalf(format string, args ...interface{}) {
- if logger.IsLevelEnabled(FatalLevel) {
- entry := logger.newEntry()
- entry.Fatalf(format, args...)
- logger.releaseEntry(entry)
- }
- logger.Exit(1)
-}
-
-func (logger *Logger) Panicf(format string, args ...interface{}) {
- if logger.IsLevelEnabled(PanicLevel) {
- entry := logger.newEntry()
- entry.Panicf(format, args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Trace(args ...interface{}) {
- if logger.IsLevelEnabled(TraceLevel) {
- entry := logger.newEntry()
- entry.Trace(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Debug(args ...interface{}) {
- if logger.IsLevelEnabled(DebugLevel) {
- entry := logger.newEntry()
- entry.Debug(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Info(args ...interface{}) {
- if logger.IsLevelEnabled(InfoLevel) {
- entry := logger.newEntry()
- entry.Info(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Print(args ...interface{}) {
- entry := logger.newEntry()
- entry.Info(args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warn(args ...interface{}) {
- if logger.IsLevelEnabled(WarnLevel) {
- entry := logger.newEntry()
- entry.Warn(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Warning(args ...interface{}) {
- if logger.IsLevelEnabled(WarnLevel) {
- entry := logger.newEntry()
- entry.Warn(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Error(args ...interface{}) {
- if logger.IsLevelEnabled(ErrorLevel) {
- entry := logger.newEntry()
- entry.Error(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Fatal(args ...interface{}) {
- if logger.IsLevelEnabled(FatalLevel) {
- entry := logger.newEntry()
- entry.Fatal(args...)
- logger.releaseEntry(entry)
- }
- logger.Exit(1)
-}
-
-func (logger *Logger) Panic(args ...interface{}) {
- if logger.IsLevelEnabled(PanicLevel) {
- entry := logger.newEntry()
- entry.Panic(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Traceln(args ...interface{}) {
- if logger.IsLevelEnabled(TraceLevel) {
- entry := logger.newEntry()
- entry.Traceln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Debugln(args ...interface{}) {
- if logger.IsLevelEnabled(DebugLevel) {
- entry := logger.newEntry()
- entry.Debugln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Infoln(args ...interface{}) {
- if logger.IsLevelEnabled(InfoLevel) {
- entry := logger.newEntry()
- entry.Infoln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Println(args ...interface{}) {
- entry := logger.newEntry()
- entry.Println(args...)
- logger.releaseEntry(entry)
-}
-
-func (logger *Logger) Warnln(args ...interface{}) {
- if logger.IsLevelEnabled(WarnLevel) {
- entry := logger.newEntry()
- entry.Warnln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Warningln(args ...interface{}) {
- if logger.IsLevelEnabled(WarnLevel) {
- entry := logger.newEntry()
- entry.Warnln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Errorln(args ...interface{}) {
- if logger.IsLevelEnabled(ErrorLevel) {
- entry := logger.newEntry()
- entry.Errorln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Fatalln(args ...interface{}) {
- if logger.IsLevelEnabled(FatalLevel) {
- entry := logger.newEntry()
- entry.Fatalln(args...)
- logger.releaseEntry(entry)
- }
- logger.Exit(1)
-}
-
-func (logger *Logger) Panicln(args ...interface{}) {
- if logger.IsLevelEnabled(PanicLevel) {
- entry := logger.newEntry()
- entry.Panicln(args...)
- logger.releaseEntry(entry)
- }
-}
-
-func (logger *Logger) Exit(code int) {
- runHandlers()
- if logger.ExitFunc == nil {
- logger.ExitFunc = os.Exit
- }
- logger.ExitFunc(code)
-}
-
-//When file is opened with appending mode, it's safe to
-//write concurrently to a file (within 4k message on Linux).
-//In these cases user can choose to disable the lock.
-func (logger *Logger) SetNoLock() {
- logger.mu.Disable()
-}
-
-func (logger *Logger) level() Level {
- return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
-}
-
-// SetLevel sets the logger level.
-func (logger *Logger) SetLevel(level Level) {
- atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
-}
-
-// GetLevel returns the logger level.
-func (logger *Logger) GetLevel() Level {
- return logger.level()
-}
-
-// AddHook adds a hook to the logger hooks.
-func (logger *Logger) AddHook(hook Hook) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Hooks.Add(hook)
-}
-
-// IsLevelEnabled checks if the log level of the logger is greater than the level param
-func (logger *Logger) IsLevelEnabled(level Level) bool {
- return logger.level() >= level
-}
-
-// SetFormatter sets the logger formatter.
-func (logger *Logger) SetFormatter(formatter Formatter) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Formatter = formatter
-}
-
-// SetOutput sets the logger output.
-func (logger *Logger) SetOutput(output io.Writer) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.Out = output
-}
-
-func (logger *Logger) SetReportCaller(reportCaller bool) {
- logger.mu.Lock()
- defer logger.mu.Unlock()
- logger.ReportCaller = reportCaller
-}
-
-// ReplaceHooks replaces the logger hooks and returns the old ones
-func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks {
- logger.mu.Lock()
- oldHooks := logger.Hooks
- logger.Hooks = hooks
- logger.mu.Unlock()
- return oldHooks
-}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
deleted file mode 100644
index 4ef45186..00000000
--- a/vendor/github.com/Sirupsen/logrus/logrus.go
+++ /dev/null
@@ -1,178 +0,0 @@
-package logrus
-
-import (
- "fmt"
- "log"
- "strings"
-)
-
-// Fields type, used to pass to `WithFields`.
-type Fields map[string]interface{}
-
-// Level type
-type Level uint32
-
-// Convert the Level to a string. E.g. PanicLevel becomes "panic".
-func (level Level) String() string {
- switch level {
- case TraceLevel:
- return "trace"
- case DebugLevel:
- return "debug"
- case InfoLevel:
- return "info"
- case WarnLevel:
- return "warning"
- case ErrorLevel:
- return "error"
- case FatalLevel:
- return "fatal"
- case PanicLevel:
- return "panic"
- }
-
- return "unknown"
-}
-
-// ParseLevel takes a string level and returns the Logrus log level constant.
-func ParseLevel(lvl string) (Level, error) {
- switch strings.ToLower(lvl) {
- case "panic":
- return PanicLevel, nil
- case "fatal":
- return FatalLevel, nil
- case "error":
- return ErrorLevel, nil
- case "warn", "warning":
- return WarnLevel, nil
- case "info":
- return InfoLevel, nil
- case "debug":
- return DebugLevel, nil
- case "trace":
- return TraceLevel, nil
- }
-
- var l Level
- return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
-}
-
-// UnmarshalText implements encoding.TextUnmarshaler.
-func (level *Level) UnmarshalText(text []byte) error {
- l, err := ParseLevel(string(text))
- if err != nil {
- return err
- }
-
- *level = Level(l)
-
- return nil
-}
-
-// A constant exposing all logging levels
-var AllLevels = []Level{
- PanicLevel,
- FatalLevel,
- ErrorLevel,
- WarnLevel,
- InfoLevel,
- DebugLevel,
- TraceLevel,
-}
-
-// These are the different logging levels. You can set the logging level to log
-// on your instance of logger, obtained with `logrus.New()`.
-const (
- // PanicLevel level, highest level of severity. Logs and then calls panic with the
- // message passed to Debug, Info, ...
- PanicLevel Level = iota
- // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the
- // logging level is set to Panic.
- FatalLevel
- // ErrorLevel level. Logs. Used for errors that should definitely be noted.
- // Commonly used for hooks to send errors to an error tracking service.
- ErrorLevel
- // WarnLevel level. Non-critical entries that deserve eyes.
- WarnLevel
- // InfoLevel level. General operational entries about what's going on inside the
- // application.
- InfoLevel
- // DebugLevel level. Usually only enabled when debugging. Very verbose logging.
- DebugLevel
- // TraceLevel level. Designates finer-grained informational events than the Debug.
- TraceLevel
-)
-
-// Won't compile if StdLogger can't be realized by a log.Logger
-var (
- _ StdLogger = &log.Logger{}
- _ StdLogger = &Entry{}
- _ StdLogger = &Logger{}
-)
-
-// StdLogger is what your logrus-enabled library should take, that way
-// it'll accept a stdlib logger and a logrus logger. There's no standard
-// interface, this is the closest we get, unfortunately.
-type StdLogger interface {
- Print(...interface{})
- Printf(string, ...interface{})
- Println(...interface{})
-
- Fatal(...interface{})
- Fatalf(string, ...interface{})
- Fatalln(...interface{})
-
- Panic(...interface{})
- Panicf(string, ...interface{})
- Panicln(...interface{})
-}
-
-// The FieldLogger interface generalizes the Entry and Logger types
-type FieldLogger interface {
- WithField(key string, value interface{}) *Entry
- WithFields(fields Fields) *Entry
- WithError(err error) *Entry
-
- Debugf(format string, args ...interface{})
- Infof(format string, args ...interface{})
- Printf(format string, args ...interface{})
- Warnf(format string, args ...interface{})
- Warningf(format string, args ...interface{})
- Errorf(format string, args ...interface{})
- Fatalf(format string, args ...interface{})
- Panicf(format string, args ...interface{})
-
- Debug(args ...interface{})
- Info(args ...interface{})
- Print(args ...interface{})
- Warn(args ...interface{})
- Warning(args ...interface{})
- Error(args ...interface{})
- Fatal(args ...interface{})
- Panic(args ...interface{})
-
- Debugln(args ...interface{})
- Infoln(args ...interface{})
- Println(args ...interface{})
- Warnln(args ...interface{})
- Warningln(args ...interface{})
- Errorln(args ...interface{})
- Fatalln(args ...interface{})
- Panicln(args ...interface{})
-
- // IsDebugEnabled() bool
- // IsInfoEnabled() bool
- // IsWarnEnabled() bool
- // IsErrorEnabled() bool
- // IsFatalEnabled() bool
- // IsPanicEnabled() bool
-}
-
-// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is
-// here for consistancy. Do not use. Use Logger or Entry instead.
-type Ext1FieldLogger interface {
- FieldLogger
- Tracef(format string, args ...interface{})
- Trace(args ...interface{})
- Traceln(args ...interface{})
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go
deleted file mode 100644
index 2403de98..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_check_appengine.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build appengine
-
-package logrus
-
-import (
- "io"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- return true
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_js.go b/vendor/github.com/Sirupsen/logrus/terminal_check_js.go
deleted file mode 100644
index 0c209750..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_check_js.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build js
-
-package logrus
-
-import (
- "io"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- return false
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
deleted file mode 100644
index cf309d6f..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_check_notappengine.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !appengine,!js,!windows
-
-package logrus
-
-import (
- "io"
- "os"
-
- "golang.org/x/crypto/ssh/terminal"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- switch v := w.(type) {
- case *os.File:
- return terminal.IsTerminal(int(v.Fd()))
- default:
- return false
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go
deleted file mode 100644
index 3b9d2864..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_check_windows.go
+++ /dev/null
@@ -1,20 +0,0 @@
-// +build !appengine,!js,windows
-
-package logrus
-
-import (
- "io"
- "os"
- "syscall"
-)
-
-func checkIfTerminal(w io.Writer) bool {
- switch v := w.(type) {
- case *os.File:
- var mode uint32
- err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode)
- return err == nil
- default:
- return false
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
deleted file mode 100644
index 3dbd2372..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !windows
-
-package logrus
-
-import "io"
-
-func initTerminal(w io.Writer) {
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
deleted file mode 100644
index b4ef5286..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !appengine,!js,windows
-
-package logrus
-
-import (
- "io"
- "os"
- "syscall"
-
- sequences "github.com/konsorten/go-windows-terminal-sequences"
-)
-
-func initTerminal(w io.Writer) {
- switch v := w.(type) {
- case *os.File:
- sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true)
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
deleted file mode 100644
index 49ec92f1..00000000
--- a/vendor/github.com/Sirupsen/logrus/text_formatter.go
+++ /dev/null
@@ -1,269 +0,0 @@
-package logrus
-
-import (
- "bytes"
- "fmt"
- "os"
- "sort"
- "strings"
- "sync"
- "time"
-)
-
-const (
- nocolor = 0
- red = 31
- green = 32
- yellow = 33
- blue = 36
- gray = 37
-)
-
-var (
- baseTimestamp time.Time
- emptyFieldMap FieldMap
-)
-
-func init() {
- baseTimestamp = time.Now()
-}
-
-// TextFormatter formats logs into text
-type TextFormatter struct {
- // Set to true to bypass checking for a TTY before outputting colors.
- ForceColors bool
-
- // Force disabling colors.
- DisableColors bool
-
- // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/
- EnvironmentOverrideColors bool
-
- // Disable timestamp logging. useful when output is redirected to logging
- // system that already adds timestamps.
- DisableTimestamp bool
-
- // Enable logging the full timestamp when a TTY is attached instead of just
- // the time passed since beginning of execution.
- FullTimestamp bool
-
- // TimestampFormat to use for display when a full timestamp is printed
- TimestampFormat string
-
- // The fields are sorted by default for a consistent output. For applications
- // that log extremely frequently and don't use the JSON formatter this may not
- // be desired.
- DisableSorting bool
-
- // The keys sorting function, when uninitialized it uses sort.Strings.
- SortingFunc func([]string)
-
- // Disables the truncation of the level text to 4 characters.
- DisableLevelTruncation bool
-
- // QuoteEmptyFields will wrap empty fields in quotes if true
- QuoteEmptyFields bool
-
- // Whether the logger's out is to a terminal
- isTerminal bool
-
- // FieldMap allows users to customize the names of keys for default fields.
- // As an example:
- // formatter := &TextFormatter{
- // FieldMap: FieldMap{
- // FieldKeyTime: "@timestamp",
- // FieldKeyLevel: "@level",
- // FieldKeyMsg: "@message"}}
- FieldMap FieldMap
-
- terminalInitOnce sync.Once
-}
-
-func (f *TextFormatter) init(entry *Entry) {
- if entry.Logger != nil {
- f.isTerminal = checkIfTerminal(entry.Logger.Out)
-
- if f.isTerminal {
- initTerminal(entry.Logger.Out)
- }
- }
-}
-
-func (f *TextFormatter) isColored() bool {
- isColored := f.ForceColors || f.isTerminal
-
- if f.EnvironmentOverrideColors {
- if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" {
- isColored = true
- } else if ok && force == "0" {
- isColored = false
- } else if os.Getenv("CLICOLOR") == "0" {
- isColored = false
- }
- }
-
- return isColored && !f.DisableColors
-}
-
-// Format renders a single log entry
-func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
- prefixFieldClashes(entry.Data, f.FieldMap, entry.HasCaller())
-
- keys := make([]string, 0, len(entry.Data))
- for k := range entry.Data {
- keys = append(keys, k)
- }
-
- fixedKeys := make([]string, 0, 4+len(entry.Data))
- if !f.DisableTimestamp {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime))
- }
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel))
- if entry.Message != "" {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg))
- }
- if entry.err != "" {
- fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError))
- }
- if entry.HasCaller() {
- fixedKeys = append(fixedKeys,
- f.FieldMap.resolve(FieldKeyFunc), f.FieldMap.resolve(FieldKeyFile))
- }
-
- if !f.DisableSorting {
- if f.SortingFunc == nil {
- sort.Strings(keys)
- fixedKeys = append(fixedKeys, keys...)
- } else {
- if !f.isColored() {
- fixedKeys = append(fixedKeys, keys...)
- f.SortingFunc(fixedKeys)
- } else {
- f.SortingFunc(keys)
- }
- }
- } else {
- fixedKeys = append(fixedKeys, keys...)
- }
-
- var b *bytes.Buffer
- if entry.Buffer != nil {
- b = entry.Buffer
- } else {
- b = &bytes.Buffer{}
- }
-
- f.terminalInitOnce.Do(func() { f.init(entry) })
-
- timestampFormat := f.TimestampFormat
- if timestampFormat == "" {
- timestampFormat = defaultTimestampFormat
- }
- if f.isColored() {
- f.printColored(b, entry, keys, timestampFormat)
- } else {
- for _, key := range fixedKeys {
- var value interface{}
- switch {
- case key == f.FieldMap.resolve(FieldKeyTime):
- value = entry.Time.Format(timestampFormat)
- case key == f.FieldMap.resolve(FieldKeyLevel):
- value = entry.Level.String()
- case key == f.FieldMap.resolve(FieldKeyMsg):
- value = entry.Message
- case key == f.FieldMap.resolve(FieldKeyLogrusError):
- value = entry.err
- case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller():
- value = entry.Caller.Function
- case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller():
- value = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line)
- default:
- value = entry.Data[key]
- }
- f.appendKeyValue(b, key, value)
- }
- }
-
- b.WriteByte('\n')
- return b.Bytes(), nil
-}
-
-func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
- var levelColor int
- switch entry.Level {
- case DebugLevel, TraceLevel:
- levelColor = gray
- case WarnLevel:
- levelColor = yellow
- case ErrorLevel, FatalLevel, PanicLevel:
- levelColor = red
- default:
- levelColor = blue
- }
-
- levelText := strings.ToUpper(entry.Level.String())
- if !f.DisableLevelTruncation {
- levelText = levelText[0:4]
- }
-
- // Remove a single newline if it already exists in the message to keep
- // the behavior of logrus text_formatter the same as the stdlib log package
- entry.Message = strings.TrimSuffix(entry.Message, "\n")
-
- caller := ""
-
- if entry.HasCaller() {
- caller = fmt.Sprintf("%s:%d %s()",
- entry.Caller.File, entry.Caller.Line, entry.Caller.Function)
- }
-
- if f.DisableTimestamp {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message)
- } else if !f.FullTimestamp {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message)
- } else {
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message)
- }
- for _, k := range keys {
- v := entry.Data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
- f.appendValue(b, v)
- }
-}
-
-func (f *TextFormatter) needsQuoting(text string) bool {
- if f.QuoteEmptyFields && len(text) == 0 {
- return true
- }
- for _, ch := range text {
- if !((ch >= 'a' && ch <= 'z') ||
- (ch >= 'A' && ch <= 'Z') ||
- (ch >= '0' && ch <= '9') ||
- ch == '-' || ch == '.' || ch == '_' || ch == '/' || ch == '@' || ch == '^' || ch == '+') {
- return true
- }
- }
- return false
-}
-
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
- if b.Len() > 0 {
- b.WriteByte(' ')
- }
- b.WriteString(key)
- b.WriteByte('=')
- f.appendValue(b, value)
-}
-
-func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
- stringVal, ok := value.(string)
- if !ok {
- stringVal = fmt.Sprint(value)
- }
-
- if !f.needsQuoting(stringVal) {
- b.WriteString(stringVal)
- } else {
- b.WriteString(fmt.Sprintf("%q", stringVal))
- }
-}
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
deleted file mode 100644
index 9e1f7513..00000000
--- a/vendor/github.com/Sirupsen/logrus/writer.go
+++ /dev/null
@@ -1,64 +0,0 @@
-package logrus
-
-import (
- "bufio"
- "io"
- "runtime"
-)
-
-func (logger *Logger) Writer() *io.PipeWriter {
- return logger.WriterLevel(InfoLevel)
-}
-
-func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
- return NewEntry(logger).WriterLevel(level)
-}
-
-func (entry *Entry) Writer() *io.PipeWriter {
- return entry.WriterLevel(InfoLevel)
-}
-
-func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
- reader, writer := io.Pipe()
-
- var printFunc func(args ...interface{})
-
- switch level {
- case TraceLevel:
- printFunc = entry.Trace
- case DebugLevel:
- printFunc = entry.Debug
- case InfoLevel:
- printFunc = entry.Info
- case WarnLevel:
- printFunc = entry.Warn
- case ErrorLevel:
- printFunc = entry.Error
- case FatalLevel:
- printFunc = entry.Fatal
- case PanicLevel:
- printFunc = entry.Panic
- default:
- printFunc = entry.Print
- }
-
- go entry.writerScanner(reader, printFunc)
- runtime.SetFinalizer(writer, writerFinalizer)
-
- return writer
-}
-
-func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
- scanner := bufio.NewScanner(reader)
- for scanner.Scan() {
- printFunc(scanner.Text())
- }
- if err := scanner.Err(); err != nil {
- entry.Errorf("Error while reading from Writer: %s", err)
- }
- reader.Close()
-}
-
-func writerFinalizer(writer *io.PipeWriter) {
- writer.Close()
-}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
new file mode 100644
index 00000000..e068e731
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/AUTHORS
@@ -0,0 +1 @@
+Google Inc.
\ No newline at end of file
diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
similarity index 100%
rename from vendor/github.com/coreos/go-semver/LICENSE
rename to vendor/github.com/census-instrumentation/opencensus-proto/LICENSE
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
new file mode 100644
index 00000000..12b578d0
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1/common.pb.go
@@ -0,0 +1,356 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/agent/common/v1/common.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type LibraryInfo_Language int32
+
+const (
+ LibraryInfo_LANGUAGE_UNSPECIFIED LibraryInfo_Language = 0
+ LibraryInfo_CPP LibraryInfo_Language = 1
+ LibraryInfo_C_SHARP LibraryInfo_Language = 2
+ LibraryInfo_ERLANG LibraryInfo_Language = 3
+ LibraryInfo_GO_LANG LibraryInfo_Language = 4
+ LibraryInfo_JAVA LibraryInfo_Language = 5
+ LibraryInfo_NODE_JS LibraryInfo_Language = 6
+ LibraryInfo_PHP LibraryInfo_Language = 7
+ LibraryInfo_PYTHON LibraryInfo_Language = 8
+ LibraryInfo_RUBY LibraryInfo_Language = 9
+)
+
+var LibraryInfo_Language_name = map[int32]string{
+ 0: "LANGUAGE_UNSPECIFIED",
+ 1: "CPP",
+ 2: "C_SHARP",
+ 3: "ERLANG",
+ 4: "GO_LANG",
+ 5: "JAVA",
+ 6: "NODE_JS",
+ 7: "PHP",
+ 8: "PYTHON",
+ 9: "RUBY",
+}
+
+var LibraryInfo_Language_value = map[string]int32{
+ "LANGUAGE_UNSPECIFIED": 0,
+ "CPP": 1,
+ "C_SHARP": 2,
+ "ERLANG": 3,
+ "GO_LANG": 4,
+ "JAVA": 5,
+ "NODE_JS": 6,
+ "PHP": 7,
+ "PYTHON": 8,
+ "RUBY": 9,
+}
+
+func (x LibraryInfo_Language) String() string {
+ return proto.EnumName(LibraryInfo_Language_name, int32(x))
+}
+
+func (LibraryInfo_Language) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{2, 0}
+}
+
+// Identifier metadata of the Node that produces the span or tracing data.
+// Note, this is not the metadata about the Node or service that is described by associated spans.
+// In the future we plan to extend the identifier proto definition to support
+// additional information (e.g cloud id, etc.)
+type Node struct {
+ // Identifier that uniquely identifies a process within a VM/container.
+ Identifier *ProcessIdentifier `protobuf:"bytes,1,opt,name=identifier,proto3" json:"identifier,omitempty"`
+ // Information on the OpenCensus Library that initiates the stream.
+ LibraryInfo *LibraryInfo `protobuf:"bytes,2,opt,name=library_info,json=libraryInfo,proto3" json:"library_info,omitempty"`
+ // Additional information on service.
+ ServiceInfo *ServiceInfo `protobuf:"bytes,3,opt,name=service_info,json=serviceInfo,proto3" json:"service_info,omitempty"`
+ // Additional attributes.
+ Attributes map[string]string `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Node) Reset() { *m = Node{} }
+func (m *Node) String() string { return proto.CompactTextString(m) }
+func (*Node) ProtoMessage() {}
+func (*Node) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{0}
+}
+
+func (m *Node) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Node.Unmarshal(m, b)
+}
+func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Node.Marshal(b, m, deterministic)
+}
+func (m *Node) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Node.Merge(m, src)
+}
+func (m *Node) XXX_Size() int {
+ return xxx_messageInfo_Node.Size(m)
+}
+func (m *Node) XXX_DiscardUnknown() {
+ xxx_messageInfo_Node.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Node proto.InternalMessageInfo
+
+func (m *Node) GetIdentifier() *ProcessIdentifier {
+ if m != nil {
+ return m.Identifier
+ }
+ return nil
+}
+
+func (m *Node) GetLibraryInfo() *LibraryInfo {
+ if m != nil {
+ return m.LibraryInfo
+ }
+ return nil
+}
+
+func (m *Node) GetServiceInfo() *ServiceInfo {
+ if m != nil {
+ return m.ServiceInfo
+ }
+ return nil
+}
+
+func (m *Node) GetAttributes() map[string]string {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// Identifier that uniquely identifies a process within a VM/container.
+type ProcessIdentifier struct {
+ // The host name. Usually refers to the machine/container name.
+ // For example: os.Hostname() in Go, socket.gethostname() in Python.
+ HostName string `protobuf:"bytes,1,opt,name=host_name,json=hostName,proto3" json:"host_name,omitempty"`
+ // Process id.
+ Pid uint32 `protobuf:"varint,2,opt,name=pid,proto3" json:"pid,omitempty"`
+ // Start time of this ProcessIdentifier. Represented in epoch time.
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProcessIdentifier) Reset() { *m = ProcessIdentifier{} }
+func (m *ProcessIdentifier) String() string { return proto.CompactTextString(m) }
+func (*ProcessIdentifier) ProtoMessage() {}
+func (*ProcessIdentifier) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{1}
+}
+
+func (m *ProcessIdentifier) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ProcessIdentifier.Unmarshal(m, b)
+}
+func (m *ProcessIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ProcessIdentifier.Marshal(b, m, deterministic)
+}
+func (m *ProcessIdentifier) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProcessIdentifier.Merge(m, src)
+}
+func (m *ProcessIdentifier) XXX_Size() int {
+ return xxx_messageInfo_ProcessIdentifier.Size(m)
+}
+func (m *ProcessIdentifier) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProcessIdentifier.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProcessIdentifier proto.InternalMessageInfo
+
+func (m *ProcessIdentifier) GetHostName() string {
+ if m != nil {
+ return m.HostName
+ }
+ return ""
+}
+
+func (m *ProcessIdentifier) GetPid() uint32 {
+ if m != nil {
+ return m.Pid
+ }
+ return 0
+}
+
+func (m *ProcessIdentifier) GetStartTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTimestamp
+ }
+ return nil
+}
+
+// Information on OpenCensus Library.
+type LibraryInfo struct {
+ // Language of OpenCensus Library.
+ Language LibraryInfo_Language `protobuf:"varint,1,opt,name=language,proto3,enum=opencensus.proto.agent.common.v1.LibraryInfo_Language" json:"language,omitempty"`
+ // Version of Agent exporter of Library.
+ ExporterVersion string `protobuf:"bytes,2,opt,name=exporter_version,json=exporterVersion,proto3" json:"exporter_version,omitempty"`
+ // Version of OpenCensus Library.
+ CoreLibraryVersion string `protobuf:"bytes,3,opt,name=core_library_version,json=coreLibraryVersion,proto3" json:"core_library_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LibraryInfo) Reset() { *m = LibraryInfo{} }
+func (m *LibraryInfo) String() string { return proto.CompactTextString(m) }
+func (*LibraryInfo) ProtoMessage() {}
+func (*LibraryInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{2}
+}
+
+func (m *LibraryInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LibraryInfo.Unmarshal(m, b)
+}
+func (m *LibraryInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LibraryInfo.Marshal(b, m, deterministic)
+}
+func (m *LibraryInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LibraryInfo.Merge(m, src)
+}
+func (m *LibraryInfo) XXX_Size() int {
+ return xxx_messageInfo_LibraryInfo.Size(m)
+}
+func (m *LibraryInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_LibraryInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LibraryInfo proto.InternalMessageInfo
+
+func (m *LibraryInfo) GetLanguage() LibraryInfo_Language {
+ if m != nil {
+ return m.Language
+ }
+ return LibraryInfo_LANGUAGE_UNSPECIFIED
+}
+
+func (m *LibraryInfo) GetExporterVersion() string {
+ if m != nil {
+ return m.ExporterVersion
+ }
+ return ""
+}
+
+func (m *LibraryInfo) GetCoreLibraryVersion() string {
+ if m != nil {
+ return m.CoreLibraryVersion
+ }
+ return ""
+}
+
+// Additional service information.
+type ServiceInfo struct {
+ // Name of the service.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ServiceInfo) Reset() { *m = ServiceInfo{} }
+func (m *ServiceInfo) String() string { return proto.CompactTextString(m) }
+func (*ServiceInfo) ProtoMessage() {}
+func (*ServiceInfo) Descriptor() ([]byte, []int) {
+ return fileDescriptor_126c72ed8a252c84, []int{3}
+}
+
+func (m *ServiceInfo) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ServiceInfo.Unmarshal(m, b)
+}
+func (m *ServiceInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ServiceInfo.Marshal(b, m, deterministic)
+}
+func (m *ServiceInfo) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ServiceInfo.Merge(m, src)
+}
+func (m *ServiceInfo) XXX_Size() int {
+ return xxx_messageInfo_ServiceInfo.Size(m)
+}
+func (m *ServiceInfo) XXX_DiscardUnknown() {
+ xxx_messageInfo_ServiceInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceInfo proto.InternalMessageInfo
+
+func (m *ServiceInfo) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("opencensus.proto.agent.common.v1.LibraryInfo_Language", LibraryInfo_Language_name, LibraryInfo_Language_value)
+ proto.RegisterType((*Node)(nil), "opencensus.proto.agent.common.v1.Node")
+ proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.agent.common.v1.Node.AttributesEntry")
+ proto.RegisterType((*ProcessIdentifier)(nil), "opencensus.proto.agent.common.v1.ProcessIdentifier")
+ proto.RegisterType((*LibraryInfo)(nil), "opencensus.proto.agent.common.v1.LibraryInfo")
+ proto.RegisterType((*ServiceInfo)(nil), "opencensus.proto.agent.common.v1.ServiceInfo")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/agent/common/v1/common.proto", fileDescriptor_126c72ed8a252c84)
+}
+
+var fileDescriptor_126c72ed8a252c84 = []byte{
+ // 590 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0x4f, 0x4f, 0xdb, 0x3e,
+ 0x1c, 0xc6, 0x7f, 0x69, 0x0a, 0xb4, 0xdf, 0xfc, 0x06, 0x99, 0xc5, 0xa1, 0x62, 0x87, 0xb1, 0xee,
+ 0xc2, 0x0e, 0x4d, 0x06, 0x48, 0xd3, 0x34, 0x69, 0x87, 0x52, 0x3a, 0x28, 0x42, 0x25, 0x72, 0x01,
+ 0x89, 0x5d, 0xa2, 0xb4, 0xb8, 0xc1, 0x5a, 0x63, 0x57, 0xb6, 0x53, 0x8d, 0xd3, 0x8e, 0xd3, 0xde,
+ 0xc0, 0x5e, 0xd4, 0x5e, 0xd5, 0x64, 0x3b, 0x69, 0xa3, 0x71, 0x28, 0xb7, 0xef, 0x9f, 0xe7, 0xf9,
+ 0x38, 0x7a, 0x6c, 0x05, 0x3a, 0x7c, 0x4e, 0xd8, 0x84, 0x30, 0x99, 0xcb, 0x70, 0x2e, 0xb8, 0xe2,
+ 0x61, 0x92, 0x12, 0xa6, 0xc2, 0x09, 0xcf, 0x32, 0xce, 0xc2, 0xc5, 0x61, 0x51, 0x05, 0x66, 0x89,
+ 0xf6, 0x57, 0x72, 0x3b, 0x09, 0x8c, 0x3c, 0x28, 0x44, 0x8b, 0xc3, 0xbd, 0xd7, 0x29, 0xe7, 0xe9,
+ 0x8c, 0x58, 0xd8, 0x38, 0x9f, 0x86, 0x8a, 0x66, 0x44, 0xaa, 0x24, 0x9b, 0x5b, 0x43, 0xfb, 0xb7,
+ 0x0b, 0xf5, 0x21, 0xbf, 0x27, 0x68, 0x04, 0x40, 0xef, 0x09, 0x53, 0x74, 0x4a, 0x89, 0x68, 0x39,
+ 0xfb, 0xce, 0x81, 0x77, 0x74, 0x1c, 0xac, 0x3b, 0x20, 0x88, 0x04, 0x9f, 0x10, 0x29, 0x07, 0x4b,
+ 0x2b, 0xae, 0x60, 0x50, 0x04, 0xff, 0xcf, 0xe8, 0x58, 0x24, 0xe2, 0x31, 0xa6, 0x6c, 0xca, 0x5b,
+ 0x35, 0x83, 0xed, 0xac, 0xc7, 0x5e, 0x5a, 0xd7, 0x80, 0x4d, 0x39, 0xf6, 0x66, 0xab, 0x46, 0x13,
+ 0x25, 0x11, 0x0b, 0x3a, 0x21, 0x96, 0xe8, 0x3e, 0x97, 0x38, 0xb2, 0x2e, 0x4b, 0x94, 0xab, 0x06,
+ 0xdd, 0x02, 0x24, 0x4a, 0x09, 0x3a, 0xce, 0x15, 0x91, 0xad, 0xfa, 0xbe, 0x7b, 0xe0, 0x1d, 0x7d,
+ 0x58, 0xcf, 0xd3, 0xa1, 0x05, 0xdd, 0xa5, 0xb1, 0xcf, 0x94, 0x78, 0xc4, 0x15, 0xd2, 0xde, 0x67,
+ 0xd8, 0xf9, 0x67, 0x8d, 0x7c, 0x70, 0xbf, 0x91, 0x47, 0x13, 0x6e, 0x13, 0xeb, 0x12, 0xed, 0xc2,
+ 0xc6, 0x22, 0x99, 0xe5, 0xc4, 0x24, 0xd3, 0xc4, 0xb6, 0xf9, 0x54, 0xfb, 0xe8, 0xb4, 0x7f, 0x3a,
+ 0xf0, 0xf2, 0x49, 0xb8, 0xe8, 0x15, 0x34, 0x1f, 0xb8, 0x54, 0x31, 0x4b, 0x32, 0x52, 0x70, 0x1a,
+ 0x7a, 0x30, 0x4c, 0x32, 0xa2, 0xf1, 0x73, 0x7a, 0x6f, 0x50, 0x2f, 0xb0, 0x2e, 0x51, 0x0f, 0x76,
+ 0xa4, 0x4a, 0x84, 0x8a, 0x97, 0xd7, 0x5e, 0x04, 0xb6, 0x17, 0xd8, 0x87, 0x11, 0x94, 0x0f, 0x23,
+ 0xb8, 0x2e, 0x15, 0x78, 0xdb, 0x58, 0x96, 0x7d, 0xfb, 0x4f, 0x0d, 0xbc, 0xca, 0x7d, 0x20, 0x0c,
+ 0x8d, 0x59, 0xc2, 0xd2, 0x3c, 0x49, 0xed, 0x27, 0x6c, 0x3f, 0x27, 0xae, 0x0a, 0x20, 0xb8, 0x2c,
+ 0xdc, 0x78, 0xc9, 0x41, 0xef, 0xc0, 0x27, 0xdf, 0xe7, 0x5c, 0x28, 0x22, 0xe2, 0x05, 0x11, 0x92,
+ 0x72, 0x56, 0x44, 0xb2, 0x53, 0xce, 0x6f, 0xed, 0x18, 0xbd, 0x87, 0xdd, 0x09, 0x17, 0x24, 0x2e,
+ 0x1f, 0x56, 0x29, 0x77, 0x8d, 0x1c, 0xe9, 0x5d, 0x71, 0x58, 0xe1, 0x68, 0xff, 0x72, 0xa0, 0x51,
+ 0x9e, 0x89, 0x5a, 0xb0, 0x7b, 0xd9, 0x1d, 0x9e, 0xdd, 0x74, 0xcf, 0xfa, 0xf1, 0xcd, 0x70, 0x14,
+ 0xf5, 0x7b, 0x83, 0x2f, 0x83, 0xfe, 0xa9, 0xff, 0x1f, 0xda, 0x02, 0xb7, 0x17, 0x45, 0xbe, 0x83,
+ 0x3c, 0xd8, 0xea, 0xc5, 0xa3, 0xf3, 0x2e, 0x8e, 0xfc, 0x1a, 0x02, 0xd8, 0xec, 0x63, 0xed, 0xf0,
+ 0x5d, 0xbd, 0x38, 0xbb, 0x8a, 0x4d, 0x53, 0x47, 0x0d, 0xa8, 0x5f, 0x74, 0x6f, 0xbb, 0xfe, 0x86,
+ 0x1e, 0x0f, 0xaf, 0x4e, 0xfb, 0xf1, 0xc5, 0xc8, 0xdf, 0xd4, 0x94, 0xe8, 0x3c, 0xf2, 0xb7, 0xb4,
+ 0x31, 0xba, 0xbb, 0x3e, 0xbf, 0x1a, 0xfa, 0x0d, 0xad, 0xc5, 0x37, 0x27, 0x77, 0x7e, 0xb3, 0xfd,
+ 0x06, 0xbc, 0xca, 0x4b, 0x44, 0x08, 0xea, 0x95, 0xab, 0x34, 0xf5, 0xc9, 0x0f, 0x78, 0x4b, 0xf9,
+ 0xda, 0x44, 0x4f, 0xbc, 0x9e, 0x29, 0x23, 0xbd, 0x8c, 0x9c, 0xaf, 0x83, 0x94, 0xaa, 0x87, 0x7c,
+ 0xac, 0x05, 0xa1, 0xf5, 0x75, 0x28, 0x93, 0x4a, 0xe4, 0x19, 0x61, 0x2a, 0x51, 0x94, 0xb3, 0x70,
+ 0x85, 0xec, 0xd8, 0x9f, 0x4b, 0x4a, 0x58, 0x27, 0x7d, 0xf2, 0x8f, 0x19, 0x6f, 0x9a, 0xed, 0xf1,
+ 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x94, 0xe5, 0x77, 0x76, 0x8e, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
new file mode 100644
index 00000000..801212d9
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/metrics/v1/metrics_service.pb.go
@@ -0,0 +1,264 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/agent/metrics/v1/metrics_service.proto
+
+package v1
+
+import (
+ context "context"
+ fmt "fmt"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ v11 "github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1"
+ v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ExportMetricsServiceRequest struct {
+ // This is required only in the first message on the stream or if the
+ // previous sent ExportMetricsServiceRequest message has a different Node (e.g.
+ // when the same RPC is used to send Metrics from multiple Applications).
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // A list of metrics that belong to the last received Node.
+ Metrics []*v11.Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
+ // The resource for the metrics in this message that do not have an explicit
+ // resource set.
+ // If unset, the most recently set resource in the RPC stream applies. It is
+ // valid to never be set within a stream, e.g. when no resource info is known
+ // at all or when all sent metrics have an explicit resource set.
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ExportMetricsServiceRequest) Reset() { *m = ExportMetricsServiceRequest{} }
+func (m *ExportMetricsServiceRequest) String() string { return proto.CompactTextString(m) }
+func (*ExportMetricsServiceRequest) ProtoMessage() {}
+func (*ExportMetricsServiceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_47e253a956287d04, []int{0}
+}
+
+func (m *ExportMetricsServiceRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ExportMetricsServiceRequest.Unmarshal(m, b)
+}
+func (m *ExportMetricsServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ExportMetricsServiceRequest.Marshal(b, m, deterministic)
+}
+func (m *ExportMetricsServiceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExportMetricsServiceRequest.Merge(m, src)
+}
+func (m *ExportMetricsServiceRequest) XXX_Size() int {
+ return xxx_messageInfo_ExportMetricsServiceRequest.Size(m)
+}
+func (m *ExportMetricsServiceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExportMetricsServiceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExportMetricsServiceRequest proto.InternalMessageInfo
+
+func (m *ExportMetricsServiceRequest) GetNode() *v1.Node {
+ if m != nil {
+ return m.Node
+ }
+ return nil
+}
+
+func (m *ExportMetricsServiceRequest) GetMetrics() []*v11.Metric {
+ if m != nil {
+ return m.Metrics
+ }
+ return nil
+}
+
+func (m *ExportMetricsServiceRequest) GetResource() *v12.Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+type ExportMetricsServiceResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ExportMetricsServiceResponse) Reset() { *m = ExportMetricsServiceResponse{} }
+func (m *ExportMetricsServiceResponse) String() string { return proto.CompactTextString(m) }
+func (*ExportMetricsServiceResponse) ProtoMessage() {}
+func (*ExportMetricsServiceResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_47e253a956287d04, []int{1}
+}
+
+func (m *ExportMetricsServiceResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ExportMetricsServiceResponse.Unmarshal(m, b)
+}
+func (m *ExportMetricsServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ExportMetricsServiceResponse.Marshal(b, m, deterministic)
+}
+func (m *ExportMetricsServiceResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExportMetricsServiceResponse.Merge(m, src)
+}
+func (m *ExportMetricsServiceResponse) XXX_Size() int {
+ return xxx_messageInfo_ExportMetricsServiceResponse.Size(m)
+}
+func (m *ExportMetricsServiceResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExportMetricsServiceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExportMetricsServiceResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*ExportMetricsServiceRequest)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceRequest")
+ proto.RegisterType((*ExportMetricsServiceResponse)(nil), "opencensus.proto.agent.metrics.v1.ExportMetricsServiceResponse")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/agent/metrics/v1/metrics_service.proto", fileDescriptor_47e253a956287d04)
+}
+
+var fileDescriptor_47e253a956287d04 = []byte{
+ // 340 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x92, 0xc1, 0x4a, 0xf3, 0x40,
+ 0x14, 0x85, 0xff, 0xf9, 0x2b, 0x55, 0xa6, 0xe0, 0x62, 0xdc, 0x94, 0x2a, 0x52, 0xab, 0x48, 0x45,
+ 0x32, 0x63, 0xea, 0x42, 0x10, 0x54, 0x28, 0xb8, 0x11, 0x94, 0x12, 0x77, 0x6e, 0xa4, 0x4d, 0x2f,
+ 0x71, 0x16, 0x99, 0x1b, 0x67, 0x26, 0xc1, 0x57, 0x70, 0xe5, 0x3b, 0xf8, 0x5c, 0x3e, 0x8c, 0x24,
+ 0x93, 0xb4, 0x94, 0x18, 0x0b, 0xee, 0x2e, 0x99, 0xf3, 0x9d, 0x9c, 0x33, 0x73, 0xe9, 0x05, 0x26,
+ 0xa0, 0x42, 0x50, 0x26, 0x35, 0x22, 0xd1, 0x68, 0x51, 0x4c, 0x23, 0x50, 0x56, 0xc4, 0x60, 0xb5,
+ 0x0c, 0x8d, 0xc8, 0xfc, 0x6a, 0x7c, 0x36, 0xa0, 0x33, 0x19, 0x02, 0x2f, 0x64, 0xec, 0x60, 0x09,
+ 0xba, 0x2f, 0xbc, 0x00, 0x79, 0xa9, 0xe6, 0x99, 0xdf, 0xf3, 0x1a, 0xbc, 0x43, 0x8c, 0x63, 0x54,
+ 0xb9, 0xb5, 0x9b, 0x1c, 0xdf, 0x3b, 0xa9, 0xc9, 0xeb, 0x21, 0x4a, 0xe9, 0x69, 0x4d, 0xaa, 0xc1,
+ 0x60, 0xaa, 0x43, 0xc8, 0xb5, 0xd5, 0xec, 0xc4, 0x83, 0x2f, 0x42, 0x77, 0x6f, 0xdf, 0x12, 0xd4,
+ 0xf6, 0xde, 0x99, 0x3c, 0xba, 0x22, 0x01, 0xbc, 0xa6, 0x60, 0x2c, 0xbb, 0xa4, 0x1b, 0x0a, 0xe7,
+ 0xd0, 0x25, 0x7d, 0x32, 0xec, 0x8c, 0x8e, 0x79, 0x43, 0xb1, 0x32, 0x6b, 0xe6, 0xf3, 0x07, 0x9c,
+ 0x43, 0x50, 0x30, 0xec, 0x8a, 0x6e, 0x96, 0xc9, 0xba, 0xff, 0xfb, 0xad, 0x61, 0x67, 0x74, 0x58,
+ 0xc7, 0x97, 0x37, 0xc2, 0x5d, 0x80, 0xa0, 0x62, 0xd8, 0x98, 0x6e, 0x55, 0x61, 0xbb, 0xad, 0xa6,
+ 0xdf, 0x2f, 0xea, 0x64, 0x3e, 0x0f, 0xca, 0x39, 0x58, 0x70, 0x83, 0x7d, 0xba, 0xf7, 0x73, 0x3b,
+ 0x93, 0xa0, 0x32, 0x30, 0xfa, 0x24, 0x74, 0x7b, 0xf5, 0x88, 0x7d, 0x10, 0xda, 0x76, 0x0c, 0xbb,
+ 0xe6, 0x6b, 0xdf, 0x91, 0xff, 0x72, 0x79, 0xbd, 0x9b, 0x3f, 0xf3, 0x2e, 0xde, 0xe0, 0xdf, 0x90,
+ 0x9c, 0x91, 0xf1, 0x3b, 0xa1, 0x47, 0x12, 0xd7, 0x7b, 0x8d, 0x77, 0x56, 0x6d, 0x26, 0xb9, 0x6a,
+ 0x42, 0x9e, 0xee, 0x22, 0x69, 0x5f, 0xd2, 0x59, 0xfe, 0x48, 0xc2, 0x19, 0x78, 0x52, 0x19, 0xab,
+ 0xd3, 0x18, 0x94, 0x9d, 0x5a, 0x89, 0x4a, 0x2c, 0xbd, 0x3d, 0xb7, 0x32, 0x11, 0x28, 0x2f, 0xaa,
+ 0xef, 0xfb, 0xac, 0x5d, 0x1c, 0x9f, 0x7f, 0x07, 0x00, 0x00, 0xff, 0xff, 0x16, 0x61, 0x3b, 0xc3,
+ 0x1b, 0x03, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// MetricsServiceClient is the client API for MetricsService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MetricsServiceClient interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error)
+}
+
+type metricsServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewMetricsServiceClient(cc *grpc.ClientConn) MetricsServiceClient {
+ return &metricsServiceClient{cc}
+}
+
+func (c *metricsServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (MetricsService_ExportClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_MetricsService_serviceDesc.Streams[0], "/opencensus.proto.agent.metrics.v1.MetricsService/Export", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &metricsServiceExportClient{stream}
+ return x, nil
+}
+
+type MetricsService_ExportClient interface {
+ Send(*ExportMetricsServiceRequest) error
+ Recv() (*ExportMetricsServiceResponse, error)
+ grpc.ClientStream
+}
+
+type metricsServiceExportClient struct {
+ grpc.ClientStream
+}
+
+func (x *metricsServiceExportClient) Send(m *ExportMetricsServiceRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *metricsServiceExportClient) Recv() (*ExportMetricsServiceResponse, error) {
+ m := new(ExportMetricsServiceResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// MetricsServiceServer is the server API for MetricsService service.
+type MetricsServiceServer interface {
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(MetricsService_ExportServer) error
+}
+
+func RegisterMetricsServiceServer(s *grpc.Server, srv MetricsServiceServer) {
+ s.RegisterService(&_MetricsService_serviceDesc, srv)
+}
+
+func _MetricsService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(MetricsServiceServer).Export(&metricsServiceExportServer{stream})
+}
+
+type MetricsService_ExportServer interface {
+ Send(*ExportMetricsServiceResponse) error
+ Recv() (*ExportMetricsServiceRequest, error)
+ grpc.ServerStream
+}
+
+type metricsServiceExportServer struct {
+ grpc.ServerStream
+}
+
+func (x *metricsServiceExportServer) Send(m *ExportMetricsServiceResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *metricsServiceExportServer) Recv() (*ExportMetricsServiceRequest, error) {
+ m := new(ExportMetricsServiceRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _MetricsService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "opencensus.proto.agent.metrics.v1.MetricsService",
+ HandlerType: (*MetricsServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Export",
+ Handler: _MetricsService_Export_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "opencensus/proto/agent/metrics/v1/metrics_service.proto",
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
new file mode 100644
index 00000000..e7c49a38
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.go
@@ -0,0 +1,443 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/agent/trace/v1/trace_service.proto
+
+package v1
+
+import (
+ context "context"
+ fmt "fmt"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/agent/common/v1"
+ v12 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ v11 "github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1"
+ proto "github.com/golang/protobuf/proto"
+ grpc "google.golang.org/grpc"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type CurrentLibraryConfig struct {
+ // This is required only in the first message on the stream or if the
+ // previous sent CurrentLibraryConfig message has a different Node (e.g.
+ // when the same RPC is used to configure multiple Applications).
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // Current configuration.
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *CurrentLibraryConfig) Reset() { *m = CurrentLibraryConfig{} }
+func (m *CurrentLibraryConfig) String() string { return proto.CompactTextString(m) }
+func (*CurrentLibraryConfig) ProtoMessage() {}
+func (*CurrentLibraryConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7027f99caf7ac6a5, []int{0}
+}
+
+func (m *CurrentLibraryConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_CurrentLibraryConfig.Unmarshal(m, b)
+}
+func (m *CurrentLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_CurrentLibraryConfig.Marshal(b, m, deterministic)
+}
+func (m *CurrentLibraryConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_CurrentLibraryConfig.Merge(m, src)
+}
+func (m *CurrentLibraryConfig) XXX_Size() int {
+ return xxx_messageInfo_CurrentLibraryConfig.Size(m)
+}
+func (m *CurrentLibraryConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_CurrentLibraryConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CurrentLibraryConfig proto.InternalMessageInfo
+
+func (m *CurrentLibraryConfig) GetNode() *v1.Node {
+ if m != nil {
+ return m.Node
+ }
+ return nil
+}
+
+func (m *CurrentLibraryConfig) GetConfig() *v11.TraceConfig {
+ if m != nil {
+ return m.Config
+ }
+ return nil
+}
+
+type UpdatedLibraryConfig struct {
+ // This field is ignored when the RPC is used to configure only one Application.
+ // This is required only in the first message on the stream or if the
+ // previous sent UpdatedLibraryConfig message has a different Node.
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // Requested updated configuration.
+ Config *v11.TraceConfig `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *UpdatedLibraryConfig) Reset() { *m = UpdatedLibraryConfig{} }
+func (m *UpdatedLibraryConfig) String() string { return proto.CompactTextString(m) }
+func (*UpdatedLibraryConfig) ProtoMessage() {}
+func (*UpdatedLibraryConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7027f99caf7ac6a5, []int{1}
+}
+
+func (m *UpdatedLibraryConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_UpdatedLibraryConfig.Unmarshal(m, b)
+}
+func (m *UpdatedLibraryConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_UpdatedLibraryConfig.Marshal(b, m, deterministic)
+}
+func (m *UpdatedLibraryConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_UpdatedLibraryConfig.Merge(m, src)
+}
+func (m *UpdatedLibraryConfig) XXX_Size() int {
+ return xxx_messageInfo_UpdatedLibraryConfig.Size(m)
+}
+func (m *UpdatedLibraryConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_UpdatedLibraryConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UpdatedLibraryConfig proto.InternalMessageInfo
+
+func (m *UpdatedLibraryConfig) GetNode() *v1.Node {
+ if m != nil {
+ return m.Node
+ }
+ return nil
+}
+
+func (m *UpdatedLibraryConfig) GetConfig() *v11.TraceConfig {
+ if m != nil {
+ return m.Config
+ }
+ return nil
+}
+
+type ExportTraceServiceRequest struct {
+ // This is required only in the first message on the stream or if the
+ // previous sent ExportTraceServiceRequest message has a different Node (e.g.
+ // when the same RPC is used to send Spans from multiple Applications).
+ Node *v1.Node `protobuf:"bytes,1,opt,name=node,proto3" json:"node,omitempty"`
+ // A list of Spans that belong to the last received Node.
+ Spans []*v11.Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
+ // The resource for the spans in this message that do not have an explicit
+ // resource set.
+ // If unset, the most recently set resource in the RPC stream applies. It is
+ // valid to never be set within a stream, e.g. when no resource info is known.
+ Resource *v12.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ExportTraceServiceRequest) Reset() { *m = ExportTraceServiceRequest{} }
+func (m *ExportTraceServiceRequest) String() string { return proto.CompactTextString(m) }
+func (*ExportTraceServiceRequest) ProtoMessage() {}
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7027f99caf7ac6a5, []int{2}
+}
+
+func (m *ExportTraceServiceRequest) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ExportTraceServiceRequest.Unmarshal(m, b)
+}
+func (m *ExportTraceServiceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ExportTraceServiceRequest.Marshal(b, m, deterministic)
+}
+func (m *ExportTraceServiceRequest) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExportTraceServiceRequest.Merge(m, src)
+}
+func (m *ExportTraceServiceRequest) XXX_Size() int {
+ return xxx_messageInfo_ExportTraceServiceRequest.Size(m)
+}
+func (m *ExportTraceServiceRequest) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExportTraceServiceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExportTraceServiceRequest proto.InternalMessageInfo
+
+func (m *ExportTraceServiceRequest) GetNode() *v1.Node {
+ if m != nil {
+ return m.Node
+ }
+ return nil
+}
+
+func (m *ExportTraceServiceRequest) GetSpans() []*v11.Span {
+ if m != nil {
+ return m.Spans
+ }
+ return nil
+}
+
+func (m *ExportTraceServiceRequest) GetResource() *v12.Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+type ExportTraceServiceResponse struct {
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ExportTraceServiceResponse) Reset() { *m = ExportTraceServiceResponse{} }
+func (m *ExportTraceServiceResponse) String() string { return proto.CompactTextString(m) }
+func (*ExportTraceServiceResponse) ProtoMessage() {}
+func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) {
+ return fileDescriptor_7027f99caf7ac6a5, []int{3}
+}
+
+func (m *ExportTraceServiceResponse) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ExportTraceServiceResponse.Unmarshal(m, b)
+}
+func (m *ExportTraceServiceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ExportTraceServiceResponse.Marshal(b, m, deterministic)
+}
+func (m *ExportTraceServiceResponse) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ExportTraceServiceResponse.Merge(m, src)
+}
+func (m *ExportTraceServiceResponse) XXX_Size() int {
+ return xxx_messageInfo_ExportTraceServiceResponse.Size(m)
+}
+func (m *ExportTraceServiceResponse) XXX_DiscardUnknown() {
+ xxx_messageInfo_ExportTraceServiceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExportTraceServiceResponse proto.InternalMessageInfo
+
+func init() {
+ proto.RegisterType((*CurrentLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.CurrentLibraryConfig")
+ proto.RegisterType((*UpdatedLibraryConfig)(nil), "opencensus.proto.agent.trace.v1.UpdatedLibraryConfig")
+ proto.RegisterType((*ExportTraceServiceRequest)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceRequest")
+ proto.RegisterType((*ExportTraceServiceResponse)(nil), "opencensus.proto.agent.trace.v1.ExportTraceServiceResponse")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/agent/trace/v1/trace_service.proto", fileDescriptor_7027f99caf7ac6a5)
+}
+
+var fileDescriptor_7027f99caf7ac6a5 = []byte{
+ // 423 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x54, 0xbf, 0x6b, 0xdb, 0x40,
+ 0x14, 0xee, 0xd9, 0xad, 0x28, 0xe7, 0x2e, 0x15, 0x1d, 0x54, 0x51, 0xb0, 0x11, 0xb4, 0x18, 0x5a,
+ 0x9d, 0x2a, 0x1b, 0x2f, 0x2e, 0x74, 0xb0, 0x29, 0x74, 0x28, 0xc5, 0xc8, 0xed, 0x92, 0xc5, 0xc8,
+ 0xd2, 0x8b, 0xa2, 0xc1, 0x77, 0xca, 0xdd, 0x49, 0x24, 0x90, 0x2d, 0x43, 0xf6, 0x0c, 0xf9, 0xc3,
+ 0xf2, 0x17, 0x05, 0xdd, 0xc9, 0x3f, 0x12, 0x5b, 0x11, 0x24, 0x4b, 0xb6, 0x87, 0xde, 0xf7, 0x7d,
+ 0xf7, 0xbd, 0x7b, 0xdf, 0x09, 0x0f, 0x59, 0x06, 0x34, 0x02, 0x2a, 0x72, 0xe1, 0x65, 0x9c, 0x49,
+ 0xe6, 0x85, 0x09, 0x50, 0xe9, 0x49, 0x1e, 0x46, 0xe0, 0x15, 0xbe, 0x2e, 0x16, 0x02, 0x78, 0x91,
+ 0x46, 0x40, 0x14, 0xc4, 0xec, 0x6e, 0x49, 0xfa, 0x0b, 0x51, 0x24, 0xa2, 0xb0, 0xa4, 0xf0, 0x6d,
+ 0xb7, 0x46, 0x35, 0x62, 0xab, 0x15, 0xa3, 0xa5, 0xac, 0xae, 0x34, 0xdb, 0xfe, 0xba, 0x07, 0xe7,
+ 0x20, 0x58, 0xce, 0xb5, 0x83, 0x75, 0x5d, 0x81, 0x3f, 0xef, 0x81, 0xef, 0x7b, 0xad, 0x60, 0xdf,
+ 0x1a, 0x60, 0x8b, 0x88, 0xd1, 0xe3, 0x34, 0xd1, 0x68, 0xe7, 0x1a, 0xe1, 0x0f, 0xd3, 0x9c, 0x73,
+ 0xa0, 0xf2, 0x4f, 0xba, 0xe4, 0x21, 0x3f, 0x9f, 0xaa, 0xb6, 0x39, 0xc6, 0xaf, 0x29, 0x8b, 0xc1,
+ 0x42, 0x3d, 0xd4, 0xef, 0x0c, 0xbe, 0x90, 0x9a, 0xc9, 0xab, 0x71, 0x0a, 0x9f, 0xfc, 0x65, 0x31,
+ 0x04, 0x8a, 0x63, 0xfe, 0xc4, 0x86, 0x3e, 0xc4, 0x6a, 0xd5, 0xb1, 0xd7, 0x37, 0x46, 0xfe, 0x95,
+ 0x85, 0x3e, 0x33, 0xa8, 0x58, 0xca, 0xd4, 0xff, 0x2c, 0x0e, 0x25, 0xc4, 0x2f, 0xc7, 0xd4, 0x2d,
+ 0xc2, 0x1f, 0x7f, 0x9d, 0x65, 0x8c, 0x4b, 0xd5, 0x9d, 0xeb, 0x60, 0x04, 0x70, 0x9a, 0x83, 0x90,
+ 0xcf, 0x72, 0x36, 0xc2, 0x6f, 0x44, 0x16, 0x52, 0x61, 0xb5, 0x7a, 0xed, 0x7e, 0x67, 0xd0, 0x7d,
+ 0xc4, 0xd8, 0x3c, 0x0b, 0x69, 0xa0, 0xd1, 0xe6, 0x04, 0xbf, 0x5d, 0x27, 0xc4, 0x6a, 0xd7, 0x1d,
+ 0xbb, 0xc9, 0x50, 0xe1, 0x93, 0xa0, 0xaa, 0x83, 0x0d, 0xcf, 0xf9, 0x84, 0xed, 0x43, 0x33, 0x89,
+ 0x8c, 0x51, 0x01, 0x83, 0x9b, 0x16, 0x7e, 0xb7, 0xdb, 0x30, 0x2f, 0xb0, 0x51, 0x6d, 0x62, 0x44,
+ 0x1a, 0x9e, 0x02, 0x39, 0x94, 0x2a, 0xbb, 0x99, 0x76, 0x68, 0xef, 0xce, 0xab, 0x3e, 0xfa, 0x8e,
+ 0xcc, 0x2b, 0x84, 0x0d, 0xed, 0xd6, 0x1c, 0x37, 0xea, 0xd4, 0xae, 0xca, 0xfe, 0xf1, 0x24, 0xae,
+ 0xbe, 0x12, 0xed, 0x64, 0x72, 0x89, 0xb0, 0x93, 0xb2, 0x26, 0x9d, 0xc9, 0xfb, 0x5d, 0x89, 0x59,
+ 0x89, 0x98, 0xa1, 0xa3, 0xdf, 0x49, 0x2a, 0x4f, 0xf2, 0x65, 0x19, 0x05, 0x4f, 0x93, 0xdd, 0x94,
+ 0x0a, 0xc9, 0xf3, 0x15, 0x50, 0x19, 0xca, 0x94, 0x51, 0x6f, 0xab, 0xeb, 0xea, 0x17, 0x9c, 0x00,
+ 0x75, 0x93, 0x87, 0x7f, 0xa8, 0xa5, 0xa1, 0x9a, 0xc3, 0xbb, 0x00, 0x00, 0x00, 0xff, 0xff, 0xcf,
+ 0x9c, 0x9b, 0xf7, 0xcb, 0x04, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// TraceServiceClient is the client API for TraceService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type TraceServiceClient interface {
+ // After initialization, this RPC must be kept alive for the entire life of
+ // the application. The agent pushes configs down to applications via a
+ // stream.
+ Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error)
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error)
+}
+
+type traceServiceClient struct {
+ cc *grpc.ClientConn
+}
+
+func NewTraceServiceClient(cc *grpc.ClientConn) TraceServiceClient {
+ return &traceServiceClient{cc}
+}
+
+func (c *traceServiceClient) Config(ctx context.Context, opts ...grpc.CallOption) (TraceService_ConfigClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[0], "/opencensus.proto.agent.trace.v1.TraceService/Config", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &traceServiceConfigClient{stream}
+ return x, nil
+}
+
+type TraceService_ConfigClient interface {
+ Send(*CurrentLibraryConfig) error
+ Recv() (*UpdatedLibraryConfig, error)
+ grpc.ClientStream
+}
+
+type traceServiceConfigClient struct {
+ grpc.ClientStream
+}
+
+func (x *traceServiceConfigClient) Send(m *CurrentLibraryConfig) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *traceServiceConfigClient) Recv() (*UpdatedLibraryConfig, error) {
+ m := new(UpdatedLibraryConfig)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func (c *traceServiceClient) Export(ctx context.Context, opts ...grpc.CallOption) (TraceService_ExportClient, error) {
+ stream, err := c.cc.NewStream(ctx, &_TraceService_serviceDesc.Streams[1], "/opencensus.proto.agent.trace.v1.TraceService/Export", opts...)
+ if err != nil {
+ return nil, err
+ }
+ x := &traceServiceExportClient{stream}
+ return x, nil
+}
+
+type TraceService_ExportClient interface {
+ Send(*ExportTraceServiceRequest) error
+ Recv() (*ExportTraceServiceResponse, error)
+ grpc.ClientStream
+}
+
+type traceServiceExportClient struct {
+ grpc.ClientStream
+}
+
+func (x *traceServiceExportClient) Send(m *ExportTraceServiceRequest) error {
+ return x.ClientStream.SendMsg(m)
+}
+
+func (x *traceServiceExportClient) Recv() (*ExportTraceServiceResponse, error) {
+ m := new(ExportTraceServiceResponse)
+ if err := x.ClientStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+// TraceServiceServer is the server API for TraceService service.
+type TraceServiceServer interface {
+ // After initialization, this RPC must be kept alive for the entire life of
+ // the application. The agent pushes configs down to applications via a
+ // stream.
+ Config(TraceService_ConfigServer) error
+ // For performance reasons, it is recommended to keep this RPC
+ // alive for the entire life of the application.
+ Export(TraceService_ExportServer) error
+}
+
+func RegisterTraceServiceServer(s *grpc.Server, srv TraceServiceServer) {
+ s.RegisterService(&_TraceService_serviceDesc, srv)
+}
+
+func _TraceService_Config_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(TraceServiceServer).Config(&traceServiceConfigServer{stream})
+}
+
+type TraceService_ConfigServer interface {
+ Send(*UpdatedLibraryConfig) error
+ Recv() (*CurrentLibraryConfig, error)
+ grpc.ServerStream
+}
+
+type traceServiceConfigServer struct {
+ grpc.ServerStream
+}
+
+func (x *traceServiceConfigServer) Send(m *UpdatedLibraryConfig) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *traceServiceConfigServer) Recv() (*CurrentLibraryConfig, error) {
+ m := new(CurrentLibraryConfig)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+func _TraceService_Export_Handler(srv interface{}, stream grpc.ServerStream) error {
+ return srv.(TraceServiceServer).Export(&traceServiceExportServer{stream})
+}
+
+type TraceService_ExportServer interface {
+ Send(*ExportTraceServiceResponse) error
+ Recv() (*ExportTraceServiceRequest, error)
+ grpc.ServerStream
+}
+
+type traceServiceExportServer struct {
+ grpc.ServerStream
+}
+
+func (x *traceServiceExportServer) Send(m *ExportTraceServiceResponse) error {
+ return x.ServerStream.SendMsg(m)
+}
+
+func (x *traceServiceExportServer) Recv() (*ExportTraceServiceRequest, error) {
+ m := new(ExportTraceServiceRequest)
+ if err := x.ServerStream.RecvMsg(m); err != nil {
+ return nil, err
+ }
+ return m, nil
+}
+
+var _TraceService_serviceDesc = grpc.ServiceDesc{
+ ServiceName: "opencensus.proto.agent.trace.v1.TraceService",
+ HandlerType: (*TraceServiceServer)(nil),
+ Methods: []grpc.MethodDesc{},
+ Streams: []grpc.StreamDesc{
+ {
+ StreamName: "Config",
+ Handler: _TraceService_Config_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ {
+ StreamName: "Export",
+ Handler: _TraceService_Export_Handler,
+ ServerStreams: true,
+ ClientStreams: true,
+ },
+ },
+ Metadata: "opencensus/proto/agent/trace/v1/trace_service.proto",
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
new file mode 100644
index 00000000..bd4b8a82
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1/trace_service.pb.gw.go
@@ -0,0 +1,154 @@
+// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT.
+// source: opencensus/proto/agent/trace/v1/trace_service.proto
+
+/*
+Package v1 is a reverse proxy.
+
+It translates gRPC into RESTful JSON APIs.
+*/
+package v1
+
+import (
+ "io"
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+ "github.com/grpc-ecosystem/grpc-gateway/runtime"
+ "github.com/grpc-ecosystem/grpc-gateway/utilities"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/status"
+)
+
+var _ codes.Code
+var _ io.Reader
+var _ status.Status
+var _ = runtime.String
+var _ = utilities.NewDoubleArray
+
+func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (TraceService_ExportClient, runtime.ServerMetadata, error) {
+ var metadata runtime.ServerMetadata
+ stream, err := client.Export(ctx)
+ if err != nil {
+ grpclog.Infof("Failed to start streaming: %v", err)
+ return nil, metadata, err
+ }
+ newReader, berr := utilities.IOReaderFactory(req.Body)
+ if berr != nil {
+ return nil, metadata, berr
+ }
+ dec := marshaler.NewDecoder(newReader())
+ handleSend := func() error {
+ var protoReq ExportTraceServiceRequest
+ err := dec.Decode(&protoReq)
+ if err == io.EOF {
+ return err
+ }
+ if err != nil {
+ grpclog.Infof("Failed to decode request: %v", err)
+ return err
+ }
+ if err := stream.Send(&protoReq); err != nil {
+ grpclog.Infof("Failed to send request: %v", err)
+ return err
+ }
+ return nil
+ }
+ if err := handleSend(); err != nil {
+ if cerr := stream.CloseSend(); cerr != nil {
+ grpclog.Infof("Failed to terminate client stream: %v", cerr)
+ }
+ if err == io.EOF {
+ return stream, metadata, nil
+ }
+ return nil, metadata, err
+ }
+ go func() {
+ for {
+ if err := handleSend(); err != nil {
+ break
+ }
+ }
+ if err := stream.CloseSend(); err != nil {
+ grpclog.Infof("Failed to terminate client stream: %v", err)
+ }
+ }()
+ header, err := stream.Header()
+ if err != nil {
+ grpclog.Infof("Failed to get header from client: %v", err)
+ return nil, metadata, err
+ }
+ metadata.HeaderMD = header
+ return stream, metadata, nil
+}
+
+// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but
+// automatically dials to "endpoint" and closes the connection when "ctx" gets done.
+func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) {
+ conn, err := grpc.Dial(endpoint, opts...)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ return
+ }
+ go func() {
+ <-ctx.Done()
+ if cerr := conn.Close(); cerr != nil {
+ grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr)
+ }
+ }()
+ }()
+
+ return RegisterTraceServiceHandler(ctx, mux, conn)
+}
+
+// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux".
+// The handlers forward requests to the grpc endpoint over "conn".
+func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error {
+ return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn))
+}
+
+// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService
+// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient".
+// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient"
+// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in
+// "TraceServiceClient" to call the correct interceptors.
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error {
+
+ mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+ ctx, cancel := context.WithCancel(req.Context())
+ defer cancel()
+ inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+ rctx, err := runtime.AnnotateContext(ctx, mux, req)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+ resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams)
+ ctx = runtime.NewServerMetadataContext(ctx, md)
+ if err != nil {
+ runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+ return
+ }
+
+ forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, func() (proto.Message, error) { return resp.Recv() }, mux.GetForwardResponseOptions()...)
+
+ })
+
+ return nil
+}
+
+var (
+ pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, ""))
+)
+
+var (
+ forward_TraceService_Export_0 = runtime.ForwardResponseStream
+)
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
new file mode 100644
index 00000000..53b8aa99
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1/metrics.pb.go
@@ -0,0 +1,1126 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/metrics/v1/metrics.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// The kind of metric. It describes how the data is reported.
+//
+// A gauge is an instantaneous measurement of a value.
+//
+// A cumulative measurement is a value accumulated over a time interval. In
+// a time series, cumulative measurements should have the same start time,
+// increasing values and increasing end times, until an event resets the
+// cumulative value to zero and sets a new start time for the following
+// points.
+type MetricDescriptor_Type int32
+
+const (
+ // Do not use this default value.
+ MetricDescriptor_UNSPECIFIED MetricDescriptor_Type = 0
+ // Integer gauge. The value can go both up and down.
+ MetricDescriptor_GAUGE_INT64 MetricDescriptor_Type = 1
+ // Floating point gauge. The value can go both up and down.
+ MetricDescriptor_GAUGE_DOUBLE MetricDescriptor_Type = 2
+ // Distribution gauge measurement. The count and sum can go both up and
+ // down. Recorded values are always >= 0.
+ // Used in scenarios like a snapshot of time the current items in a queue
+ // have spent there.
+ MetricDescriptor_GAUGE_DISTRIBUTION MetricDescriptor_Type = 3
+ // Integer cumulative measurement. The value cannot decrease, if resets
+ // then the start_time should also be reset.
+ MetricDescriptor_CUMULATIVE_INT64 MetricDescriptor_Type = 4
+ // Floating point cumulative measurement. The value cannot decrease, if
+ // resets then the start_time should also be reset. Recorded values are
+ // always >= 0.
+ MetricDescriptor_CUMULATIVE_DOUBLE MetricDescriptor_Type = 5
+ // Distribution cumulative measurement. The count and sum cannot decrease,
+ // if resets then the start_time should also be reset.
+ MetricDescriptor_CUMULATIVE_DISTRIBUTION MetricDescriptor_Type = 6
+ // Some frameworks implemented Histograms as a summary of observations
+ // (usually things like request durations and response sizes). While it
+ // also provides a total count of observations and a sum of all observed
+ // values, it calculates configurable percentiles over a sliding time
+ // window. This is not recommended, since it cannot be aggregated.
+ MetricDescriptor_SUMMARY MetricDescriptor_Type = 7
+)
+
+var MetricDescriptor_Type_name = map[int32]string{
+ 0: "UNSPECIFIED",
+ 1: "GAUGE_INT64",
+ 2: "GAUGE_DOUBLE",
+ 3: "GAUGE_DISTRIBUTION",
+ 4: "CUMULATIVE_INT64",
+ 5: "CUMULATIVE_DOUBLE",
+ 6: "CUMULATIVE_DISTRIBUTION",
+ 7: "SUMMARY",
+}
+
+var MetricDescriptor_Type_value = map[string]int32{
+ "UNSPECIFIED": 0,
+ "GAUGE_INT64": 1,
+ "GAUGE_DOUBLE": 2,
+ "GAUGE_DISTRIBUTION": 3,
+ "CUMULATIVE_INT64": 4,
+ "CUMULATIVE_DOUBLE": 5,
+ "CUMULATIVE_DISTRIBUTION": 6,
+ "SUMMARY": 7,
+}
+
+func (x MetricDescriptor_Type) String() string {
+ return proto.EnumName(MetricDescriptor_Type_name, int32(x))
+}
+
+func (MetricDescriptor_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{1, 0}
+}
+
+// Defines a Metric which has one or more timeseries.
+type Metric struct {
+ // The descriptor of the Metric.
+ // TODO(issue #152): consider only sending the name of descriptor for
+ // optimization.
+ MetricDescriptor *MetricDescriptor `protobuf:"bytes,1,opt,name=metric_descriptor,json=metricDescriptor,proto3" json:"metric_descriptor,omitempty"`
+ // One or more timeseries for a single metric, where each timeseries has
+ // one or more points.
+ Timeseries []*TimeSeries `protobuf:"bytes,2,rep,name=timeseries,proto3" json:"timeseries,omitempty"`
+ // The resource for the metric. If unset, it may be set to a default value
+ // provided for a sequence of messages in an RPC stream.
+ Resource *v1.Resource `protobuf:"bytes,3,opt,name=resource,proto3" json:"resource,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Metric) Reset() { *m = Metric{} }
+func (m *Metric) String() string { return proto.CompactTextString(m) }
+func (*Metric) ProtoMessage() {}
+func (*Metric) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{0}
+}
+
+func (m *Metric) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Metric.Unmarshal(m, b)
+}
+func (m *Metric) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Metric.Marshal(b, m, deterministic)
+}
+func (m *Metric) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Metric.Merge(m, src)
+}
+func (m *Metric) XXX_Size() int {
+ return xxx_messageInfo_Metric.Size(m)
+}
+func (m *Metric) XXX_DiscardUnknown() {
+ xxx_messageInfo_Metric.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metric proto.InternalMessageInfo
+
+func (m *Metric) GetMetricDescriptor() *MetricDescriptor {
+ if m != nil {
+ return m.MetricDescriptor
+ }
+ return nil
+}
+
+func (m *Metric) GetTimeseries() []*TimeSeries {
+ if m != nil {
+ return m.Timeseries
+ }
+ return nil
+}
+
+func (m *Metric) GetResource() *v1.Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+// Defines a metric type and its schema.
+type MetricDescriptor struct {
+ // The metric type, including its DNS name prefix. It must be unique.
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+ // A detailed description of the metric, which can be used in documentation.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ // The unit in which the metric value is reported. Follows the format
+ // described by http://unitsofmeasure.org/ucum.html.
+ Unit string `protobuf:"bytes,3,opt,name=unit,proto3" json:"unit,omitempty"`
+ Type MetricDescriptor_Type `protobuf:"varint,4,opt,name=type,proto3,enum=opencensus.proto.metrics.v1.MetricDescriptor_Type" json:"type,omitempty"`
+ // The label keys associated with the metric descriptor.
+ LabelKeys []*LabelKey `protobuf:"bytes,5,rep,name=label_keys,json=labelKeys,proto3" json:"label_keys,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *MetricDescriptor) Reset() { *m = MetricDescriptor{} }
+func (m *MetricDescriptor) String() string { return proto.CompactTextString(m) }
+func (*MetricDescriptor) ProtoMessage() {}
+func (*MetricDescriptor) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{1}
+}
+
+func (m *MetricDescriptor) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_MetricDescriptor.Unmarshal(m, b)
+}
+func (m *MetricDescriptor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_MetricDescriptor.Marshal(b, m, deterministic)
+}
+func (m *MetricDescriptor) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_MetricDescriptor.Merge(m, src)
+}
+func (m *MetricDescriptor) XXX_Size() int {
+ return xxx_messageInfo_MetricDescriptor.Size(m)
+}
+func (m *MetricDescriptor) XXX_DiscardUnknown() {
+ xxx_messageInfo_MetricDescriptor.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricDescriptor proto.InternalMessageInfo
+
+func (m *MetricDescriptor) GetName() string {
+ if m != nil {
+ return m.Name
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetUnit() string {
+ if m != nil {
+ return m.Unit
+ }
+ return ""
+}
+
+func (m *MetricDescriptor) GetType() MetricDescriptor_Type {
+ if m != nil {
+ return m.Type
+ }
+ return MetricDescriptor_UNSPECIFIED
+}
+
+func (m *MetricDescriptor) GetLabelKeys() []*LabelKey {
+ if m != nil {
+ return m.LabelKeys
+ }
+ return nil
+}
+
+// Defines a label key associated with a metric descriptor.
+type LabelKey struct {
+ // The key for the label.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // A human-readable description of what this label key represents.
+ Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelKey) Reset() { *m = LabelKey{} }
+func (m *LabelKey) String() string { return proto.CompactTextString(m) }
+func (*LabelKey) ProtoMessage() {}
+func (*LabelKey) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{2}
+}
+
+func (m *LabelKey) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelKey.Unmarshal(m, b)
+}
+func (m *LabelKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelKey.Marshal(b, m, deterministic)
+}
+func (m *LabelKey) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelKey.Merge(m, src)
+}
+func (m *LabelKey) XXX_Size() int {
+ return xxx_messageInfo_LabelKey.Size(m)
+}
+func (m *LabelKey) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelKey proto.InternalMessageInfo
+
+func (m *LabelKey) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *LabelKey) GetDescription() string {
+ if m != nil {
+ return m.Description
+ }
+ return ""
+}
+
+// A collection of data points that describes the time-varying values
+// of a metric.
+type TimeSeries struct {
+ // Must be present for cumulative metrics. The time when the cumulative value
+ // was reset to zero. Exclusive. The cumulative value is over the time interval
+ // (start_timestamp, timestamp]. If not specified, the backend can use the
+ // previous recorded value.
+ StartTimestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=start_timestamp,json=startTimestamp,proto3" json:"start_timestamp,omitempty"`
+ // The set of label values that uniquely identify this timeseries. Applies to
+ // all points. The order of label values must match that of label keys in the
+ // metric descriptor.
+ LabelValues []*LabelValue `protobuf:"bytes,2,rep,name=label_values,json=labelValues,proto3" json:"label_values,omitempty"`
+ // The data points of this timeseries. Point.value type MUST match the
+ // MetricDescriptor.type.
+ Points []*Point `protobuf:"bytes,3,rep,name=points,proto3" json:"points,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TimeSeries) Reset() { *m = TimeSeries{} }
+func (m *TimeSeries) String() string { return proto.CompactTextString(m) }
+func (*TimeSeries) ProtoMessage() {}
+func (*TimeSeries) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{3}
+}
+
+func (m *TimeSeries) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TimeSeries.Unmarshal(m, b)
+}
+func (m *TimeSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TimeSeries.Marshal(b, m, deterministic)
+}
+func (m *TimeSeries) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TimeSeries.Merge(m, src)
+}
+func (m *TimeSeries) XXX_Size() int {
+ return xxx_messageInfo_TimeSeries.Size(m)
+}
+func (m *TimeSeries) XXX_DiscardUnknown() {
+ xxx_messageInfo_TimeSeries.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TimeSeries proto.InternalMessageInfo
+
+func (m *TimeSeries) GetStartTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTimestamp
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetLabelValues() []*LabelValue {
+ if m != nil {
+ return m.LabelValues
+ }
+ return nil
+}
+
+func (m *TimeSeries) GetPoints() []*Point {
+ if m != nil {
+ return m.Points
+ }
+ return nil
+}
+
+type LabelValue struct {
+ // The value for the label.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // If false the value field is ignored and considered not set.
+ // This is used to differentiate a missing label from an empty string.
+ HasValue bool `protobuf:"varint,2,opt,name=has_value,json=hasValue,proto3" json:"has_value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *LabelValue) Reset() { *m = LabelValue{} }
+func (m *LabelValue) String() string { return proto.CompactTextString(m) }
+func (*LabelValue) ProtoMessage() {}
+func (*LabelValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{4}
+}
+
+func (m *LabelValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_LabelValue.Unmarshal(m, b)
+}
+func (m *LabelValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_LabelValue.Marshal(b, m, deterministic)
+}
+func (m *LabelValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_LabelValue.Merge(m, src)
+}
+func (m *LabelValue) XXX_Size() int {
+ return xxx_messageInfo_LabelValue.Size(m)
+}
+func (m *LabelValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_LabelValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LabelValue proto.InternalMessageInfo
+
+func (m *LabelValue) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+func (m *LabelValue) GetHasValue() bool {
+ if m != nil {
+ return m.HasValue
+ }
+ return false
+}
+
+// A timestamped measurement.
+type Point struct {
+ // The moment when this point was recorded. Inclusive.
+ // If not specified, the timestamp will be decided by the backend.
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // The actual point value.
+ //
+ // Types that are valid to be assigned to Value:
+ // *Point_Int64Value
+ // *Point_DoubleValue
+ // *Point_DistributionValue
+ // *Point_SummaryValue
+ Value isPoint_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Point) Reset() { *m = Point{} }
+func (m *Point) String() string { return proto.CompactTextString(m) }
+func (*Point) ProtoMessage() {}
+func (*Point) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{5}
+}
+
+func (m *Point) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Point.Unmarshal(m, b)
+}
+func (m *Point) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Point.Marshal(b, m, deterministic)
+}
+func (m *Point) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Point.Merge(m, src)
+}
+func (m *Point) XXX_Size() int {
+ return xxx_messageInfo_Point.Size(m)
+}
+func (m *Point) XXX_DiscardUnknown() {
+ xxx_messageInfo_Point.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Point proto.InternalMessageInfo
+
+func (m *Point) GetTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+type isPoint_Value interface {
+ isPoint_Value()
+}
+
+type Point_Int64Value struct {
+ Int64Value int64 `protobuf:"varint,2,opt,name=int64_value,json=int64Value,proto3,oneof"`
+}
+
+type Point_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,3,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+type Point_DistributionValue struct {
+ DistributionValue *DistributionValue `protobuf:"bytes,4,opt,name=distribution_value,json=distributionValue,proto3,oneof"`
+}
+
+type Point_SummaryValue struct {
+ SummaryValue *SummaryValue `protobuf:"bytes,5,opt,name=summary_value,json=summaryValue,proto3,oneof"`
+}
+
+func (*Point_Int64Value) isPoint_Value() {}
+
+func (*Point_DoubleValue) isPoint_Value() {}
+
+func (*Point_DistributionValue) isPoint_Value() {}
+
+func (*Point_SummaryValue) isPoint_Value() {}
+
+func (m *Point) GetValue() isPoint_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Point) GetInt64Value() int64 {
+ if x, ok := m.GetValue().(*Point_Int64Value); ok {
+ return x.Int64Value
+ }
+ return 0
+}
+
+func (m *Point) GetDoubleValue() float64 {
+ if x, ok := m.GetValue().(*Point_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+func (m *Point) GetDistributionValue() *DistributionValue {
+ if x, ok := m.GetValue().(*Point_DistributionValue); ok {
+ return x.DistributionValue
+ }
+ return nil
+}
+
+func (m *Point) GetSummaryValue() *SummaryValue {
+ if x, ok := m.GetValue().(*Point_SummaryValue); ok {
+ return x.SummaryValue
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Point) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Point_Int64Value)(nil),
+ (*Point_DoubleValue)(nil),
+ (*Point_DistributionValue)(nil),
+ (*Point_SummaryValue)(nil),
+ }
+}
+
+// Distribution contains summary statistics for a population of values. It
+// optionally contains a histogram representing the distribution of those
+// values across a set of buckets.
+type DistributionValue struct {
+ // The number of values in the population. Must be non-negative. This value
+ // must equal the sum of the values in bucket_counts if a histogram is
+ // provided.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The sum of the values in the population. If count is zero then this field
+ // must be zero.
+ Sum float64 `protobuf:"fixed64,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // The sum of squared deviations from the mean of the values in the
+ // population. For values x_i this is:
+ //
+ // Sum[i=1..n]((x_i - mean)^2)
+ //
+ // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition
+ // describes Welford's method for accumulating this sum in one pass.
+ //
+ // If count is zero then this field must be zero.
+ SumOfSquaredDeviation float64 `protobuf:"fixed64,3,opt,name=sum_of_squared_deviation,json=sumOfSquaredDeviation,proto3" json:"sum_of_squared_deviation,omitempty"`
+ // Don't change bucket boundaries within a TimeSeries if your backend doesn't
+ // support this.
+ // TODO(issue #152): consider not required to send bucket options for
+ // optimization.
+ BucketOptions *DistributionValue_BucketOptions `protobuf:"bytes,4,opt,name=bucket_options,json=bucketOptions,proto3" json:"bucket_options,omitempty"`
+ // If the distribution does not have a histogram, then omit this field.
+ // If there is a histogram, then the sum of the values in the Bucket counts
+ // must equal the value in the count field of the distribution.
+ Buckets []*DistributionValue_Bucket `protobuf:"bytes,5,rep,name=buckets,proto3" json:"buckets,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue) Reset() { *m = DistributionValue{} }
+func (m *DistributionValue) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue) ProtoMessage() {}
+func (*DistributionValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6}
+}
+
+func (m *DistributionValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue.Unmarshal(m, b)
+}
+func (m *DistributionValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue.Merge(m, src)
+}
+func (m *DistributionValue) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue.Size(m)
+}
+func (m *DistributionValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue proto.InternalMessageInfo
+
+func (m *DistributionValue) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+func (m *DistributionValue) GetSum() float64 {
+ if m != nil {
+ return m.Sum
+ }
+ return 0
+}
+
+func (m *DistributionValue) GetSumOfSquaredDeviation() float64 {
+ if m != nil {
+ return m.SumOfSquaredDeviation
+ }
+ return 0
+}
+
+func (m *DistributionValue) GetBucketOptions() *DistributionValue_BucketOptions {
+ if m != nil {
+ return m.BucketOptions
+ }
+ return nil
+}
+
+func (m *DistributionValue) GetBuckets() []*DistributionValue_Bucket {
+ if m != nil {
+ return m.Buckets
+ }
+ return nil
+}
+
+// A Distribution may optionally contain a histogram of the values in the
+// population. The bucket boundaries for that histogram are described by
+// BucketOptions.
+//
+// If bucket_options has no type, then there is no histogram associated with
+// the Distribution.
+type DistributionValue_BucketOptions struct {
+ // Types that are valid to be assigned to Type:
+ // *DistributionValue_BucketOptions_Explicit_
+ Type isDistributionValue_BucketOptions_Type `protobuf_oneof:"type"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_BucketOptions) Reset() { *m = DistributionValue_BucketOptions{} }
+func (m *DistributionValue_BucketOptions) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_BucketOptions) ProtoMessage() {}
+func (*DistributionValue_BucketOptions) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 0}
+}
+
+func (m *DistributionValue_BucketOptions) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_BucketOptions.Unmarshal(m, b)
+}
+func (m *DistributionValue_BucketOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_BucketOptions.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_BucketOptions) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_BucketOptions.Merge(m, src)
+}
+func (m *DistributionValue_BucketOptions) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_BucketOptions.Size(m)
+}
+func (m *DistributionValue_BucketOptions) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_BucketOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_BucketOptions proto.InternalMessageInfo
+
+type isDistributionValue_BucketOptions_Type interface {
+ isDistributionValue_BucketOptions_Type()
+}
+
+type DistributionValue_BucketOptions_Explicit_ struct {
+ Explicit *DistributionValue_BucketOptions_Explicit `protobuf:"bytes,1,opt,name=explicit,proto3,oneof"`
+}
+
+func (*DistributionValue_BucketOptions_Explicit_) isDistributionValue_BucketOptions_Type() {}
+
+func (m *DistributionValue_BucketOptions) GetType() isDistributionValue_BucketOptions_Type {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+func (m *DistributionValue_BucketOptions) GetExplicit() *DistributionValue_BucketOptions_Explicit {
+ if x, ok := m.GetType().(*DistributionValue_BucketOptions_Explicit_); ok {
+ return x.Explicit
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*DistributionValue_BucketOptions) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*DistributionValue_BucketOptions_Explicit_)(nil),
+ }
+}
+
+// Specifies a set of buckets with arbitrary upper-bounds.
+// This defines size(bounds) + 1 (= N) buckets. The boundaries for bucket
+// index i are:
+//
+// [0, bucket_bounds[i]) for i == 0
+// [bucket_bounds[i-1], bucket_bounds[i]) for 0 < i < N-1
+// [bucket_bounds[i], +infinity) for i == N-1
+type DistributionValue_BucketOptions_Explicit struct {
+ // The values must be strictly increasing and > 0.
+ Bounds []float64 `protobuf:"fixed64,1,rep,packed,name=bounds,proto3" json:"bounds,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_BucketOptions_Explicit) Reset() {
+ *m = DistributionValue_BucketOptions_Explicit{}
+}
+func (m *DistributionValue_BucketOptions_Explicit) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_BucketOptions_Explicit) ProtoMessage() {}
+func (*DistributionValue_BucketOptions_Explicit) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 0, 0}
+}
+
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Unmarshal(m, b)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Merge(m, src)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_BucketOptions_Explicit.Size(m)
+}
+func (m *DistributionValue_BucketOptions_Explicit) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_BucketOptions_Explicit.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_BucketOptions_Explicit proto.InternalMessageInfo
+
+func (m *DistributionValue_BucketOptions_Explicit) GetBounds() []float64 {
+ if m != nil {
+ return m.Bounds
+ }
+ return nil
+}
+
+type DistributionValue_Bucket struct {
+ // The number of values in each bucket of the histogram, as described in
+ // bucket_bounds.
+ Count int64 `protobuf:"varint,1,opt,name=count,proto3" json:"count,omitempty"`
+ // If the distribution does not have a histogram, then omit this field.
+ Exemplar *DistributionValue_Exemplar `protobuf:"bytes,2,opt,name=exemplar,proto3" json:"exemplar,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_Bucket) Reset() { *m = DistributionValue_Bucket{} }
+func (m *DistributionValue_Bucket) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_Bucket) ProtoMessage() {}
+func (*DistributionValue_Bucket) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 1}
+}
+
+func (m *DistributionValue_Bucket) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_Bucket.Unmarshal(m, b)
+}
+func (m *DistributionValue_Bucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_Bucket.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_Bucket) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_Bucket.Merge(m, src)
+}
+func (m *DistributionValue_Bucket) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_Bucket.Size(m)
+}
+func (m *DistributionValue_Bucket) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_Bucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_Bucket proto.InternalMessageInfo
+
+func (m *DistributionValue_Bucket) GetCount() int64 {
+ if m != nil {
+ return m.Count
+ }
+ return 0
+}
+
+func (m *DistributionValue_Bucket) GetExemplar() *DistributionValue_Exemplar {
+ if m != nil {
+ return m.Exemplar
+ }
+ return nil
+}
+
+// Exemplars are example points that may be used to annotate aggregated
+// Distribution values. They are metadata that gives information about a
+// particular value added to a Distribution bucket.
+type DistributionValue_Exemplar struct {
+ // Value of the exemplar point. It determines which bucket the exemplar
+ // belongs to.
+ Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The observation (sampling) time of the above value.
+ Timestamp *timestamp.Timestamp `protobuf:"bytes,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"`
+ // Contextual information about the example value.
+ Attachments map[string]string `protobuf:"bytes,3,rep,name=attachments,proto3" json:"attachments,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *DistributionValue_Exemplar) Reset() { *m = DistributionValue_Exemplar{} }
+func (m *DistributionValue_Exemplar) String() string { return proto.CompactTextString(m) }
+func (*DistributionValue_Exemplar) ProtoMessage() {}
+func (*DistributionValue_Exemplar) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{6, 2}
+}
+
+func (m *DistributionValue_Exemplar) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_DistributionValue_Exemplar.Unmarshal(m, b)
+}
+func (m *DistributionValue_Exemplar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_DistributionValue_Exemplar.Marshal(b, m, deterministic)
+}
+func (m *DistributionValue_Exemplar) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_DistributionValue_Exemplar.Merge(m, src)
+}
+func (m *DistributionValue_Exemplar) XXX_Size() int {
+ return xxx_messageInfo_DistributionValue_Exemplar.Size(m)
+}
+func (m *DistributionValue_Exemplar) XXX_DiscardUnknown() {
+ xxx_messageInfo_DistributionValue_Exemplar.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DistributionValue_Exemplar proto.InternalMessageInfo
+
+func (m *DistributionValue_Exemplar) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func (m *DistributionValue_Exemplar) GetTimestamp() *timestamp.Timestamp {
+ if m != nil {
+ return m.Timestamp
+ }
+ return nil
+}
+
+func (m *DistributionValue_Exemplar) GetAttachments() map[string]string {
+ if m != nil {
+ return m.Attachments
+ }
+ return nil
+}
+
+// The start_timestamp only applies to the count and sum in the SummaryValue.
+type SummaryValue struct {
+ // The total number of recorded values since start_time. Optional since
+ // some systems don't expose this.
+ Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The total sum of recorded values since start_time. Optional since some
+ // systems don't expose this. If count is zero then this field must be zero.
+ // This field must be unset if the sum is not available.
+ Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // Values calculated over an arbitrary time window.
+ Snapshot *SummaryValue_Snapshot `protobuf:"bytes,3,opt,name=snapshot,proto3" json:"snapshot,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SummaryValue) Reset() { *m = SummaryValue{} }
+func (m *SummaryValue) String() string { return proto.CompactTextString(m) }
+func (*SummaryValue) ProtoMessage() {}
+func (*SummaryValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{7}
+}
+
+func (m *SummaryValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SummaryValue.Unmarshal(m, b)
+}
+func (m *SummaryValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SummaryValue.Marshal(b, m, deterministic)
+}
+func (m *SummaryValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SummaryValue.Merge(m, src)
+}
+func (m *SummaryValue) XXX_Size() int {
+ return xxx_messageInfo_SummaryValue.Size(m)
+}
+func (m *SummaryValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_SummaryValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SummaryValue proto.InternalMessageInfo
+
+func (m *SummaryValue) GetCount() *wrappers.Int64Value {
+ if m != nil {
+ return m.Count
+ }
+ return nil
+}
+
+func (m *SummaryValue) GetSum() *wrappers.DoubleValue {
+ if m != nil {
+ return m.Sum
+ }
+ return nil
+}
+
+func (m *SummaryValue) GetSnapshot() *SummaryValue_Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+// The values in this message can be reset at arbitrary unknown times, with
+// the requirement that all of them are reset at the same time.
+type SummaryValue_Snapshot struct {
+ // The number of values in the snapshot. Optional since some systems don't
+ // expose this.
+ Count *wrappers.Int64Value `protobuf:"bytes,1,opt,name=count,proto3" json:"count,omitempty"`
+ // The sum of values in the snapshot. Optional since some systems don't
+ // expose this. If count is zero then this field must be zero or not set
+ // (if not supported).
+ Sum *wrappers.DoubleValue `protobuf:"bytes,2,opt,name=sum,proto3" json:"sum,omitempty"`
+ // A list of values at different percentiles of the distribution calculated
+ // from the current snapshot. The percentiles must be strictly increasing.
+ PercentileValues []*SummaryValue_Snapshot_ValueAtPercentile `protobuf:"bytes,3,rep,name=percentile_values,json=percentileValues,proto3" json:"percentile_values,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SummaryValue_Snapshot) Reset() { *m = SummaryValue_Snapshot{} }
+func (m *SummaryValue_Snapshot) String() string { return proto.CompactTextString(m) }
+func (*SummaryValue_Snapshot) ProtoMessage() {}
+func (*SummaryValue_Snapshot) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{7, 0}
+}
+
+func (m *SummaryValue_Snapshot) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SummaryValue_Snapshot.Unmarshal(m, b)
+}
+func (m *SummaryValue_Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SummaryValue_Snapshot.Marshal(b, m, deterministic)
+}
+func (m *SummaryValue_Snapshot) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SummaryValue_Snapshot.Merge(m, src)
+}
+func (m *SummaryValue_Snapshot) XXX_Size() int {
+ return xxx_messageInfo_SummaryValue_Snapshot.Size(m)
+}
+func (m *SummaryValue_Snapshot) XXX_DiscardUnknown() {
+ xxx_messageInfo_SummaryValue_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SummaryValue_Snapshot proto.InternalMessageInfo
+
+func (m *SummaryValue_Snapshot) GetCount() *wrappers.Int64Value {
+ if m != nil {
+ return m.Count
+ }
+ return nil
+}
+
+func (m *SummaryValue_Snapshot) GetSum() *wrappers.DoubleValue {
+ if m != nil {
+ return m.Sum
+ }
+ return nil
+}
+
+func (m *SummaryValue_Snapshot) GetPercentileValues() []*SummaryValue_Snapshot_ValueAtPercentile {
+ if m != nil {
+ return m.PercentileValues
+ }
+ return nil
+}
+
+// Represents the value at a given percentile of a distribution.
+type SummaryValue_Snapshot_ValueAtPercentile struct {
+ // The percentile of a distribution. Must be in the interval
+ // (0.0, 100.0].
+ Percentile float64 `protobuf:"fixed64,1,opt,name=percentile,proto3" json:"percentile,omitempty"`
+ // The value at the given percentile of a distribution.
+ Value float64 `protobuf:"fixed64,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) Reset() {
+ *m = SummaryValue_Snapshot_ValueAtPercentile{}
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) String() string { return proto.CompactTextString(m) }
+func (*SummaryValue_Snapshot_ValueAtPercentile) ProtoMessage() {}
+func (*SummaryValue_Snapshot_ValueAtPercentile) Descriptor() ([]byte, []int) {
+ return fileDescriptor_0ee3deb72053811a, []int{7, 0, 0}
+}
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Unmarshal(m, b)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Marshal(b, m, deterministic)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Merge(m, src)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_Size() int {
+ return xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.Size(m)
+}
+func (m *SummaryValue_Snapshot_ValueAtPercentile) XXX_DiscardUnknown() {
+ xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SummaryValue_Snapshot_ValueAtPercentile proto.InternalMessageInfo
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) GetPercentile() float64 {
+ if m != nil {
+ return m.Percentile
+ }
+ return 0
+}
+
+func (m *SummaryValue_Snapshot_ValueAtPercentile) GetValue() float64 {
+ if m != nil {
+ return m.Value
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("opencensus.proto.metrics.v1.MetricDescriptor_Type", MetricDescriptor_Type_name, MetricDescriptor_Type_value)
+ proto.RegisterType((*Metric)(nil), "opencensus.proto.metrics.v1.Metric")
+ proto.RegisterType((*MetricDescriptor)(nil), "opencensus.proto.metrics.v1.MetricDescriptor")
+ proto.RegisterType((*LabelKey)(nil), "opencensus.proto.metrics.v1.LabelKey")
+ proto.RegisterType((*TimeSeries)(nil), "opencensus.proto.metrics.v1.TimeSeries")
+ proto.RegisterType((*LabelValue)(nil), "opencensus.proto.metrics.v1.LabelValue")
+ proto.RegisterType((*Point)(nil), "opencensus.proto.metrics.v1.Point")
+ proto.RegisterType((*DistributionValue)(nil), "opencensus.proto.metrics.v1.DistributionValue")
+ proto.RegisterType((*DistributionValue_BucketOptions)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions")
+ proto.RegisterType((*DistributionValue_BucketOptions_Explicit)(nil), "opencensus.proto.metrics.v1.DistributionValue.BucketOptions.Explicit")
+ proto.RegisterType((*DistributionValue_Bucket)(nil), "opencensus.proto.metrics.v1.DistributionValue.Bucket")
+ proto.RegisterType((*DistributionValue_Exemplar)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar")
+ proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.metrics.v1.DistributionValue.Exemplar.AttachmentsEntry")
+ proto.RegisterType((*SummaryValue)(nil), "opencensus.proto.metrics.v1.SummaryValue")
+ proto.RegisterType((*SummaryValue_Snapshot)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot")
+ proto.RegisterType((*SummaryValue_Snapshot_ValueAtPercentile)(nil), "opencensus.proto.metrics.v1.SummaryValue.Snapshot.ValueAtPercentile")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/metrics/v1/metrics.proto", fileDescriptor_0ee3deb72053811a)
+}
+
+var fileDescriptor_0ee3deb72053811a = []byte{
+ // 1098 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0xdd, 0x6e, 0x1b, 0xc5,
+ 0x17, 0xcf, 0xda, 0x8e, 0xe3, 0x9c, 0x75, 0xdb, 0xf5, 0xa8, 0xed, 0xdf, 0xda, 0xfc, 0x15, 0xc2,
+ 0x22, 0x20, 0x15, 0xca, 0x5a, 0x31, 0xa5, 0xad, 0x2a, 0x54, 0x14, 0xc7, 0x6e, 0x62, 0xc8, 0x87,
+ 0x35, 0xb6, 0x2b, 0xd1, 0x1b, 0x6b, 0xbd, 0x9e, 0x24, 0x4b, 0xbc, 0x1f, 0xdd, 0x99, 0x35, 0xf8,
+ 0x05, 0x78, 0x04, 0xae, 0xb9, 0x45, 0x3c, 0x07, 0x57, 0x3c, 0x01, 0x4f, 0x81, 0x78, 0x03, 0xb4,
+ 0x33, 0xb3, 0x1f, 0x89, 0xc1, 0xd4, 0x45, 0xe2, 0xee, 0x9c, 0x33, 0xe7, 0xfc, 0xfc, 0x3b, 0x9f,
+ 0x5e, 0x78, 0xe4, 0x07, 0xc4, 0xb3, 0x89, 0x47, 0x23, 0xda, 0x08, 0x42, 0x9f, 0xf9, 0x0d, 0x97,
+ 0xb0, 0xd0, 0xb1, 0x69, 0x63, 0xb6, 0x9f, 0x88, 0x26, 0x7f, 0x40, 0x5b, 0x99, 0xab, 0xb0, 0x98,
+ 0xc9, 0xfb, 0x6c, 0x5f, 0x7f, 0xef, 0xd2, 0xf7, 0x2f, 0xa7, 0x44, 0x60, 0x8c, 0xa3, 0x8b, 0x06,
+ 0x73, 0x5c, 0x42, 0x99, 0xe5, 0x06, 0xc2, 0x57, 0xdf, 0xbe, 0xed, 0xf0, 0x6d, 0x68, 0x05, 0x01,
+ 0x09, 0x25, 0x96, 0xfe, 0xc9, 0x02, 0x91, 0x90, 0x50, 0x3f, 0x0a, 0x6d, 0x12, 0x33, 0x49, 0x64,
+ 0xe1, 0x6c, 0xfc, 0xa1, 0x40, 0xf9, 0x94, 0xff, 0x38, 0x7a, 0x0d, 0x35, 0x41, 0x63, 0x34, 0x21,
+ 0xd4, 0x0e, 0x9d, 0x80, 0xf9, 0x61, 0x5d, 0xd9, 0x51, 0x76, 0xd5, 0xe6, 0x9e, 0xb9, 0x84, 0xb1,
+ 0x29, 0xe2, 0xdb, 0x69, 0x10, 0xd6, 0xdc, 0x5b, 0x16, 0x74, 0x04, 0xc0, 0xd3, 0x20, 0xa1, 0x43,
+ 0x68, 0xbd, 0xb0, 0x53, 0xdc, 0x55, 0x9b, 0x1f, 0x2f, 0x05, 0x1d, 0x38, 0x2e, 0xe9, 0x73, 0x77,
+ 0x9c, 0x0b, 0x45, 0x2d, 0xa8, 0x24, 0x19, 0xd4, 0x8b, 0x9c, 0xdb, 0x47, 0x8b, 0x30, 0x69, 0x8e,
+ 0xb3, 0x7d, 0x13, 0x4b, 0x19, 0xa7, 0x71, 0xc6, 0x0f, 0x45, 0xd0, 0x6e, 0x73, 0x46, 0x08, 0x4a,
+ 0x9e, 0xe5, 0x12, 0x9e, 0xf0, 0x26, 0xe6, 0x32, 0xda, 0x01, 0x35, 0x29, 0x85, 0xe3, 0x7b, 0xf5,
+ 0x02, 0x7f, 0xca, 0x9b, 0xe2, 0xa8, 0xc8, 0x73, 0x18, 0xa7, 0xb2, 0x89, 0xb9, 0x8c, 0x5e, 0x42,
+ 0x89, 0xcd, 0x03, 0x52, 0x2f, 0xed, 0x28, 0xbb, 0x77, 0x9b, 0xcd, 0x95, 0x4a, 0x67, 0x0e, 0xe6,
+ 0x01, 0xc1, 0x3c, 0x1e, 0xb5, 0x01, 0xa6, 0xd6, 0x98, 0x4c, 0x47, 0xd7, 0x64, 0x4e, 0xeb, 0xeb,
+ 0xbc, 0x66, 0x1f, 0x2e, 0x45, 0x3b, 0x89, 0xdd, 0xbf, 0x22, 0x73, 0xbc, 0x39, 0x95, 0x12, 0x35,
+ 0x7e, 0x52, 0xa0, 0x14, 0x83, 0xa2, 0x7b, 0xa0, 0x0e, 0xcf, 0xfa, 0xbd, 0xce, 0x61, 0xf7, 0x65,
+ 0xb7, 0xd3, 0xd6, 0xd6, 0x62, 0xc3, 0xd1, 0xc1, 0xf0, 0xa8, 0x33, 0xea, 0x9e, 0x0d, 0x9e, 0x3c,
+ 0xd6, 0x14, 0xa4, 0x41, 0x55, 0x18, 0xda, 0xe7, 0xc3, 0xd6, 0x49, 0x47, 0x2b, 0xa0, 0x87, 0x80,
+ 0xa4, 0xa5, 0xdb, 0x1f, 0xe0, 0x6e, 0x6b, 0x38, 0xe8, 0x9e, 0x9f, 0x69, 0x45, 0x74, 0x1f, 0xb4,
+ 0xc3, 0xe1, 0xe9, 0xf0, 0xe4, 0x60, 0xd0, 0x7d, 0x95, 0xc4, 0x97, 0xd0, 0x03, 0xa8, 0xe5, 0xac,
+ 0x12, 0x64, 0x1d, 0x6d, 0xc1, 0xff, 0xf2, 0xe6, 0x3c, 0x52, 0x19, 0xa9, 0xb0, 0xd1, 0x1f, 0x9e,
+ 0x9e, 0x1e, 0xe0, 0xaf, 0xb5, 0x0d, 0xe3, 0x05, 0x54, 0x92, 0x14, 0x90, 0x06, 0xc5, 0x6b, 0x32,
+ 0x97, 0xed, 0x88, 0xc5, 0x7f, 0xee, 0x86, 0xf1, 0x9b, 0x02, 0x90, 0xcd, 0x0d, 0x3a, 0x84, 0x7b,
+ 0x94, 0x59, 0x21, 0x1b, 0xa5, 0x1b, 0x24, 0xc7, 0x59, 0x37, 0xc5, 0x0a, 0x99, 0xc9, 0x0a, 0xf1,
+ 0x69, 0xe3, 0x1e, 0xf8, 0x2e, 0x0f, 0x49, 0x75, 0xf4, 0x25, 0x54, 0x45, 0x17, 0x66, 0xd6, 0x34,
+ 0x7a, 0xcb, 0xd9, 0xe5, 0x49, 0xbc, 0x8a, 0xfd, 0xb1, 0x3a, 0x4d, 0x65, 0x8a, 0x9e, 0x43, 0x39,
+ 0xf0, 0x1d, 0x8f, 0xd1, 0x7a, 0x91, 0xa3, 0x18, 0x4b, 0x51, 0x7a, 0xb1, 0x2b, 0x96, 0x11, 0xc6,
+ 0x17, 0x00, 0x19, 0x2c, 0xba, 0x0f, 0xeb, 0x9c, 0x8f, 0xac, 0x8f, 0x50, 0xd0, 0x16, 0x6c, 0x5e,
+ 0x59, 0x54, 0x30, 0xe5, 0xf5, 0xa9, 0xe0, 0xca, 0x95, 0x45, 0x79, 0x88, 0xf1, 0x4b, 0x01, 0xd6,
+ 0x39, 0x24, 0x7a, 0x06, 0x9b, 0xab, 0x54, 0x24, 0x73, 0x46, 0xef, 0x83, 0xea, 0x78, 0xec, 0xc9,
+ 0xe3, 0xdc, 0x4f, 0x14, 0x8f, 0xd7, 0x30, 0x70, 0xa3, 0x60, 0xf6, 0x01, 0x54, 0x27, 0x7e, 0x34,
+ 0x9e, 0x12, 0xe9, 0x13, 0x6f, 0x86, 0x72, 0xbc, 0x86, 0x55, 0x61, 0x15, 0x4e, 0x23, 0x40, 0x13,
+ 0x87, 0xb2, 0xd0, 0x19, 0x47, 0x71, 0xe3, 0xa4, 0x6b, 0x89, 0x53, 0x31, 0x97, 0x16, 0xa5, 0x9d,
+ 0x0b, 0xe3, 0x58, 0xc7, 0x6b, 0xb8, 0x36, 0xb9, 0x6d, 0x44, 0x3d, 0xb8, 0x43, 0x23, 0xd7, 0xb5,
+ 0xc2, 0xb9, 0xc4, 0x5e, 0xe7, 0xd8, 0x8f, 0x96, 0x62, 0xf7, 0x45, 0x44, 0x02, 0x5b, 0xa5, 0x39,
+ 0xbd, 0xb5, 0x21, 0x2b, 0x6e, 0xfc, 0x5a, 0x86, 0xda, 0x02, 0x8b, 0xb8, 0x21, 0xb6, 0x1f, 0x79,
+ 0x8c, 0xd7, 0xb3, 0x88, 0x85, 0x12, 0x0f, 0x31, 0x8d, 0x5c, 0x5e, 0x27, 0x05, 0xc7, 0x22, 0x7a,
+ 0x0a, 0x75, 0x1a, 0xb9, 0x23, 0xff, 0x62, 0x44, 0xdf, 0x44, 0x56, 0x48, 0x26, 0xa3, 0x09, 0x99,
+ 0x39, 0x16, 0x9f, 0x68, 0x5e, 0x2a, 0xfc, 0x80, 0x46, 0xee, 0xf9, 0x45, 0x5f, 0xbc, 0xb6, 0x93,
+ 0x47, 0x64, 0xc3, 0xdd, 0x71, 0x64, 0x5f, 0x13, 0x36, 0xf2, 0xf9, 0xb0, 0x53, 0x59, 0xae, 0xcf,
+ 0x57, 0x2b, 0x97, 0xd9, 0xe2, 0x20, 0xe7, 0x02, 0x03, 0xdf, 0x19, 0xe7, 0x55, 0x74, 0x0e, 0x1b,
+ 0xc2, 0x90, 0xdc, 0x9b, 0xcf, 0xde, 0x09, 0x1d, 0x27, 0x28, 0xfa, 0x8f, 0x0a, 0xdc, 0xb9, 0xf1,
+ 0x8b, 0xc8, 0x86, 0x0a, 0xf9, 0x2e, 0x98, 0x3a, 0xb6, 0xc3, 0xe4, 0xec, 0x75, 0xfe, 0x4d, 0x06,
+ 0x66, 0x47, 0x82, 0x1d, 0xaf, 0xe1, 0x14, 0x58, 0x37, 0xa0, 0x92, 0xd8, 0xd1, 0x43, 0x28, 0x8f,
+ 0xfd, 0xc8, 0x9b, 0xd0, 0xba, 0xb2, 0x53, 0xdc, 0x55, 0xb0, 0xd4, 0x5a, 0x65, 0x71, 0xa6, 0x75,
+ 0x0a, 0x65, 0x81, 0xf8, 0x37, 0x3d, 0xec, 0xc7, 0x84, 0x89, 0x1b, 0x4c, 0xad, 0x90, 0x37, 0x52,
+ 0x6d, 0x3e, 0x5d, 0x91, 0x70, 0x47, 0x86, 0xe3, 0x14, 0x48, 0xff, 0xbe, 0x10, 0x33, 0x14, 0xca,
+ 0xcd, 0x65, 0x56, 0x92, 0x65, 0xbe, 0xb1, 0xa5, 0x85, 0x55, 0xb6, 0xf4, 0x1b, 0x50, 0x2d, 0xc6,
+ 0x2c, 0xfb, 0xca, 0x25, 0xd9, 0xad, 0x39, 0x7e, 0x47, 0xd2, 0xe6, 0x41, 0x06, 0xd5, 0xf1, 0x58,
+ 0x38, 0xc7, 0x79, 0x70, 0xfd, 0x05, 0x68, 0xb7, 0x1d, 0xfe, 0xe2, 0x74, 0xa7, 0x19, 0x16, 0x72,
+ 0xe7, 0xea, 0x79, 0xe1, 0x99, 0x62, 0xfc, 0x5e, 0x84, 0x6a, 0x7e, 0xef, 0xd0, 0x7e, 0xbe, 0x09,
+ 0x6a, 0x73, 0x6b, 0x21, 0xe5, 0x6e, 0x7a, 0x6b, 0x92, 0x0e, 0x99, 0xd9, 0x96, 0xa9, 0xcd, 0xff,
+ 0x2f, 0x04, 0xb4, 0xb3, 0xc3, 0x23, 0x76, 0xf0, 0x0c, 0x2a, 0xd4, 0xb3, 0x02, 0x7a, 0xe5, 0x33,
+ 0xf9, 0x0d, 0xd1, 0x7c, 0xeb, 0xbb, 0x60, 0xf6, 0x65, 0x24, 0x4e, 0x31, 0xf4, 0x9f, 0x0b, 0x50,
+ 0x49, 0xcc, 0xff, 0x05, 0xff, 0x37, 0x50, 0x0b, 0x48, 0x68, 0x13, 0x8f, 0x39, 0xc9, 0x99, 0x4d,
+ 0xba, 0xdc, 0x5e, 0x3d, 0x11, 0x93, 0xab, 0x07, 0xac, 0x97, 0x42, 0x62, 0x2d, 0x83, 0x17, 0xff,
+ 0x5c, 0x7a, 0x17, 0x6a, 0x0b, 0x6e, 0x68, 0x1b, 0x20, 0x73, 0x94, 0xc3, 0x9b, 0xb3, 0xdc, 0xec,
+ 0x7a, 0x32, 0xd7, 0xad, 0x19, 0x6c, 0x3b, 0xfe, 0x32, 0x9a, 0xad, 0xaa, 0xf8, 0x2a, 0xa2, 0xbd,
+ 0xf8, 0xa1, 0xa7, 0xbc, 0x6e, 0x5f, 0x3a, 0xec, 0x2a, 0x1a, 0x9b, 0xb6, 0xef, 0x36, 0x44, 0xcc,
+ 0x9e, 0xe3, 0x51, 0x16, 0x46, 0xf1, 0xcc, 0xf1, 0xeb, 0xd8, 0xc8, 0xe0, 0xf6, 0xc4, 0x27, 0xef,
+ 0x25, 0xf1, 0xf6, 0x2e, 0xf3, 0x9f, 0xe0, 0xe3, 0x32, 0x7f, 0xf8, 0xf4, 0xcf, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x8e, 0xfc, 0xd7, 0x46, 0xa8, 0x0b, 0x00, 0x00,
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
new file mode 100644
index 00000000..38faa9fd
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1/resource.pb.go
@@ -0,0 +1,99 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/resource/v1/resource.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Resource information.
+type Resource struct {
+ // Type identifier for the resource.
+ Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
+ // Set of labels that describe the resource.
+ Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Resource) Reset() { *m = Resource{} }
+func (m *Resource) String() string { return proto.CompactTextString(m) }
+func (*Resource) ProtoMessage() {}
+func (*Resource) Descriptor() ([]byte, []int) {
+ return fileDescriptor_584700775a2fc762, []int{0}
+}
+
+func (m *Resource) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Resource.Unmarshal(m, b)
+}
+func (m *Resource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Resource.Marshal(b, m, deterministic)
+}
+func (m *Resource) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Resource.Merge(m, src)
+}
+func (m *Resource) XXX_Size() int {
+ return xxx_messageInfo_Resource.Size(m)
+}
+func (m *Resource) XXX_DiscardUnknown() {
+ xxx_messageInfo_Resource.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Resource proto.InternalMessageInfo
+
+func (m *Resource) GetType() string {
+ if m != nil {
+ return m.Type
+ }
+ return ""
+}
+
+func (m *Resource) GetLabels() map[string]string {
+ if m != nil {
+ return m.Labels
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterType((*Resource)(nil), "opencensus.proto.resource.v1.Resource")
+ proto.RegisterMapType((map[string]string)(nil), "opencensus.proto.resource.v1.Resource.LabelsEntry")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/resource/v1/resource.proto", fileDescriptor_584700775a2fc762)
+}
+
+var fileDescriptor_584700775a2fc762 = []byte{
+ // 234 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0xce, 0x2f, 0x48, 0xcd,
+ 0x4b, 0x4e, 0xcd, 0x2b, 0x2e, 0x2d, 0xd6, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x2f, 0x4a, 0x2d,
+ 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0xd5, 0x2f, 0x33, 0x84, 0xb3, 0xf5, 0xc0, 0x52, 0x42, 0x32, 0x08,
+ 0xc5, 0x10, 0x11, 0x3d, 0xb8, 0x82, 0x32, 0x43, 0xa5, 0xa5, 0x8c, 0x5c, 0x1c, 0x41, 0x50, 0xbe,
+ 0x90, 0x10, 0x17, 0x4b, 0x49, 0x65, 0x41, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x98,
+ 0x2d, 0xe4, 0xc5, 0xc5, 0x96, 0x93, 0x98, 0x94, 0x9a, 0x53, 0x2c, 0xc1, 0xa4, 0xc0, 0xac, 0xc1,
+ 0x6d, 0x64, 0xa4, 0x87, 0xcf, 0x3c, 0x3d, 0x98, 0x59, 0x7a, 0x3e, 0x60, 0x4d, 0xae, 0x79, 0x25,
+ 0x45, 0x95, 0x41, 0x50, 0x13, 0xa4, 0x2c, 0xb9, 0xb8, 0x91, 0x84, 0x85, 0x04, 0xb8, 0x98, 0xb3,
+ 0x53, 0x2b, 0xa1, 0xb6, 0x81, 0x98, 0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x12,
+ 0x4c, 0x60, 0x31, 0x08, 0xc7, 0x8a, 0xc9, 0x82, 0xd1, 0xa9, 0x92, 0x4b, 0x3e, 0x33, 0x1f, 0xaf,
+ 0xd5, 0x4e, 0xbc, 0x30, 0xbb, 0x03, 0x40, 0x52, 0x01, 0x8c, 0x51, 0xae, 0xe9, 0x99, 0x25, 0x19,
+ 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0x10, 0x5d, 0xba, 0x99, 0x79, 0xc5, 0x25, 0x45, 0xa5,
+ 0xb9, 0xa9, 0x79, 0x25, 0x89, 0x25, 0x99, 0xf9, 0x79, 0xfa, 0x08, 0x03, 0x75, 0x21, 0x01, 0x99,
+ 0x9e, 0x9a, 0xa7, 0x9b, 0x8e, 0x12, 0x9e, 0x49, 0x6c, 0x60, 0x19, 0x63, 0x40, 0x00, 0x00, 0x00,
+ 0xff, 0xff, 0x8e, 0x11, 0xaf, 0xda, 0x76, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
new file mode 100644
index 00000000..4de05355
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace.pb.go
@@ -0,0 +1,1543 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/trace/v1/trace.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ v1 "github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1"
+ proto "github.com/golang/protobuf/proto"
+ timestamp "github.com/golang/protobuf/ptypes/timestamp"
+ wrappers "github.com/golang/protobuf/ptypes/wrappers"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Type of span. Can be used to specify additional relationships between spans
+// in addition to a parent/child relationship.
+type Span_SpanKind int32
+
+const (
+ // Unspecified.
+ Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0
+ // Indicates that the span covers server-side handling of an RPC or other
+ // remote network request.
+ Span_SERVER Span_SpanKind = 1
+ // Indicates that the span covers the client-side wrapper around an RPC or
+ // other remote request.
+ Span_CLIENT Span_SpanKind = 2
+)
+
+var Span_SpanKind_name = map[int32]string{
+ 0: "SPAN_KIND_UNSPECIFIED",
+ 1: "SERVER",
+ 2: "CLIENT",
+}
+
+var Span_SpanKind_value = map[string]int32{
+ "SPAN_KIND_UNSPECIFIED": 0,
+ "SERVER": 1,
+ "CLIENT": 2,
+}
+
+func (x Span_SpanKind) String() string {
+ return proto.EnumName(Span_SpanKind_name, int32(x))
+}
+
+func (Span_SpanKind) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
+}
+
+// Indicates whether the message was sent or received.
+type Span_TimeEvent_MessageEvent_Type int32
+
+const (
+ // Unknown event type.
+ Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED Span_TimeEvent_MessageEvent_Type = 0
+ // Indicates a sent message.
+ Span_TimeEvent_MessageEvent_SENT Span_TimeEvent_MessageEvent_Type = 1
+ // Indicates a received message.
+ Span_TimeEvent_MessageEvent_RECEIVED Span_TimeEvent_MessageEvent_Type = 2
+)
+
+var Span_TimeEvent_MessageEvent_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "SENT",
+ 2: "RECEIVED",
+}
+
+var Span_TimeEvent_MessageEvent_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "SENT": 1,
+ "RECEIVED": 2,
+}
+
+func (x Span_TimeEvent_MessageEvent_Type) String() string {
+ return proto.EnumName(Span_TimeEvent_MessageEvent_Type_name, int32(x))
+}
+
+func (Span_TimeEvent_MessageEvent_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1, 0}
+}
+
+// The relationship of the current span relative to the linked span: child,
+// parent, or unspecified.
+type Span_Link_Type int32
+
+const (
+ // The relationship of the two spans is unknown, or known but other
+ // than parent-child.
+ Span_Link_TYPE_UNSPECIFIED Span_Link_Type = 0
+ // The linked span is a child of the current span.
+ Span_Link_CHILD_LINKED_SPAN Span_Link_Type = 1
+ // The linked span is a parent of the current span.
+ Span_Link_PARENT_LINKED_SPAN Span_Link_Type = 2
+)
+
+var Span_Link_Type_name = map[int32]string{
+ 0: "TYPE_UNSPECIFIED",
+ 1: "CHILD_LINKED_SPAN",
+ 2: "PARENT_LINKED_SPAN",
+}
+
+var Span_Link_Type_value = map[string]int32{
+ "TYPE_UNSPECIFIED": 0,
+ "CHILD_LINKED_SPAN": 1,
+ "PARENT_LINKED_SPAN": 2,
+}
+
+func (x Span_Link_Type) String() string {
+ return proto.EnumName(Span_Link_Type_name, int32(x))
+}
+
+func (Span_Link_Type) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 4, 0}
+}
+
+// A span represents a single operation within a trace. Spans can be
+// nested to form a trace tree. Spans may also be linked to other spans
+// from the same or different trace. And form graphs. Often, a trace
+// contains a root span that describes the end-to-end latency, and one
+// or more subspans for its sub-operations. A trace can also contain
+// multiple root spans, or none at all. Spans do not need to be
+// contiguous - there may be gaps or overlaps between spans in a trace.
+//
+// The next id is 17.
+// TODO(bdrutu): Add an example.
+type Span struct {
+ // A unique identifier for a trace. All spans from the same trace share
+ // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes
+ // is considered invalid.
+ //
+ // This field is semantically required. Receiver should generate new
+ // random trace_id if empty or invalid trace_id was received.
+ //
+ // This field is required.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for a span within a trace, assigned when the span
+ // is created. The ID is an 8-byte array. An ID with all zeroes is considered
+ // invalid.
+ //
+ // This field is semantically required. Receiver should generate new
+ // random span_id if empty or invalid span_id was received.
+ //
+ // This field is required.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The Tracestate on the span.
+ Tracestate *Span_Tracestate `protobuf:"bytes,15,opt,name=tracestate,proto3" json:"tracestate,omitempty"`
+ // The `span_id` of this span's parent span. If this is a root span, then this
+ // field must be empty. The ID is an 8-byte array.
+ ParentSpanId []byte `protobuf:"bytes,3,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
+ // A description of the span's operation.
+ //
+ // For example, the name can be a qualified method name or a file name
+ // and a line number where the operation is called. A best practice is to use
+ // the same display name at the same call point in an application.
+ // This makes it easier to correlate spans in different traces.
+ //
+ // This field is semantically required to be set to non-empty string.
+ // When null or empty string received - receiver may use string "name"
+ // as a replacement. There might be smarted algorithms implemented by
+ // receiver to fix the empty span name.
+ //
+ // This field is required.
+ Name *TruncatableString `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"`
+ // Distinguishes between spans generated in a particular context. For example,
+ // two spans with the same name may be distinguished using `CLIENT` (caller)
+ // and `SERVER` (callee) to identify queueing latency associated with the span.
+ Kind Span_SpanKind `protobuf:"varint,14,opt,name=kind,proto3,enum=opencensus.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"`
+ // The start time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution starts. On the server side, this
+ // is the time when the server's application handler starts running.
+ //
+ // This field is semantically required. When not set on receive -
+ // receiver should set it to the value of end_time field if it was
+ // set. Or to the current time if neither was set. It is important to
+ // keep end_time > start_time for consistency.
+ //
+ // This field is required.
+ StartTime *timestamp.Timestamp `protobuf:"bytes,5,opt,name=start_time,json=startTime,proto3" json:"start_time,omitempty"`
+ // The end time of the span. On the client side, this is the time kept by
+ // the local machine where the span execution ends. On the server side, this
+ // is the time when the server application handler stops running.
+ //
+ // This field is semantically required. When not set on receive -
+ // receiver should set it to start_time value. It is important to
+ // keep end_time > start_time for consistency.
+ //
+ // This field is required.
+ EndTime *timestamp.Timestamp `protobuf:"bytes,6,opt,name=end_time,json=endTime,proto3" json:"end_time,omitempty"`
+ // A set of attributes on the span.
+ Attributes *Span_Attributes `protobuf:"bytes,7,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ // A stack trace captured at the start of the span.
+ StackTrace *StackTrace `protobuf:"bytes,8,opt,name=stack_trace,json=stackTrace,proto3" json:"stack_trace,omitempty"`
+ // The included time events.
+ TimeEvents *Span_TimeEvents `protobuf:"bytes,9,opt,name=time_events,json=timeEvents,proto3" json:"time_events,omitempty"`
+ // The included links.
+ Links *Span_Links `protobuf:"bytes,10,opt,name=links,proto3" json:"links,omitempty"`
+ // An optional final status for this span. Semantically when Status
+ // wasn't set it is means span ended without errors and assume
+ // Status.Ok (code = 0).
+ Status *Status `protobuf:"bytes,11,opt,name=status,proto3" json:"status,omitempty"`
+ // An optional resource that is associated with this span. If not set, this span
+ // should be part of a batch that does include the resource information, unless resource
+ // information is unknown.
+ Resource *v1.Resource `protobuf:"bytes,16,opt,name=resource,proto3" json:"resource,omitempty"`
+ // A highly recommended but not required flag that identifies when a
+ // trace crosses a process boundary. True when the parent_span belongs
+ // to the same process as the current span. This flag is most commonly
+ // used to indicate the need to adjust time as clocks in different
+ // processes may not be synchronized.
+ SameProcessAsParentSpan *wrappers.BoolValue `protobuf:"bytes,12,opt,name=same_process_as_parent_span,json=sameProcessAsParentSpan,proto3" json:"same_process_as_parent_span,omitempty"`
+ // An optional number of child spans that were generated while this span
+ // was active. If set, allows an implementation to detect missing child spans.
+ ChildSpanCount *wrappers.UInt32Value `protobuf:"bytes,13,opt,name=child_span_count,json=childSpanCount,proto3" json:"child_span_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span) Reset() { *m = Span{} }
+func (m *Span) String() string { return proto.CompactTextString(m) }
+func (*Span) ProtoMessage() {}
+func (*Span) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0}
+}
+
+func (m *Span) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span.Unmarshal(m, b)
+}
+func (m *Span) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span.Marshal(b, m, deterministic)
+}
+func (m *Span) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span.Merge(m, src)
+}
+func (m *Span) XXX_Size() int {
+ return xxx_messageInfo_Span.Size(m)
+}
+func (m *Span) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span proto.InternalMessageInfo
+
+func (m *Span) GetTraceId() []byte {
+ if m != nil {
+ return m.TraceId
+ }
+ return nil
+}
+
+func (m *Span) GetSpanId() []byte {
+ if m != nil {
+ return m.SpanId
+ }
+ return nil
+}
+
+func (m *Span) GetTracestate() *Span_Tracestate {
+ if m != nil {
+ return m.Tracestate
+ }
+ return nil
+}
+
+func (m *Span) GetParentSpanId() []byte {
+ if m != nil {
+ return m.ParentSpanId
+ }
+ return nil
+}
+
+func (m *Span) GetName() *TruncatableString {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *Span) GetKind() Span_SpanKind {
+ if m != nil {
+ return m.Kind
+ }
+ return Span_SPAN_KIND_UNSPECIFIED
+}
+
+func (m *Span) GetStartTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.StartTime
+ }
+ return nil
+}
+
+func (m *Span) GetEndTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.EndTime
+ }
+ return nil
+}
+
+func (m *Span) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+func (m *Span) GetStackTrace() *StackTrace {
+ if m != nil {
+ return m.StackTrace
+ }
+ return nil
+}
+
+func (m *Span) GetTimeEvents() *Span_TimeEvents {
+ if m != nil {
+ return m.TimeEvents
+ }
+ return nil
+}
+
+func (m *Span) GetLinks() *Span_Links {
+ if m != nil {
+ return m.Links
+ }
+ return nil
+}
+
+func (m *Span) GetStatus() *Status {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *Span) GetResource() *v1.Resource {
+ if m != nil {
+ return m.Resource
+ }
+ return nil
+}
+
+func (m *Span) GetSameProcessAsParentSpan() *wrappers.BoolValue {
+ if m != nil {
+ return m.SameProcessAsParentSpan
+ }
+ return nil
+}
+
+func (m *Span) GetChildSpanCount() *wrappers.UInt32Value {
+ if m != nil {
+ return m.ChildSpanCount
+ }
+ return nil
+}
+
+// This field conveys information about request position in multiple distributed tracing graphs.
+// It is a list of Tracestate.Entry with a maximum of 32 members in the list.
+//
+// See the https://github.com/w3c/distributed-tracing for more details about this field.
+type Span_Tracestate struct {
+ // A list of entries that represent the Tracestate.
+ Entries []*Span_Tracestate_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Tracestate) Reset() { *m = Span_Tracestate{} }
+func (m *Span_Tracestate) String() string { return proto.CompactTextString(m) }
+func (*Span_Tracestate) ProtoMessage() {}
+func (*Span_Tracestate) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 0}
+}
+
+func (m *Span_Tracestate) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Tracestate.Unmarshal(m, b)
+}
+func (m *Span_Tracestate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Tracestate.Marshal(b, m, deterministic)
+}
+func (m *Span_Tracestate) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Tracestate.Merge(m, src)
+}
+func (m *Span_Tracestate) XXX_Size() int {
+ return xxx_messageInfo_Span_Tracestate.Size(m)
+}
+func (m *Span_Tracestate) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Tracestate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Tracestate proto.InternalMessageInfo
+
+func (m *Span_Tracestate) GetEntries() []*Span_Tracestate_Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type Span_Tracestate_Entry struct {
+ // The key must begin with a lowercase letter, and can only contain
+ // lowercase letters 'a'-'z', digits '0'-'9', underscores '_', dashes
+ // '-', asterisks '*', and forward slashes '/'.
+ Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+ // The value is opaque string up to 256 characters printable ASCII
+ // RFC0020 characters (i.e., the range 0x20 to 0x7E) except ',' and '='.
+ // Note that this also excludes tabs, newlines, carriage returns, etc.
+ Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Tracestate_Entry) Reset() { *m = Span_Tracestate_Entry{} }
+func (m *Span_Tracestate_Entry) String() string { return proto.CompactTextString(m) }
+func (*Span_Tracestate_Entry) ProtoMessage() {}
+func (*Span_Tracestate_Entry) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 0, 0}
+}
+
+func (m *Span_Tracestate_Entry) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Tracestate_Entry.Unmarshal(m, b)
+}
+func (m *Span_Tracestate_Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Tracestate_Entry.Marshal(b, m, deterministic)
+}
+func (m *Span_Tracestate_Entry) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Tracestate_Entry.Merge(m, src)
+}
+func (m *Span_Tracestate_Entry) XXX_Size() int {
+ return xxx_messageInfo_Span_Tracestate_Entry.Size(m)
+}
+func (m *Span_Tracestate_Entry) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Tracestate_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Tracestate_Entry proto.InternalMessageInfo
+
+func (m *Span_Tracestate_Entry) GetKey() string {
+ if m != nil {
+ return m.Key
+ }
+ return ""
+}
+
+func (m *Span_Tracestate_Entry) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+// A set of attributes, each with a key and a value.
+type Span_Attributes struct {
+ // The set of attributes. The value can be a string, an integer, a double
+ // or the Boolean values `true` or `false`. Note, global attributes like
+ // server name can be set as tags using resource API. Examples of attributes:
+ //
+ // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36"
+ // "/http/server_latency": 300
+ // "abc.com/myattribute": true
+ // "abc.com/score": 10.239
+ AttributeMap map[string]*AttributeValue `protobuf:"bytes,1,rep,name=attribute_map,json=attributeMap,proto3" json:"attribute_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+ // The number of attributes that were discarded. Attributes can be discarded
+ // because their keys are too long or because there are too many attributes.
+ // If this value is 0, then no attributes were dropped.
+ DroppedAttributesCount int32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Attributes) Reset() { *m = Span_Attributes{} }
+func (m *Span_Attributes) String() string { return proto.CompactTextString(m) }
+func (*Span_Attributes) ProtoMessage() {}
+func (*Span_Attributes) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 1}
+}
+
+func (m *Span_Attributes) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Attributes.Unmarshal(m, b)
+}
+func (m *Span_Attributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Attributes.Marshal(b, m, deterministic)
+}
+func (m *Span_Attributes) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Attributes.Merge(m, src)
+}
+func (m *Span_Attributes) XXX_Size() int {
+ return xxx_messageInfo_Span_Attributes.Size(m)
+}
+func (m *Span_Attributes) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Attributes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Attributes proto.InternalMessageInfo
+
+func (m *Span_Attributes) GetAttributeMap() map[string]*AttributeValue {
+ if m != nil {
+ return m.AttributeMap
+ }
+ return nil
+}
+
+func (m *Span_Attributes) GetDroppedAttributesCount() int32 {
+ if m != nil {
+ return m.DroppedAttributesCount
+ }
+ return 0
+}
+
+// A time-stamped annotation or message event in the Span.
+type Span_TimeEvent struct {
+ // The time the event occurred.
+ Time *timestamp.Timestamp `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"`
+ // A `TimeEvent` can contain either an `Annotation` object or a
+ // `MessageEvent` object, but not both.
+ //
+ // Types that are valid to be assigned to Value:
+ // *Span_TimeEvent_Annotation_
+ // *Span_TimeEvent_MessageEvent_
+ Value isSpan_TimeEvent_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent) Reset() { *m = Span_TimeEvent{} }
+func (m *Span_TimeEvent) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent) ProtoMessage() {}
+func (*Span_TimeEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 2}
+}
+
+func (m *Span_TimeEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent.Merge(m, src)
+}
+func (m *Span_TimeEvent) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent.Size(m)
+}
+func (m *Span_TimeEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent proto.InternalMessageInfo
+
+func (m *Span_TimeEvent) GetTime() *timestamp.Timestamp {
+ if m != nil {
+ return m.Time
+ }
+ return nil
+}
+
+type isSpan_TimeEvent_Value interface {
+ isSpan_TimeEvent_Value()
+}
+
+type Span_TimeEvent_Annotation_ struct {
+ Annotation *Span_TimeEvent_Annotation `protobuf:"bytes,2,opt,name=annotation,proto3,oneof"`
+}
+
+type Span_TimeEvent_MessageEvent_ struct {
+ MessageEvent *Span_TimeEvent_MessageEvent `protobuf:"bytes,3,opt,name=message_event,json=messageEvent,proto3,oneof"`
+}
+
+func (*Span_TimeEvent_Annotation_) isSpan_TimeEvent_Value() {}
+
+func (*Span_TimeEvent_MessageEvent_) isSpan_TimeEvent_Value() {}
+
+func (m *Span_TimeEvent) GetValue() isSpan_TimeEvent_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetAnnotation() *Span_TimeEvent_Annotation {
+ if x, ok := m.GetValue().(*Span_TimeEvent_Annotation_); ok {
+ return x.Annotation
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent) GetMessageEvent() *Span_TimeEvent_MessageEvent {
+ if x, ok := m.GetValue().(*Span_TimeEvent_MessageEvent_); ok {
+ return x.MessageEvent
+ }
+ return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Span_TimeEvent) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*Span_TimeEvent_Annotation_)(nil),
+ (*Span_TimeEvent_MessageEvent_)(nil),
+ }
+}
+
+// A text annotation with a set of attributes.
+type Span_TimeEvent_Annotation struct {
+ // A user-supplied message describing the event.
+ Description *TruncatableString `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"`
+ // A set of attributes on the annotation.
+ Attributes *Span_Attributes `protobuf:"bytes,2,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_Annotation) Reset() { *m = Span_TimeEvent_Annotation{} }
+func (m *Span_TimeEvent_Annotation) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_Annotation) ProtoMessage() {}
+func (*Span_TimeEvent_Annotation) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 0}
+}
+
+func (m *Span_TimeEvent_Annotation) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_Annotation.Merge(m, src)
+}
+func (m *Span_TimeEvent_Annotation) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_Annotation.Size(m)
+}
+func (m *Span_TimeEvent_Annotation) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_Annotation proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_Annotation) GetDescription() *TruncatableString {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *Span_TimeEvent_Annotation) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// An event describing a message sent/received between Spans.
+type Span_TimeEvent_MessageEvent struct {
+ // The type of MessageEvent. Indicates whether the message was sent or
+ // received.
+ Type Span_TimeEvent_MessageEvent_Type `protobuf:"varint,1,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type" json:"type,omitempty"`
+ // An identifier for the MessageEvent's message that can be used to match
+ // SENT and RECEIVED MessageEvents. For example, this field could
+ // represent a sequence ID for a streaming RPC. It is recommended to be
+ // unique within a Span.
+ Id uint64 `protobuf:"varint,2,opt,name=id,proto3" json:"id,omitempty"`
+ // The number of uncompressed bytes sent or received.
+ UncompressedSize uint64 `protobuf:"varint,3,opt,name=uncompressed_size,json=uncompressedSize,proto3" json:"uncompressed_size,omitempty"`
+ // The number of compressed bytes sent or received. If zero, assumed to
+ // be the same size as uncompressed.
+ CompressedSize uint64 `protobuf:"varint,4,opt,name=compressed_size,json=compressedSize,proto3" json:"compressed_size,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvent_MessageEvent) Reset() { *m = Span_TimeEvent_MessageEvent{} }
+func (m *Span_TimeEvent_MessageEvent) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvent_MessageEvent) ProtoMessage() {}
+func (*Span_TimeEvent_MessageEvent) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 2, 1}
+}
+
+func (m *Span_TimeEvent_MessageEvent) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Unmarshal(m, b)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvent_MessageEvent.Merge(m, src)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvent_MessageEvent.Size(m)
+}
+func (m *Span_TimeEvent_MessageEvent) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvent_MessageEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvent_MessageEvent proto.InternalMessageInfo
+
+func (m *Span_TimeEvent_MessageEvent) GetType() Span_TimeEvent_MessageEvent_Type {
+ if m != nil {
+ return m.Type
+ }
+ return Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetId() uint64 {
+ if m != nil {
+ return m.Id
+ }
+ return 0
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetUncompressedSize() uint64 {
+ if m != nil {
+ return m.UncompressedSize
+ }
+ return 0
+}
+
+func (m *Span_TimeEvent_MessageEvent) GetCompressedSize() uint64 {
+ if m != nil {
+ return m.CompressedSize
+ }
+ return 0
+}
+
+// A collection of `TimeEvent`s. A `TimeEvent` is a time-stamped annotation
+// on the span, consisting of either user-supplied key-value pairs, or
+// details of a message sent/received between Spans.
+type Span_TimeEvents struct {
+ // A collection of `TimeEvent`s.
+ TimeEvent []*Span_TimeEvent `protobuf:"bytes,1,rep,name=time_event,json=timeEvent,proto3" json:"time_event,omitempty"`
+ // The number of dropped annotations in all the included time events.
+ // If the value is 0, then no annotations were dropped.
+ DroppedAnnotationsCount int32 `protobuf:"varint,2,opt,name=dropped_annotations_count,json=droppedAnnotationsCount,proto3" json:"dropped_annotations_count,omitempty"`
+ // The number of dropped message events in all the included time events.
+ // If the value is 0, then no message events were dropped.
+ DroppedMessageEventsCount int32 `protobuf:"varint,3,opt,name=dropped_message_events_count,json=droppedMessageEventsCount,proto3" json:"dropped_message_events_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_TimeEvents) Reset() { *m = Span_TimeEvents{} }
+func (m *Span_TimeEvents) String() string { return proto.CompactTextString(m) }
+func (*Span_TimeEvents) ProtoMessage() {}
+func (*Span_TimeEvents) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 3}
+}
+
+func (m *Span_TimeEvents) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_TimeEvents.Unmarshal(m, b)
+}
+func (m *Span_TimeEvents) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_TimeEvents.Marshal(b, m, deterministic)
+}
+func (m *Span_TimeEvents) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_TimeEvents.Merge(m, src)
+}
+func (m *Span_TimeEvents) XXX_Size() int {
+ return xxx_messageInfo_Span_TimeEvents.Size(m)
+}
+func (m *Span_TimeEvents) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_TimeEvents.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_TimeEvents proto.InternalMessageInfo
+
+func (m *Span_TimeEvents) GetTimeEvent() []*Span_TimeEvent {
+ if m != nil {
+ return m.TimeEvent
+ }
+ return nil
+}
+
+func (m *Span_TimeEvents) GetDroppedAnnotationsCount() int32 {
+ if m != nil {
+ return m.DroppedAnnotationsCount
+ }
+ return 0
+}
+
+func (m *Span_TimeEvents) GetDroppedMessageEventsCount() int32 {
+ if m != nil {
+ return m.DroppedMessageEventsCount
+ }
+ return 0
+}
+
+// A pointer from the current span to another span in the same trace or in a
+// different trace. For example, this can be used in batching operations,
+// where a single batch handler processes multiple requests from different
+// traces or when the handler receives a request from a different project.
+type Span_Link struct {
+ // A unique identifier of a trace that this linked span is part of. The ID is a
+ // 16-byte array.
+ TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
+ // A unique identifier for the linked span. The ID is an 8-byte array.
+ SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
+ // The relationship of the current span relative to the linked span.
+ Type Span_Link_Type `protobuf:"varint,3,opt,name=type,proto3,enum=opencensus.proto.trace.v1.Span_Link_Type" json:"type,omitempty"`
+ // A set of attributes on the link.
+ Attributes *Span_Attributes `protobuf:"bytes,4,opt,name=attributes,proto3" json:"attributes,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Link) Reset() { *m = Span_Link{} }
+func (m *Span_Link) String() string { return proto.CompactTextString(m) }
+func (*Span_Link) ProtoMessage() {}
+func (*Span_Link) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 4}
+}
+
+func (m *Span_Link) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Link.Unmarshal(m, b)
+}
+func (m *Span_Link) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Link.Marshal(b, m, deterministic)
+}
+func (m *Span_Link) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Link.Merge(m, src)
+}
+func (m *Span_Link) XXX_Size() int {
+ return xxx_messageInfo_Span_Link.Size(m)
+}
+func (m *Span_Link) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Link.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Link proto.InternalMessageInfo
+
+func (m *Span_Link) GetTraceId() []byte {
+ if m != nil {
+ return m.TraceId
+ }
+ return nil
+}
+
+func (m *Span_Link) GetSpanId() []byte {
+ if m != nil {
+ return m.SpanId
+ }
+ return nil
+}
+
+func (m *Span_Link) GetType() Span_Link_Type {
+ if m != nil {
+ return m.Type
+ }
+ return Span_Link_TYPE_UNSPECIFIED
+}
+
+func (m *Span_Link) GetAttributes() *Span_Attributes {
+ if m != nil {
+ return m.Attributes
+ }
+ return nil
+}
+
+// A collection of links, which are references from this span to a span
+// in the same or different trace.
+type Span_Links struct {
+ // A collection of links.
+ Link []*Span_Link `protobuf:"bytes,1,rep,name=link,proto3" json:"link,omitempty"`
+ // The number of dropped links after the maximum size was enforced. If
+ // this value is 0, then no links were dropped.
+ DroppedLinksCount int32 `protobuf:"varint,2,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Span_Links) Reset() { *m = Span_Links{} }
+func (m *Span_Links) String() string { return proto.CompactTextString(m) }
+func (*Span_Links) ProtoMessage() {}
+func (*Span_Links) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{0, 5}
+}
+
+func (m *Span_Links) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Span_Links.Unmarshal(m, b)
+}
+func (m *Span_Links) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Span_Links.Marshal(b, m, deterministic)
+}
+func (m *Span_Links) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Span_Links.Merge(m, src)
+}
+func (m *Span_Links) XXX_Size() int {
+ return xxx_messageInfo_Span_Links.Size(m)
+}
+func (m *Span_Links) XXX_DiscardUnknown() {
+ xxx_messageInfo_Span_Links.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Span_Links proto.InternalMessageInfo
+
+func (m *Span_Links) GetLink() []*Span_Link {
+ if m != nil {
+ return m.Link
+ }
+ return nil
+}
+
+func (m *Span_Links) GetDroppedLinksCount() int32 {
+ if m != nil {
+ return m.DroppedLinksCount
+ }
+ return 0
+}
+
+// The `Status` type defines a logical error model that is suitable for different
+// programming environments, including REST APIs and RPC APIs. This proto's fields
+// are a subset of those of
+// [google.rpc.Status](https://github.com/googleapis/googleapis/blob/master/google/rpc/status.proto),
+// which is used by [gRPC](https://github.com/grpc).
+type Status struct {
+ // The status code. This is optional field. It is safe to assume 0 (OK)
+ // when not set.
+ Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
+ // A developer-facing error message, which should be in English.
+ Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Status) Reset() { *m = Status{} }
+func (m *Status) String() string { return proto.CompactTextString(m) }
+func (*Status) ProtoMessage() {}
+func (*Status) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{1}
+}
+
+func (m *Status) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Status.Unmarshal(m, b)
+}
+func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Status.Marshal(b, m, deterministic)
+}
+func (m *Status) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Status.Merge(m, src)
+}
+func (m *Status) XXX_Size() int {
+ return xxx_messageInfo_Status.Size(m)
+}
+func (m *Status) XXX_DiscardUnknown() {
+ xxx_messageInfo_Status.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Status proto.InternalMessageInfo
+
+func (m *Status) GetCode() int32 {
+ if m != nil {
+ return m.Code
+ }
+ return 0
+}
+
+func (m *Status) GetMessage() string {
+ if m != nil {
+ return m.Message
+ }
+ return ""
+}
+
+// The value of an Attribute.
+type AttributeValue struct {
+ // The type of the value.
+ //
+ // Types that are valid to be assigned to Value:
+ // *AttributeValue_StringValue
+ // *AttributeValue_IntValue
+ // *AttributeValue_BoolValue
+ // *AttributeValue_DoubleValue
+ Value isAttributeValue_Value `protobuf_oneof:"value"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *AttributeValue) Reset() { *m = AttributeValue{} }
+func (m *AttributeValue) String() string { return proto.CompactTextString(m) }
+func (*AttributeValue) ProtoMessage() {}
+func (*AttributeValue) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{2}
+}
+
+func (m *AttributeValue) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_AttributeValue.Unmarshal(m, b)
+}
+func (m *AttributeValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_AttributeValue.Marshal(b, m, deterministic)
+}
+func (m *AttributeValue) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_AttributeValue.Merge(m, src)
+}
+func (m *AttributeValue) XXX_Size() int {
+ return xxx_messageInfo_AttributeValue.Size(m)
+}
+func (m *AttributeValue) XXX_DiscardUnknown() {
+ xxx_messageInfo_AttributeValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AttributeValue proto.InternalMessageInfo
+
+type isAttributeValue_Value interface {
+ isAttributeValue_Value()
+}
+
+type AttributeValue_StringValue struct {
+ StringValue *TruncatableString `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"`
+}
+
+type AttributeValue_IntValue struct {
+ IntValue int64 `protobuf:"varint,2,opt,name=int_value,json=intValue,proto3,oneof"`
+}
+
+type AttributeValue_BoolValue struct {
+ BoolValue bool `protobuf:"varint,3,opt,name=bool_value,json=boolValue,proto3,oneof"`
+}
+
+type AttributeValue_DoubleValue struct {
+ DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"`
+}
+
+func (*AttributeValue_StringValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_IntValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_BoolValue) isAttributeValue_Value() {}
+
+func (*AttributeValue_DoubleValue) isAttributeValue_Value() {}
+
+func (m *AttributeValue) GetValue() isAttributeValue_Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *AttributeValue) GetStringValue() *TruncatableString {
+ if x, ok := m.GetValue().(*AttributeValue_StringValue); ok {
+ return x.StringValue
+ }
+ return nil
+}
+
+func (m *AttributeValue) GetIntValue() int64 {
+ if x, ok := m.GetValue().(*AttributeValue_IntValue); ok {
+ return x.IntValue
+ }
+ return 0
+}
+
+func (m *AttributeValue) GetBoolValue() bool {
+ if x, ok := m.GetValue().(*AttributeValue_BoolValue); ok {
+ return x.BoolValue
+ }
+ return false
+}
+
+func (m *AttributeValue) GetDoubleValue() float64 {
+ if x, ok := m.GetValue().(*AttributeValue_DoubleValue); ok {
+ return x.DoubleValue
+ }
+ return 0
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*AttributeValue) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*AttributeValue_StringValue)(nil),
+ (*AttributeValue_IntValue)(nil),
+ (*AttributeValue_BoolValue)(nil),
+ (*AttributeValue_DoubleValue)(nil),
+ }
+}
+
+// The call stack which originated this span.
+type StackTrace struct {
+ // Stack frames in this stack trace.
+ StackFrames *StackTrace_StackFrames `protobuf:"bytes,1,opt,name=stack_frames,json=stackFrames,proto3" json:"stack_frames,omitempty"`
+ // The hash ID is used to conserve network bandwidth for duplicate
+ // stack traces within a single trace.
+ //
+ // Often multiple spans will have identical stack traces.
+ // The first occurrence of a stack trace should contain both
+ // `stack_frames` and a value in `stack_trace_hash_id`.
+ //
+ // Subsequent spans within the same request can refer
+ // to that stack trace by setting only `stack_trace_hash_id`.
+ //
+ // TODO: describe how to deal with the case where stack_trace_hash_id is
+ // zero because it was not set.
+ StackTraceHashId uint64 `protobuf:"varint,2,opt,name=stack_trace_hash_id,json=stackTraceHashId,proto3" json:"stack_trace_hash_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace) Reset() { *m = StackTrace{} }
+func (m *StackTrace) String() string { return proto.CompactTextString(m) }
+func (*StackTrace) ProtoMessage() {}
+func (*StackTrace) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{3}
+}
+
+func (m *StackTrace) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace.Unmarshal(m, b)
+}
+func (m *StackTrace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace.Marshal(b, m, deterministic)
+}
+func (m *StackTrace) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace.Merge(m, src)
+}
+func (m *StackTrace) XXX_Size() int {
+ return xxx_messageInfo_StackTrace.Size(m)
+}
+func (m *StackTrace) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace proto.InternalMessageInfo
+
+func (m *StackTrace) GetStackFrames() *StackTrace_StackFrames {
+ if m != nil {
+ return m.StackFrames
+ }
+ return nil
+}
+
+func (m *StackTrace) GetStackTraceHashId() uint64 {
+ if m != nil {
+ return m.StackTraceHashId
+ }
+ return 0
+}
+
+// A single stack frame in a stack trace.
+type StackTrace_StackFrame struct {
+ // The fully-qualified name that uniquely identifies the function or
+ // method that is active in this frame.
+ FunctionName *TruncatableString `protobuf:"bytes,1,opt,name=function_name,json=functionName,proto3" json:"function_name,omitempty"`
+ // An un-mangled function name, if `function_name` is
+ // [mangled](http://www.avabodh.com/cxxin/namemangling.html). The name can
+ // be fully qualified.
+ OriginalFunctionName *TruncatableString `protobuf:"bytes,2,opt,name=original_function_name,json=originalFunctionName,proto3" json:"original_function_name,omitempty"`
+ // The name of the source file where the function call appears.
+ FileName *TruncatableString `protobuf:"bytes,3,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"`
+ // The line number in `file_name` where the function call appears.
+ LineNumber int64 `protobuf:"varint,4,opt,name=line_number,json=lineNumber,proto3" json:"line_number,omitempty"`
+ // The column number where the function call appears, if available.
+ // This is important in JavaScript because of its anonymous functions.
+ ColumnNumber int64 `protobuf:"varint,5,opt,name=column_number,json=columnNumber,proto3" json:"column_number,omitempty"`
+ // The binary module from where the code was loaded.
+ LoadModule *Module `protobuf:"bytes,6,opt,name=load_module,json=loadModule,proto3" json:"load_module,omitempty"`
+ // The version of the deployed source code.
+ SourceVersion *TruncatableString `protobuf:"bytes,7,opt,name=source_version,json=sourceVersion,proto3" json:"source_version,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace_StackFrame) Reset() { *m = StackTrace_StackFrame{} }
+func (m *StackTrace_StackFrame) String() string { return proto.CompactTextString(m) }
+func (*StackTrace_StackFrame) ProtoMessage() {}
+func (*StackTrace_StackFrame) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{3, 0}
+}
+
+func (m *StackTrace_StackFrame) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace_StackFrame.Unmarshal(m, b)
+}
+func (m *StackTrace_StackFrame) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace_StackFrame.Marshal(b, m, deterministic)
+}
+func (m *StackTrace_StackFrame) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace_StackFrame.Merge(m, src)
+}
+func (m *StackTrace_StackFrame) XXX_Size() int {
+ return xxx_messageInfo_StackTrace_StackFrame.Size(m)
+}
+func (m *StackTrace_StackFrame) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace_StackFrame.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace_StackFrame proto.InternalMessageInfo
+
+func (m *StackTrace_StackFrame) GetFunctionName() *TruncatableString {
+ if m != nil {
+ return m.FunctionName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetOriginalFunctionName() *TruncatableString {
+ if m != nil {
+ return m.OriginalFunctionName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetFileName() *TruncatableString {
+ if m != nil {
+ return m.FileName
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetLineNumber() int64 {
+ if m != nil {
+ return m.LineNumber
+ }
+ return 0
+}
+
+func (m *StackTrace_StackFrame) GetColumnNumber() int64 {
+ if m != nil {
+ return m.ColumnNumber
+ }
+ return 0
+}
+
+func (m *StackTrace_StackFrame) GetLoadModule() *Module {
+ if m != nil {
+ return m.LoadModule
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrame) GetSourceVersion() *TruncatableString {
+ if m != nil {
+ return m.SourceVersion
+ }
+ return nil
+}
+
+// A collection of stack frames, which can be truncated.
+type StackTrace_StackFrames struct {
+ // Stack frames in this call stack.
+ Frame []*StackTrace_StackFrame `protobuf:"bytes,1,rep,name=frame,proto3" json:"frame,omitempty"`
+ // The number of stack frames that were dropped because there
+ // were too many stack frames.
+ // If this value is 0, then no stack frames were dropped.
+ DroppedFramesCount int32 `protobuf:"varint,2,opt,name=dropped_frames_count,json=droppedFramesCount,proto3" json:"dropped_frames_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *StackTrace_StackFrames) Reset() { *m = StackTrace_StackFrames{} }
+func (m *StackTrace_StackFrames) String() string { return proto.CompactTextString(m) }
+func (*StackTrace_StackFrames) ProtoMessage() {}
+func (*StackTrace_StackFrames) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{3, 1}
+}
+
+func (m *StackTrace_StackFrames) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_StackTrace_StackFrames.Unmarshal(m, b)
+}
+func (m *StackTrace_StackFrames) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_StackTrace_StackFrames.Marshal(b, m, deterministic)
+}
+func (m *StackTrace_StackFrames) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_StackTrace_StackFrames.Merge(m, src)
+}
+func (m *StackTrace_StackFrames) XXX_Size() int {
+ return xxx_messageInfo_StackTrace_StackFrames.Size(m)
+}
+func (m *StackTrace_StackFrames) XXX_DiscardUnknown() {
+ xxx_messageInfo_StackTrace_StackFrames.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StackTrace_StackFrames proto.InternalMessageInfo
+
+func (m *StackTrace_StackFrames) GetFrame() []*StackTrace_StackFrame {
+ if m != nil {
+ return m.Frame
+ }
+ return nil
+}
+
+func (m *StackTrace_StackFrames) GetDroppedFramesCount() int32 {
+ if m != nil {
+ return m.DroppedFramesCount
+ }
+ return 0
+}
+
+// A description of a binary module.
+type Module struct {
+ // TODO: document the meaning of this field.
+ // For example: main binary, kernel modules, and dynamic libraries
+ // such as libc.so, sharedlib.so.
+ Module *TruncatableString `protobuf:"bytes,1,opt,name=module,proto3" json:"module,omitempty"`
+ // A unique identifier for the module, usually a hash of its
+ // contents.
+ BuildId *TruncatableString `protobuf:"bytes,2,opt,name=build_id,json=buildId,proto3" json:"build_id,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *Module) Reset() { *m = Module{} }
+func (m *Module) String() string { return proto.CompactTextString(m) }
+func (*Module) ProtoMessage() {}
+func (*Module) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{4}
+}
+
+func (m *Module) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_Module.Unmarshal(m, b)
+}
+func (m *Module) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_Module.Marshal(b, m, deterministic)
+}
+func (m *Module) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_Module.Merge(m, src)
+}
+func (m *Module) XXX_Size() int {
+ return xxx_messageInfo_Module.Size(m)
+}
+func (m *Module) XXX_DiscardUnknown() {
+ xxx_messageInfo_Module.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Module proto.InternalMessageInfo
+
+func (m *Module) GetModule() *TruncatableString {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+func (m *Module) GetBuildId() *TruncatableString {
+ if m != nil {
+ return m.BuildId
+ }
+ return nil
+}
+
+// A string that might be shortened to a specified length.
+type TruncatableString struct {
+ // The shortened string. For example, if the original string was 500 bytes long and
+ // the limit of the string was 128 bytes, then this value contains the first 128
+ // bytes of the 500-byte string. Note that truncation always happens on a
+ // character boundary, to ensure that a truncated string is still valid UTF-8.
+ // Because it may contain multi-byte characters, the size of the truncated string
+ // may be less than the truncation limit.
+ Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+ // The number of bytes removed from the original string. If this
+ // value is 0, then the string was not shortened.
+ TruncatedByteCount int32 `protobuf:"varint,2,opt,name=truncated_byte_count,json=truncatedByteCount,proto3" json:"truncated_byte_count,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TruncatableString) Reset() { *m = TruncatableString{} }
+func (m *TruncatableString) String() string { return proto.CompactTextString(m) }
+func (*TruncatableString) ProtoMessage() {}
+func (*TruncatableString) Descriptor() ([]byte, []int) {
+ return fileDescriptor_8ea38bbb821bf584, []int{5}
+}
+
+func (m *TruncatableString) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TruncatableString.Unmarshal(m, b)
+}
+func (m *TruncatableString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TruncatableString.Marshal(b, m, deterministic)
+}
+func (m *TruncatableString) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TruncatableString.Merge(m, src)
+}
+func (m *TruncatableString) XXX_Size() int {
+ return xxx_messageInfo_TruncatableString.Size(m)
+}
+func (m *TruncatableString) XXX_DiscardUnknown() {
+ xxx_messageInfo_TruncatableString.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TruncatableString proto.InternalMessageInfo
+
+func (m *TruncatableString) GetValue() string {
+ if m != nil {
+ return m.Value
+ }
+ return ""
+}
+
+func (m *TruncatableString) GetTruncatedByteCount() int32 {
+ if m != nil {
+ return m.TruncatedByteCount
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("opencensus.proto.trace.v1.Span_SpanKind", Span_SpanKind_name, Span_SpanKind_value)
+ proto.RegisterEnum("opencensus.proto.trace.v1.Span_TimeEvent_MessageEvent_Type", Span_TimeEvent_MessageEvent_Type_name, Span_TimeEvent_MessageEvent_Type_value)
+ proto.RegisterEnum("opencensus.proto.trace.v1.Span_Link_Type", Span_Link_Type_name, Span_Link_Type_value)
+ proto.RegisterType((*Span)(nil), "opencensus.proto.trace.v1.Span")
+ proto.RegisterType((*Span_Tracestate)(nil), "opencensus.proto.trace.v1.Span.Tracestate")
+ proto.RegisterType((*Span_Tracestate_Entry)(nil), "opencensus.proto.trace.v1.Span.Tracestate.Entry")
+ proto.RegisterType((*Span_Attributes)(nil), "opencensus.proto.trace.v1.Span.Attributes")
+ proto.RegisterMapType((map[string]*AttributeValue)(nil), "opencensus.proto.trace.v1.Span.Attributes.AttributeMapEntry")
+ proto.RegisterType((*Span_TimeEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent")
+ proto.RegisterType((*Span_TimeEvent_Annotation)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.Annotation")
+ proto.RegisterType((*Span_TimeEvent_MessageEvent)(nil), "opencensus.proto.trace.v1.Span.TimeEvent.MessageEvent")
+ proto.RegisterType((*Span_TimeEvents)(nil), "opencensus.proto.trace.v1.Span.TimeEvents")
+ proto.RegisterType((*Span_Link)(nil), "opencensus.proto.trace.v1.Span.Link")
+ proto.RegisterType((*Span_Links)(nil), "opencensus.proto.trace.v1.Span.Links")
+ proto.RegisterType((*Status)(nil), "opencensus.proto.trace.v1.Status")
+ proto.RegisterType((*AttributeValue)(nil), "opencensus.proto.trace.v1.AttributeValue")
+ proto.RegisterType((*StackTrace)(nil), "opencensus.proto.trace.v1.StackTrace")
+ proto.RegisterType((*StackTrace_StackFrame)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrame")
+ proto.RegisterType((*StackTrace_StackFrames)(nil), "opencensus.proto.trace.v1.StackTrace.StackFrames")
+ proto.RegisterType((*Module)(nil), "opencensus.proto.trace.v1.Module")
+ proto.RegisterType((*TruncatableString)(nil), "opencensus.proto.trace.v1.TruncatableString")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/trace/v1/trace.proto", fileDescriptor_8ea38bbb821bf584)
+}
+
+var fileDescriptor_8ea38bbb821bf584 = []byte{
+ // 1557 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0xeb, 0x52, 0x1b, 0x47,
+ 0x16, 0x66, 0x74, 0xd7, 0x91, 0x90, 0x45, 0x1b, 0xdb, 0x83, 0xd6, 0xbb, 0x66, 0x65, 0x7b, 0x17,
+ 0xaf, 0x17, 0x61, 0xb0, 0xd7, 0xe5, 0x6b, 0x79, 0x11, 0x88, 0x48, 0x06, 0x2b, 0x72, 0x4b, 0xa6,
+ 0x72, 0xa9, 0xd4, 0xd4, 0x48, 0xd3, 0x88, 0x09, 0x52, 0xcf, 0x64, 0xa6, 0x87, 0x14, 0x7e, 0x81,
+ 0x54, 0x2a, 0xff, 0x52, 0x95, 0xca, 0x0b, 0xe4, 0x47, 0x5e, 0x24, 0x0f, 0x90, 0xca, 0x73, 0xe4,
+ 0x09, 0xf2, 0x27, 0xd5, 0xdd, 0x73, 0x13, 0xd8, 0xa0, 0xc8, 0x7f, 0xa8, 0x9e, 0xee, 0xf3, 0x7d,
+ 0x7d, 0x4e, 0x9f, 0x2b, 0x82, 0xdb, 0x96, 0x4d, 0xe8, 0x80, 0x50, 0xd7, 0x73, 0xd7, 0x6c, 0xc7,
+ 0x62, 0xd6, 0x1a, 0x73, 0xf4, 0x01, 0x59, 0x3b, 0x5e, 0x97, 0x8b, 0x9a, 0xd8, 0x44, 0x4b, 0x91,
+ 0x98, 0xdc, 0xa9, 0xc9, 0xd3, 0xe3, 0xf5, 0xca, 0xdd, 0x33, 0x0c, 0x0e, 0x71, 0x2d, 0xcf, 0x91,
+ 0x24, 0xc1, 0x5a, 0xa2, 0x2a, 0x37, 0x86, 0x96, 0x35, 0x1c, 0x11, 0x29, 0xd8, 0xf7, 0x0e, 0xd6,
+ 0x98, 0x39, 0x26, 0x2e, 0xd3, 0xc7, 0xb6, 0x2f, 0xf0, 0x8f, 0xd3, 0x02, 0x5f, 0x3b, 0xba, 0x6d,
+ 0x13, 0xc7, 0xbf, 0xb6, 0xfa, 0xcb, 0x15, 0x48, 0x75, 0x6d, 0x9d, 0xa2, 0x25, 0xc8, 0x09, 0x15,
+ 0x34, 0xd3, 0x50, 0x95, 0x65, 0x65, 0xa5, 0x88, 0xb3, 0xe2, 0xbb, 0x65, 0xa0, 0x6b, 0x90, 0x75,
+ 0x6d, 0x9d, 0xf2, 0x93, 0x84, 0x38, 0xc9, 0xf0, 0xcf, 0x96, 0x81, 0x5e, 0x02, 0x08, 0x19, 0x97,
+ 0xe9, 0x8c, 0xa8, 0x97, 0x96, 0x95, 0x95, 0xc2, 0xc6, 0x7f, 0x6a, 0xef, 0x35, 0xad, 0xc6, 0x2f,
+ 0xaa, 0xf5, 0x42, 0x04, 0x8e, 0xa1, 0xd1, 0x2d, 0x28, 0xd9, 0xba, 0x43, 0x28, 0xd3, 0x82, 0xbb,
+ 0x92, 0xe2, 0xae, 0xa2, 0xdc, 0xed, 0xca, 0x1b, 0xff, 0x0f, 0x29, 0xaa, 0x8f, 0x89, 0x9a, 0x12,
+ 0x77, 0xfd, 0xf7, 0x9c, 0xbb, 0x7a, 0x8e, 0x47, 0x07, 0x3a, 0xd3, 0xfb, 0x23, 0xd2, 0x65, 0x8e,
+ 0x49, 0x87, 0x58, 0x20, 0xd1, 0x33, 0x48, 0x1d, 0x99, 0xd4, 0x50, 0x4b, 0xcb, 0xca, 0x4a, 0x69,
+ 0x63, 0xe5, 0x22, 0x6d, 0xf9, 0x9f, 0x5d, 0x93, 0x1a, 0x58, 0xa0, 0xd0, 0x63, 0x00, 0x97, 0xe9,
+ 0x0e, 0xd3, 0xf8, 0x3b, 0xab, 0x69, 0xa1, 0x45, 0xa5, 0x26, 0xdf, 0xb8, 0x16, 0xbc, 0x71, 0xad,
+ 0x17, 0x38, 0x01, 0xe7, 0x85, 0x34, 0xff, 0x46, 0xff, 0x83, 0x1c, 0xa1, 0x86, 0x04, 0x66, 0x2e,
+ 0x04, 0x66, 0x09, 0x35, 0x04, 0xec, 0x25, 0x80, 0xce, 0x98, 0x63, 0xf6, 0x3d, 0x46, 0x5c, 0x35,
+ 0x3b, 0xdd, 0x1b, 0x6f, 0x86, 0x08, 0x1c, 0x43, 0xa3, 0x1d, 0x28, 0xb8, 0x4c, 0x1f, 0x1c, 0x69,
+ 0x42, 0x5a, 0xcd, 0x09, 0xb2, 0xdb, 0xe7, 0x91, 0x71, 0x69, 0xe1, 0x30, 0x0c, 0x6e, 0xb8, 0x46,
+ 0xbb, 0x50, 0xe0, 0x66, 0x68, 0xe4, 0x98, 0x50, 0xe6, 0xaa, 0xf9, 0x29, 0x1d, 0x6f, 0x8e, 0x49,
+ 0x43, 0x20, 0x30, 0xb0, 0x70, 0x8d, 0x9e, 0x42, 0x7a, 0x64, 0xd2, 0x23, 0x57, 0x85, 0x8b, 0xd5,
+ 0xe1, 0x34, 0x7b, 0x5c, 0x18, 0x4b, 0x0c, 0x7a, 0x0c, 0x19, 0x1e, 0x3e, 0x9e, 0xab, 0x16, 0x04,
+ 0xfa, 0x9f, 0xe7, 0x1b, 0xc3, 0x3c, 0x17, 0xfb, 0x00, 0x54, 0x87, 0x5c, 0x90, 0x4c, 0x6a, 0x59,
+ 0x80, 0xff, 0x75, 0x16, 0x1c, 0xa6, 0xdb, 0xf1, 0x7a, 0x0d, 0xfb, 0x6b, 0x1c, 0xe2, 0xd0, 0x27,
+ 0xf0, 0x37, 0x57, 0x1f, 0x13, 0xcd, 0x76, 0xac, 0x01, 0x71, 0x5d, 0x4d, 0x77, 0xb5, 0x58, 0x10,
+ 0xab, 0xc5, 0xf7, 0xb8, 0xb9, 0x6e, 0x59, 0xa3, 0x7d, 0x7d, 0xe4, 0x11, 0x7c, 0x8d, 0xc3, 0x3b,
+ 0x12, 0xbd, 0xe9, 0x76, 0xc2, 0x50, 0x47, 0x3b, 0x50, 0x1e, 0x1c, 0x9a, 0x23, 0x43, 0x66, 0xc3,
+ 0xc0, 0xf2, 0x28, 0x53, 0xe7, 0x05, 0xdd, 0xf5, 0x33, 0x74, 0x6f, 0x5a, 0x94, 0xdd, 0xdf, 0x90,
+ 0x84, 0x25, 0x81, 0xe2, 0x14, 0x5b, 0x1c, 0x53, 0xf9, 0x56, 0x01, 0x88, 0x32, 0x0e, 0xbd, 0x84,
+ 0x2c, 0xa1, 0xcc, 0x31, 0x89, 0xab, 0x2a, 0xcb, 0xc9, 0x95, 0xc2, 0xc6, 0xbd, 0xe9, 0xd3, 0xb5,
+ 0xd6, 0xa0, 0xcc, 0x39, 0xc1, 0x01, 0x41, 0x65, 0x0d, 0xd2, 0x62, 0x07, 0x95, 0x21, 0x79, 0x44,
+ 0x4e, 0x44, 0xd5, 0xc8, 0x63, 0xbe, 0x44, 0x8b, 0x90, 0x3e, 0xe6, 0xea, 0x88, 0x7a, 0x91, 0xc7,
+ 0xf2, 0xa3, 0xf2, 0x43, 0x02, 0x20, 0x8a, 0x4c, 0xa4, 0xc3, 0x7c, 0x18, 0x9b, 0xda, 0x58, 0xb7,
+ 0x7d, 0x8d, 0x9e, 0x4d, 0x1f, 0xdc, 0xd1, 0xf2, 0x95, 0x6e, 0x4b, 0xed, 0x8a, 0x7a, 0x6c, 0x0b,
+ 0x3d, 0x02, 0xd5, 0x70, 0x2c, 0xdb, 0x26, 0x86, 0x16, 0xa5, 0x81, 0xff, 0x9a, 0x5c, 0xb5, 0x34,
+ 0xbe, 0xea, 0x9f, 0x47, 0xa4, 0xf2, 0xdd, 0xbe, 0x84, 0x85, 0x33, 0xe4, 0xef, 0x30, 0xf4, 0x45,
+ 0xdc, 0xd0, 0xc2, 0xc6, 0x9d, 0x73, 0x74, 0x0f, 0xe9, 0xa4, 0xa3, 0x24, 0xee, 0x49, 0xe2, 0x91,
+ 0x52, 0xf9, 0x29, 0x0d, 0xf9, 0x30, 0x39, 0x50, 0x0d, 0x52, 0xa2, 0x46, 0x28, 0x17, 0xd6, 0x08,
+ 0x21, 0x87, 0xf6, 0x01, 0x74, 0x4a, 0x2d, 0xa6, 0x33, 0xd3, 0xa2, 0xbe, 0x1e, 0x0f, 0xa6, 0xce,
+ 0xc5, 0xda, 0x66, 0x88, 0x6d, 0xce, 0xe1, 0x18, 0x13, 0xfa, 0x02, 0xe6, 0xc7, 0xc4, 0x75, 0xf5,
+ 0xa1, 0x9f, 0xe7, 0xa2, 0x1e, 0x17, 0x36, 0x1e, 0x4e, 0x4f, 0xfd, 0x4a, 0xc2, 0xc5, 0x47, 0x73,
+ 0x0e, 0x17, 0xc7, 0xb1, 0xef, 0xca, 0xcf, 0x0a, 0x40, 0x74, 0x37, 0x6a, 0x43, 0xc1, 0x20, 0xee,
+ 0xc0, 0x31, 0x6d, 0x61, 0x86, 0x32, 0x43, 0x7d, 0x8f, 0x13, 0x9c, 0x2a, 0x9b, 0x89, 0x0f, 0x29,
+ 0x9b, 0x95, 0x3f, 0x14, 0x28, 0xc6, 0x6d, 0x41, 0x1f, 0x43, 0x8a, 0x9d, 0xd8, 0xd2, 0x45, 0xa5,
+ 0x8d, 0xa7, 0xb3, 0xbd, 0x48, 0xad, 0x77, 0x62, 0x13, 0x2c, 0x88, 0x50, 0x09, 0x12, 0x7e, 0x73,
+ 0x4d, 0xe1, 0x84, 0x69, 0xa0, 0xbb, 0xb0, 0xe0, 0xd1, 0x81, 0x35, 0xb6, 0x1d, 0xe2, 0xba, 0xc4,
+ 0xd0, 0x5c, 0xf3, 0x2d, 0x11, 0xef, 0x9f, 0xc2, 0xe5, 0xf8, 0x41, 0xd7, 0x7c, 0x4b, 0xd0, 0xbf,
+ 0xe1, 0xd2, 0x69, 0xd1, 0x94, 0x10, 0x2d, 0x4d, 0x0a, 0x56, 0x1f, 0x40, 0x8a, 0xdf, 0x89, 0x16,
+ 0xa1, 0xdc, 0xfb, 0xb4, 0xd3, 0xd0, 0xde, 0xb4, 0xbb, 0x9d, 0xc6, 0x56, 0x6b, 0xa7, 0xd5, 0xd8,
+ 0x2e, 0xcf, 0xa1, 0x1c, 0xa4, 0xba, 0x8d, 0x76, 0xaf, 0xac, 0xa0, 0x22, 0xe4, 0x70, 0x63, 0xab,
+ 0xd1, 0xda, 0x6f, 0x6c, 0x97, 0x13, 0xf5, 0xac, 0x1f, 0xe2, 0x95, 0xdf, 0x78, 0x29, 0x89, 0xea,
+ 0x76, 0x13, 0x20, 0x6a, 0x02, 0x7e, 0xee, 0xde, 0x99, 0xfa, 0x29, 0x70, 0x3e, 0x6c, 0x01, 0xe8,
+ 0x09, 0x2c, 0x85, 0x59, 0x1a, 0x46, 0xc4, 0x64, 0x9a, 0x5e, 0x0b, 0xd2, 0x34, 0x3a, 0x17, 0x79,
+ 0x8a, 0x5e, 0xc0, 0xf5, 0x00, 0x3b, 0x11, 0xad, 0x01, 0x3c, 0x29, 0xe0, 0x01, 0x7f, 0xfc, 0xfd,
+ 0xfd, 0x44, 0xff, 0x3e, 0x01, 0x29, 0xde, 0x52, 0x66, 0x1a, 0x80, 0x9e, 0xfb, 0x81, 0x90, 0x14,
+ 0x81, 0x70, 0x67, 0x9a, 0xd6, 0x15, 0x77, 0xfb, 0x64, 0x90, 0xa6, 0x3e, 0x24, 0x48, 0xab, 0xbb,
+ 0xe7, 0x3a, 0xf7, 0x0a, 0x2c, 0x6c, 0x35, 0x5b, 0x7b, 0xdb, 0xda, 0x5e, 0xab, 0xbd, 0xdb, 0xd8,
+ 0xd6, 0xba, 0x9d, 0xcd, 0x76, 0x59, 0x41, 0x57, 0x01, 0x75, 0x36, 0x71, 0xa3, 0xdd, 0x9b, 0xd8,
+ 0x4f, 0x54, 0xbe, 0x82, 0xb4, 0x68, 0xb3, 0xe8, 0x11, 0xa4, 0x78, 0xa3, 0xf5, 0xdd, 0x7b, 0x6b,
+ 0x1a, 0x03, 0xb1, 0x40, 0xa0, 0x1a, 0x5c, 0x0e, 0x1c, 0x23, 0x5a, 0xf5, 0x84, 0x3b, 0x17, 0xfc,
+ 0x23, 0x71, 0x89, 0xf0, 0x43, 0xf5, 0x39, 0xe4, 0x82, 0x59, 0x0b, 0x2d, 0xc1, 0x15, 0xae, 0x88,
+ 0xb6, 0xdb, 0x6a, 0x6f, 0x9f, 0x32, 0x04, 0x20, 0xd3, 0x6d, 0xe0, 0xfd, 0x06, 0x2e, 0x2b, 0x7c,
+ 0xbd, 0xb5, 0xd7, 0xe2, 0x31, 0x9b, 0xa8, 0x3e, 0x84, 0x8c, 0xec, 0xef, 0x08, 0x41, 0x6a, 0x60,
+ 0x19, 0x32, 0x39, 0xd3, 0x58, 0xac, 0x91, 0x0a, 0x59, 0x3f, 0x3a, 0xfc, 0x8e, 0x14, 0x7c, 0x56,
+ 0x7f, 0x55, 0xa0, 0x34, 0x59, 0x99, 0xd1, 0x6b, 0x28, 0xba, 0xa2, 0xa2, 0x68, 0xb2, 0xb4, 0xcf,
+ 0x50, 0x8b, 0x9a, 0x73, 0xb8, 0x20, 0x39, 0x24, 0xe5, 0xdf, 0x21, 0x6f, 0x52, 0xa6, 0x45, 0xad,
+ 0x22, 0xd9, 0x9c, 0xc3, 0x39, 0x93, 0x32, 0x79, 0x7c, 0x03, 0xa0, 0x6f, 0x59, 0x23, 0xff, 0x9c,
+ 0x07, 0x53, 0xae, 0x39, 0x87, 0xf3, 0xfd, 0x60, 0x4c, 0x40, 0x37, 0xa1, 0x68, 0x58, 0x5e, 0x7f,
+ 0x44, 0x7c, 0x11, 0x1e, 0x2a, 0x0a, 0xbf, 0x44, 0xee, 0x0a, 0xa1, 0x30, 0x51, 0xab, 0xdf, 0x65,
+ 0x00, 0xa2, 0xc9, 0x0d, 0xf5, 0xb8, 0x3d, 0x7c, 0xea, 0x3b, 0x70, 0xf4, 0xb1, 0x68, 0xfc, 0xdc,
+ 0x9e, 0xf5, 0xa9, 0xc6, 0x3e, 0xb9, 0xdc, 0x11, 0x40, 0x2c, 0x87, 0x47, 0xf9, 0x81, 0x56, 0xe1,
+ 0x72, 0x6c, 0x96, 0xd4, 0x0e, 0x75, 0xf7, 0x50, 0x0b, 0x6b, 0x58, 0x39, 0x1a, 0x16, 0x9b, 0xba,
+ 0x7b, 0xd8, 0x32, 0x2a, 0xbf, 0x27, 0x7d, 0x9d, 0x04, 0x1c, 0xbd, 0x86, 0xf9, 0x03, 0x8f, 0x0e,
+ 0x78, 0x22, 0x6b, 0x62, 0xa0, 0x9f, 0xa5, 0xe0, 0x17, 0x03, 0x8a, 0x36, 0xa7, 0xec, 0xc3, 0x55,
+ 0xcb, 0x31, 0x87, 0x26, 0xd5, 0x47, 0xda, 0x24, 0x77, 0x62, 0x06, 0xee, 0xc5, 0x80, 0x6b, 0x27,
+ 0x7e, 0x47, 0x0b, 0xf2, 0x07, 0xe6, 0x88, 0x48, 0xda, 0xe4, 0x0c, 0xb4, 0x39, 0x0e, 0x17, 0x54,
+ 0x37, 0xa0, 0x30, 0x32, 0x29, 0xd1, 0xa8, 0x37, 0xee, 0x13, 0x47, 0x78, 0x34, 0x89, 0x81, 0x6f,
+ 0xb5, 0xc5, 0x0e, 0xba, 0x09, 0xf3, 0x03, 0x6b, 0xe4, 0x8d, 0x69, 0x20, 0x92, 0x16, 0x22, 0x45,
+ 0xb9, 0xe9, 0x0b, 0xd5, 0xa1, 0x30, 0xb2, 0x74, 0x43, 0x1b, 0x5b, 0x86, 0x37, 0x0a, 0xfe, 0xaf,
+ 0x38, 0x6f, 0x08, 0x7e, 0x25, 0x04, 0x31, 0x70, 0x94, 0x5c, 0xa3, 0x2e, 0x94, 0xe4, 0x38, 0xab,
+ 0x1d, 0x13, 0xc7, 0xe5, 0xdd, 0x37, 0x3b, 0x83, 0x65, 0xf3, 0x92, 0x63, 0x5f, 0x52, 0x54, 0xbe,
+ 0x51, 0xa0, 0x10, 0x8b, 0x1d, 0xb4, 0x03, 0x69, 0x11, 0x7e, 0xd3, 0x8c, 0x9d, 0xef, 0x8a, 0x3e,
+ 0x2c, 0xe1, 0xe8, 0x1e, 0x2c, 0x06, 0x65, 0x45, 0x86, 0xf3, 0x44, 0x5d, 0x41, 0xfe, 0x99, 0xbc,
+ 0x54, 0x16, 0x96, 0x1f, 0x15, 0xc8, 0xf8, 0x96, 0x6e, 0x43, 0xc6, 0x7f, 0xa8, 0x59, 0xc2, 0xcd,
+ 0xc7, 0xa2, 0x8f, 0x20, 0xd7, 0xf7, 0xf8, 0x68, 0xee, 0x87, 0xfb, 0x5f, 0xe5, 0xc9, 0x0a, 0x74,
+ 0xcb, 0xa8, 0x7e, 0x0e, 0x0b, 0x67, 0x4e, 0xa3, 0xd1, 0x59, 0x89, 0x8d, 0xce, 0xdc, 0x6c, 0x26,
+ 0x45, 0x89, 0xa1, 0xf5, 0x4f, 0x18, 0x99, 0x34, 0x3b, 0x3c, 0xab, 0x9f, 0x30, 0x22, 0xcc, 0xae,
+ 0xdb, 0x70, 0xdd, 0xb4, 0xde, 0xaf, 0x57, 0x5d, 0xfe, 0x57, 0xd0, 0xe1, 0x9b, 0x1d, 0xe5, 0xb3,
+ 0xfa, 0xd0, 0x64, 0x87, 0x5e, 0xbf, 0x36, 0xb0, 0xc6, 0x6b, 0x52, 0x7e, 0xd5, 0xa4, 0x2e, 0x73,
+ 0xbc, 0x31, 0xa1, 0xb2, 0xdf, 0xae, 0x45, 0x54, 0xab, 0xf2, 0x67, 0x89, 0x21, 0xa1, 0xab, 0xc3,
+ 0xe8, 0xf7, 0x8d, 0x7e, 0x46, 0x6c, 0xdf, 0xff, 0x33, 0x00, 0x00, 0xff, 0xff, 0x1e, 0xe0, 0x94,
+ 0x45, 0x03, 0x11, 0x00, 0x00,
+}
diff --git a/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
new file mode 100644
index 00000000..2ac2d28c
--- /dev/null
+++ b/vendor/github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1/trace_config.pb.go
@@ -0,0 +1,358 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: opencensus/proto/trace/v1/trace_config.proto
+
+package v1
+
+import (
+ fmt "fmt"
+ proto "github.com/golang/protobuf/proto"
+ math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// How spans should be sampled:
+// - Always off
+// - Always on
+// - Always follow the parent Span's decision (off if no parent).
+type ConstantSampler_ConstantDecision int32
+
+const (
+ ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0
+ ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1
+ ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2
+)
+
+var ConstantSampler_ConstantDecision_name = map[int32]string{
+ 0: "ALWAYS_OFF",
+ 1: "ALWAYS_ON",
+ 2: "ALWAYS_PARENT",
+}
+
+var ConstantSampler_ConstantDecision_value = map[string]int32{
+ "ALWAYS_OFF": 0,
+ "ALWAYS_ON": 1,
+ "ALWAYS_PARENT": 2,
+}
+
+func (x ConstantSampler_ConstantDecision) String() string {
+ return proto.EnumName(ConstantSampler_ConstantDecision_name, int32(x))
+}
+
+func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) {
+ return fileDescriptor_5359209b41ff50c5, []int{2, 0}
+}
+
+// Global configuration of the trace service. All fields must be specified, or
+// the default (zero) values will be used for each type.
+type TraceConfig struct {
+ // The global default sampler used to make decisions on span sampling.
+ //
+ // Types that are valid to be assigned to Sampler:
+ // *TraceConfig_ProbabilitySampler
+ // *TraceConfig_ConstantSampler
+ // *TraceConfig_RateLimitingSampler
+ Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"`
+ // The global default max number of attributes per span.
+ MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"`
+ // The global default max number of annotation events per span.
+ MaxNumberOfAnnotations int64 `protobuf:"varint,5,opt,name=max_number_of_annotations,json=maxNumberOfAnnotations,proto3" json:"max_number_of_annotations,omitempty"`
+ // The global default max number of message events per span.
+ MaxNumberOfMessageEvents int64 `protobuf:"varint,6,opt,name=max_number_of_message_events,json=maxNumberOfMessageEvents,proto3" json:"max_number_of_message_events,omitempty"`
+ // The global default max number of link entries per span.
+ MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *TraceConfig) Reset() { *m = TraceConfig{} }
+func (m *TraceConfig) String() string { return proto.CompactTextString(m) }
+func (*TraceConfig) ProtoMessage() {}
+func (*TraceConfig) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5359209b41ff50c5, []int{0}
+}
+
+func (m *TraceConfig) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_TraceConfig.Unmarshal(m, b)
+}
+func (m *TraceConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_TraceConfig.Marshal(b, m, deterministic)
+}
+func (m *TraceConfig) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_TraceConfig.Merge(m, src)
+}
+func (m *TraceConfig) XXX_Size() int {
+ return xxx_messageInfo_TraceConfig.Size(m)
+}
+func (m *TraceConfig) XXX_DiscardUnknown() {
+ xxx_messageInfo_TraceConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TraceConfig proto.InternalMessageInfo
+
+type isTraceConfig_Sampler interface {
+ isTraceConfig_Sampler()
+}
+
+type TraceConfig_ProbabilitySampler struct {
+ ProbabilitySampler *ProbabilitySampler `protobuf:"bytes,1,opt,name=probability_sampler,json=probabilitySampler,proto3,oneof"`
+}
+
+type TraceConfig_ConstantSampler struct {
+ ConstantSampler *ConstantSampler `protobuf:"bytes,2,opt,name=constant_sampler,json=constantSampler,proto3,oneof"`
+}
+
+type TraceConfig_RateLimitingSampler struct {
+ RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"`
+}
+
+func (*TraceConfig_ProbabilitySampler) isTraceConfig_Sampler() {}
+
+func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {}
+
+func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {}
+
+func (m *TraceConfig) GetSampler() isTraceConfig_Sampler {
+ if m != nil {
+ return m.Sampler
+ }
+ return nil
+}
+
+func (m *TraceConfig) GetProbabilitySampler() *ProbabilitySampler {
+ if x, ok := m.GetSampler().(*TraceConfig_ProbabilitySampler); ok {
+ return x.ProbabilitySampler
+ }
+ return nil
+}
+
+func (m *TraceConfig) GetConstantSampler() *ConstantSampler {
+ if x, ok := m.GetSampler().(*TraceConfig_ConstantSampler); ok {
+ return x.ConstantSampler
+ }
+ return nil
+}
+
+func (m *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler {
+ if x, ok := m.GetSampler().(*TraceConfig_RateLimitingSampler); ok {
+ return x.RateLimitingSampler
+ }
+ return nil
+}
+
+func (m *TraceConfig) GetMaxNumberOfAttributes() int64 {
+ if m != nil {
+ return m.MaxNumberOfAttributes
+ }
+ return 0
+}
+
+func (m *TraceConfig) GetMaxNumberOfAnnotations() int64 {
+ if m != nil {
+ return m.MaxNumberOfAnnotations
+ }
+ return 0
+}
+
+func (m *TraceConfig) GetMaxNumberOfMessageEvents() int64 {
+ if m != nil {
+ return m.MaxNumberOfMessageEvents
+ }
+ return 0
+}
+
+func (m *TraceConfig) GetMaxNumberOfLinks() int64 {
+ if m != nil {
+ return m.MaxNumberOfLinks
+ }
+ return 0
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*TraceConfig) XXX_OneofWrappers() []interface{} {
+ return []interface{}{
+ (*TraceConfig_ProbabilitySampler)(nil),
+ (*TraceConfig_ConstantSampler)(nil),
+ (*TraceConfig_RateLimitingSampler)(nil),
+ }
+}
+
+// Sampler that tries to uniformly sample traces with a given probability.
+// The probability of sampling a trace is equal to that of the specified probability.
+type ProbabilitySampler struct {
+ // The desired probability of sampling. Must be within [0.0, 1.0].
+ SamplingProbability float64 `protobuf:"fixed64,1,opt,name=samplingProbability,proto3" json:"samplingProbability,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ProbabilitySampler) Reset() { *m = ProbabilitySampler{} }
+func (m *ProbabilitySampler) String() string { return proto.CompactTextString(m) }
+func (*ProbabilitySampler) ProtoMessage() {}
+func (*ProbabilitySampler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5359209b41ff50c5, []int{1}
+}
+
+func (m *ProbabilitySampler) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ProbabilitySampler.Unmarshal(m, b)
+}
+func (m *ProbabilitySampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ProbabilitySampler.Marshal(b, m, deterministic)
+}
+func (m *ProbabilitySampler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ProbabilitySampler.Merge(m, src)
+}
+func (m *ProbabilitySampler) XXX_Size() int {
+ return xxx_messageInfo_ProbabilitySampler.Size(m)
+}
+func (m *ProbabilitySampler) XXX_DiscardUnknown() {
+ xxx_messageInfo_ProbabilitySampler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ProbabilitySampler proto.InternalMessageInfo
+
+func (m *ProbabilitySampler) GetSamplingProbability() float64 {
+ if m != nil {
+ return m.SamplingProbability
+ }
+ return 0
+}
+
+// Sampler that always makes a constant decision on span sampling.
+type ConstantSampler struct {
+ Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opencensus.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *ConstantSampler) Reset() { *m = ConstantSampler{} }
+func (m *ConstantSampler) String() string { return proto.CompactTextString(m) }
+func (*ConstantSampler) ProtoMessage() {}
+func (*ConstantSampler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5359209b41ff50c5, []int{2}
+}
+
+func (m *ConstantSampler) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_ConstantSampler.Unmarshal(m, b)
+}
+func (m *ConstantSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_ConstantSampler.Marshal(b, m, deterministic)
+}
+func (m *ConstantSampler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_ConstantSampler.Merge(m, src)
+}
+func (m *ConstantSampler) XXX_Size() int {
+ return xxx_messageInfo_ConstantSampler.Size(m)
+}
+func (m *ConstantSampler) XXX_DiscardUnknown() {
+ xxx_messageInfo_ConstantSampler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConstantSampler proto.InternalMessageInfo
+
+func (m *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision {
+ if m != nil {
+ return m.Decision
+ }
+ return ConstantSampler_ALWAYS_OFF
+}
+
+// Sampler that tries to sample with a rate per time window.
+type RateLimitingSampler struct {
+ // Rate per second.
+ Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"`
+ XXX_NoUnkeyedLiteral struct{} `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+ XXX_sizecache int32 `json:"-"`
+}
+
+func (m *RateLimitingSampler) Reset() { *m = RateLimitingSampler{} }
+func (m *RateLimitingSampler) String() string { return proto.CompactTextString(m) }
+func (*RateLimitingSampler) ProtoMessage() {}
+func (*RateLimitingSampler) Descriptor() ([]byte, []int) {
+ return fileDescriptor_5359209b41ff50c5, []int{3}
+}
+
+func (m *RateLimitingSampler) XXX_Unmarshal(b []byte) error {
+ return xxx_messageInfo_RateLimitingSampler.Unmarshal(m, b)
+}
+func (m *RateLimitingSampler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+ return xxx_messageInfo_RateLimitingSampler.Marshal(b, m, deterministic)
+}
+func (m *RateLimitingSampler) XXX_Merge(src proto.Message) {
+ xxx_messageInfo_RateLimitingSampler.Merge(m, src)
+}
+func (m *RateLimitingSampler) XXX_Size() int {
+ return xxx_messageInfo_RateLimitingSampler.Size(m)
+}
+func (m *RateLimitingSampler) XXX_DiscardUnknown() {
+ xxx_messageInfo_RateLimitingSampler.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RateLimitingSampler proto.InternalMessageInfo
+
+func (m *RateLimitingSampler) GetQps() int64 {
+ if m != nil {
+ return m.Qps
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("opencensus.proto.trace.v1.ConstantSampler_ConstantDecision", ConstantSampler_ConstantDecision_name, ConstantSampler_ConstantDecision_value)
+ proto.RegisterType((*TraceConfig)(nil), "opencensus.proto.trace.v1.TraceConfig")
+ proto.RegisterType((*ProbabilitySampler)(nil), "opencensus.proto.trace.v1.ProbabilitySampler")
+ proto.RegisterType((*ConstantSampler)(nil), "opencensus.proto.trace.v1.ConstantSampler")
+ proto.RegisterType((*RateLimitingSampler)(nil), "opencensus.proto.trace.v1.RateLimitingSampler")
+}
+
+func init() {
+ proto.RegisterFile("opencensus/proto/trace/v1/trace_config.proto", fileDescriptor_5359209b41ff50c5)
+}
+
+var fileDescriptor_5359209b41ff50c5 = []byte{
+ // 486 bytes of a gzipped FileDescriptorProto
+ 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0xc1, 0x4e, 0xdb, 0x40,
+ 0x10, 0x86, 0x31, 0xa1, 0x50, 0x06, 0x01, 0xee, 0x5a, 0x54, 0x46, 0xe2, 0x80, 0x7c, 0x29, 0xaa,
+ 0x6a, 0xbb, 0xd0, 0x43, 0x55, 0x55, 0xaa, 0x94, 0x00, 0x51, 0x0f, 0x69, 0x88, 0x0c, 0x52, 0xd4,
+ 0x5e, 0xdc, 0xb5, 0xd9, 0xb8, 0xab, 0xc6, 0xb3, 0xae, 0x77, 0x1d, 0xd1, 0x77, 0xe9, 0x43, 0xf4,
+ 0x11, 0xab, 0xac, 0x5d, 0xdb, 0x49, 0x00, 0x71, 0xdb, 0xf9, 0xff, 0xf9, 0x7e, 0xaf, 0xbc, 0x33,
+ 0xf0, 0x46, 0x64, 0x0c, 0x63, 0x86, 0xb2, 0x90, 0x7e, 0x96, 0x0b, 0x25, 0x7c, 0x95, 0xd3, 0x98,
+ 0xf9, 0xb3, 0xd3, 0xf2, 0x10, 0xc6, 0x02, 0x27, 0x3c, 0xf1, 0xb4, 0x47, 0x0e, 0x9b, 0xee, 0x52,
+ 0xf1, 0x74, 0x93, 0x37, 0x3b, 0x75, 0xfe, 0x6c, 0xc0, 0xce, 0xcd, 0xbc, 0x38, 0xd7, 0x00, 0xf9,
+ 0x0e, 0x56, 0x96, 0x8b, 0x88, 0x46, 0x7c, 0xca, 0xd5, 0xef, 0x50, 0xd2, 0x34, 0x9b, 0xb2, 0xdc,
+ 0x36, 0x8e, 0x8d, 0x93, 0x9d, 0x33, 0xd7, 0x7b, 0x30, 0xc8, 0x1b, 0x35, 0xd4, 0x75, 0x09, 0x7d,
+ 0x5e, 0x0b, 0x48, 0xb6, 0xa2, 0x92, 0x31, 0x98, 0xb1, 0x40, 0xa9, 0x28, 0xaa, 0x3a, 0x7e, 0x5d,
+ 0xc7, 0xbf, 0x7e, 0x24, 0xfe, 0xbc, 0x42, 0x9a, 0xec, 0xfd, 0x78, 0x51, 0x22, 0xb7, 0x70, 0x90,
+ 0x53, 0xc5, 0xc2, 0x29, 0x4f, 0xb9, 0xe2, 0x98, 0xd4, 0xe9, 0x1d, 0x9d, 0xee, 0x3d, 0x92, 0x1e,
+ 0x50, 0xc5, 0x06, 0x15, 0xd6, 0x7c, 0xc1, 0xca, 0x57, 0x65, 0xf2, 0x1e, 0xec, 0x94, 0xde, 0x85,
+ 0x58, 0xa4, 0x11, 0xcb, 0x43, 0x31, 0x09, 0xa9, 0x52, 0x39, 0x8f, 0x0a, 0xc5, 0xa4, 0xbd, 0x71,
+ 0x6c, 0x9c, 0x74, 0x82, 0x83, 0x94, 0xde, 0x0d, 0xb5, 0x7d, 0x35, 0xe9, 0xd6, 0x26, 0xf9, 0x00,
+ 0x87, 0x4b, 0x20, 0xa2, 0x50, 0x54, 0x71, 0x81, 0xd2, 0x7e, 0xa6, 0xc9, 0x97, 0x6d, 0xb2, 0x71,
+ 0xc9, 0x27, 0x38, 0x5a, 0x44, 0x53, 0x26, 0x25, 0x4d, 0x58, 0xc8, 0x66, 0x0c, 0x95, 0xb4, 0x37,
+ 0x35, 0x6d, 0xb7, 0xe8, 0x2f, 0x65, 0xc3, 0xa5, 0xf6, 0x89, 0x0b, 0xd6, 0x22, 0x3f, 0xe5, 0xf8,
+ 0x53, 0xda, 0x5b, 0x1a, 0x33, 0x5b, 0xd8, 0x60, 0xae, 0xf7, 0xb6, 0x61, 0xab, 0xfa, 0x75, 0x4e,
+ 0x1f, 0xc8, 0xea, 0xc3, 0x92, 0xb7, 0x60, 0xe9, 0x06, 0x8e, 0x49, 0xcb, 0xd5, 0x43, 0x62, 0x04,
+ 0xf7, 0x59, 0xce, 0x5f, 0x03, 0xf6, 0x97, 0x9e, 0x90, 0x8c, 0xe1, 0xf9, 0x2d, 0x8b, 0xb9, 0xe4,
+ 0x02, 0x35, 0xba, 0x77, 0xf6, 0xf1, 0xe9, 0x03, 0x50, 0xd7, 0x17, 0x55, 0x44, 0x50, 0x87, 0x39,
+ 0x17, 0x60, 0x2e, 0xbb, 0x64, 0x0f, 0xa0, 0x3b, 0x18, 0x77, 0xbf, 0x5e, 0x87, 0x57, 0xfd, 0xbe,
+ 0xb9, 0x46, 0x76, 0x61, 0xfb, 0x7f, 0x3d, 0x34, 0x0d, 0xf2, 0x02, 0x76, 0xab, 0x72, 0xd4, 0x0d,
+ 0x2e, 0x87, 0x37, 0xe6, 0xba, 0xf3, 0x0a, 0xac, 0x7b, 0xc6, 0x82, 0x98, 0xd0, 0xf9, 0x95, 0x49,
+ 0x7d, 0xe1, 0x4e, 0x30, 0x3f, 0xf6, 0x66, 0x70, 0xc4, 0xc5, 0xc3, 0x37, 0xef, 0x99, 0xad, 0xfd,
+ 0x1a, 0xcd, 0xad, 0x91, 0xf1, 0xad, 0x97, 0x70, 0xf5, 0xa3, 0x88, 0xbc, 0x58, 0xa4, 0x7e, 0x49,
+ 0xb9, 0x1c, 0xa5, 0xca, 0x8b, 0x94, 0x61, 0xf9, 0xea, 0x7e, 0x13, 0xe8, 0x96, 0x1b, 0x9e, 0x30,
+ 0x74, 0x93, 0x66, 0xd1, 0xa3, 0x4d, 0x2d, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x13, 0xe2,
+ 0xd9, 0x56, 0x0c, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/coreos/etcd/Documentation/README.md b/vendor/github.com/coreos/etcd/Documentation/README.md
deleted file mode 120000
index 8828313f..00000000
--- a/vendor/github.com/coreos/etcd/Documentation/README.md
+++ /dev/null
@@ -1 +0,0 @@
-docs.md
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/client/README.md b/vendor/github.com/coreos/etcd/client/README.md
deleted file mode 100644
index 2be731ed..00000000
--- a/vendor/github.com/coreos/etcd/client/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# etcd/client
-
-etcd/client is the Go client library for etcd.
-
-[](https://godoc.org/github.com/coreos/etcd/client)
-
-etcd uses `cmd/vendor` directory to store external dependencies, which are
-to be compiled into etcd release binaries. `client` can be imported without
-vendoring. For full compatibility, it is recommended to vendor builds using
-etcd's vendored packages, using tools like godep, as in
-[vendor directories](https://golang.org/cmd/go/#hdr-Vendor_Directories).
-For more detail, please read [Go vendor design](https://golang.org/s/go15vendor).
-
-## Install
-
-```bash
-go get github.com/coreos/etcd/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
- "log"
- "time"
- "context"
-
- "github.com/coreos/etcd/client"
-)
-
-func main() {
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: client.DefaultTransport,
- // set timeout per request to fail fast when the target endpoint is unavailable
- HeaderTimeoutPerRequest: time.Second,
- }
- c, err := client.New(cfg)
- if err != nil {
- log.Fatal(err)
- }
- kapi := client.NewKeysAPI(c)
- // set "/foo" key with "bar" value
- log.Print("Setting '/foo' key with 'bar' value")
- resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Set is done. Metadata is %q\n", resp)
- }
- // get "/foo" key's value
- log.Print("Getting '/foo' key value")
- resp, err = kapi.Get(context.Background(), "/foo", nil)
- if err != nil {
- log.Fatal(err)
- } else {
- // print common key info
- log.Printf("Get is done. Metadata is %q\n", resp)
- // print value
- log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
- }
-}
-```
-
-## Error Handling
-
-etcd client might return three types of errors.
-
-- context error
-
-Each API call has its first parameter as `context`. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the responding context error will be returned no matter what internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries to send request to the cluster endpoints one by one until it successfully gets a response. If a requests to an endpoint fails, due to exceeding per request timeout or connection issues, the error will be added into a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response gets from the cluster is invalid, a plain string error will be returned. For example, it might be a invalid JSON error.
-
-Here is the example code to handle client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
- log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
- if err == context.Canceled {
- // ctx is canceled by another routine
- } else if err == context.DeadlineExceeded {
- // ctx is attached with a deadline and it exceeded
- } else if cerr, ok := err.(*client.ClusterError); ok {
- // process (cerr.Errors)
- } else {
- // bad cluster endpoints, which are not etcd servers
- }
-}
-```
-
-
-## Caveat
-
-1. etcd/client prefers to use the same endpoint as long as the endpoint continues to work well. This saves socket resources, and improves efficiency for both client and server side. This preference doesn't remove consistency from the data consumed by the client because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly. For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all errors happened.
-
-3. Default etcd/client cannot handle the case that the remote server is SIGSTOPed now. TCP keepalive mechanism doesn't help in this scenario because operating system may still send TCP keep-alive packets. Over time we'd like to improve this functionality, but solving this issue isn't high priority because a real-life case in which a server is stopped, but the connection is kept alive, hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether a member is healthy with watches and non-quorum read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. Instead, users can either issue quorum read requests or monitor the /health endpoint for member health information.
diff --git a/vendor/github.com/coreos/etcd/client/auth_role.go b/vendor/github.com/coreos/etcd/client/auth_role.go
deleted file mode 100644
index b6ba7e15..00000000
--- a/vendor/github.com/coreos/etcd/client/auth_role.go
+++ /dev/null
@@ -1,236 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
-)
-
-type Role struct {
- Role string `json:"role"`
- Permissions Permissions `json:"permissions"`
- Grant *Permissions `json:"grant,omitempty"`
- Revoke *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
- KV rwPermission `json:"kv"`
-}
-
-type rwPermission struct {
- Read []string `json:"read"`
- Write []string `json:"write"`
-}
-
-type PermissionType int
-
-const (
- ReadPermission PermissionType = iota
- WritePermission
- ReadWritePermission
-)
-
-// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
-// interact with etcd's role creation and modification features.
-func NewAuthRoleAPI(c Client) AuthRoleAPI {
- return &httpAuthRoleAPI{
- client: c,
- }
-}
-
-type AuthRoleAPI interface {
- // AddRole adds a role.
- AddRole(ctx context.Context, role string) error
-
- // RemoveRole removes a role.
- RemoveRole(ctx context.Context, role string) error
-
- // GetRole retrieves role details.
- GetRole(ctx context.Context, role string) (*Role, error)
-
- // GrantRoleKV grants a role some permission prefixes for the KV store.
- GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // RevokeRoleKV revokes some permission prefixes for a role on the KV store.
- RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
- // ListRoles lists roles.
- ListRoles(ctx context.Context) ([]string, error)
-}
-
-type httpAuthRoleAPI struct {
- client httpClient
-}
-
-type authRoleAPIAction struct {
- verb string
- name string
- role *Role
-}
-
-type authRoleAPIList struct{}
-
-func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "roles", l.name)
- if l.role == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.role)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
- resp, body, err := r.client.Do(ctx, &authRoleAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
- var roleList struct {
- Roles []Role `json:"roles"`
- }
- if err = json.Unmarshal(body, &roleList); err != nil {
- return nil, err
- }
- ret := make([]string, 0, len(roleList.Roles))
- for _, r := range roleList.Roles {
- ret = append(ret, r.Role)
- }
- return ret, nil
-}
-
-func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error {
- role := &Role{
- Role: rolename,
- }
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error {
- return r.addRemoveRole(ctx, &authRoleAPIAction{
- verb: "DELETE",
- name: rolename,
- })
-}
-
-func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err := json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) {
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "GET",
- name: rolename,
- })
-}
-
-func buildRWPermission(prefixes []string, permType PermissionType) rwPermission {
- var out rwPermission
- switch permType {
- case ReadPermission:
- out.Read = prefixes
- case WritePermission:
- out.Write = prefixes
- case ReadWritePermission:
- out.Read = prefixes
- out.Write = prefixes
- }
- return out
-}
-
-func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Grant: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) {
- rwp := buildRWPermission(prefixes, permType)
- role := &Role{
- Role: rolename,
- Revoke: &Permissions{
- KV: rwp,
- },
- }
- return r.modRole(ctx, &authRoleAPIAction{
- verb: "PUT",
- name: rolename,
- role: role,
- })
-}
-
-func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) {
- resp, body, err := r.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var role Role
- if err = json.Unmarshal(body, &role); err != nil {
- return nil, err
- }
- return &role, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/auth_user.go b/vendor/github.com/coreos/etcd/client/auth_user.go
deleted file mode 100644
index 8e7e2efe..00000000
--- a/vendor/github.com/coreos/etcd/client/auth_user.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "net/http"
- "net/url"
- "path"
-)
-
-var (
- defaultV2AuthPrefix = "/v2/auth"
-)
-
-type User struct {
- User string `json:"user"`
- Password string `json:"password,omitempty"`
- Roles []string `json:"roles"`
- Grant []string `json:"grant,omitempty"`
- Revoke []string `json:"revoke,omitempty"`
-}
-
-// userListEntry is the user representation given by the server for ListUsers
-type userListEntry struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-type UserRoles struct {
- User string `json:"user"`
- Roles []Role `json:"roles"`
-}
-
-func v2AuthURL(ep url.URL, action string, name string) *url.URL {
- if name != "" {
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name)
- return &ep
- }
- ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action)
- return &ep
-}
-
-// NewAuthAPI constructs a new AuthAPI that uses HTTP to
-// interact with etcd's general auth features.
-func NewAuthAPI(c Client) AuthAPI {
- return &httpAuthAPI{
- client: c,
- }
-}
-
-type AuthAPI interface {
- // Enable auth.
- Enable(ctx context.Context) error
-
- // Disable auth.
- Disable(ctx context.Context) error
-}
-
-type httpAuthAPI struct {
- client httpClient
-}
-
-func (s *httpAuthAPI) Enable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"PUT"})
-}
-
-func (s *httpAuthAPI) Disable(ctx context.Context) error {
- return s.enableDisable(ctx, &authAPIAction{"DELETE"})
-}
-
-func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
- resp, body, err := s.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-type authAPIAction struct {
- verb string
-}
-
-func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "enable", "")
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
-}
-
-type authError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e authError) Error() string {
- return e.Message
-}
-
-// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to
-// interact with etcd's user creation and modification features.
-func NewAuthUserAPI(c Client) AuthUserAPI {
- return &httpAuthUserAPI{
- client: c,
- }
-}
-
-type AuthUserAPI interface {
- // AddUser adds a user.
- AddUser(ctx context.Context, username string, password string) error
-
- // RemoveUser removes a user.
- RemoveUser(ctx context.Context, username string) error
-
- // GetUser retrieves user details.
- GetUser(ctx context.Context, username string) (*User, error)
-
- // GrantUser grants a user some permission roles.
- GrantUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // RevokeUser revokes some permission roles from a user.
- RevokeUser(ctx context.Context, username string, roles []string) (*User, error)
-
- // ChangePassword changes the user's password.
- ChangePassword(ctx context.Context, username string, password string) (*User, error)
-
- // ListUsers lists the users.
- ListUsers(ctx context.Context) ([]string, error)
-}
-
-type httpAuthUserAPI struct {
- client httpClient
-}
-
-type authUserAPIAction struct {
- verb string
- username string
- user *User
-}
-
-type authUserAPIList struct{}
-
-func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", "")
- req, _ := http.NewRequest("GET", u.String(), nil)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2AuthURL(ep, "users", l.username)
- if l.user == nil {
- req, _ := http.NewRequest(l.verb, u.String(), nil)
- return req
- }
- b, err := json.Marshal(l.user)
- if err != nil {
- panic(err)
- }
- body := bytes.NewReader(b)
- req, _ := http.NewRequest(l.verb, u.String(), body)
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
- resp, body, err := u.client.Do(ctx, &authUserAPIList{})
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
-
- var userList struct {
- Users []userListEntry `json:"users"`
- }
-
- if err = json.Unmarshal(body, &userList); err != nil {
- return nil, err
- }
-
- ret := make([]string, 0, len(userList.Users))
- for _, u := range userList.Users {
- ret = append(ret, u.User)
- }
- return ret, nil
-}
-
-func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error {
- user := &User{
- User: username,
- Password: password,
- }
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error {
- return u.addRemoveUser(ctx, &authUserAPIAction{
- verb: "DELETE",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return err
- }
- return sec
- }
- return nil
-}
-
-func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) {
- return u.modUser(ctx, &authUserAPIAction{
- verb: "GET",
- username: username,
- })
-}
-
-func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Grant: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) {
- user := &User{
- User: username,
- Revoke: roles,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) {
- user := &User{
- User: username,
- Password: password,
- }
- return u.modUser(ctx, &authUserAPIAction{
- verb: "PUT",
- username: username,
- user: user,
- })
-}
-
-func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) {
- resp, body, err := u.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
- if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- var sec authError
- err = json.Unmarshal(body, &sec)
- if err != nil {
- return nil, err
- }
- return nil, sec
- }
- var user User
- if err = json.Unmarshal(body, &user); err != nil {
- var userR UserRoles
- if urerr := json.Unmarshal(body, &userR); urerr != nil {
- return nil, err
- }
- user.User = userR.User
- for _, r := range userR.Roles {
- user.Roles = append(user.Roles, r.Role)
- }
- }
- return &user, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/cancelreq.go b/vendor/github.com/coreos/etcd/client/cancelreq.go
deleted file mode 100644
index 76d1f040..00000000
--- a/vendor/github.com/coreos/etcd/client/cancelreq.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// borrowed from golang/net/context/ctxhttp/cancelreq.go
-
-package client
-
-import "net/http"
-
-func requestCanceler(tr CancelableTransport, req *http.Request) func() {
- ch := make(chan struct{})
- req.Cancel = ch
-
- return func() {
- close(ch)
- }
-}
diff --git a/vendor/github.com/coreos/etcd/client/client.go b/vendor/github.com/coreos/etcd/client/client.go
deleted file mode 100644
index e6874505..00000000
--- a/vendor/github.com/coreos/etcd/client/client.go
+++ /dev/null
@@ -1,710 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "io/ioutil"
- "math/rand"
- "net"
- "net/http"
- "net/url"
- "sort"
- "strconv"
- "sync"
- "time"
-
- "github.com/coreos/etcd/version"
-)
-
-var (
- ErrNoEndpoints = errors.New("client: no endpoints available")
- ErrTooManyRedirects = errors.New("client: too many redirects")
- ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
- ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
- errTooManyRedirectChecks = errors.New("client: too many redirect checks")
-
- // oneShotCtxValue is set on a context using WithValue(&oneShotValue) so
- // that Do() will not retry a request
- oneShotCtxValue interface{}
-)
-
-var DefaultRequestTimeout = 5 * time.Second
-
-var DefaultTransport CancelableTransport = &http.Transport{
- Proxy: http.ProxyFromEnvironment,
- Dial: (&net.Dialer{
- Timeout: 30 * time.Second,
- KeepAlive: 30 * time.Second,
- }).Dial,
- TLSHandshakeTimeout: 10 * time.Second,
-}
-
-type EndpointSelectionMode int
-
-const (
- // EndpointSelectionRandom is the default value of the 'SelectionMode'.
- // As the name implies, the client object will pick a node from the members
- // of the cluster in a random fashion. If the cluster has three members, A, B,
- // and C, the client picks any node from its three members as its request
- // destination.
- EndpointSelectionRandom EndpointSelectionMode = iota
-
- // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader',
- // requests are sent directly to the cluster leader. This reduces
- // forwarding roundtrips compared to making requests to etcd followers
- // who then forward them to the cluster leader. In the event of a leader
- // failure, however, clients configured this way cannot prioritize among
- // the remaining etcd followers. Therefore, when a client sets 'SelectionMode'
- // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to
- // maintain its knowledge of current cluster state.
- //
- // This mode should be used with Client.AutoSync().
- EndpointSelectionPrioritizeLeader
-)
-
-type Config struct {
- // Endpoints defines a set of URLs (schemes, hosts and ports only)
- // that can be used to communicate with a logical etcd cluster. For
- // example, a three-node cluster could be provided like so:
- //
- // Endpoints: []string{
- // "http://node1.example.com:2379",
- // "http://node2.example.com:2379",
- // "http://node3.example.com:2379",
- // }
- //
- // If multiple endpoints are provided, the Client will attempt to
- // use them all in the event that one or more of them are unusable.
- //
- // If Client.Sync is ever called, the Client may cache an alternate
- // set of endpoints to continue operation.
- Endpoints []string
-
- // Transport is used by the Client to drive HTTP requests. If not
- // provided, DefaultTransport will be used.
- Transport CancelableTransport
-
- // CheckRedirect specifies the policy for handling HTTP redirects.
- // If CheckRedirect is not nil, the Client calls it before
- // following an HTTP redirect. The sole argument is the number of
- // requests that have already been made. If CheckRedirect returns
- // an error, Client.Do will not make any further requests and return
- // the error back it to the caller.
- //
- // If CheckRedirect is nil, the Client uses its default policy,
- // which is to stop after 10 consecutive requests.
- CheckRedirect CheckRedirectFunc
-
- // Username specifies the user credential to add as an authorization header
- Username string
-
- // Password is the password for the specified user to add as an authorization header
- // to the request.
- Password string
-
- // HeaderTimeoutPerRequest specifies the time limit to wait for response
- // header in a single request made by the Client. The timeout includes
- // connection time, any redirects, and header wait time.
- //
- // For non-watch GET request, server returns the response body immediately.
- // For PUT/POST/DELETE request, server will attempt to commit request
- // before responding, which is expected to take `100ms + 2 * RTT`.
- // For watch request, server returns the header immediately to notify Client
- // watch start. But if server is behind some kind of proxy, the response
- // header may be cached at proxy, and Client cannot rely on this behavior.
- //
- // Especially, wait request will ignore this timeout.
- //
- // One API call may send multiple requests to different etcd servers until it
- // succeeds. Use context of the API to specify the overall timeout.
- //
- // A HeaderTimeoutPerRequest of zero means no timeout.
- HeaderTimeoutPerRequest time.Duration
-
- // SelectionMode is an EndpointSelectionMode enum that specifies the
- // policy for choosing the etcd cluster node to which requests are sent.
- SelectionMode EndpointSelectionMode
-}
-
-func (cfg *Config) transport() CancelableTransport {
- if cfg.Transport == nil {
- return DefaultTransport
- }
- return cfg.Transport
-}
-
-func (cfg *Config) checkRedirect() CheckRedirectFunc {
- if cfg.CheckRedirect == nil {
- return DefaultCheckRedirect
- }
- return cfg.CheckRedirect
-}
-
-// CancelableTransport mimics net/http.Transport, but requires that
-// the object also support request cancellation.
-type CancelableTransport interface {
- http.RoundTripper
- CancelRequest(req *http.Request)
-}
-
-type CheckRedirectFunc func(via int) error
-
-// DefaultCheckRedirect follows up to 10 redirects, but no more.
-var DefaultCheckRedirect CheckRedirectFunc = func(via int) error {
- if via > 10 {
- return ErrTooManyRedirects
- }
- return nil
-}
-
-type Client interface {
- // Sync updates the internal cache of the etcd cluster's membership.
- Sync(context.Context) error
-
- // AutoSync periodically calls Sync() every given interval.
- // The recommended sync interval is 10 seconds to 1 minute, which does
- // not bring too much overhead to server and makes client catch up the
- // cluster change in time.
- //
- // The example to use it:
- //
- // for {
- // err := client.AutoSync(ctx, 10*time.Second)
- // if err == context.DeadlineExceeded || err == context.Canceled {
- // break
- // }
- // log.Print(err)
- // }
- AutoSync(context.Context, time.Duration) error
-
- // Endpoints returns a copy of the current set of API endpoints used
- // by Client to resolve HTTP requests. If Sync has ever been called,
- // this may differ from the initial Endpoints provided in the Config.
- Endpoints() []string
-
- // SetEndpoints sets the set of API endpoints used by Client to resolve
- // HTTP requests. If the given endpoints are not valid, an error will be
- // returned
- SetEndpoints(eps []string) error
-
- // GetVersion retrieves the current etcd server and cluster version
- GetVersion(ctx context.Context) (*version.Versions, error)
-
- httpClient
-}
-
-func New(cfg Config) (Client, error) {
- c := &httpClusterClient{
- clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
- rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
- selectionMode: cfg.SelectionMode,
- }
- if cfg.Username != "" {
- c.credentials = &credentials{
- username: cfg.Username,
- password: cfg.Password,
- }
- }
- if err := c.SetEndpoints(cfg.Endpoints); err != nil {
- return nil, err
- }
- return c, nil
-}
-
-type httpClient interface {
- Do(context.Context, httpAction) (*http.Response, []byte, error)
-}
-
-func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory {
- return func(ep url.URL) httpClient {
- return &redirectFollowingHTTPClient{
- checkRedirect: cr,
- client: &simpleHTTPClient{
- transport: tr,
- endpoint: ep,
- headerTimeout: headerTimeout,
- },
- }
- }
-}
-
-type credentials struct {
- username string
- password string
-}
-
-type httpClientFactory func(url.URL) httpClient
-
-type httpAction interface {
- HTTPRequest(url.URL) *http.Request
-}
-
-type httpClusterClient struct {
- clientFactory httpClientFactory
- endpoints []url.URL
- pinned int
- credentials *credentials
- sync.RWMutex
- rand *rand.Rand
- selectionMode EndpointSelectionMode
-}
-
-func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
- ceps := make([]url.URL, len(eps))
- copy(ceps, eps)
-
- // To perform a lookup on the new endpoint list without using the current
- // client, we'll copy it
- clientCopy := &httpClusterClient{
- clientFactory: c.clientFactory,
- credentials: c.credentials,
- rand: c.rand,
-
- pinned: 0,
- endpoints: ceps,
- }
-
- mAPI := NewMembersAPI(clientCopy)
- leader, err := mAPI.Leader(ctx)
- if err != nil {
- return "", err
- }
- if len(leader.ClientURLs) == 0 {
- return "", ErrNoLeaderEndpoint
- }
-
- return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
-}
-
-func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
- if len(eps) == 0 {
- return []url.URL{}, ErrNoEndpoints
- }
-
- neps := make([]url.URL, len(eps))
- for i, ep := range eps {
- u, err := url.Parse(ep)
- if err != nil {
- return []url.URL{}, err
- }
- neps[i] = *u
- }
- return neps, nil
-}
-
-func (c *httpClusterClient) SetEndpoints(eps []string) error {
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- c.Lock()
- defer c.Unlock()
-
- c.endpoints = shuffleEndpoints(c.rand, neps)
- // We're not doing anything for PrioritizeLeader here. This is
- // due to not having a context meaning we can't call getLeaderEndpoint
- // However, if you're using PrioritizeLeader, you've already been told
- // to regularly call sync, where we do have a ctx, and can figure the
- // leader. PrioritizeLeader is also quite a loose guarantee, so deal
- // with it
- c.pinned = 0
-
- return nil
-}
-
-func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- action := act
- c.RLock()
- leps := len(c.endpoints)
- eps := make([]url.URL, leps)
- n := copy(eps, c.endpoints)
- pinned := c.pinned
-
- if c.credentials != nil {
- action = &authedAction{
- act: act,
- credentials: *c.credentials,
- }
- }
- c.RUnlock()
-
- if leps == 0 {
- return nil, nil, ErrNoEndpoints
- }
-
- if leps != n {
- return nil, nil, errors.New("unable to pick endpoint: copy failed")
- }
-
- var resp *http.Response
- var body []byte
- var err error
- cerr := &ClusterError{}
- isOneShot := ctx.Value(&oneShotCtxValue) != nil
-
- for i := pinned; i < leps+pinned; i++ {
- k := i % leps
- hc := c.clientFactory(eps[k])
- resp, body, err = hc.Do(ctx, action)
- if err != nil {
- cerr.Errors = append(cerr.Errors, err)
- if err == ctx.Err() {
- return nil, nil, ctx.Err()
- }
- if err == context.Canceled || err == context.DeadlineExceeded {
- return nil, nil, err
- }
- } else if resp.StatusCode/100 == 5 {
- switch resp.StatusCode {
- case http.StatusInternalServerError, http.StatusServiceUnavailable:
- // TODO: make sure this is a no leader response
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String()))
- default:
- cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode)))
- }
- err = cerr.Errors[0]
- }
- if err != nil {
- if !isOneShot {
- continue
- }
- c.Lock()
- c.pinned = (k + 1) % leps
- c.Unlock()
- return nil, nil, err
- }
- if k != pinned {
- c.Lock()
- c.pinned = k
- c.Unlock()
- }
- return resp, body, nil
- }
-
- return nil, nil, cerr
-}
-
-func (c *httpClusterClient) Endpoints() []string {
- c.RLock()
- defer c.RUnlock()
-
- eps := make([]string, len(c.endpoints))
- for i, ep := range c.endpoints {
- eps[i] = ep.String()
- }
-
- return eps
-}
-
-func (c *httpClusterClient) Sync(ctx context.Context) error {
- mAPI := NewMembersAPI(c)
- ms, err := mAPI.List(ctx)
- if err != nil {
- return err
- }
-
- var eps []string
- for _, m := range ms {
- eps = append(eps, m.ClientURLs...)
- }
-
- neps, err := c.parseEndpoints(eps)
- if err != nil {
- return err
- }
-
- npin := 0
-
- switch c.selectionMode {
- case EndpointSelectionRandom:
- c.RLock()
- eq := endpointsEqual(c.endpoints, neps)
- c.RUnlock()
-
- if eq {
- return nil
- }
- // When items in the endpoint list changes, we choose a new pin
- neps = shuffleEndpoints(c.rand, neps)
- case EndpointSelectionPrioritizeLeader:
- nle, err := c.getLeaderEndpoint(ctx, neps)
- if err != nil {
- return ErrNoLeaderEndpoint
- }
-
- for i, n := range neps {
- if n.String() == nle {
- npin = i
- break
- }
- }
- default:
- return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
- }
-
- c.Lock()
- defer c.Unlock()
- c.endpoints = neps
- c.pinned = npin
-
- return nil
-}
-
-func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
- ticker := time.NewTicker(interval)
- defer ticker.Stop()
- for {
- err := c.Sync(ctx)
- if err != nil {
- return err
- }
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-ticker.C:
- }
- }
-}
-
-func (c *httpClusterClient) GetVersion(ctx context.Context) (*version.Versions, error) {
- act := &getAction{Prefix: "/version"}
-
- resp, body, err := c.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- switch resp.StatusCode {
- case http.StatusOK:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- var vresp version.Versions
- if err := json.Unmarshal(body, &vresp); err != nil {
- return nil, ErrInvalidJSON
- }
- return &vresp, nil
- default:
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return nil, ErrInvalidJSON
- }
- return nil, etcdErr
- }
-}
-
-type roundTripResponse struct {
- resp *http.Response
- err error
-}
-
-type simpleHTTPClient struct {
- transport CancelableTransport
- endpoint url.URL
- headerTimeout time.Duration
-}
-
-func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- req := act.HTTPRequest(c.endpoint)
-
- if err := printcURL(req); err != nil {
- return nil, nil, err
- }
-
- isWait := false
- if req != nil && req.URL != nil {
- ws := req.URL.Query().Get("wait")
- if len(ws) != 0 {
- var err error
- isWait, err = strconv.ParseBool(ws)
- if err != nil {
- return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req)
- }
- }
- }
-
- var hctx context.Context
- var hcancel context.CancelFunc
- if !isWait && c.headerTimeout > 0 {
- hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout)
- } else {
- hctx, hcancel = context.WithCancel(ctx)
- }
- defer hcancel()
-
- reqcancel := requestCanceler(c.transport, req)
-
- rtchan := make(chan roundTripResponse, 1)
- go func() {
- resp, err := c.transport.RoundTrip(req)
- rtchan <- roundTripResponse{resp: resp, err: err}
- close(rtchan)
- }()
-
- var resp *http.Response
- var err error
-
- select {
- case rtresp := <-rtchan:
- resp, err = rtresp.resp, rtresp.err
- case <-hctx.Done():
- // cancel and wait for request to actually exit before continuing
- reqcancel()
- rtresp := <-rtchan
- resp = rtresp.resp
- switch {
- case ctx.Err() != nil:
- err = ctx.Err()
- case hctx.Err() != nil:
- err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String())
- default:
- panic("failed to get error from context")
- }
- }
-
- // always check for resp nil-ness to deal with possible
- // race conditions between channels above
- defer func() {
- if resp != nil {
- resp.Body.Close()
- }
- }()
-
- if err != nil {
- return nil, nil, err
- }
-
- var body []byte
- done := make(chan struct{})
- go func() {
- body, err = ioutil.ReadAll(resp.Body)
- done <- struct{}{}
- }()
-
- select {
- case <-ctx.Done():
- resp.Body.Close()
- <-done
- return nil, nil, ctx.Err()
- case <-done:
- }
-
- return resp, body, err
-}
-
-type authedAction struct {
- act httpAction
- credentials credentials
-}
-
-func (a *authedAction) HTTPRequest(url url.URL) *http.Request {
- r := a.act.HTTPRequest(url)
- r.SetBasicAuth(a.credentials.username, a.credentials.password)
- return r
-}
-
-type redirectFollowingHTTPClient struct {
- client httpClient
- checkRedirect CheckRedirectFunc
-}
-
-func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) {
- next := act
- for i := 0; i < 100; i++ {
- if i > 0 {
- if err := r.checkRedirect(i); err != nil {
- return nil, nil, err
- }
- }
- resp, body, err := r.client.Do(ctx, next)
- if err != nil {
- return nil, nil, err
- }
- if resp.StatusCode/100 == 3 {
- hdr := resp.Header.Get("Location")
- if hdr == "" {
- return nil, nil, fmt.Errorf("Location header not set")
- }
- loc, err := url.Parse(hdr)
- if err != nil {
- return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr)
- }
- next = &redirectedHTTPAction{
- action: act,
- location: *loc,
- }
- continue
- }
- return resp, body, nil
- }
-
- return nil, nil, errTooManyRedirectChecks
-}
-
-type redirectedHTTPAction struct {
- action httpAction
- location url.URL
-}
-
-func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request {
- orig := r.action.HTTPRequest(ep)
- orig.URL = &r.location
- return orig
-}
-
-func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
- // copied from Go 1.9<= rand.Rand.Perm
- n := len(eps)
- p := make([]int, n)
- for i := 0; i < n; i++ {
- j := r.Intn(i + 1)
- p[i] = p[j]
- p[j] = i
- }
- neps := make([]url.URL, n)
- for i, k := range p {
- neps[i] = eps[k]
- }
- return neps
-}
-
-func endpointsEqual(left, right []url.URL) bool {
- if len(left) != len(right) {
- return false
- }
-
- sLeft := make([]string, len(left))
- sRight := make([]string, len(right))
- for i, l := range left {
- sLeft[i] = l.String()
- }
- for i, r := range right {
- sRight[i] = r.String()
- }
-
- sort.Strings(sLeft)
- sort.Strings(sRight)
- for i := range sLeft {
- if sLeft[i] != sRight[i] {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/coreos/etcd/client/cluster_error.go b/vendor/github.com/coreos/etcd/client/cluster_error.go
deleted file mode 100644
index 34618cdb..00000000
--- a/vendor/github.com/coreos/etcd/client/cluster_error.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import "fmt"
-
-type ClusterError struct {
- Errors []error
-}
-
-func (ce *ClusterError) Error() string {
- s := ErrClusterUnavailable.Error()
- for i, e := range ce.Errors {
- s += fmt.Sprintf("; error #%d: %s\n", i, e)
- }
- return s
-}
-
-func (ce *ClusterError) Detail() string {
- s := ""
- for i, e := range ce.Errors {
- s += fmt.Sprintf("error #%d: %s\n", i, e)
- }
- return s
-}
diff --git a/vendor/github.com/coreos/etcd/client/curl.go b/vendor/github.com/coreos/etcd/client/curl.go
deleted file mode 100644
index c8bc9fba..00000000
--- a/vendor/github.com/coreos/etcd/client/curl.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "fmt"
- "io/ioutil"
- "net/http"
- "os"
-)
-
-var (
- cURLDebug = false
-)
-
-func EnablecURLDebug() {
- cURLDebug = true
-}
-
-func DisablecURLDebug() {
- cURLDebug = false
-}
-
-// printcURL prints the cURL equivalent request to stderr.
-// It returns an error if the body of the request cannot
-// be read.
-// The caller MUST cancel the request if there is an error.
-func printcURL(req *http.Request) error {
- if !cURLDebug {
- return nil
- }
- var (
- command string
- b []byte
- err error
- )
-
- if req.URL != nil {
- command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String())
- }
-
- if req.Body != nil {
- b, err = ioutil.ReadAll(req.Body)
- if err != nil {
- return err
- }
- command += fmt.Sprintf(" -d %q", string(b))
- }
-
- fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command)
-
- // reset body
- body := bytes.NewBuffer(b)
- req.Body = ioutil.NopCloser(body)
-
- return nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/discover.go b/vendor/github.com/coreos/etcd/client/discover.go
deleted file mode 100644
index 442e35fe..00000000
--- a/vendor/github.com/coreos/etcd/client/discover.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "github.com/coreos/etcd/pkg/srv"
-)
-
-// Discoverer is an interface that wraps the Discover method.
-type Discoverer interface {
- // Discover looks up the etcd servers for the domain.
- Discover(domain string) ([]string, error)
-}
-
-type srvDiscover struct{}
-
-// NewSRVDiscover constructs a new Discoverer that uses the stdlib to lookup SRV records.
-func NewSRVDiscover() Discoverer {
- return &srvDiscover{}
-}
-
-func (d *srvDiscover) Discover(domain string) ([]string, error) {
- srvs, err := srv.GetClient("etcd-client", domain)
- if err != nil {
- return nil, err
- }
- return srvs.Endpoints, nil
-}
diff --git a/vendor/github.com/coreos/etcd/client/doc.go b/vendor/github.com/coreos/etcd/client/doc.go
deleted file mode 100644
index ad4eca4e..00000000
--- a/vendor/github.com/coreos/etcd/client/doc.go
+++ /dev/null
@@ -1,73 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package client provides bindings for the etcd APIs.
-
-Create a Config and exchange it for a Client:
-
- import (
- "net/http"
- "context"
-
- "github.com/coreos/etcd/client"
- )
-
- cfg := client.Config{
- Endpoints: []string{"http://127.0.0.1:2379"},
- Transport: DefaultTransport,
- }
-
- c, err := client.New(cfg)
- if err != nil {
- // handle error
- }
-
-Clients are safe for concurrent use by multiple goroutines.
-
-Create a KeysAPI using the Client, then use it to interact with etcd:
-
- kAPI := client.NewKeysAPI(c)
-
- // create a new key /foo with the value "bar"
- _, err = kAPI.Create(context.Background(), "/foo", "bar")
- if err != nil {
- // handle error
- }
-
- // delete the newly created key only if the value is still "bar"
- _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"})
- if err != nil {
- // handle error
- }
-
-Use a custom context to set timeouts on your operations:
-
- import "time"
-
- ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
- defer cancel()
-
- // set a new key, ignoring its previous state
- _, err := kAPI.Set(ctx, "/ping", "pong", nil)
- if err != nil {
- if err == context.DeadlineExceeded {
- // request took longer than 5s
- } else {
- // handle error
- }
- }
-
-*/
-package client
diff --git a/vendor/github.com/coreos/etcd/client/keys.generated.go b/vendor/github.com/coreos/etcd/client/keys.generated.go
deleted file mode 100644
index 237fdbe8..00000000
--- a/vendor/github.com/coreos/etcd/client/keys.generated.go
+++ /dev/null
@@ -1,5218 +0,0 @@
-// ************************************************************
-// DO NOT EDIT.
-// THIS FILE IS AUTO-GENERATED BY codecgen.
-// ************************************************************
-
-package client
-
-import (
- "errors"
- "fmt"
- "reflect"
- "runtime"
- time "time"
-
- codec1978 "github.com/ugorji/go/codec"
-)
-
-const (
- // ----- content types ----
- codecSelferC_UTF87612 = 1
- codecSelferC_RAW7612 = 0
- // ----- value types used ----
- codecSelferValueTypeArray7612 = 10
- codecSelferValueTypeMap7612 = 9
- // ----- containerStateValues ----
- codecSelfer_containerMapKey7612 = 2
- codecSelfer_containerMapValue7612 = 3
- codecSelfer_containerMapEnd7612 = 4
- codecSelfer_containerArrayElem7612 = 6
- codecSelfer_containerArrayEnd7612 = 7
-)
-
-var (
- codecSelferBitsize7612 = uint8(reflect.TypeOf(uint(0)).Bits())
- codecSelferOnlyMapOrArrayEncodeToStructErr7612 = errors.New(`only encoded map or array can be decoded into a struct`)
-)
-
-type codecSelfer7612 struct{}
-
-func init() {
- if codec1978.GenVersion != 8 {
- _, file, _, _ := runtime.Caller(0)
- err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. Re-generate file: %v",
- 8, codec1978.GenVersion, file)
- panic(err)
- }
- if false { // reference the types, but skip this branch at build/run time
- var v0 time.Duration
- _ = v0
- }
-}
-
-func (x *Error) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeInt(int64(x.Code))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("errorCode"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeInt(int64(x.Code))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Message))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("message"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Message))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Cause))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("cause"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Cause))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeUint(uint64(x.Index))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("index"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeUint(uint64(x.Index))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *Error) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *Error) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "errorCode":
- if r.TryDecodeAsNil() {
- x.Code = 0
- } else {
- yyv4 := &x.Code
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*int)(yyv4)) = int(r.DecodeInt(codecSelferBitsize7612))
- }
- }
- case "message":
- if r.TryDecodeAsNil() {
- x.Message = ""
- } else {
- yyv6 := &x.Message
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "cause":
- if r.TryDecodeAsNil() {
- x.Cause = ""
- } else {
- yyv8 := &x.Cause
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "index":
- if r.TryDecodeAsNil() {
- x.Index = 0
- } else {
- yyv10 := &x.Index
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*uint64)(yyv10)) = uint64(r.DecodeUint(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *Error) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Code = 0
- } else {
- yyv13 := &x.Code
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*int)(yyv13)) = int(r.DecodeInt(codecSelferBitsize7612))
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Message = ""
- } else {
- yyv15 := &x.Message
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Cause = ""
- } else {
- yyv17 := &x.Cause
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Index = 0
- } else {
- yyv19 := &x.Index
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*uint64)(yyv19)) = uint64(r.DecodeUint(64))
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x PrevExistType) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x))
- }
-}
-
-func (x *PrevExistType) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- *((*string)(x)) = r.DecodeString()
- }
-}
-
-func (x *WatcherOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(2)
- } else {
- r.WriteMapStart(2)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeUint(uint64(x.AfterIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("AfterIndex"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeUint(uint64(x.AfterIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *WatcherOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *WatcherOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "AfterIndex":
- if r.TryDecodeAsNil() {
- x.AfterIndex = 0
- } else {
- yyv4 := &x.AfterIndex
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*uint64)(yyv4)) = uint64(r.DecodeUint(64))
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv6 := &x.Recursive
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*bool)(yyv6)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *WatcherOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj8 int
- var yyb8 bool
- var yyhl8 bool = l >= 0
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.AfterIndex = 0
- } else {
- yyv9 := &x.AfterIndex
- yym10 := z.DecBinary()
- _ = yym10
- if false {
- } else {
- *((*uint64)(yyv9)) = uint64(r.DecodeUint(64))
- }
- }
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv11 := &x.Recursive
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*bool)(yyv11)) = r.DecodeBool()
- }
- }
- for {
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj8-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *CreateInOrderOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(1)
- } else {
- r.WriteMapStart(1)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *CreateInOrderOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *CreateInOrderOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv4 := &x.TTL
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv4) {
- } else {
- *((*int64)(yyv4)) = int64(r.DecodeInt(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *CreateInOrderOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj6 int
- var yyb6 bool
- var yyhl6 bool = l >= 0
- yyj6++
- if yyhl6 {
- yyb6 = yyj6 > l
- } else {
- yyb6 = r.CheckBreak()
- }
- if yyb6 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv7 := &x.TTL
- yym8 := z.DecBinary()
- _ = yym8
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv7) {
- } else {
- *((*int64)(yyv7)) = int64(r.DecodeInt(64))
- }
- }
- for {
- yyj6++
- if yyhl6 {
- yyb6 = yyj6 > l
- } else {
- yyb6 = r.CheckBreak()
- }
- if yyb6 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj6-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *SetOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(7)
- } else {
- r.WriteMapStart(7)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- x.PrevExist.CodecEncodeSelf(e)
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevExist"))
- r.WriteMapElemValue()
- x.PrevExist.CodecEncodeSelf(e)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Refresh"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym19 := z.EncBinary()
- _ = yym19
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym20 := z.EncBinary()
- _ = yym20
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym22 := z.EncBinary()
- _ = yym22
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess"))
- r.WriteMapElemValue()
- yym23 := z.EncBinary()
- _ = yym23
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *SetOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *SetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv4 := &x.PrevValue
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv6 := &x.PrevIndex
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*uint64)(yyv6)) = uint64(r.DecodeUint(64))
- }
- }
- case "PrevExist":
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv8 := &x.PrevExist
- yyv8.CodecDecodeSelf(d)
- }
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv9 := &x.TTL
- yym10 := z.DecBinary()
- _ = yym10
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv9) {
- } else {
- *((*int64)(yyv9)) = int64(r.DecodeInt(64))
- }
- }
- case "Refresh":
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv11 := &x.Refresh
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*bool)(yyv11)) = r.DecodeBool()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv13 := &x.Dir
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*bool)(yyv13)) = r.DecodeBool()
- }
- }
- case "NoValueOnSuccess":
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv15 := &x.NoValueOnSuccess
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*bool)(yyv15)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *SetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj17 int
- var yyb17 bool
- var yyhl17 bool = l >= 0
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv18 := &x.PrevValue
- yym19 := z.DecBinary()
- _ = yym19
- if false {
- } else {
- *((*string)(yyv18)) = r.DecodeString()
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv20 := &x.PrevIndex
- yym21 := z.DecBinary()
- _ = yym21
- if false {
- } else {
- *((*uint64)(yyv20)) = uint64(r.DecodeUint(64))
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv22 := &x.PrevExist
- yyv22.CodecDecodeSelf(d)
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv23 := &x.TTL
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv23) {
- } else {
- *((*int64)(yyv23)) = int64(r.DecodeInt(64))
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv25 := &x.Refresh
- yym26 := z.DecBinary()
- _ = yym26
- if false {
- } else {
- *((*bool)(yyv25)) = r.DecodeBool()
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv27 := &x.Dir
- yym28 := z.DecBinary()
- _ = yym28
- if false {
- } else {
- *((*bool)(yyv27)) = r.DecodeBool()
- }
- }
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv29 := &x.NoValueOnSuccess
- yym30 := z.DecBinary()
- _ = yym30
- if false {
- } else {
- *((*bool)(yyv29)) = r.DecodeBool()
- }
- }
- for {
- yyj17++
- if yyhl17 {
- yyb17 = yyj17 > l
- } else {
- yyb17 = r.CheckBreak()
- }
- if yyb17 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj17-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *GetOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(3)
- } else {
- r.WriteMapStart(3)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeBool(bool(x.Sort))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Sort"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeBool(bool(x.Sort))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Quorum"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *GetOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *GetOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv4 := &x.Recursive
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*bool)(yyv4)) = r.DecodeBool()
- }
- }
- case "Sort":
- if r.TryDecodeAsNil() {
- x.Sort = false
- } else {
- yyv6 := &x.Sort
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*bool)(yyv6)) = r.DecodeBool()
- }
- }
- case "Quorum":
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv8 := &x.Quorum
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*bool)(yyv8)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *GetOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj10 int
- var yyb10 bool
- var yyhl10 bool = l >= 0
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv11 := &x.Recursive
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*bool)(yyv11)) = r.DecodeBool()
- }
- }
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Sort = false
- } else {
- yyv13 := &x.Sort
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*bool)(yyv13)) = r.DecodeBool()
- }
- }
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv15 := &x.Quorum
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*bool)(yyv15)) = r.DecodeBool()
- }
- }
- for {
- yyj10++
- if yyhl10 {
- yyb10 = yyj10 > l
- } else {
- yyb10 = r.CheckBreak()
- }
- if yyb10 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj10-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *DeleteOptions) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *DeleteOptions) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *DeleteOptions) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv4 := &x.PrevValue
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv6 := &x.PrevIndex
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*uint64)(yyv6)) = uint64(r.DecodeUint(64))
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv8 := &x.Recursive
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*bool)(yyv8)) = r.DecodeBool()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv10 := &x.Dir
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*bool)(yyv10)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *DeleteOptions) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv13 := &x.PrevValue
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*string)(yyv13)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv15 := &x.PrevIndex
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*uint64)(yyv15)) = uint64(r.DecodeUint(64))
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv17 := &x.Recursive
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*bool)(yyv17)) = r.DecodeBool()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv19 := &x.Dir
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(3)
- } else {
- r.WriteMapStart(3)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Action))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("action"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Action))
- }
- }
- var yyn6 bool
- if x.Node == nil {
- yyn6 = true
- goto LABEL6
- }
- LABEL6:
- if yyr2 || yy2arr2 {
- if yyn6 {
- r.WriteArrayElem()
- r.EncodeNil()
- } else {
- r.WriteArrayElem()
- if x.Node == nil {
- r.EncodeNil()
- } else {
- x.Node.CodecEncodeSelf(e)
- }
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("node"))
- r.WriteMapElemValue()
- if yyn6 {
- r.EncodeNil()
- } else {
- if x.Node == nil {
- r.EncodeNil()
- } else {
- x.Node.CodecEncodeSelf(e)
- }
- }
- }
- var yyn9 bool
- if x.PrevNode == nil {
- yyn9 = true
- goto LABEL9
- }
- LABEL9:
- if yyr2 || yy2arr2 {
- if yyn9 {
- r.WriteArrayElem()
- r.EncodeNil()
- } else {
- r.WriteArrayElem()
- if x.PrevNode == nil {
- r.EncodeNil()
- } else {
- x.PrevNode.CodecEncodeSelf(e)
- }
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("prevNode"))
- r.WriteMapElemValue()
- if yyn9 {
- r.EncodeNil()
- } else {
- if x.PrevNode == nil {
- r.EncodeNil()
- } else {
- x.PrevNode.CodecEncodeSelf(e)
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "action":
- if r.TryDecodeAsNil() {
- x.Action = ""
- } else {
- yyv4 := &x.Action
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "node":
- if x.Node == nil {
- x.Node = new(Node)
- }
- if r.TryDecodeAsNil() {
- if x.Node != nil {
- x.Node = nil
- }
- } else {
- if x.Node == nil {
- x.Node = new(Node)
- }
- x.Node.CodecDecodeSelf(d)
- }
- case "prevNode":
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- if r.TryDecodeAsNil() {
- if x.PrevNode != nil {
- x.PrevNode = nil
- }
- } else {
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- x.PrevNode.CodecDecodeSelf(d)
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj8 int
- var yyb8 bool
- var yyhl8 bool = l >= 0
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Action = ""
- } else {
- yyv9 := &x.Action
- yym10 := z.DecBinary()
- _ = yym10
- if false {
- } else {
- *((*string)(yyv9)) = r.DecodeString()
- }
- }
- if x.Node == nil {
- x.Node = new(Node)
- }
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- if x.Node != nil {
- x.Node = nil
- }
- } else {
- if x.Node == nil {
- x.Node = new(Node)
- }
- x.Node.CodecDecodeSelf(d)
- }
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- if x.PrevNode != nil {
- x.PrevNode = nil
- }
- } else {
- if x.PrevNode == nil {
- x.PrevNode = new(Node)
- }
- x.PrevNode.CodecDecodeSelf(d)
- }
- for {
- yyj8++
- if yyhl8 {
- yyb8 = yyj8 > l
- } else {
- yyb8 = r.CheckBreak()
- }
- if yyb8 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj8-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- var yyq2 [8]bool
- _ = yyq2
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- yyq2[1] = x.Dir != false
- yyq2[6] = x.Expiration != nil
- yyq2[7] = x.TTL != 0
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(8)
- } else {
- var yynn2 = 5
- for _, b := range yyq2 {
- if b {
- yynn2++
- }
- }
- r.WriteMapStart(yynn2)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("key"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- if yyq2[1] {
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.EncodeBool(false)
- }
- } else {
- if yyq2[1] {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("dir"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("value"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- if x.Nodes == nil {
- r.EncodeNil()
- } else {
- x.Nodes.CodecEncodeSelf(e)
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("nodes"))
- r.WriteMapElemValue()
- if x.Nodes == nil {
- r.EncodeNil()
- } else {
- x.Nodes.CodecEncodeSelf(e)
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeUint(uint64(x.CreatedIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("createdIndex"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeUint(uint64(x.CreatedIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym19 := z.EncBinary()
- _ = yym19
- if false {
- } else {
- r.EncodeUint(uint64(x.ModifiedIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("modifiedIndex"))
- r.WriteMapElemValue()
- yym20 := z.EncBinary()
- _ = yym20
- if false {
- } else {
- r.EncodeUint(uint64(x.ModifiedIndex))
- }
- }
- var yyn21 bool
- if x.Expiration == nil {
- yyn21 = true
- goto LABEL21
- }
- LABEL21:
- if yyr2 || yy2arr2 {
- if yyn21 {
- r.WriteArrayElem()
- r.EncodeNil()
- } else {
- r.WriteArrayElem()
- if yyq2[6] {
- if x.Expiration == nil {
- r.EncodeNil()
- } else {
- yym22 := z.EncBinary()
- _ = yym22
- if false {
- } else if yym23 := z.TimeRtidIfBinc(); yym23 != 0 {
- r.EncodeBuiltin(yym23, x.Expiration)
- } else if z.HasExtensions() && z.EncExt(x.Expiration) {
- } else if yym22 {
- z.EncBinaryMarshal(x.Expiration)
- } else if !yym22 && z.IsJSONHandle() {
- z.EncJSONMarshal(x.Expiration)
- } else {
- z.EncFallback(x.Expiration)
- }
- }
- } else {
- r.EncodeNil()
- }
- }
- } else {
- if yyq2[6] {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("expiration"))
- r.WriteMapElemValue()
- if yyn21 {
- r.EncodeNil()
- } else {
- if x.Expiration == nil {
- r.EncodeNil()
- } else {
- yym24 := z.EncBinary()
- _ = yym24
- if false {
- } else if yym25 := z.TimeRtidIfBinc(); yym25 != 0 {
- r.EncodeBuiltin(yym25, x.Expiration)
- } else if z.HasExtensions() && z.EncExt(x.Expiration) {
- } else if yym24 {
- z.EncBinaryMarshal(x.Expiration)
- } else if !yym24 && z.IsJSONHandle() {
- z.EncJSONMarshal(x.Expiration)
- } else {
- z.EncFallback(x.Expiration)
- }
- }
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- if yyq2[7] {
- yym27 := z.EncBinary()
- _ = yym27
- if false {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.EncodeInt(0)
- }
- } else {
- if yyq2[7] {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("ttl"))
- r.WriteMapElemValue()
- yym28 := z.EncBinary()
- _ = yym28
- if false {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv4 := &x.Key
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv6 := &x.Dir
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*bool)(yyv6)) = r.DecodeBool()
- }
- }
- case "value":
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv8 := &x.Value
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "nodes":
- if r.TryDecodeAsNil() {
- x.Nodes = nil
- } else {
- yyv10 := &x.Nodes
- yyv10.CodecDecodeSelf(d)
- }
- case "createdIndex":
- if r.TryDecodeAsNil() {
- x.CreatedIndex = 0
- } else {
- yyv11 := &x.CreatedIndex
- yym12 := z.DecBinary()
- _ = yym12
- if false {
- } else {
- *((*uint64)(yyv11)) = uint64(r.DecodeUint(64))
- }
- }
- case "modifiedIndex":
- if r.TryDecodeAsNil() {
- x.ModifiedIndex = 0
- } else {
- yyv13 := &x.ModifiedIndex
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*uint64)(yyv13)) = uint64(r.DecodeUint(64))
- }
- }
- case "expiration":
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- if r.TryDecodeAsNil() {
- if x.Expiration != nil {
- x.Expiration = nil
- }
- } else {
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else if yym17 := z.TimeRtidIfBinc(); yym17 != 0 {
- r.DecodeBuiltin(yym17, x.Expiration)
- } else if z.HasExtensions() && z.DecExt(x.Expiration) {
- } else if yym16 {
- z.DecBinaryUnmarshal(x.Expiration)
- } else if !yym16 && z.IsJSONHandle() {
- z.DecJSONUnmarshal(x.Expiration)
- } else {
- z.DecFallback(x.Expiration, false)
- }
- }
- case "ttl":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv18 := &x.TTL
- yym19 := z.DecBinary()
- _ = yym19
- if false {
- } else {
- *((*int64)(yyv18)) = int64(r.DecodeInt(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj20 int
- var yyb20 bool
- var yyhl20 bool = l >= 0
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv21 := &x.Key
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*string)(yyv21)) = r.DecodeString()
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv23 := &x.Dir
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else {
- *((*bool)(yyv23)) = r.DecodeBool()
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv25 := &x.Value
- yym26 := z.DecBinary()
- _ = yym26
- if false {
- } else {
- *((*string)(yyv25)) = r.DecodeString()
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Nodes = nil
- } else {
- yyv27 := &x.Nodes
- yyv27.CodecDecodeSelf(d)
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.CreatedIndex = 0
- } else {
- yyv28 := &x.CreatedIndex
- yym29 := z.DecBinary()
- _ = yym29
- if false {
- } else {
- *((*uint64)(yyv28)) = uint64(r.DecodeUint(64))
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.ModifiedIndex = 0
- } else {
- yyv30 := &x.ModifiedIndex
- yym31 := z.DecBinary()
- _ = yym31
- if false {
- } else {
- *((*uint64)(yyv30)) = uint64(r.DecodeUint(64))
- }
- }
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- if x.Expiration != nil {
- x.Expiration = nil
- }
- } else {
- if x.Expiration == nil {
- x.Expiration = new(time.Time)
- }
- yym33 := z.DecBinary()
- _ = yym33
- if false {
- } else if yym34 := z.TimeRtidIfBinc(); yym34 != 0 {
- r.DecodeBuiltin(yym34, x.Expiration)
- } else if z.HasExtensions() && z.DecExt(x.Expiration) {
- } else if yym33 {
- z.DecBinaryUnmarshal(x.Expiration)
- } else if !yym33 && z.IsJSONHandle() {
- z.DecJSONUnmarshal(x.Expiration)
- } else {
- z.DecFallback(x.Expiration, false)
- }
- }
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv35 := &x.TTL
- yym36 := z.DecBinary()
- _ = yym36
- if false {
- } else {
- *((*int64)(yyv35)) = int64(r.DecodeInt(64))
- }
- }
- for {
- yyj20++
- if yyhl20 {
- yyb20 = yyj20 > l
- } else {
- yyb20 = r.CheckBreak()
- }
- if yyb20 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj20-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- h.encNodes((Nodes)(x), e)
- }
- }
-}
-
-func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- h.decNodes((*Nodes)(x), d)
- }
-}
-
-func (x *httpKeysAPI) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(0)
- } else {
- r.WriteMapStart(0)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *httpKeysAPI) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *httpKeysAPI) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *httpKeysAPI) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj4 int
- var yyb4 bool
- var yyhl4 bool = l >= 0
- for {
- yyj4++
- if yyhl4 {
- yyb4 = yyj4 > l
- } else {
- yyb4 = r.CheckBreak()
- }
- if yyb4 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj4-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *httpWatcher) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(0)
- } else {
- r.WriteMapStart(0)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *httpWatcher) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *httpWatcher) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *httpWatcher) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj4 int
- var yyb4 bool
- var yyhl4 bool = l >= 0
- for {
- yyj4++
- if yyhl4 {
- yyb4 = yyj4 > l
- } else {
- yyb4 = r.CheckBreak()
- }
- if yyb4 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj4-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *getAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(5)
- } else {
- r.WriteMapStart(5)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeBool(bool(x.Sorted))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Sorted"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeBool(bool(x.Sorted))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Quorum"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeBool(bool(x.Quorum))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *getAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *getAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv8 := &x.Recursive
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*bool)(yyv8)) = r.DecodeBool()
- }
- }
- case "Sorted":
- if r.TryDecodeAsNil() {
- x.Sorted = false
- } else {
- yyv10 := &x.Sorted
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*bool)(yyv10)) = r.DecodeBool()
- }
- }
- case "Quorum":
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv12 := &x.Quorum
- yym13 := z.DecBinary()
- _ = yym13
- if false {
- } else {
- *((*bool)(yyv12)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *getAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj14 int
- var yyb14 bool
- var yyhl14 bool = l >= 0
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv15 := &x.Prefix
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv17 := &x.Key
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv19 := &x.Recursive
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Sorted = false
- } else {
- yyv21 := &x.Sorted
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*bool)(yyv21)) = r.DecodeBool()
- }
- }
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Quorum = false
- } else {
- yyv23 := &x.Quorum
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else {
- *((*bool)(yyv23)) = r.DecodeBool()
- }
- }
- for {
- yyj14++
- if yyhl14 {
- yyb14 = yyj14 > l
- } else {
- yyb14 = r.CheckBreak()
- }
- if yyb14 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj14-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *waitAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeUint(uint64(x.WaitIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("WaitIndex"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeUint(uint64(x.WaitIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *waitAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *waitAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "WaitIndex":
- if r.TryDecodeAsNil() {
- x.WaitIndex = 0
- } else {
- yyv8 := &x.WaitIndex
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*uint64)(yyv8)) = uint64(r.DecodeUint(64))
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv10 := &x.Recursive
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*bool)(yyv10)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *waitAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv13 := &x.Prefix
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*string)(yyv13)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv15 := &x.Key
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.WaitIndex = 0
- } else {
- yyv17 := &x.WaitIndex
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*uint64)(yyv17)) = uint64(r.DecodeUint(64))
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv19 := &x.Recursive
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *setAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(10)
- } else {
- r.WriteMapStart(10)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Value"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- x.PrevExist.CodecEncodeSelf(e)
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevExist"))
- r.WriteMapElemValue()
- x.PrevExist.CodecEncodeSelf(e)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym22 := z.EncBinary()
- _ = yym22
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym23 := z.EncBinary()
- _ = yym23
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym25 := z.EncBinary()
- _ = yym25
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Refresh"))
- r.WriteMapElemValue()
- yym26 := z.EncBinary()
- _ = yym26
- if false {
- } else {
- r.EncodeBool(bool(x.Refresh))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym28 := z.EncBinary()
- _ = yym28
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym29 := z.EncBinary()
- _ = yym29
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym31 := z.EncBinary()
- _ = yym31
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("NoValueOnSuccess"))
- r.WriteMapElemValue()
- yym32 := z.EncBinary()
- _ = yym32
- if false {
- } else {
- r.EncodeBool(bool(x.NoValueOnSuccess))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *setAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *setAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "Value":
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv8 := &x.Value
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv10 := &x.PrevValue
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*string)(yyv10)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv12 := &x.PrevIndex
- yym13 := z.DecBinary()
- _ = yym13
- if false {
- } else {
- *((*uint64)(yyv12)) = uint64(r.DecodeUint(64))
- }
- }
- case "PrevExist":
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv14 := &x.PrevExist
- yyv14.CodecDecodeSelf(d)
- }
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv15 := &x.TTL
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv15) {
- } else {
- *((*int64)(yyv15)) = int64(r.DecodeInt(64))
- }
- }
- case "Refresh":
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv17 := &x.Refresh
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*bool)(yyv17)) = r.DecodeBool()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv19 := &x.Dir
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*bool)(yyv19)) = r.DecodeBool()
- }
- }
- case "NoValueOnSuccess":
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv21 := &x.NoValueOnSuccess
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*bool)(yyv21)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *setAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj23 int
- var yyb23 bool
- var yyhl23 bool = l >= 0
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv24 := &x.Prefix
- yym25 := z.DecBinary()
- _ = yym25
- if false {
- } else {
- *((*string)(yyv24)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv26 := &x.Key
- yym27 := z.DecBinary()
- _ = yym27
- if false {
- } else {
- *((*string)(yyv26)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv28 := &x.Value
- yym29 := z.DecBinary()
- _ = yym29
- if false {
- } else {
- *((*string)(yyv28)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv30 := &x.PrevValue
- yym31 := z.DecBinary()
- _ = yym31
- if false {
- } else {
- *((*string)(yyv30)) = r.DecodeString()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv32 := &x.PrevIndex
- yym33 := z.DecBinary()
- _ = yym33
- if false {
- } else {
- *((*uint64)(yyv32)) = uint64(r.DecodeUint(64))
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevExist = ""
- } else {
- yyv34 := &x.PrevExist
- yyv34.CodecDecodeSelf(d)
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv35 := &x.TTL
- yym36 := z.DecBinary()
- _ = yym36
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv35) {
- } else {
- *((*int64)(yyv35)) = int64(r.DecodeInt(64))
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Refresh = false
- } else {
- yyv37 := &x.Refresh
- yym38 := z.DecBinary()
- _ = yym38
- if false {
- } else {
- *((*bool)(yyv37)) = r.DecodeBool()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv39 := &x.Dir
- yym40 := z.DecBinary()
- _ = yym40
- if false {
- } else {
- *((*bool)(yyv39)) = r.DecodeBool()
- }
- }
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.NoValueOnSuccess = false
- } else {
- yyv41 := &x.NoValueOnSuccess
- yym42 := z.DecBinary()
- _ = yym42
- if false {
- } else {
- *((*bool)(yyv41)) = r.DecodeBool()
- }
- }
- for {
- yyj23++
- if yyhl23 {
- yyb23 = yyj23 > l
- } else {
- yyb23 = r.CheckBreak()
- }
- if yyb23 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj23-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *deleteAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(6)
- } else {
- r.WriteMapStart(6)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Key"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Key))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevValue"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.PrevValue))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("PrevIndex"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else {
- r.EncodeUint(uint64(x.PrevIndex))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym16 := z.EncBinary()
- _ = yym16
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym17 := z.EncBinary()
- _ = yym17
- if false {
- } else {
- r.EncodeBool(bool(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym19 := z.EncBinary()
- _ = yym19
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Recursive"))
- r.WriteMapElemValue()
- yym20 := z.EncBinary()
- _ = yym20
- if false {
- } else {
- r.EncodeBool(bool(x.Recursive))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *deleteAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *deleteAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Key":
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv6 := &x.Key
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "PrevValue":
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv8 := &x.PrevValue
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "PrevIndex":
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv10 := &x.PrevIndex
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else {
- *((*uint64)(yyv10)) = uint64(r.DecodeUint(64))
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv12 := &x.Dir
- yym13 := z.DecBinary()
- _ = yym13
- if false {
- } else {
- *((*bool)(yyv12)) = r.DecodeBool()
- }
- }
- case "Recursive":
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv14 := &x.Recursive
- yym15 := z.DecBinary()
- _ = yym15
- if false {
- } else {
- *((*bool)(yyv14)) = r.DecodeBool()
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *deleteAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj16 int
- var yyb16 bool
- var yyhl16 bool = l >= 0
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv17 := &x.Prefix
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Key = ""
- } else {
- yyv19 := &x.Key
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else {
- *((*string)(yyv19)) = r.DecodeString()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevValue = ""
- } else {
- yyv21 := &x.PrevValue
- yym22 := z.DecBinary()
- _ = yym22
- if false {
- } else {
- *((*string)(yyv21)) = r.DecodeString()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.PrevIndex = 0
- } else {
- yyv23 := &x.PrevIndex
- yym24 := z.DecBinary()
- _ = yym24
- if false {
- } else {
- *((*uint64)(yyv23)) = uint64(r.DecodeUint(64))
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = false
- } else {
- yyv25 := &x.Dir
- yym26 := z.DecBinary()
- _ = yym26
- if false {
- } else {
- *((*bool)(yyv25)) = r.DecodeBool()
- }
- }
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Recursive = false
- } else {
- yyv27 := &x.Recursive
- yym28 := z.DecBinary()
- _ = yym28
- if false {
- } else {
- *((*bool)(yyv27)) = r.DecodeBool()
- }
- }
- for {
- yyj16++
- if yyhl16 {
- yyb16 = yyj16 > l
- } else {
- yyb16 = r.CheckBreak()
- }
- if yyb16 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj16-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x *createInOrderAction) CodecEncodeSelf(e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- if x == nil {
- r.EncodeNil()
- } else {
- yym1 := z.EncBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.EncExt(x) {
- } else {
- yysep2 := !z.EncBinary()
- yy2arr2 := z.EncBasicHandle().StructToArray
- _, _ = yysep2, yy2arr2
- const yyr2 bool = false
- if yyr2 || yy2arr2 {
- r.WriteArrayStart(4)
- } else {
- r.WriteMapStart(4)
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym4 := z.EncBinary()
- _ = yym4
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Prefix"))
- r.WriteMapElemValue()
- yym5 := z.EncBinary()
- _ = yym5
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Prefix))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym7 := z.EncBinary()
- _ = yym7
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Dir))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Dir"))
- r.WriteMapElemValue()
- yym8 := z.EncBinary()
- _ = yym8
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Dir))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym10 := z.EncBinary()
- _ = yym10
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("Value"))
- r.WriteMapElemValue()
- yym11 := z.EncBinary()
- _ = yym11
- if false {
- } else {
- r.EncodeString(codecSelferC_UTF87612, string(x.Value))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayElem()
- yym13 := z.EncBinary()
- _ = yym13
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- } else {
- r.WriteMapElemKey()
- r.EncodeString(codecSelferC_UTF87612, string("TTL"))
- r.WriteMapElemValue()
- yym14 := z.EncBinary()
- _ = yym14
- if false {
- } else if z.HasExtensions() && z.EncExt(x.TTL) {
- } else {
- r.EncodeInt(int64(x.TTL))
- }
- }
- if yyr2 || yy2arr2 {
- r.WriteArrayEnd()
- } else {
- r.WriteMapEnd()
- }
- }
- }
-}
-
-func (x *createInOrderAction) CodecDecodeSelf(d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- yym1 := z.DecBinary()
- _ = yym1
- if false {
- } else if z.HasExtensions() && z.DecExt(x) {
- } else {
- yyct2 := r.ContainerType()
- if yyct2 == codecSelferValueTypeMap7612 {
- yyl2 := r.ReadMapStart()
- if yyl2 == 0 {
- r.ReadMapEnd()
- } else {
- x.codecDecodeSelfFromMap(yyl2, d)
- }
- } else if yyct2 == codecSelferValueTypeArray7612 {
- yyl2 := r.ReadArrayStart()
- if yyl2 == 0 {
- r.ReadArrayEnd()
- } else {
- x.codecDecodeSelfFromArray(yyl2, d)
- }
- } else {
- panic(codecSelferOnlyMapOrArrayEncodeToStructErr7612)
- }
- }
-}
-
-func (x *createInOrderAction) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yys3Slc = z.DecScratchBuffer() // default slice to decode into
- _ = yys3Slc
- var yyhl3 bool = l >= 0
- for yyj3 := 0; ; yyj3++ {
- if yyhl3 {
- if yyj3 >= l {
- break
- }
- } else {
- if r.CheckBreak() {
- break
- }
- }
- r.ReadMapElemKey()
- yys3Slc = r.DecodeStringAsBytes()
- yys3 := string(yys3Slc)
- r.ReadMapElemValue()
- switch yys3 {
- case "Prefix":
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv4 := &x.Prefix
- yym5 := z.DecBinary()
- _ = yym5
- if false {
- } else {
- *((*string)(yyv4)) = r.DecodeString()
- }
- }
- case "Dir":
- if r.TryDecodeAsNil() {
- x.Dir = ""
- } else {
- yyv6 := &x.Dir
- yym7 := z.DecBinary()
- _ = yym7
- if false {
- } else {
- *((*string)(yyv6)) = r.DecodeString()
- }
- }
- case "Value":
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv8 := &x.Value
- yym9 := z.DecBinary()
- _ = yym9
- if false {
- } else {
- *((*string)(yyv8)) = r.DecodeString()
- }
- }
- case "TTL":
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv10 := &x.TTL
- yym11 := z.DecBinary()
- _ = yym11
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv10) {
- } else {
- *((*int64)(yyv10)) = int64(r.DecodeInt(64))
- }
- }
- default:
- z.DecStructFieldNotFound(-1, yys3)
- } // end switch yys3
- } // end for yyj3
- r.ReadMapEnd()
-}
-
-func (x *createInOrderAction) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
- var yyj12 int
- var yyb12 bool
- var yyhl12 bool = l >= 0
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Prefix = ""
- } else {
- yyv13 := &x.Prefix
- yym14 := z.DecBinary()
- _ = yym14
- if false {
- } else {
- *((*string)(yyv13)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Dir = ""
- } else {
- yyv15 := &x.Dir
- yym16 := z.DecBinary()
- _ = yym16
- if false {
- } else {
- *((*string)(yyv15)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.Value = ""
- } else {
- yyv17 := &x.Value
- yym18 := z.DecBinary()
- _ = yym18
- if false {
- } else {
- *((*string)(yyv17)) = r.DecodeString()
- }
- }
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- r.ReadArrayEnd()
- return
- }
- r.ReadArrayElem()
- if r.TryDecodeAsNil() {
- x.TTL = 0
- } else {
- yyv19 := &x.TTL
- yym20 := z.DecBinary()
- _ = yym20
- if false {
- } else if z.HasExtensions() && z.DecExt(yyv19) {
- } else {
- *((*int64)(yyv19)) = int64(r.DecodeInt(64))
- }
- }
- for {
- yyj12++
- if yyhl12 {
- yyb12 = yyj12 > l
- } else {
- yyb12 = r.CheckBreak()
- }
- if yyb12 {
- break
- }
- r.ReadArrayElem()
- z.DecStructFieldNotFound(yyj12-1, "")
- }
- r.ReadArrayEnd()
-}
-
-func (x codecSelfer7612) encNodes(v Nodes, e *codec1978.Encoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperEncoder(e)
- _, _, _ = h, z, r
- r.WriteArrayStart(len(v))
- for _, yyv1 := range v {
- r.WriteArrayElem()
- if yyv1 == nil {
- r.EncodeNil()
- } else {
- yyv1.CodecEncodeSelf(e)
- }
- }
- r.WriteArrayEnd()
-}
-
-func (x codecSelfer7612) decNodes(v *Nodes, d *codec1978.Decoder) {
- var h codecSelfer7612
- z, r := codec1978.GenHelperDecoder(d)
- _, _, _ = h, z, r
-
- yyv1 := *v
- yyh1, yyl1 := z.DecSliceHelperStart()
- var yyc1 bool
- _ = yyc1
- if yyl1 == 0 {
- if yyv1 == nil {
- yyv1 = []*Node{}
- yyc1 = true
- } else if len(yyv1) != 0 {
- yyv1 = yyv1[:0]
- yyc1 = true
- }
- } else {
- yyhl1 := yyl1 > 0
- var yyrl1 int
- _ = yyrl1
- if yyhl1 {
- if yyl1 > cap(yyv1) {
- yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
- if yyrl1 <= cap(yyv1) {
- yyv1 = yyv1[:yyrl1]
- } else {
- yyv1 = make([]*Node, yyrl1)
- }
- yyc1 = true
- } else if yyl1 != len(yyv1) {
- yyv1 = yyv1[:yyl1]
- yyc1 = true
- }
- }
- var yyj1 int
- // var yydn1 bool
- for ; (yyhl1 && yyj1 < yyl1) || !(yyhl1 || r.CheckBreak()); yyj1++ {
- if yyj1 == 0 && len(yyv1) == 0 {
- if yyhl1 {
- yyrl1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 8)
- } else {
- yyrl1 = 8
- }
- yyv1 = make([]*Node, yyrl1)
- yyc1 = true
- }
- yyh1.ElemContainerState(yyj1)
- // yydn1 = r.TryDecodeAsNil()
-
- // if indefinite, etc, then expand the slice if necessary
- var yydb1 bool
- if yyj1 >= len(yyv1) {
- yyv1 = append(yyv1, nil)
- yyc1 = true
-
- }
- if yydb1 {
- z.DecSwallow()
- } else {
- if r.TryDecodeAsNil() {
- if yyv1[yyj1] != nil {
- *yyv1[yyj1] = Node{}
- }
- } else {
- if yyv1[yyj1] == nil {
- yyv1[yyj1] = new(Node)
- }
- yyw2 := yyv1[yyj1]
- yyw2.CodecDecodeSelf(d)
- }
-
- }
-
- }
- if yyj1 < len(yyv1) {
- yyv1 = yyv1[:yyj1]
- yyc1 = true
- } else if yyj1 == 0 && yyv1 == nil {
- yyv1 = make([]*Node, 0)
- yyc1 = true
- }
- }
- yyh1.End()
- if yyc1 {
- *v = yyv1
- }
-
-}
diff --git a/vendor/github.com/coreos/etcd/client/keys.go b/vendor/github.com/coreos/etcd/client/keys.go
deleted file mode 100644
index 8b9fd3f8..00000000
--- a/vendor/github.com/coreos/etcd/client/keys.go
+++ /dev/null
@@ -1,681 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go
-
-import (
- "context"
- "encoding/json"
- "errors"
- "fmt"
- "net/http"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "github.com/coreos/etcd/pkg/pathutil"
- "github.com/ugorji/go/codec"
-)
-
-const (
- ErrorCodeKeyNotFound = 100
- ErrorCodeTestFailed = 101
- ErrorCodeNotFile = 102
- ErrorCodeNotDir = 104
- ErrorCodeNodeExist = 105
- ErrorCodeRootROnly = 107
- ErrorCodeDirNotEmpty = 108
- ErrorCodeUnauthorized = 110
-
- ErrorCodePrevValueRequired = 201
- ErrorCodeTTLNaN = 202
- ErrorCodeIndexNaN = 203
- ErrorCodeInvalidField = 209
- ErrorCodeInvalidForm = 210
-
- ErrorCodeRaftInternal = 300
- ErrorCodeLeaderElect = 301
-
- ErrorCodeWatcherCleared = 400
- ErrorCodeEventIndexCleared = 401
-)
-
-type Error struct {
- Code int `json:"errorCode"`
- Message string `json:"message"`
- Cause string `json:"cause"`
- Index uint64 `json:"index"`
-}
-
-func (e Error) Error() string {
- return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index)
-}
-
-var (
- ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.")
- ErrEmptyBody = errors.New("client: response body is empty")
-)
-
-// PrevExistType is used to define an existence condition when setting
-// or deleting Nodes.
-type PrevExistType string
-
-const (
- PrevIgnore = PrevExistType("")
- PrevExist = PrevExistType("true")
- PrevNoExist = PrevExistType("false")
-)
-
-var (
- defaultV2KeysPrefix = "/v2/keys"
-)
-
-// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value
-// API over HTTP.
-func NewKeysAPI(c Client) KeysAPI {
- return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix)
-}
-
-// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller
-// to provide a custom base URL path. This should only be used in
-// very rare cases.
-func NewKeysAPIWithPrefix(c Client, p string) KeysAPI {
- return &httpKeysAPI{
- client: c,
- prefix: p,
- }
-}
-
-type KeysAPI interface {
- // Get retrieves a set of Nodes from etcd
- Get(ctx context.Context, key string, opts *GetOptions) (*Response, error)
-
- // Set assigns a new value to a Node identified by a given key. The caller
- // may define a set of conditions in the SetOptions. If SetOptions.Dir=true
- // then value is ignored.
- Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error)
-
- // Delete removes a Node identified by the given key, optionally destroying
- // all of its children as well. The caller may define a set of required
- // conditions in an DeleteOptions object.
- Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error)
-
- // Create is an alias for Set w/ PrevExist=false
- Create(ctx context.Context, key, value string) (*Response, error)
-
- // CreateInOrder is used to atomically create in-order keys within the given directory.
- CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error)
-
- // Update is an alias for Set w/ PrevExist=true
- Update(ctx context.Context, key, value string) (*Response, error)
-
- // Watcher builds a new Watcher targeted at a specific Node identified
- // by the given key. The Watcher may be configured at creation time
- // through a WatcherOptions object. The returned Watcher is designed
- // to emit events that happen to a Node, and optionally to its children.
- Watcher(key string, opts *WatcherOptions) Watcher
-}
-
-type WatcherOptions struct {
- // AfterIndex defines the index after-which the Watcher should
- // start emitting events. For example, if a value of 5 is
- // provided, the first event will have an index >= 6.
- //
- // Setting AfterIndex to 0 (default) means that the Watcher
- // should start watching for events starting at the current
- // index, whatever that may be.
- AfterIndex uint64
-
- // Recursive specifies whether or not the Watcher should emit
- // events that occur in children of the given keyspace. If set
- // to false (default), events will be limited to those that
- // occur for the exact key.
- Recursive bool
-}
-
-type CreateInOrderOptions struct {
- // TTL defines a period of time after-which the Node should
- // expire and no longer exist. Values <= 0 are ignored. Given
- // that the zero-value is ignored, TTL cannot be used to set
- // a TTL of 0.
- TTL time.Duration
-}
-
-type SetOptions struct {
- // PrevValue specifies what the current value of the Node must
- // be in order for the Set operation to succeed.
- //
- // Leaving this field empty means that the caller wishes to
- // ignore the current value of the Node. This cannot be used
- // to compare the Node's current value to an empty string.
- //
- // PrevValue is ignored if Dir=true
- PrevValue string
-
- // PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Set operation to succeed.
- //
- // If PrevIndex is set to 0 (default), no comparison is made.
- PrevIndex uint64
-
- // PrevExist specifies whether the Node must currently exist
- // (PrevExist) or not (PrevNoExist). If the caller does not
- // care about existence, set PrevExist to PrevIgnore, or simply
- // leave it unset.
- PrevExist PrevExistType
-
- // TTL defines a period of time after-which the Node should
- // expire and no longer exist. Values <= 0 are ignored. Given
- // that the zero-value is ignored, TTL cannot be used to set
- // a TTL of 0.
- TTL time.Duration
-
- // Refresh set to true means a TTL value can be updated
- // without firing a watch or changing the node value. A
- // value must not be provided when refreshing a key.
- Refresh bool
-
- // Dir specifies whether or not this Node should be created as a directory.
- Dir bool
-
- // NoValueOnSuccess specifies whether the response contains the current value of the Node.
- // If set, the response will only contain the current value when the request fails.
- NoValueOnSuccess bool
-}
-
-type GetOptions struct {
- // Recursive defines whether or not all children of the Node
- // should be returned.
- Recursive bool
-
- // Sort instructs the server whether or not to sort the Nodes.
- // If true, the Nodes are sorted alphabetically by key in
- // ascending order (A to z). If false (default), the Nodes will
- // not be sorted and the ordering used should not be considered
- // predictable.
- Sort bool
-
- // Quorum specifies whether it gets the latest committed value that
- // has been applied in quorum of members, which ensures external
- // consistency (or linearizability).
- Quorum bool
-}
-
-type DeleteOptions struct {
- // PrevValue specifies what the current value of the Node must
- // be in order for the Delete operation to succeed.
- //
- // Leaving this field empty means that the caller wishes to
- // ignore the current value of the Node. This cannot be used
- // to compare the Node's current value to an empty string.
- PrevValue string
-
- // PrevIndex indicates what the current ModifiedIndex of the
- // Node must be in order for the Delete operation to succeed.
- //
- // If PrevIndex is set to 0 (default), no comparison is made.
- PrevIndex uint64
-
- // Recursive defines whether or not all children of the Node
- // should be deleted. If set to true, all children of the Node
- // identified by the given key will be deleted. If left unset
- // or explicitly set to false, only a single Node will be
- // deleted.
- Recursive bool
-
- // Dir specifies whether or not this Node should be removed as a directory.
- Dir bool
-}
-
-type Watcher interface {
- // Next blocks until an etcd event occurs, then returns a Response
- // representing that event. The behavior of Next depends on the
- // WatcherOptions used to construct the Watcher. Next is designed to
- // be called repeatedly, each time blocking until a subsequent event
- // is available.
- //
- // If the provided context is cancelled, Next will return a non-nil
- // error. Any other failures encountered while waiting for the next
- // event (connection issues, deserialization failures, etc) will
- // also result in a non-nil error.
- Next(context.Context) (*Response, error)
-}
-
-type Response struct {
- // Action is the name of the operation that occurred. Possible values
- // include get, set, delete, update, create, compareAndSwap,
- // compareAndDelete and expire.
- Action string `json:"action"`
-
- // Node represents the state of the relevant etcd Node.
- Node *Node `json:"node"`
-
- // PrevNode represents the previous state of the Node. PrevNode is non-nil
- // only if the Node existed before the action occurred and the action
- // caused a change to the Node.
- PrevNode *Node `json:"prevNode"`
-
- // Index holds the cluster-level index at the time the Response was generated.
- // This index is not tied to the Node(s) contained in this Response.
- Index uint64 `json:"-"`
-
- // ClusterID holds the cluster-level ID reported by the server. This
- // should be different for different etcd clusters.
- ClusterID string `json:"-"`
-}
-
-type Node struct {
- // Key represents the unique location of this Node (e.g. "/foo/bar").
- Key string `json:"key"`
-
- // Dir reports whether node describes a directory.
- Dir bool `json:"dir,omitempty"`
-
- // Value is the current data stored on this Node. If this Node
- // is a directory, Value will be empty.
- Value string `json:"value"`
-
- // Nodes holds the children of this Node, only if this Node is a directory.
- // This slice of will be arbitrarily deep (children, grandchildren, great-
- // grandchildren, etc.) if a recursive Get or Watch request were made.
- Nodes Nodes `json:"nodes"`
-
- // CreatedIndex is the etcd index at-which this Node was created.
- CreatedIndex uint64 `json:"createdIndex"`
-
- // ModifiedIndex is the etcd index at-which this Node was last modified.
- ModifiedIndex uint64 `json:"modifiedIndex"`
-
- // Expiration is the server side expiration time of the key.
- Expiration *time.Time `json:"expiration,omitempty"`
-
- // TTL is the time to live of the key in second.
- TTL int64 `json:"ttl,omitempty"`
-}
-
-func (n *Node) String() string {
- return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL)
-}
-
-// TTLDuration returns the Node's TTL as a time.Duration object
-func (n *Node) TTLDuration() time.Duration {
- return time.Duration(n.TTL) * time.Second
-}
-
-type Nodes []*Node
-
-// interfaces for sorting
-
-func (ns Nodes) Len() int { return len(ns) }
-func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key }
-func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] }
-
-type httpKeysAPI struct {
- client httpClient
- prefix string
-}
-
-func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) {
- act := &setAction{
- Prefix: k.prefix,
- Key: key,
- Value: val,
- }
-
- if opts != nil {
- act.PrevValue = opts.PrevValue
- act.PrevIndex = opts.PrevIndex
- act.PrevExist = opts.PrevExist
- act.TTL = opts.TTL
- act.Refresh = opts.Refresh
- act.Dir = opts.Dir
- act.NoValueOnSuccess = opts.NoValueOnSuccess
- }
-
- doCtx := ctx
- if act.PrevExist == PrevNoExist {
- doCtx = context.WithValue(doCtx, &oneShotCtxValue, &oneShotCtxValue)
- }
- resp, body, err := k.client.Do(doCtx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) {
- return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist})
-}
-
-func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) {
- act := &createInOrderAction{
- Prefix: k.prefix,
- Dir: dir,
- Value: val,
- }
-
- if opts != nil {
- act.TTL = opts.TTL
- }
-
- resp, body, err := k.client.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) {
- return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist})
-}
-
-func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) {
- act := &deleteAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.PrevValue = opts.PrevValue
- act.PrevIndex = opts.PrevIndex
- act.Dir = opts.Dir
- act.Recursive = opts.Recursive
- }
-
- doCtx := context.WithValue(ctx, &oneShotCtxValue, &oneShotCtxValue)
- resp, body, err := k.client.Do(doCtx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) {
- act := &getAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.Recursive = opts.Recursive
- act.Sorted = opts.Sort
- act.Quorum = opts.Quorum
- }
-
- resp, body, err := k.client.Do(ctx, act)
- if err != nil {
- return nil, err
- }
-
- return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body)
-}
-
-func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher {
- act := waitAction{
- Prefix: k.prefix,
- Key: key,
- }
-
- if opts != nil {
- act.Recursive = opts.Recursive
- if opts.AfterIndex > 0 {
- act.WaitIndex = opts.AfterIndex + 1
- }
- }
-
- return &httpWatcher{
- client: k.client,
- nextWait: act,
- }
-}
-
-type httpWatcher struct {
- client httpClient
- nextWait waitAction
-}
-
-func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) {
- for {
- httpresp, body, err := hw.client.Do(ctx, &hw.nextWait)
- if err != nil {
- return nil, err
- }
-
- resp, err := unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body)
- if err != nil {
- if err == ErrEmptyBody {
- continue
- }
- return nil, err
- }
-
- hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1
- return resp, nil
- }
-}
-
-// v2KeysURL forms a URL representing the location of a key.
-// The endpoint argument represents the base URL of an etcd
-// server. The prefix is the path needed to route from the
-// provided endpoint's path to the root of the keys API
-// (typically "/v2/keys").
-func v2KeysURL(ep url.URL, prefix, key string) *url.URL {
- // We concatenate all parts together manually. We cannot use
- // path.Join because it does not reserve trailing slash.
- // We call CanonicalURLPath to further cleanup the path.
- if prefix != "" && prefix[0] != '/' {
- prefix = "/" + prefix
- }
- if key != "" && key[0] != '/' {
- key = "/" + key
- }
- ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key)
- return &ep
-}
-
-type getAction struct {
- Prefix string
- Key string
- Recursive bool
- Sorted bool
- Quorum bool
-}
-
-func (g *getAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, g.Prefix, g.Key)
-
- params := u.Query()
- params.Set("recursive", strconv.FormatBool(g.Recursive))
- params.Set("sorted", strconv.FormatBool(g.Sorted))
- params.Set("quorum", strconv.FormatBool(g.Quorum))
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type waitAction struct {
- Prefix string
- Key string
- WaitIndex uint64
- Recursive bool
-}
-
-func (w *waitAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, w.Prefix, w.Key)
-
- params := u.Query()
- params.Set("wait", "true")
- params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10))
- params.Set("recursive", strconv.FormatBool(w.Recursive))
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type setAction struct {
- Prefix string
- Key string
- Value string
- PrevValue string
- PrevIndex uint64
- PrevExist PrevExistType
- TTL time.Duration
- Refresh bool
- Dir bool
- NoValueOnSuccess bool
-}
-
-func (a *setAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Key)
-
- params := u.Query()
- form := url.Values{}
-
- // we're either creating a directory or setting a key
- if a.Dir {
- params.Set("dir", strconv.FormatBool(a.Dir))
- } else {
- // These options are only valid for setting a key
- if a.PrevValue != "" {
- params.Set("prevValue", a.PrevValue)
- }
- form.Add("value", a.Value)
- }
-
- // Options which apply to both setting a key and creating a dir
- if a.PrevIndex != 0 {
- params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
- }
- if a.PrevExist != PrevIgnore {
- params.Set("prevExist", string(a.PrevExist))
- }
- if a.TTL > 0 {
- form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
- }
-
- if a.Refresh {
- form.Add("refresh", "true")
- }
- if a.NoValueOnSuccess {
- params.Set("noValueOnSuccess", strconv.FormatBool(a.NoValueOnSuccess))
- }
-
- u.RawQuery = params.Encode()
- body := strings.NewReader(form.Encode())
-
- req, _ := http.NewRequest("PUT", u.String(), body)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req
-}
-
-type deleteAction struct {
- Prefix string
- Key string
- PrevValue string
- PrevIndex uint64
- Dir bool
- Recursive bool
-}
-
-func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Key)
-
- params := u.Query()
- if a.PrevValue != "" {
- params.Set("prevValue", a.PrevValue)
- }
- if a.PrevIndex != 0 {
- params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10))
- }
- if a.Dir {
- params.Set("dir", "true")
- }
- if a.Recursive {
- params.Set("recursive", "true")
- }
- u.RawQuery = params.Encode()
-
- req, _ := http.NewRequest("DELETE", u.String(), nil)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req
-}
-
-type createInOrderAction struct {
- Prefix string
- Dir string
- Value string
- TTL time.Duration
-}
-
-func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request {
- u := v2KeysURL(ep, a.Prefix, a.Dir)
-
- form := url.Values{}
- form.Add("value", a.Value)
- if a.TTL > 0 {
- form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10))
- }
- body := strings.NewReader(form.Encode())
-
- req, _ := http.NewRequest("POST", u.String(), body)
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
- return req
-}
-
-func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) {
- switch code {
- case http.StatusOK, http.StatusCreated:
- if len(body) == 0 {
- return nil, ErrEmptyBody
- }
- res, err = unmarshalSuccessfulKeysResponse(header, body)
- default:
- err = unmarshalFailedKeysResponse(body)
- }
- return res, err
-}
-
-func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) {
- var res Response
- err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res)
- if err != nil {
- return nil, ErrInvalidJSON
- }
- if header.Get("X-Etcd-Index") != "" {
- res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64)
- if err != nil {
- return nil, err
- }
- }
- res.ClusterID = header.Get("X-Etcd-Cluster-ID")
- return &res, nil
-}
-
-func unmarshalFailedKeysResponse(body []byte) error {
- var etcdErr Error
- if err := json.Unmarshal(body, &etcdErr); err != nil {
- return ErrInvalidJSON
- }
- return etcdErr
-}
diff --git a/vendor/github.com/coreos/etcd/client/members.go b/vendor/github.com/coreos/etcd/client/members.go
deleted file mode 100644
index aafa3d1b..00000000
--- a/vendor/github.com/coreos/etcd/client/members.go
+++ /dev/null
@@ -1,303 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "net/http"
- "net/url"
- "path"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-var (
- defaultV2MembersPrefix = "/v2/members"
- defaultLeaderSuffix = "/leader"
-)
-
-type Member struct {
- // ID is the unique identifier of this Member.
- ID string `json:"id"`
-
- // Name is a human-readable, non-unique identifier of this Member.
- Name string `json:"name"`
-
- // PeerURLs represents the HTTP(S) endpoints this Member uses to
- // participate in etcd's consensus protocol.
- PeerURLs []string `json:"peerURLs"`
-
- // ClientURLs represents the HTTP(S) endpoints on which this Member
- // serves its client-facing APIs.
- ClientURLs []string `json:"clientURLs"`
-}
-
-type memberCollection []Member
-
-func (c *memberCollection) UnmarshalJSON(data []byte) error {
- d := struct {
- Members []Member
- }{}
-
- if err := json.Unmarshal(data, &d); err != nil {
- return err
- }
-
- if d.Members == nil {
- *c = make([]Member, 0)
- return nil
- }
-
- *c = d.Members
- return nil
-}
-
-type memberCreateOrUpdateRequest struct {
- PeerURLs types.URLs
-}
-
-func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) {
- s := struct {
- PeerURLs []string `json:"peerURLs"`
- }{
- PeerURLs: make([]string, len(m.PeerURLs)),
- }
-
- for i, u := range m.PeerURLs {
- s.PeerURLs[i] = u.String()
- }
-
- return json.Marshal(&s)
-}
-
-// NewMembersAPI constructs a new MembersAPI that uses HTTP to
-// interact with etcd's membership API.
-func NewMembersAPI(c Client) MembersAPI {
- return &httpMembersAPI{
- client: c,
- }
-}
-
-type MembersAPI interface {
- // List enumerates the current cluster membership.
- List(ctx context.Context) ([]Member, error)
-
- // Add instructs etcd to accept a new Member into the cluster.
- Add(ctx context.Context, peerURL string) (*Member, error)
-
- // Remove demotes an existing Member out of the cluster.
- Remove(ctx context.Context, mID string) error
-
- // Update instructs etcd to update an existing Member in the cluster.
- Update(ctx context.Context, mID string, peerURLs []string) error
-
- // Leader gets current leader of the cluster
- Leader(ctx context.Context) (*Member, error)
-}
-
-type httpMembersAPI struct {
- client httpClient
-}
-
-func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) {
- req := &membersAPIActionList{}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
-
- var mCollection memberCollection
- if err := json.Unmarshal(body, &mCollection); err != nil {
- return nil, err
- }
-
- return []Member(mCollection), nil
-}
-
-func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) {
- urls, err := types.NewURLs([]string{peerURL})
- if err != nil {
- return nil, err
- }
-
- req := &membersAPIActionAdd{peerURLs: urls}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil {
- return nil, err
- }
-
- if resp.StatusCode != http.StatusCreated {
- var merr membersError
- if err := json.Unmarshal(body, &merr); err != nil {
- return nil, err
- }
- return nil, merr
- }
-
- var memb Member
- if err := json.Unmarshal(body, &memb); err != nil {
- return nil, err
- }
-
- return &memb, nil
-}
-
-func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error {
- urls, err := types.NewURLs(peerURLs)
- if err != nil {
- return err
- }
-
- req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil {
- return err
- }
-
- if resp.StatusCode != http.StatusNoContent {
- var merr membersError
- if err := json.Unmarshal(body, &merr); err != nil {
- return err
- }
- return merr
- }
-
- return nil
-}
-
-func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
- req := &membersAPIActionRemove{memberID: memberID}
- resp, _, err := m.client.Do(ctx, req)
- if err != nil {
- return err
- }
-
- return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
-}
-
-func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
- req := &membersAPIActionLeader{}
- resp, body, err := m.client.Do(ctx, req)
- if err != nil {
- return nil, err
- }
-
- if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
- return nil, err
- }
-
- var leader Member
- if err := json.Unmarshal(body, &leader); err != nil {
- return nil, err
- }
-
- return &leader, nil
-}
-
-type membersAPIActionList struct{}
-
-func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-type membersAPIActionRemove struct {
- memberID string
-}
-
-func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- u.Path = path.Join(u.Path, d.memberID)
- req, _ := http.NewRequest("DELETE", u.String(), nil)
- return req
-}
-
-type membersAPIActionAdd struct {
- peerURLs types.URLs
-}
-
-func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
- b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b))
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-type membersAPIActionUpdate struct {
- memberID string
- peerURLs types.URLs
-}
-
-func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs}
- u.Path = path.Join(u.Path, a.memberID)
- b, _ := json.Marshal(&m)
- req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b))
- req.Header.Set("Content-Type", "application/json")
- return req
-}
-
-func assertStatusCode(got int, want ...int) (err error) {
- for _, w := range want {
- if w == got {
- return nil
- }
- }
- return fmt.Errorf("unexpected status code %d", got)
-}
-
-type membersAPIActionLeader struct{}
-
-func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
- u := v2MembersURL(ep)
- u.Path = path.Join(u.Path, defaultLeaderSuffix)
- req, _ := http.NewRequest("GET", u.String(), nil)
- return req
-}
-
-// v2MembersURL add the necessary path to the provided endpoint
-// to route requests to the default v2 members API.
-func v2MembersURL(ep url.URL) *url.URL {
- ep.Path = path.Join(ep.Path, defaultV2MembersPrefix)
- return &ep
-}
-
-type membersError struct {
- Message string `json:"message"`
- Code int `json:"-"`
-}
-
-func (e membersError) Error() string {
- return e.Message
-}
diff --git a/vendor/github.com/coreos/etcd/client/util.go b/vendor/github.com/coreos/etcd/client/util.go
deleted file mode 100644
index 15a8babf..00000000
--- a/vendor/github.com/coreos/etcd/client/util.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2016 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
- "regexp"
-)
-
-var (
- roleNotFoundRegExp *regexp.Regexp
- userNotFoundRegExp *regexp.Regexp
-)
-
-func init() {
- roleNotFoundRegExp = regexp.MustCompile("auth: Role .* does not exist.")
- userNotFoundRegExp = regexp.MustCompile("auth: User .* does not exist.")
-}
-
-// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
-func IsKeyNotFound(err error) bool {
- if cErr, ok := err.(Error); ok {
- return cErr.Code == ErrorCodeKeyNotFound
- }
- return false
-}
-
-// IsRoleNotFound returns true if the error means role not found of v2 API.
-func IsRoleNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return roleNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
-
-// IsUserNotFound returns true if the error means user not found of v2 API.
-func IsUserNotFound(err error) bool {
- if ae, ok := err.(authError); ok {
- return userNotFoundRegExp.MatchString(ae.Message)
- }
- return false
-}
diff --git a/vendor/github.com/coreos/etcd/clientv3/client.go b/vendor/github.com/coreos/etcd/clientv3/client.go
index 71328077..78db5d4b 100644
--- a/vendor/github.com/coreos/etcd/clientv3/client.go
+++ b/vendor/github.com/coreos/etcd/clientv3/client.go
@@ -56,7 +56,7 @@ type Client struct {
cfg Config
creds *credentials.TransportCredentials
balancer *healthBalancer
- mu *sync.Mutex
+ mu *sync.RWMutex
ctx context.Context
cancel context.CancelFunc
@@ -110,11 +110,13 @@ func (c *Client) Close() error {
func (c *Client) Ctx() context.Context { return c.ctx }
// Endpoints lists the registered endpoints for the client.
-func (c *Client) Endpoints() (eps []string) {
+func (c *Client) Endpoints() []string {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
// copy the slice; protect original endpoints from being changed
- eps = make([]string, len(c.cfg.Endpoints))
+ eps := make([]string, len(c.cfg.Endpoints))
copy(eps, c.cfg.Endpoints)
- return
+ return eps
}
// SetEndpoints updates client's endpoints.
@@ -387,7 +389,7 @@ func newClient(cfg *Config) (*Client, error) {
creds: creds,
ctx: ctx,
cancel: cancel,
- mu: new(sync.Mutex),
+ mu: new(sync.RWMutex),
callOpts: defaultCallOpts,
}
if cfg.Username != "" && cfg.Password != "" {
diff --git a/vendor/github.com/coreos/etcd/clientv3/lease.go b/vendor/github.com/coreos/etcd/clientv3/lease.go
index 4097b3af..3729cf37 100644
--- a/vendor/github.com/coreos/etcd/clientv3/lease.go
+++ b/vendor/github.com/coreos/etcd/clientv3/lease.go
@@ -77,8 +77,6 @@ const (
// defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client.
defaultTTL = 5 * time.Second
- // a small buffer to store unsent lease responses.
- leaseResponseChSize = 16
// NoLease is a lease ID for the absence of a lease.
NoLease LeaseID = 0
@@ -86,6 +84,11 @@ const (
retryConnWait = 500 * time.Millisecond
)
+// LeaseResponseChSize is the size of buffer to store unsent lease responses.
+// WARNING: DO NOT UPDATE.
+// Only for testing purposes.
+var LeaseResponseChSize = 16
+
// ErrKeepAliveHalted is returned if client keep alive loop halts with an unexpected error.
//
// This usually means that automatic lease renewal via KeepAlive is broken, but KeepAliveOnce will still work as expected.
@@ -258,7 +261,7 @@ func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
- ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)
+ ch := make(chan *LeaseKeepAliveResponse, LeaseResponseChSize)
l.mu.Lock()
// ensure that recvKeepAliveLoop is still running
@@ -514,9 +517,10 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
for _, ch := range ka.chs {
select {
case ch <- karesp:
- ka.nextKeepAlive = nextKeepAlive
default:
}
+ // still advance in order to rate-limit keep-alive sends
+ ka.nextKeepAlive = nextKeepAlive
}
}
diff --git a/vendor/github.com/coreos/etcd/cmd/etcd b/vendor/github.com/coreos/etcd/cmd/etcd
deleted file mode 120000
index b870225a..00000000
--- a/vendor/github.com/coreos/etcd/cmd/etcd
+++ /dev/null
@@ -1 +0,0 @@
-../
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/cmd/etcdctl b/vendor/github.com/coreos/etcd/cmd/etcdctl
deleted file mode 120000
index 05bb269d..00000000
--- a/vendor/github.com/coreos/etcd/cmd/etcdctl
+++ /dev/null
@@ -1 +0,0 @@
-../etcdctl
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/cmd/functional b/vendor/github.com/coreos/etcd/cmd/functional
deleted file mode 120000
index 44faa31a..00000000
--- a/vendor/github.com/coreos/etcd/cmd/functional
+++ /dev/null
@@ -1 +0,0 @@
-../functional
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/cmd/tools b/vendor/github.com/coreos/etcd/cmd/tools
deleted file mode 120000
index 4887d6e0..00000000
--- a/vendor/github.com/coreos/etcd/cmd/tools
+++ /dev/null
@@ -1 +0,0 @@
-../tools
\ No newline at end of file
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
index ec6b6397..3d3536a3 100644
--- a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
@@ -59,7 +59,7 @@ func (as *InternalRaftStringer) String() string {
case as.Request.Put != nil:
return fmt.Sprintf("header:<%s> put:<%s>",
as.Request.Header.String(),
- newLoggablePutRequest(as.Request.Put).String(),
+ NewLoggablePutRequest(as.Request.Put).String(),
)
case as.Request.Txn != nil:
return fmt.Sprintf("header:<%s> txn:<%s>",
@@ -121,7 +121,7 @@ func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
func (as *requestOpStringer) String() string {
switch op := as.Op.Request.(type) {
case *RequestOp_RequestPut:
- return fmt.Sprintf("request_put:<%s>", newLoggablePutRequest(op.RequestPut).String())
+ return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
case *RequestOp_RequestTxn:
return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
default:
@@ -167,7 +167,7 @@ type loggablePutRequest struct {
IgnoreLease bool `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
}
-func newLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
return &loggablePutRequest{
request.Key,
len(request.Value),
diff --git a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/github.com/coreos/etcd/pkg/pathutil/path.go
deleted file mode 100644
index f26254ba..00000000
--- a/vendor/github.com/coreos/etcd/pkg/pathutil/path.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package pathutil implements utility functions for handling slash-separated
-// paths.
-package pathutil
-
-import "path"
-
-// CanonicalURLPath returns the canonical url path for p, which follows the rules:
-// 1. the path always starts with "/"
-// 2. replace multiple slashes with a single slash
-// 3. replace each '.' '..' path name element with equivalent one
-// 4. keep the trailing slash
-// The function is borrowed from stdlib http.cleanPath in server.go.
-func CanonicalURLPath(p string) string {
- if p == "" {
- return "/"
- }
- if p[0] != '/' {
- p = "/" + p
- }
- np := path.Clean(p)
- // path.Clean removes trailing slash except for root,
- // put the trailing slash back if necessary.
- if p[len(p)-1] == '/' && np != "/" {
- np += "/"
- }
- return np
-}
diff --git a/vendor/github.com/coreos/etcd/pkg/srv/srv.go b/vendor/github.com/coreos/etcd/pkg/srv/srv.go
deleted file mode 100644
index 600061ce..00000000
--- a/vendor/github.com/coreos/etcd/pkg/srv/srv.go
+++ /dev/null
@@ -1,141 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package srv looks up DNS SRV records.
-package srv
-
-import (
- "fmt"
- "net"
- "net/url"
- "strings"
-
- "github.com/coreos/etcd/pkg/types"
-)
-
-var (
- // indirection for testing
- lookupSRV = net.LookupSRV // net.DefaultResolver.LookupSRV when ctxs don't conflict
- resolveTCPAddr = net.ResolveTCPAddr
-)
-
-// GetCluster gets the cluster information via DNS discovery.
-// Also sees each entry as a separate instance.
-func GetCluster(service, name, dns string, apurls types.URLs) ([]string, error) {
- tempName := int(0)
- tcp2ap := make(map[string]url.URL)
-
- // First, resolve the apurls
- for _, url := range apurls {
- tcpAddr, err := resolveTCPAddr("tcp", url.Host)
- if err != nil {
- return nil, err
- }
- tcp2ap[tcpAddr.String()] = url
- }
-
- stringParts := []string{}
- updateNodeMap := func(service, scheme string) error {
- _, addrs, err := lookupSRV(service, "tcp", dns)
- if err != nil {
- return err
- }
- for _, srv := range addrs {
- port := fmt.Sprintf("%d", srv.Port)
- host := net.JoinHostPort(srv.Target, port)
- tcpAddr, terr := resolveTCPAddr("tcp", host)
- if terr != nil {
- err = terr
- continue
- }
- n := ""
- url, ok := tcp2ap[tcpAddr.String()]
- if ok {
- n = name
- }
- if n == "" {
- n = fmt.Sprintf("%d", tempName)
- tempName++
- }
- // SRV records have a trailing dot but URL shouldn't.
- shortHost := strings.TrimSuffix(srv.Target, ".")
- urlHost := net.JoinHostPort(shortHost, port)
- if ok && url.Scheme != scheme {
- err = fmt.Errorf("bootstrap at %s from DNS for %s has scheme mismatch with expected peer %s", scheme+"://"+urlHost, service, url.String())
- } else {
- stringParts = append(stringParts, fmt.Sprintf("%s=%s://%s", n, scheme, urlHost))
- }
- }
- if len(stringParts) == 0 {
- return err
- }
- return nil
- }
-
- failCount := 0
- err := updateNodeMap(service+"-ssl", "https")
- srvErr := make([]string, 2)
- if err != nil {
- srvErr[0] = fmt.Sprintf("error querying DNS SRV records for _%s-ssl %s", service, err)
- failCount++
- }
- err = updateNodeMap(service, "http")
- if err != nil {
- srvErr[1] = fmt.Sprintf("error querying DNS SRV records for _%s %s", service, err)
- failCount++
- }
- if failCount == 2 {
- return nil, fmt.Errorf("srv: too many errors querying DNS SRV records (%q, %q)", srvErr[0], srvErr[1])
- }
- return stringParts, nil
-}
-
-type SRVClients struct {
- Endpoints []string
- SRVs []*net.SRV
-}
-
-// GetClient looks up the client endpoints for a service and domain.
-func GetClient(service, domain string) (*SRVClients, error) {
- var urls []*url.URL
- var srvs []*net.SRV
-
- updateURLs := func(service, scheme string) error {
- _, addrs, err := lookupSRV(service, "tcp", domain)
- if err != nil {
- return err
- }
- for _, srv := range addrs {
- urls = append(urls, &url.URL{
- Scheme: scheme,
- Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)),
- })
- }
- srvs = append(srvs, addrs...)
- return nil
- }
-
- errHTTPS := updateURLs(service+"-ssl", "https")
- errHTTP := updateURLs(service, "http")
-
- if errHTTPS != nil && errHTTP != nil {
- return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP)
- }
-
- endpoints := make([]string, len(urls))
- for i := range urls {
- endpoints[i] = urls[i].String()
- }
- return &SRVClients{Endpoints: endpoints, SRVs: srvs}, nil
-}
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
deleted file mode 100644
index 03ef91d4..00000000
--- a/vendor/github.com/coreos/etcd/version/version.go
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2015 The etcd Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package version implements etcd version parsing and contains latest version
-// information.
-package version
-
-import (
- "fmt"
- "strings"
-
- "github.com/coreos/go-semver/semver"
-)
-
-var (
- // MinClusterVersion is the min cluster version this etcd binary is compatible with.
- MinClusterVersion = "3.0.0"
- Version = "3.3.8"
- APIVersion = "unknown"
-
- // Git SHA Value will be set during build
- GitSHA = "Not provided (use ./build instead of go build)"
-)
-
-func init() {
- ver, err := semver.NewVersion(Version)
- if err == nil {
- APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
- }
-}
-
-type Versions struct {
- Server string `json:"etcdserver"`
- Cluster string `json:"etcdcluster"`
- // TODO: raft state machine version
-}
-
-// Cluster only keeps the major.minor.
-func Cluster(v string) string {
- vs := strings.Split(v, ".")
- if len(vs) <= 2 {
- return v
- }
- return fmt.Sprintf("%s.%s", vs[0], vs[1])
-}
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
deleted file mode 100644
index 110fc23e..00000000
--- a/vendor/github.com/coreos/go-semver/semver/semver.go
+++ /dev/null
@@ -1,268 +0,0 @@
-// Copyright 2013-2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Semantic Versions http://semver.org
-package semver
-
-import (
- "bytes"
- "errors"
- "fmt"
- "strconv"
- "strings"
-)
-
-type Version struct {
- Major int64
- Minor int64
- Patch int64
- PreRelease PreRelease
- Metadata string
-}
-
-type PreRelease string
-
-func splitOff(input *string, delim string) (val string) {
- parts := strings.SplitN(*input, delim, 2)
-
- if len(parts) == 2 {
- *input = parts[0]
- val = parts[1]
- }
-
- return val
-}
-
-func New(version string) *Version {
- return Must(NewVersion(version))
-}
-
-func NewVersion(version string) (*Version, error) {
- v := Version{}
-
- if err := v.Set(version); err != nil {
- return nil, err
- }
-
- return &v, nil
-}
-
-// Must is a helper for wrapping NewVersion and will panic if err is not nil.
-func Must(v *Version, err error) *Version {
- if err != nil {
- panic(err)
- }
- return v
-}
-
-// Set parses and updates v from the given version string. Implements flag.Value
-func (v *Version) Set(version string) error {
- metadata := splitOff(&version, "+")
- preRelease := PreRelease(splitOff(&version, "-"))
- dotParts := strings.SplitN(version, ".", 3)
-
- if len(dotParts) != 3 {
- return fmt.Errorf("%s is not in dotted-tri format", version)
- }
-
- parsed := make([]int64, 3, 3)
-
- for i, v := range dotParts[:3] {
- val, err := strconv.ParseInt(v, 10, 64)
- parsed[i] = val
- if err != nil {
- return err
- }
- }
-
- v.Metadata = metadata
- v.PreRelease = preRelease
- v.Major = parsed[0]
- v.Minor = parsed[1]
- v.Patch = parsed[2]
- return nil
-}
-
-func (v Version) String() string {
- var buffer bytes.Buffer
-
- fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
-
- if v.PreRelease != "" {
- fmt.Fprintf(&buffer, "-%s", v.PreRelease)
- }
-
- if v.Metadata != "" {
- fmt.Fprintf(&buffer, "+%s", v.Metadata)
- }
-
- return buffer.String()
-}
-
-func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
- var data string
- if err := unmarshal(&data); err != nil {
- return err
- }
- return v.Set(data)
-}
-
-func (v Version) MarshalJSON() ([]byte, error) {
- return []byte(`"` + v.String() + `"`), nil
-}
-
-func (v *Version) UnmarshalJSON(data []byte) error {
- l := len(data)
- if l == 0 || string(data) == `""` {
- return nil
- }
- if l < 2 || data[0] != '"' || data[l-1] != '"' {
- return errors.New("invalid semver string")
- }
- return v.Set(string(data[1 : l-1]))
-}
-
-// Compare tests if v is less than, equal to, or greater than versionB,
-// returning -1, 0, or +1 respectively.
-func (v Version) Compare(versionB Version) int {
- if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
- return cmp
- }
- return preReleaseCompare(v, versionB)
-}
-
-// Equal tests if v is equal to versionB.
-func (v Version) Equal(versionB Version) bool {
- return v.Compare(versionB) == 0
-}
-
-// LessThan tests if v is less than versionB.
-func (v Version) LessThan(versionB Version) bool {
- return v.Compare(versionB) < 0
-}
-
-// Slice converts the comparable parts of the semver into a slice of integers.
-func (v Version) Slice() []int64 {
- return []int64{v.Major, v.Minor, v.Patch}
-}
-
-func (p PreRelease) Slice() []string {
- preRelease := string(p)
- return strings.Split(preRelease, ".")
-}
-
-func preReleaseCompare(versionA Version, versionB Version) int {
- a := versionA.PreRelease
- b := versionB.PreRelease
-
- /* Handle the case where if two versions are otherwise equal it is the
- * one without a PreRelease that is greater */
- if len(a) == 0 && (len(b) > 0) {
- return 1
- } else if len(b) == 0 && (len(a) > 0) {
- return -1
- }
-
- // If there is a prerelease, check and compare each part.
- return recursivePreReleaseCompare(a.Slice(), b.Slice())
-}
-
-func recursiveCompare(versionA []int64, versionB []int64) int {
- if len(versionA) == 0 {
- return 0
- }
-
- a := versionA[0]
- b := versionB[0]
-
- if a > b {
- return 1
- } else if a < b {
- return -1
- }
-
- return recursiveCompare(versionA[1:], versionB[1:])
-}
-
-func recursivePreReleaseCompare(versionA []string, versionB []string) int {
- // A larger set of pre-release fields has a higher precedence than a smaller set,
- // if all of the preceding identifiers are equal.
- if len(versionA) == 0 {
- if len(versionB) > 0 {
- return -1
- }
- return 0
- } else if len(versionB) == 0 {
- // We're longer than versionB so return 1.
- return 1
- }
-
- a := versionA[0]
- b := versionB[0]
-
- aInt := false
- bInt := false
-
- aI, err := strconv.Atoi(versionA[0])
- if err == nil {
- aInt = true
- }
-
- bI, err := strconv.Atoi(versionB[0])
- if err == nil {
- bInt = true
- }
-
- // Handle Integer Comparison
- if aInt && bInt {
- if aI > bI {
- return 1
- } else if aI < bI {
- return -1
- }
- }
-
- // Handle String Comparison
- if a > b {
- return 1
- } else if a < b {
- return -1
- }
-
- return recursivePreReleaseCompare(versionA[1:], versionB[1:])
-}
-
-// BumpMajor increments the Major field by 1 and resets all other fields to their default values
-func (v *Version) BumpMajor() {
- v.Major += 1
- v.Minor = 0
- v.Patch = 0
- v.PreRelease = PreRelease("")
- v.Metadata = ""
-}
-
-// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
-func (v *Version) BumpMinor() {
- v.Minor += 1
- v.Patch = 0
- v.PreRelease = PreRelease("")
- v.Metadata = ""
-}
-
-// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
-func (v *Version) BumpPatch() {
- v.Patch += 1
- v.PreRelease = PreRelease("")
- v.Metadata = ""
-}
diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go
deleted file mode 100644
index e256b41a..00000000
--- a/vendor/github.com/coreos/go-semver/semver/sort.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2013-2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package semver
-
-import (
- "sort"
-)
-
-type Versions []*Version
-
-func (s Versions) Len() int {
- return len(s)
-}
-
-func (s Versions) Swap(i, j int) {
- s[i], s[j] = s[j], s[i]
-}
-
-func (s Versions) Less(i, j int) bool {
- return s[i].LessThan(*s[j])
-}
-
-// Sort sorts the given slice of Version
-func Sort(versions []*Version) {
- sort.Sort(Versions(versions))
-}
diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE
index c8364161..bc52e96f 100644
--- a/vendor/github.com/davecgh/go-spew/LICENSE
+++ b/vendor/github.com/davecgh/go-spew/LICENSE
@@ -2,7 +2,7 @@ ISC License
Copyright (c) 2012-2016 Dave Collins
-Permission to use, copy, modify, and distribute this software for any
+Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
index 8a4a6589..79299478 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypass.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -16,7 +16,9 @@
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build !js,!appengine,!safe,!disableunsafe
+// Go versions prior to 1.4 are disabled because they use a different layout
+// for interfaces which make the implementation of unsafeReflectValue more complex.
+// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew
@@ -34,80 +36,49 @@ const (
ptrSize = unsafe.Sizeof((*byte)(nil))
)
+type flag uintptr
+
var (
- // offsetPtr, offsetScalar, and offsetFlag are the offsets for the
- // internal reflect.Value fields. These values are valid before golang
- // commit ecccf07e7f9d which changed the format. The are also valid
- // after commit 82f48826c6c7 which changed the format again to mirror
- // the original format. Code in the init function updates these offsets
- // as necessary.
- offsetPtr = uintptr(ptrSize)
- offsetScalar = uintptr(0)
- offsetFlag = uintptr(ptrSize * 2)
-
- // flagKindWidth and flagKindShift indicate various bits that the
- // reflect package uses internally to track kind information.
- //
- // flagRO indicates whether or not the value field of a reflect.Value is
- // read-only.
- //
- // flagIndir indicates whether the value field of a reflect.Value is
- // the actual data or a pointer to the data.
- //
- // These values are valid before golang commit 90a7c3c86944 which
- // changed their positions. Code in the init function updates these
- // flags as necessary.
- flagKindWidth = uintptr(5)
- flagKindShift = uintptr(flagKindWidth - 1)
- flagRO = uintptr(1 << 0)
- flagIndir = uintptr(1 << 1)
+ // flagRO indicates whether the value field of a reflect.Value
+ // is read-only.
+ flagRO flag
+
+ // flagAddr indicates whether the address of the reflect.Value's
+ // value may be taken.
+ flagAddr flag
)
-func init() {
- // Older versions of reflect.Value stored small integers directly in the
- // ptr field (which is named val in the older versions). Versions
- // between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
- // scalar for this purpose which unfortunately came before the flag
- // field, so the offset of the flag field is different for those
- // versions.
- //
- // This code constructs a new reflect.Value from a known small integer
- // and checks if the size of the reflect.Value struct indicates it has
- // the scalar field. When it does, the offsets are updated accordingly.
- vv := reflect.ValueOf(0xf00)
- if unsafe.Sizeof(vv) == (ptrSize * 4) {
- offsetScalar = ptrSize * 2
- offsetFlag = ptrSize * 3
- }
+// flagKindMask holds the bits that make up the kind
+// part of the flags field. In all the supported versions,
+// it is in the lower 5 bits.
+const flagKindMask = flag(0x1f)
- // Commit 90a7c3c86944 changed the flag positions such that the low
- // order bits are the kind. This code extracts the kind from the flags
- // field and ensures it's the correct type. When it's not, the flag
- // order has been changed to the newer format, so the flags are updated
- // accordingly.
- upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
- upfv := *(*uintptr)(upf)
- flagKindMask := uintptr((1<>flagKindShift != uintptr(reflect.Int) {
- flagKindShift = 0
- flagRO = 1 << 5
- flagIndir = 1 << 6
-
- // Commit adf9b30e5594 modified the flags to separate the
- // flagRO flag into two bits which specifies whether or not the
- // field is embedded. This causes flagIndir to move over a bit
- // and means that flagRO is the combination of either of the
- // original flagRO bit and the new bit.
- //
- // This code detects the change by extracting what used to be
- // the indirect bit to ensure it's set. When it's not, the flag
- // order has been changed to the newer format, so the flags are
- // updated accordingly.
- if upfv&flagIndir == 0 {
- flagRO = 3 << 5
- flagIndir = 1 << 7
- }
+// Different versions of Go have used different
+// bit layouts for the flags type. This table
+// records the known combinations.
+var okFlags = []struct {
+ ro, addr flag
+}{{
+ // From Go 1.4 to 1.5
+ ro: 1 << 5,
+ addr: 1 << 7,
+}, {
+ // Up to Go tip.
+ ro: 1<<5 | 1<<6,
+ addr: 1 << 8,
+}}
+
+var flagValOffset = func() uintptr {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
}
+ return field.Offset
+}()
+
+// flagField returns a pointer to the flag field of a reflect.Value.
+func flagField(v *reflect.Value) *flag {
+ return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset))
}
// unsafeReflectValue converts the passed reflect.Value into a one that bypasses
@@ -119,34 +90,56 @@ func init() {
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
-func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
- indirects := 1
- vt := v.Type()
- upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
- rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
- if rvf&flagIndir != 0 {
- vt = reflect.PtrTo(v.Type())
- indirects++
- } else if offsetScalar != 0 {
- // The value is in the scalar field when it's not one of the
- // reference types.
- switch vt.Kind() {
- case reflect.Uintptr:
- case reflect.Chan:
- case reflect.Func:
- case reflect.Map:
- case reflect.Ptr:
- case reflect.UnsafePointer:
- default:
- upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
- offsetScalar)
- }
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+ if !v.IsValid() || (v.CanInterface() && v.CanAddr()) {
+ return v
}
+ flagFieldPtr := flagField(&v)
+ *flagFieldPtr &^= flagRO
+ *flagFieldPtr |= flagAddr
+ return v
+}
- pv := reflect.NewAt(vt, upv)
- rv = pv
- for i := 0; i < indirects; i++ {
- rv = rv.Elem()
+// Sanity checks against future reflect package changes
+// to the type or semantics of the Value.flag field.
+func init() {
+ field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag")
+ if !ok {
+ panic("reflect.Value has no flag field")
+ }
+ if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() {
+ panic("reflect.Value flag field has changed kind")
+ }
+ type t0 int
+ var t struct {
+ A t0
+ // t0 will have flagEmbedRO set.
+ t0
+ // a will have flagStickyRO set
+ a t0
+ }
+ vA := reflect.ValueOf(t).FieldByName("A")
+ va := reflect.ValueOf(t).FieldByName("a")
+ vt0 := reflect.ValueOf(t).FieldByName("t0")
+
+ // Infer flagRO from the difference between the flags
+ // for the (otherwise identical) fields in t.
+ flagPublic := *flagField(&vA)
+ flagWithRO := *flagField(&va) | *flagField(&vt0)
+ flagRO = flagPublic ^ flagWithRO
+
+ // Infer flagAddr from the difference between a value
+ // taken from a pointer and not.
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A")
+ flagNoPtr := *flagField(&vA)
+ flagPtr := *flagField(&vPtrA)
+ flagAddr = flagNoPtr ^ flagPtr
+
+ // Check that the inferred flags tally with one of the known versions.
+ for _, f := range okFlags {
+ if flagRO == f.ro && flagAddr == f.addr {
+ return
+ }
}
- return rv
+ panic("reflect.Value read-only flag has changed semantics")
}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
index 1fe3cf3d..205c28d6 100644
--- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -16,7 +16,7 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
-// +build js appengine safe disableunsafe
+// +build js appengine safe disableunsafe !go1.4
package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
index 7c519ff4..1be8ce94 100644
--- a/vendor/github.com/davecgh/go-spew/spew/common.go
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -180,7 +180,7 @@ func printComplex(w io.Writer, c complex128, floatPrecision int) {
w.Write(closeParenBytes)
}
-// printHexPtr outputs a uintptr formatted as hexidecimal with a leading '0x'
+// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
index df1d582a..f78d89fc 100644
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ b/vendor/github.com/davecgh/go-spew/spew/dump.go
@@ -35,16 +35,16 @@ var (
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
+ cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
- cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
+ cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
+ cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
)
// dumpState contains information about the state of a dump operation.
@@ -143,10 +143,10 @@ func (d *dumpState) dumpPtr(v reflect.Value) {
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
- case nilFound == true:
+ case nilFound:
d.w.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
d.w.Write(circularBytes)
default:
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
index c49875ba..b04edb7d 100644
--- a/vendor/github.com/davecgh/go-spew/spew/format.go
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -182,10 +182,10 @@ func (f *formatState) formatPtr(v reflect.Value) {
// Display dereferenced value.
switch {
- case nilFound == true:
+ case nilFound:
f.fs.Write(nilAngleBytes)
- case cycleFound == true:
+ case cycleFound:
f.fs.Write(circularShortBytes)
default:
diff --git a/vendor/github.com/devigned/tab/.gitignore b/vendor/github.com/devigned/tab/.gitignore
new file mode 100644
index 00000000..b3efc391
--- /dev/null
+++ b/vendor/github.com/devigned/tab/.gitignore
@@ -0,0 +1,14 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+.idea
\ No newline at end of file
diff --git a/vendor/github.com/devigned/tab/LICENSE b/vendor/github.com/devigned/tab/LICENSE
new file mode 100644
index 00000000..a936fe63
--- /dev/null
+++ b/vendor/github.com/devigned/tab/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 David Justice
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/devigned/tab/Makefile b/vendor/github.com/devigned/tab/Makefile
new file mode 100644
index 00000000..94576708
--- /dev/null
+++ b/vendor/github.com/devigned/tab/Makefile
@@ -0,0 +1,86 @@
+PACKAGE = github.com/devigned/tab
+DATE ?= $(shell date +%FT%T%z)
+VERSION ?= $(shell git describe --tags --always --dirty --match=v* 2> /dev/null || \
+ cat $(CURDIR)/.version 2> /dev/null || echo v0)
+BIN = $(GOPATH)/bin
+BASE = $(CURDIR)
+PKGS = $(or $(PKG),$(shell cd $(BASE) && env GOPATH=$(GOPATH) $(GO) list ./... | grep -vE "^$(PACKAGE)/templates/"))
+TESTPKGS = $(shell env GOPATH=$(GOPATH) $(GO) list -f '{{ if or .TestGoFiles .XTestGoFiles }}{{ .ImportPath }}{{ end }}' $(PKGS))
+GO_FILES = find . -iname '*.go' -type f
+
+GO = go
+GODOC = godoc
+GOFMT = gofmt
+GOCYCLO = gocyclo
+
+V = 0
+Q = $(if $(filter 1,$V),,@)
+M = $(shell printf "\033[34;1m▶\033[0m")
+TIMEOUT = 1100
+
+.PHONY: all
+all: fmt lint vet ; $(info $(M) building library…) @ ## Build program
+ $Q cd $(BASE) && $(GO) build -tags release
+
+# Tools
+
+GOLINT = $(BIN)/golint
+$(BIN)/golint: ; $(info $(M) building golint…)
+ $Q go get github.com/golang/lint/golint
+
+# Tests
+
+TEST_TARGETS := test-default test-bench test-verbose test-race test-debug test-cover
+.PHONY: $(TEST_TARGETS) test-xml check test tests
+test-bench: ARGS=-run=__absolutelynothing__ -bench=. ## Run benchmarks
+test-verbose: ARGS=-v ## Run tests in verbose mode
+test-debug: ARGS=-v -debug ## Run tests in verbose mode with debug output
+test-race: ARGS=-race ## Run tests with race detector
+test-cover: ARGS=-cover -coverprofile=cover.out -v ## Run tests in verbose mode with coverage
+$(TEST_TARGETS): NAME=$(MAKECMDGOALS:test-%=%)
+$(TEST_TARGETS): test
+check test tests: cyclo lint vet; $(info $(M) running $(NAME:%=% )tests…) @ ## Run tests
+ $Q cd $(BASE) && $(GO) test -timeout $(TIMEOUT)s $(ARGS) $(TESTPKGS)
+
+.PHONY: vet
+vet: $(GOLINT) ; $(info $(M) running vet…) @ ## Run vet
+ $Q cd $(BASE) && $(GO) vet ./...
+
+.PHONY: lint
+lint: $(GOLINT) ; $(info $(M) running golint…) @ ## Run golint
+ $Q cd $(BASE) && ret=0 && for pkg in $(PKGS); do \
+ test -z "$$($(GOLINT) $$pkg | tee /dev/stderr)" || ret=1 ; \
+ done ; exit $$ret
+
+.PHONY: fmt
+fmt: ; $(info $(M) running gofmt…) @ ## Run gofmt on all source files
+ @ret=0 && for d in $$($(GO) list -f '{{.Dir}}' ./...); do \
+ $(GOFMT) -l -w $$d/*.go || ret=$$? ; \
+ done ; exit $$ret
+
+.PHONY: cyclo
+cyclo: ; $(info $(M) running gocyclo...) @ ## Run gocyclo on all source files
+ $Q cd $(BASE) && $(GOCYCLO) -over 19 $$($(GO_FILES))
+
+.Phony: destroy-sb
+destroy-sb: ; $(info $(M) running sb destroy...)
+ $(Q) terraform destroy -auto-approve
+
+# Dependency management
+go.sum: go.mod
+ $Q cd $(BASE) && $(GO) mod tidy
+
+# Misc
+
+.PHONY: clean
+clean: ; $(info $(M) cleaning…) @ ## Cleanup everything
+ @rm -rf test/tests.* test/coverage.*
+
+.PHONY: help
+help:
+ @grep -E '^[ a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
+ awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-15s\033[0m %s\n", $$1, $$2}'
+
+.PHONY: version
+version:
+ @echo $(VERSION)
\ No newline at end of file
diff --git a/vendor/github.com/devigned/tab/README.md b/vendor/github.com/devigned/tab/README.md
new file mode 100644
index 00000000..98681c9e
--- /dev/null
+++ b/vendor/github.com/devigned/tab/README.md
@@ -0,0 +1,49 @@
+# Trace Abstraction (tab)
+OpenTracing and OpenCensus abstraction for tracing and logging.
+
+Why? Well, sometimes you want to let the consumer choose the tracing / logging implementation.
+
+## Getting Started
+### Installing the library
+
+```
+go get -u github.com/devigned/tab/...
+```
+
+If you need to install Go, follow [the official instructions](https://golang.org/dl/)
+
+### Usage
+
+```go
+package main
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/devigned/tab"
+ _ "github.com/devigned/tab/opencensus" // use OpenCensus
+ // _ "github.com/devigned/tab/opentracing" // use OpenTracing
+)
+
+func main() {
+ // start a root span
+ ctx, span := tab.StartSpan(context.Background(), "main")
+ defer span.End() // close span when done
+
+ // pass context w/ span to child func
+ printHelloWorld(ctx)
+}
+
+func printHelloWorld(ctx context.Context) {
+ // start new span from parent
+ _, span := tab.StartSpan(ctx, "printHelloWorld")
+ defer span.End() // close span when done
+
+ // add attribute to span
+ span.AddAttributes(tab.StringAttribute("interesting", "value"))
+ fmt.Println("Hello World!")
+ tab.For(ctx).Info("after println call")
+}
+
+```
\ No newline at end of file
diff --git a/vendor/github.com/devigned/tab/go.mod b/vendor/github.com/devigned/tab/go.mod
new file mode 100644
index 00000000..3b749b87
--- /dev/null
+++ b/vendor/github.com/devigned/tab/go.mod
@@ -0,0 +1,3 @@
+module github.com/devigned/tab
+
+go 1.12
diff --git a/vendor/github.com/devigned/tab/go.sum b/vendor/github.com/devigned/tab/go.sum
new file mode 100644
index 00000000..e69de29b
diff --git a/vendor/github.com/devigned/tab/trace.go b/vendor/github.com/devigned/tab/trace.go
new file mode 100644
index 00000000..2c72e5f5
--- /dev/null
+++ b/vendor/github.com/devigned/tab/trace.go
@@ -0,0 +1,200 @@
+package tab
+
+import (
+ "context"
+)
+
+var (
+ tracer Tracer = new(NoOpTracer)
+)
+
+// Register a Tracer instance
+func Register(t Tracer) {
+ tracer = t
+}
+
+// BoolAttribute returns a bool-valued attribute.
+func BoolAttribute(key string, value bool) Attribute {
+ return Attribute{Key: key, Value: value}
+}
+
+// StringAttribute returns a string-valued attribute.
+func StringAttribute(key, value string) Attribute {
+ return Attribute{Key: key, Value: value}
+}
+
+// Int64Attribute returns an int64-valued attribute.
+func Int64Attribute(key string, value int64) Attribute {
+ return Attribute{Key: key, Value: value}
+}
+
+// StartSpan starts a new child span
+func StartSpan(ctx context.Context, operationName string, opts ...interface{}) (context.Context, Spanner) {
+ if tracer == nil {
+ return ctx, new(noOpSpanner)
+ }
+ return tracer.StartSpan(ctx, operationName, opts)
+}
+
+// StartSpanWithRemoteParent starts a new child span of the span from the given parent.
+func StartSpanWithRemoteParent(ctx context.Context, operationName string, carrier Carrier, opts ...interface{}) (context.Context, Spanner) {
+ if tracer == nil {
+ return ctx, new(noOpSpanner)
+ }
+ return tracer.StartSpanWithRemoteParent(ctx, operationName, carrier, opts)
+}
+
+// FromContext returns the Span stored in a context, or nil if there isn't one.
+func FromContext(ctx context.Context) Spanner {
+ if tracer == nil {
+ return new(noOpSpanner)
+ }
+ return tracer.FromContext(ctx)
+}
+
+// NewContext returns a new context with the given Span attached.
+func NewContext(ctx context.Context, span Spanner) context.Context {
+ if tracer == nil {
+ return ctx
+ }
+ return tracer.NewContext(ctx, span)
+}
+
+type (
+ // Attribute is a key value pair for decorating spans
+ Attribute struct {
+ Key string
+ Value interface{}
+ }
+
+ // Carrier is an abstraction over OpenTracing and OpenCensus propagation carrier
+ Carrier interface {
+ Set(key string, value interface{})
+ GetKeyValues() map[string]interface{}
+ }
+
+ // Spanner is an abstraction over OpenTracing and OpenCensus Spans
+ Spanner interface {
+ AddAttributes(attributes ...Attribute)
+ End()
+ Logger() Logger
+ Inject(carrier Carrier) error
+ InternalSpan() interface{}
+ }
+
+ // Tracer is an abstraction over OpenTracing and OpenCensus trace implementations
+ Tracer interface {
+ StartSpan(ctx context.Context, operationName string, opts ...interface{}) (context.Context, Spanner)
+ StartSpanWithRemoteParent(ctx context.Context, operationName string, carrier Carrier, opts ...interface{}) (context.Context, Spanner)
+ FromContext(ctx context.Context) Spanner
+ NewContext(parent context.Context, span Spanner) context.Context
+ }
+
+ // Logger is a generic interface for logging
+ Logger interface {
+ Info(msg string, attributes ...Attribute)
+ Error(err error, attributes ...Attribute)
+ Fatal(msg string, attributes ...Attribute)
+ Debug(msg string, attributes ...Attribute)
+ }
+
+ // SpanLogger is a Logger implementation which logs to a tracing span
+ SpanLogger struct {
+ Span Spanner
+ }
+
+ // NoOpTracer is a Tracer implementation that does nothing, thus no op
+ NoOpTracer struct{}
+
+ noOpLogger struct{}
+
+ noOpSpanner struct{}
+)
+
+// StartSpan returns the input context and a no op Spanner
+func (nt *NoOpTracer) StartSpan(ctx context.Context, operationName string, opts ...interface{}) (context.Context, Spanner) {
+ return ctx, new(noOpSpanner)
+}
+
+// StartSpanWithRemoteParent returns the input context and a no op Spanner
+func (nt *NoOpTracer) StartSpanWithRemoteParent(ctx context.Context, operationName string, carrier Carrier, opts ...interface{}) (context.Context, Spanner) {
+ return ctx, new(noOpSpanner)
+}
+
+// FromContext returns a no op Spanner without regard to the input context
+func (nt *NoOpTracer) FromContext(ctx context.Context) Spanner {
+ return new(noOpSpanner)
+}
+
+// NewContext returns the parent context
+func (nt *NoOpTracer) NewContext(parent context.Context, span Spanner) context.Context {
+ return parent
+}
+
+// AddAttributes is a nop
+func (ns *noOpSpanner) AddAttributes(attributes ...Attribute) {}
+
+// End is a nop
+func (ns *noOpSpanner) End() {}
+
+// Logger returns a nopLogger
+func (ns *noOpSpanner) Logger() Logger {
+ return new(noOpLogger)
+}
+
+// Inject is a nop
+func (ns *noOpSpanner) Inject(carrier Carrier) error {
+ return nil
+}
+
+// InternalSpan returns nil
+func (ns *noOpSpanner) InternalSpan() interface{} {
+ return nil
+}
+
+// For will return a logger for a given context
+func For(ctx context.Context) Logger {
+ if span := tracer.FromContext(ctx); span != nil {
+ return span.Logger()
+ }
+ return new(noOpLogger)
+}
+
+// Info logs an info tag with message to a span
+func (sl SpanLogger) Info(msg string, attributes ...Attribute) {
+ sl.logToSpan("info", msg, attributes...)
+}
+
+// Error logs an error tag with message to a span
+func (sl SpanLogger) Error(err error, attributes ...Attribute) {
+ attributes = append(attributes, BoolAttribute("error", true))
+ sl.logToSpan("error", err.Error(), attributes...)
+}
+
+// Fatal logs an error tag with message to a span
+func (sl SpanLogger) Fatal(msg string, attributes ...Attribute) {
+ attributes = append(attributes, BoolAttribute("error", true))
+ sl.logToSpan("fatal", msg, attributes...)
+}
+
+// Debug logs a debug tag with message to a span
+func (sl SpanLogger) Debug(msg string, attributes ...Attribute) {
+ sl.logToSpan("debug", msg, attributes...)
+}
+
+func (sl SpanLogger) logToSpan(level string, msg string, attributes ...Attribute) {
+ attrs := append(attributes, StringAttribute("event", msg), StringAttribute("level", level))
+ sl.Span.AddAttributes(attrs...)
+}
+
+// Info nops log entry
+func (sl noOpLogger) Info(msg string, attributes ...Attribute) {}
+
+// Error nops log entry
+func (sl noOpLogger) Error(err error, attributes ...Attribute) {}
+
+// Fatal nops log entry
+func (sl noOpLogger) Fatal(msg string, attributes ...Attribute) {}
+
+// Debug nops log entry
+func (sl noOpLogger) Debug(msg string, attributes ...Attribute) {}
diff --git a/vendor/github.com/dimchansky/utfbom/.travis.yml b/vendor/github.com/dimchansky/utfbom/.travis.yml
index df88e37b..3512c851 100644
--- a/vendor/github.com/dimchansky/utfbom/.travis.yml
+++ b/vendor/github.com/dimchansky/utfbom/.travis.yml
@@ -1,8 +1,8 @@
language: go
go:
- - 1.7
- - tip
+ - '1.10'
+ - '1.11'
# sudo=false makes the build run using a container
sudo: false
@@ -15,4 +15,4 @@ before_install:
script:
- gofiles=$(find ./ -name '*.go') && [ -z "$gofiles" ] || unformatted=$(goimports -l $gofiles) && [ -z "$unformatted" ] || (echo >&2 "Go files must be formatted with gofmt. Following files has problem:\n $unformatted" && false)
- golint ./... # This won't break the build, just show warnings
- - $HOME/gopath/bin/goveralls -service=travis-ci
\ No newline at end of file
+ - $HOME/gopath/bin/goveralls -service=travis-ci
diff --git a/vendor/github.com/dimchansky/utfbom/README.md b/vendor/github.com/dimchansky/utfbom/README.md
index 2f06ecac..8ece2800 100644
--- a/vendor/github.com/dimchansky/utfbom/README.md
+++ b/vendor/github.com/dimchansky/utfbom/README.md
@@ -37,22 +37,7 @@ func trySkip(byteData []byte) {
// skip BOM and detect encoding
sr, enc := utfbom.Skip(bytes.NewReader(byteData))
- var encStr string
- switch enc {
- case utfbom.UTF8:
- encStr = "UTF8"
- case utfbom.UTF16BigEndian:
- encStr = "UTF16 big endian"
- case utfbom.UTF16LittleEndian:
- encStr = "UTF16 little endian"
- case utfbom.UTF32BigEndian:
- encStr = "UTF32 big endian"
- case utfbom.UTF32LittleEndian:
- encStr = "UTF32 little endian"
- default:
- encStr = "Unknown, no byte-order mark found"
- }
- fmt.Println("Detected encoding:", encStr)
+ fmt.Printf("Detected encoding: %s\n", enc)
output, err = ioutil.ReadAll(sr)
if err != nil {
fmt.Println(err)
@@ -74,7 +59,7 @@ ReadAll with BOM detection and skipping [104 101 108 108 111]
Input: [104 101 108 108 111]
ReadAll with BOM skipping [104 101 108 108 111]
-Detected encoding: Unknown, no byte-order mark found
+Detected encoding: Unknown
ReadAll with BOM detection and skipping [104 101 108 108 111]
```
diff --git a/vendor/github.com/dimchansky/utfbom/utfbom.go b/vendor/github.com/dimchansky/utfbom/utfbom.go
index 648184a1..77a303e5 100644
--- a/vendor/github.com/dimchansky/utfbom/utfbom.go
+++ b/vendor/github.com/dimchansky/utfbom/utfbom.go
@@ -32,6 +32,24 @@ const (
UTF32LittleEndian
)
+// String returns a user-friendly string representation of the encoding. Satisfies fmt.Stringer interface.
+func (e Encoding) String() string {
+ switch e {
+ case UTF8:
+ return "UTF8"
+ case UTF16BigEndian:
+ return "UTF16BigEndian"
+ case UTF16LittleEndian:
+ return "UTF16LittleEndian"
+ case UTF32BigEndian:
+ return "UTF32BigEndian"
+ case UTF32LittleEndian:
+ return "UTF32LittleEndian"
+ default:
+ return "Unknown"
+ }
+}
+
const maxConsecutiveEmptyReads = 100
// Skip creates Reader which automatically detects BOM (Unicode Byte Order Mark) and removes it as necessary.
diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS
deleted file mode 100644
index 246e2a33..00000000
--- a/vendor/github.com/docker/docker/AUTHORS
+++ /dev/null
@@ -1,1652 +0,0 @@
-# This file lists all individuals having contributed content to the repository.
-# For how it is generated, see `hack/generate-authors.sh`.
-
-Aanand Prasad
-Aaron Davidson
-Aaron Feng
-Aaron Huslage
-Aaron Lehmann
-Aaron Welch
-Abel Muiño
-Abhijeet Kasurde
-Abhinav Ajgaonkar
-Abhishek Chanda
-Abin Shahab
-Adam Avilla
-Adam Kunk
-Adam Miller
-Adam Mills
-Adam Singer
-Adam Walz
-Aditi Rajagopal
-Aditya
-Adolfo Ochagavía
-Adria Casas
-Adrian Moisey
-Adrian Mouat
-Adrian Oprea
-Adrien Folie
-Adrien Gallouët
-Ahmed Kamal
-Ahmet Alp Balkan
-Aidan Feldman
-Aidan Hobson Sayers
-AJ Bowen
-Ajey Charantimath
-ajneu
-Akihiro Suda
-Al Tobey
-alambike
-Alan Scherger
-Alan Thompson
-Albert Callarisa
-Albert Zhang
-Aleksa Sarai
-Aleksandrs Fadins
-Alena Prokharchyk
-Alessandro Boch
-Alessio Biancalana
-Alex Chan
-Alex Coventry
-Alex Crawford
-Alex Ellis
-Alex Gaynor
-Alex Olshansky
-Alex Samorukov
-Alex Warhawk
-Alexander Artemenko
-Alexander Boyd
-Alexander Larsson
-Alexander Morozov
-Alexander Shopov
-Alexandre Beslic
-Alexandre González
-Alexandru Sfirlogea
-Alexey Guskov
-Alexey Kotlyarov
-Alexey Shamrin
-Alexis THOMAS
-Ali Dehghani
-Allen Madsen
-Allen Sun
-almoehi
-Alvaro Saurin
-Alvin Richards
-amangoel
-Amen Belayneh
-Amit Bakshi
-Amit Krishnan
-Amit Shukla
-Amy Lindburg
-Anand Patil
-AnandkumarPatel
-Anatoly Borodin
-Anchal Agrawal
-Anders Janmyr
-Andre Dublin <81dublin@gmail.com>
-Andre Granovsky
-Andrea Luzzardi
-Andrea Turli
-Andreas Köhler
-Andreas Savvides
-Andreas Tiefenthaler
-Andrei Gherzan
-Andrew C. Bodine
-Andrew Clay Shafer
-Andrew Duckworth
-Andrew France
-Andrew Gerrand
-Andrew Guenther
-Andrew Kuklewicz
-Andrew Macgregor
-Andrew Macpherson
-Andrew Martin
-Andrew Munsell
-Andrew Po
-Andrew Weiss
-Andrew Williams
-Andrews Medina
-Andrey Petrov
-Andrey Stolbovsky
-André Martins
-andy
-Andy Chambers
-andy diller
-Andy Goldstein
-Andy Kipp
-Andy Rothfusz
-Andy Smith
-Andy Wilson
-Anes Hasicic
-Anil Belur
-Anil Madhavapeddy
-Ankush Agarwal
-Anonmily
-Anthon van der Neut
-Anthony Baire
-Anthony Bishopric
-Anthony Dahanne
-Anton Löfgren
-Anton Nikitin
-Anton Polonskiy
-Anton Tiurin
-Antonio Murdaca
-Antonis Kalipetis
-Antony Messerli
-Anuj Bahuguna
-Anusha Ragunathan
-apocas
-ArikaChen
-Arnaud Lefebvre
-Arnaud Porterie
-Arthur Barr
-Arthur Gautier
-Artur Meyster
-Arun Gupta
-Asbjørn Enge
-averagehuman
-Avi Das
-Avi Miller
-Avi Vaid
-ayoshitake
-Azat Khuyiyakhmetov
-Bardia Keyoumarsi
-Barnaby Gray
-Barry Allard
-Bartłomiej Piotrowski
-Bastiaan Bakker
-bdevloed
-Ben Firshman
-Ben Golub
-Ben Hall
-Ben Sargent
-Ben Severson
-Ben Toews
-Ben Wiklund
-Benjamin Atkin
-Benoit Chesneau
-Bernerd Schaefer
-Bert Goethals
-Bharath Thiruveedula
-Bhiraj Butala
-Bilal Amarni
-Bill W
-bin liu
-Blake Geno
-Boaz Shuster
-bobby abbott
-boucher
-Bouke Haarsma
-Boyd Hemphill
-boynux
-Bradley Cicenas
-Bradley Wright
-Brandon Liu
-Brandon Philips
-Brandon Rhodes
-Brendan Dixon
-Brent Salisbury
-Brett Higgins
-Brett Kochendorfer
-Brian (bex) Exelbierd
-Brian Bland
-Brian DeHamer
-Brian Dorsey
-Brian Flad
-Brian Goff
-Brian McCallister
-Brian Olsen
-Brian Shumate
-Brian Torres-Gil
-Brian Trump
-Brice Jaglin
-Briehan Lombaard
-Bruno Bigras
-Bruno Binet
-Bruno Gazzera
-Bruno Renié
-Bryan Bess
-Bryan Boreham
-Bryan Matsuo
-Bryan Murphy
-buddhamagnet
-Burke Libbey
-Byung Kang
-Caleb Spare
-Calen Pennington
-Cameron Boehmer
-Cameron Spear
-Campbell Allen
-Candid Dauth
-Cao Weiwei
-Carl Henrik Lunde
-Carl Loa Odin
-Carl X. Su
-Carlos Alexandro Becker
-Carlos Sanchez
-Carol Fager-Higgins
-Cary
-Casey Bisson
-Cedric Davies
-Cezar Sa Espinola
-Chad Swenson
-Chance Zibolski
-Chander G
-Charles Chan
-Charles Hooper
-Charles Law
-Charles Lindsay
-Charles Merriam
-Charles Sarrazin
-Charles Smith
-Charlie Lewis
-Chase Bolt
-ChaYoung You
-Chen Chao
-Chen Hanxiao
-cheney90
-Chewey
-Chia-liang Kao