From 5c0eed3faa421bc8ec619c850eacd7302dc58d1f Mon Sep 17 00:00:00 2001
From: kubevirt-bot
Date: Mon, 21 Dec 2020 08:06:59 +0000
Subject: [PATCH 01/19] Update Image Digests

Signed-off-by: HCO Bump Bot
---
 deploy/images.env | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/deploy/images.env b/deploy/images.env
index 47be8ef0e2..1e9933d612 100755
--- a/deploy/images.env
+++ b/deploy/images.env
@@ -22,4 +22,4 @@ HCO_WEBHOOK_IMAGE=quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594
 NMO_IMAGE=quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a
 CONVERSION_IMAGE=quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815
 VMWARE_IMAGE=quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92
-DIGEST_LIST=docker.io/kubevirt/virt-operator@sha256:4c33eaab42033c84310b62e146274f95a55699235c59846c8cec3009a91a723f,docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21,docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5,docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12,docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236,quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d,quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c,docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711,docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc,docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813,docker.io/kubevirt/cdi-cloner@sha256:c8946ef116d4d2fccf08b895faf07d9bb85f76d315e4380416261db9970306f1,docker.io/kubevirt/cdi-importer@sha256:d16dd224def1713a51278f6f164a04c7e9b38364b794f14d18c2d8d14ab04eb3,docker.io/kubevirt/cdi-uploadproxy@sha256:9e5ae41e3da07c30ac9a0326c89f2d64021abfc5d74ee6c4b750d6915f02eeb5,docker.io/kubevirt/cdi-uploadserver@sha256:3915ba0501fa0e3ee9ae178d6617d0c1ac5f34a83243f723421c42de99e705e5,quay.io/kubevirt/hostpath-provisioner-operator@sha256:a51e9b075a60600244757386f5894b314170543edb1d7f4738f4860a19602072,quay.io/kubevirt/hostpath-provisioner@sha256:3838d8e713d2e85a807a9c302501b25c248ba3f3c1602fe50480e6510de43e11,quay.io/kubevirt/vm-import-operator@sha256:74fc74dab0671ef1098e69872e47bcb6f85a40b4b18a1e23fd6d3cfc36dfee32,quay.io/kubevirt/vm-import-controller@sha256:481f4a493a66d1310734ac135e8dbaa5dc01c9d93f6e9ecc9326b81c1c08dbfe,quay.io/kubevirt/vm-import-virtv2v@sha256:97caccb965d771afefd901c71381b6c1126e4177b477d47f2ca5ca57c5b06593,quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da,quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594403627756bae3b4d6579967819fc544ced3c371d971a090a99fe0a9e9,quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a,quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815,quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92
+DIGEST_LIST=docker.io/kubevirt/virt-operator@sha256:4c33eaab42033c84310b62e146274f95a55699235c59846c8cec3009a91a723f,docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21,docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5,docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12,docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236,quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d,quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c,docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711,docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc,docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813,docker.io/kubevirt/cdi-cloner@sha256:c8946ef116d4d2fccf08b895faf07d9bb85f76d315e4380416261db9970306f1,docker.io/kubevirt/cdi-importer@sha256:d16dd224def1713a51278f6f164a04c7e9b38364b794f14d18c2d8d14ab04eb3,docker.io/kubevirt/cdi-uploadproxy@sha256:9e5ae41e3da07c30ac9a0326c89f2d64021abfc5d74ee6c4b750d6915f02eeb5,docker.io/kubevirt/cdi-uploadserver@sha256:3915ba0501fa0e3ee9ae178d6617d0c1ac5f34a83243f723421c42de99e705e5,quay.io/kubevirt/hostpath-provisioner-operator@sha256:a51e9b075a60600244757386f5894b314170543edb1d7f4738f4860a19602072,quay.io/kubevirt/hostpath-provisioner@sha256:3838d8e713d2e85a807a9c302501b25c248ba3f3c1602fe50480e6510de43e11,quay.io/kubevirt/vm-import-operator@sha256:74fc74dab0671ef1098e69872e47bcb6f85a40b4b18a1e23fd6d3cfc36dfee32,quay.io/kubevirt/vm-import-controller@sha256:481f4a493a66d1310734ac135e8dbaa5dc01c9d93f6e9ecc9326b81c1c08dbfe,quay.io/kubevirt/vm-import-virtv2v@sha256:97caccb965d771afefd901c71381b6c1126e4177b477d47f2ca5ca57c5b06593,quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da,quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594403627756bae3b4d6579967819fc544ced3c371d971a090a99fe0a9e9,quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a,quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815,quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92
\ No newline at end of file

From e73e56743974dcda7125d1bdd3814d7b9b355660 Mon Sep 17 00:00:00 2001
From: Zvi Cahana
Date: Wed, 25 Nov 2020 20:25:30 +0200
Subject: [PATCH 02/19] Vendor kubevirt.io/ssp-operator

Signed-off-by: Zvi Cahana
---
 go.mod | 10 +-
 go.sum | 280 ++------
 .../pkg/apis/monitoring/v1/register.go | 2 +
 .../pkg/apis/monitoring/v1/thanos_types.go | 6 +-
 .../pkg/apis/monitoring/v1/types.go | 228 +++++++++--
 .../monitoring/v1/zz_generated.deepcopy.go | 281 ++++++++++++--
 vendor/github.com/go-openapi/swag/.travis.yml | 6 +-
 vendor/github.com/go-openapi/swag/convert.go | 16 +-
 .../go-openapi/swag/convert_types.go | 195 ++++++++--
 vendor/github.com/go-openapi/swag/go.mod | 4 +-
 vendor/github.com/go-openapi/swag/go.sum | 4 +-
 vendor/github.com/go-openapi/swag/json.go | 8 +-
 vendor/github.com/go-openapi/swag/loading.go | 18 +
 .../pkg/apis/addtoscheme_kubevirt_v1.go | 10 -
 .../kubevirt-ssp-operator/pkg/apis/apis.go | 13 -
 .../pkg/apis/kubevirt/v1/doc.go | 4 -
 .../pkg/apis/kubevirt/v1/register.go | 17 -
 .../pkg/apis/kubevirt/v1/types.go | 193 ----------
 .../apis/kubevirt/v1/zz_generated.deepcopy.go | 362 ------------------
 .../apis/kubevirt/v1/zz_generated.openapi.go | 191 ---------
 vendor/github.com/onsi/gomega/.travis.yml | 3 +
 vendor/github.com/onsi/gomega/CHANGELOG.md | 7 +
 .../github.com/onsi/gomega/format/format.go | 8 +-
 vendor/github.com/onsi/gomega/go.mod | 2 +-
 vendor/github.com/onsi/gomega/go.sum | 3 +
 vendor/github.com/onsi/gomega/gomega_dsl.go | 2 +-
 .../github.com/operator-framework/api/LICENSE | 201 ++++++++++
 .../v1alpha1/clusterserviceversion_types.go | 13 +-
 .../operators/v1alpha1/installplan_types.go | 3 +
 .../v1alpha1/zz_generated.deepcopy.go | 6 +
 vendor/golang.org/x/net/http2/transport.go | 4 +-
 vendor/k8s.io/utils/pointer/pointer.go | 49 ++-
 vendor/k8s.io/utils/trace/trace.go | 2 +-
 vendor/kubevirt.io/ssp-operator/LICENSE | 202 ++++++++++
 .../api/v1beta1/groupversion_info.go | 36 ++
 .../ssp-operator/api/v1beta1/ssp_types.go | 86 +++++
 .../ssp-operator/api/v1beta1/ssp_webhook.go | 93 +++++
 .../api/v1beta1/zz_generated.deepcopy.go | 176 +++++++++
 vendor/modules.txt | 21 +-
 39 files changed, 1586 insertions(+), 1179 deletions(-)
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/addtoscheme_kubevirt_v1.go
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/apis.go
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/doc.go
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/register.go
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/types.go
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.deepcopy.go
 delete mode 100644 vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.openapi.go
 create mode 100644 vendor/github.com/operator-framework/api/LICENSE
 create mode 100644 vendor/kubevirt.io/ssp-operator/LICENSE
 create mode 100644 vendor/kubevirt.io/ssp-operator/api/v1beta1/groupversion_info.go
 create mode 100644 vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_types.go
 create mode 100644 vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_webhook.go
 create mode 100644 vendor/kubevirt.io/ssp-operator/api/v1beta1/zz_generated.deepcopy.go

diff --git a/go.mod b/go.mod
index 2ad356afe1..2c4a22307a 100644
--- a/go.mod
+++ b/go.mod
@@ -4,9 +4,8 @@ go 1.15
 
 require (
 	github.com/blang/semver v3.5.1+incompatible
-	github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc
+	github.com/coreos/prometheus-operator v0.41.1
 	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
-	github.com/go-kit/kit v0.10.0 // indirect
 	github.com/go-logfmt/logfmt v0.5.0 // indirect
 	github.com/go-logr/logr v0.2.1
 	github.com/go-logr/zapr v0.2.0 // indirect
@@ -16,18 +15,16 @@ require (
 	github.com/googleapis/gnostic v0.5.1 // indirect
 	github.com/imdario/mergo v0.3.9
 	github.com/kubevirt/cluster-network-addons-operator v0.44.0
-	github.com/kubevirt/kubevirt-ssp-operator v1.2.1
 	github.com/kubevirt/vm-import-operator v0.2.5
 	github.com/onsi/ginkgo v1.14.2
-	github.com/onsi/gomega v1.10.3
+	github.com/onsi/gomega v1.10.4
 	github.com/openshift/api v3.9.1-0.20190924102528-32369d4db2ad+incompatible
 	github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca
-	github.com/operator-framework/api v0.3.13
+	github.com/operator-framework/api v0.3.20
 	github.com/operator-framework/operator-lib v0.2.0
 	github.com/prometheus/client_golang v1.7.1
 	github.com/prometheus/client_model v0.2.0
 	github.com/spf13/pflag v1.0.5
-	golang.org/x/net v0.0.0-20201110031124-69a78807bb2b // indirect
 	golang.org/x/text v0.3.4 // indirect
 	golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5
 	google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31 // indirect
@@ -40,6 +37,7 @@ require (
 	kubevirt.io/containerized-data-importer v1.28.0
 	kubevirt.io/controller-lifecycle-operator-sdk v0.1.1
 	kubevirt.io/kubevirt v0.36.0
+	kubevirt.io/ssp-operator v0.0.0-20201204192040-4623f45d34ea
 	sigs.k8s.io/controller-runtime v0.6.3
 	sigs.k8s.io/controller-tools v0.4.0
 )
diff --git a/go.sum b/go.sum
index da28ef81a5..256db54580 100644
--- a/go.sum
+++ b/go.sum
@@ -3,9 +3,7 @@ bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:
 bou.ke/monkey v1.0.1/go.mod h1:FgHuK96Rv2Nlf+0u1OOVDpCMdsWyOFmeeketDHE7LIg=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.37.4 h1:glPeL3BQJsbF6aIIYfZizMwc5LTYz250bDMjttbBGAU=
 cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw=
-cloud.google.com/go v0.38.0 h1:ROfEUZz+Gh5pa62DJWXSaonyu3StP6EA6lPEXPI6mCo=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
 cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
 cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
@@ -34,22 +32,15 @@ github.com/Azure/azure-sdk-for-go v32.5.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
 github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v11.2.8+incompatible h1:Q2feRPMlcfVcqz3pF87PJzkm5lZrL+x6BDtzhODzNJM=
 github.com/Azure/go-autorest v11.2.8+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.9.0 h1:MRvx8gncNaXJqOoLmhNjUAKh33JJF8LyxPhomEtOsjs=
 github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest v0.9.1 h1:JB7Mqhna/7J8gZfVHjxDSTLSD6ciz2YgSMb/4qLXTtY=
 github.com/Azure/go-autorest/autorest v0.9.1/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
 github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
-github.com/Azure/go-autorest/autorest/adal v0.5.0 h1:q2gDruN08/guU9vAjuPWff0+QIrpH6ediguzdAzXAUU=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
-github.com/Azure/go-autorest/autorest/adal v0.6.0 h1:UCTq22yE3RPgbU/8u4scfnnzuCW6pwQ9n+uBtV78ouo= github.com/Azure/go-autorest/autorest/adal v0.6.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= -github.com/Azure/go-autorest/autorest/date v0.1.0 h1:YGrhWfrgtFs84+h0o46rJrlmsZtyZRg470CqAXTZaGM= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= @@ -59,9 +50,7 @@ github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocm github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8= github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= -github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= -github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.0/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= @@ -75,12 +64,9 @@ github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab/go.mod h1:3VYc5 github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/MakeNowJust/heredoc v0.0.0-20171113091838-e9091a26100e/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/semver/v3 v3.0.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.0.0/go.mod h1:NEUY/Qq8Gdm2xgYA+NwJM6wmfdRV9xkh8h/Rld20R0U= github.com/Masterminds/sprig/v3 v3.1.0/go.mod h1:ONGMf7UfYGAbMXCZmQLy8x3lCDIPrEZE/rU8pmrbihA= @@ -88,11 +74,9 @@ github.com/Masterminds/squirrel v1.2.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZl github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= github.com/Microsoft/go-winio v0.4.11/go.mod 
h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.12/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= -github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ= github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= @@ -119,7 +103,6 @@ github.com/StackExchange/wmi v0.0.0-20180725035823-b12b22c5341f/go.mod h1:3eOhrU github.com/VividCortex/ewma v1.1.1/go.mod h1:2Tkkvm3sRDVXaiyucHiACn4cqf7DpdyLvmxzcbUokwA= github.com/acarl005/stripansi v0.0.0-20180116102854-5a71ef0e047d/go.mod h1:asat636LX7Bqt5lYEZ27JNDcqxfjdBQuJ/MM4CN/Lzo= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= -github.com/aktau/github-release v0.7.2 h1:la7AnShr2MQPIlBEcRA9MPbI8av0YFmpFP9WM5EoqJs= github.com/aktau/github-release v0.7.2/go.mod h1:cPkP83iRnV8pAJyQlQ4vjLJoC+JE+aT5sOrYz3sTsX0= github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -134,7 +117,6 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/ant31/crd-validation v0.0.0-20180702145049-30f8a35d0ac2/go.mod h1:X0noFIik9YqfhGYBLEHg8LJKEwy7QIitLQuFMpKLcPk= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/appscode/jsonpatch v1.0.1 h1:e82Bj+rsBSnpsmjiIGlc9NiKSBpJONZkamk/F8GrCR0= github.com/appscode/jsonpatch v1.0.1/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -144,9 +126,7 @@ github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= -github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496 h1:zV3ejI06GQ59hwDQAvmK1qxOQGB3WuVTRoY0okPTAv0= github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg= github.com/auth0/go-jwt-middleware 
v0.0.0-20170425171159-5493cabe49f7/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= github.com/aws/aws-sdk-go v0.0.0-20180507225419-00862f899353/go.mod h1:ZRmQr0FajVIyZ4ZzBYKG5P3ZqPz9IHG41ZoMu1ADI3k= @@ -160,7 +140,6 @@ github.com/bazelbuild/bazel-gazelle v0.0.0-20181012220611-c728ce9f663e/go.mod h1 github.com/bazelbuild/buildtools v0.0.0-20180226164855-80c7f0d45d7e/go.mod h1:5JP0TXzWDHXv8qvxRC4InIazwdyDseBDbzESUMKk1yU= github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= @@ -198,7 +177,6 @@ github.com/certifi/gocertifi v0.0.0-20180905225744-ee1a9a0726d2/go.mod h1:GJKEex github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c/go.mod h1:Xe6ZsFhtM8HrDku0pxJ3/Lr51rwykrzgFwpmTzleatY= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0 h1:yTUvW7Vhb89inJ+8irsUqiWjh8iT6sQPZiQzI6ReGkA= github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -232,11 +210,9 @@ github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtM github.com/containerd/containerd v1.3.0-beta.2.0.20190823190603-4a2f61c4f2b4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.2 h1:ForxmXkA6tPIvffbrDAcPUIB32QgXkt2XFj+F0UxetA= github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41 h1:kIFnQBO7rQ0XkMe6xEwbybYHBEaWmh/f++laI6Emt7M= github.com/containerd/continuity v0.0.0-20200107194136-26c1120b8d41/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/continuity v0.0.0-20200228182428-0f16d7a0959c/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= @@ -263,7 +239,6 @@ github.com/coreos/go-iptables v0.4.2/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka github.com/coreos/go-iptables v0.4.3/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod 
h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181031085051-9002847aa142/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -273,16 +248,15 @@ github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfc github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/prometheus-operator v0.34.0/go.mod h1:Li6rMllG/hYIyXfMuvUwhyC+hqwJVHdsDdP21hypT1M= -github.com/coreos/prometheus-operator v0.35.0 h1:kd7mysk8mCdwquBcPLyuRoRFNJCpgezXu8yUvIYE2nc= github.com/coreos/prometheus-operator v0.35.0/go.mod h1:XHYZUStZWcwd1yk/1DjZv/fywqKIyAJ6pSwvIr+v9BQ= -github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc h1:nMbUjGuF7UzVluucix/vsy4973BNdEiT/aX6kFtskKM= github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc/go.mod h1:erio69w1R/aC14D5nfvAXSlE8FT8jt2Hnavc50Dp33A= +github.com/coreos/prometheus-operator v0.41.1 h1:MEhY9syliPlQg+VlFRUfNodUEVXRXJ2n1pFG0aBp+mI= +github.com/coreos/prometheus-operator v0.41.1/go.mod h1:LhLfEBydppl7nvfEA1jIqlF3xJ9myHCnzrU+HHDxRd4= github.com/coreos/rkt v1.30.0/go.mod h1:O634mlH6U7qk87poQifK6M2rsFNt+FyUTWNMnP1hF1U= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -311,7 +285,6 @@ github.com/denisenkom/go-mssqldb v0.0.0-20191001013358-cfbb681360f0/go.mod h1:xb github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= @@ -322,30 +295,24 @@ github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TT github.com/docker/distribution v2.6.2+incompatible/go.mod 
h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker-credential-helpers v0.6.1/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= github.com/docker/go-connections v0.3.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA= github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/libnetwork v0.0.0-20180830151422-a9cd636e3789/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8= github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c h1:ZfSZ3P3BedhKGUhzj7BQlPSU4OvT6tfOKe3DVHzOA7s= github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= @@ -359,21 +326,16 @@ github.com/elastic/gosigar v0.9.0/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyC github.com/elazarl/go-bindata-assetfs v1.0.0/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod 
h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1 h1:yY9rWGoXv1U5pl4gxqlULARMQD7x0QG85lqEXTWysik= github.com/elazarl/goproxy v0.0.0-20190911111923-ecfe977594f1/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM= -github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2 h1:dWB6v3RcOy03t/bUadywsbyrQwCqZeNIEX6M1OtSZOM= github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.8.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.8.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.9.6+incompatible h1:tfrHha8zJ01ywiOEC1miGY8st1/igzWB8OmvPgoYX7w= github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful v2.10.0+incompatible h1:l6Soi8WCOOVAeCo4W98iBFC6Og7/X8bpRt51oNLZ2C8= github.com/emicklei/go-restful v2.10.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.11.1+incompatible h1:CjKsv3uWcCMvySPQYKxO8XX3f9zD4FeZRsW4G0B4ffE= github.com/emicklei/go-restful v2.11.1+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/emicklei/go-restful-openapi v1.2.0 h1:ohRZ1yEZERGzqaozBgxa3A0lt6c6KF14xhs3IL9ECwg= github.com/emicklei/go-restful-openapi v1.2.0/go.mod h1:cy7o3Ge8ZWZ5E90mpEY81sJZZFs2pkuYcLvfngYy1l0= github.com/emicklei/go-restful-swagger12 v0.0.0-20170926063155-7524189396c6/go.mod h1:qr0VowGBT4CS4Q8vFF8BSeKz34PuqKGxs/L0IAQA9DQ= github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= @@ -386,25 +348,19 @@ github.com/evanphx/json-patch v3.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.1.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM= github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= github.com/fatih/camelcase v1.0.0/go.mod 
h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc= github.com/fatih/color v1.6.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.7.0 h1:DkWD4oS2D8LGGgTQ6IvwJJXSL5Vp2ffcQg58nFV38Ys= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/structtag v1.1.0 h1:6j4mUV/ES2duvnAzKMFkN6/A5mCaNYPD3xfbAkLLOF8= github.com/fatih/structtag v1.1.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -422,7 +378,6 @@ github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH github.com/github-release/github-release v0.8.1/go.mod h1:CcaWgA5VoBGz94mOHYIXavqUA8kADNZxU+5/oDQxF6o= github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-acme/lego v2.5.0+incompatible/go.mod h1:yzMNe9CasVUhkquNvti5nAtPmG94USbYxYrZfTkIn0M= github.com/go-bindata/go-bindata v3.1.1+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= @@ -439,14 +394,12 @@ github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3I github.com/go-kit/kit v0.3.0 h1:QZEva+odUF/G+yz7yjQLwUQxnSAS4S45V9+4O02yJ1Q= github.com/go-kit/kit v0.3.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0 h1:TrB8swr/68K7m9CcGut2g3UOihhbcbiMAYiuTXdEih4= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.2.1 h1:fV3MLmabKIZ383XifUjFSwcoGee0v9qgPp8wy5svibE= github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= -github.com/go-logr/zapr v0.1.1 h1:qXBXPDdNncunGs7XeEpsJt8wCjYBygluzfdLO0G5baE= github.com/go-logr/zapr v0.1.1/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk= github.com/go-logr/zapr v0.2.0 h1:v6Ji8yBW77pva6NkJKQdHLAJKrIJKRHz0RXwPqCHSR4= github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU= @@ -454,13 +407,11 @@ github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNI 
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= -github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= -github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI= +github.com/go-openapi/analysis v0.19.4/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU= github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= -github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.0.0-20180322222829-3a0015ad55fa/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= @@ -473,22 +424,19 @@ github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1 github.com/go-openapi/jsonreference v0.0.0-20180322222742-3fb327e6747d/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= -github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= github.com/go-openapi/jsonreference v0.19.3 h1:5cxNfTy0UVC3X8JL5ymxzyoUZmo8iZb+jeTWn7tUa8o= github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8= github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= -github.com/go-openapi/loads v0.19.2 h1:rf5ArTHmIJxyV5Oiks+Su0mUens1+AjpkPoWr5xFRcI= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= -github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY= +github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= -github.com/go-openapi/runtime v0.19.0 h1:sU6pp4dSV2sGlNKKyHxZzi1m1kG4WnYtWcJ+HYbygjE= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= +github.com/go-openapi/runtime v0.19.15/go.mod 
h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.0.0-20180415031709-bcff419492ee/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= @@ -496,31 +444,29 @@ github.com/go-openapi/spec v0.17.2/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsd github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= -github.com/go-openapi/spec v0.19.4 h1:ixzUSnHTd6hCemgtAJgluaTSGYpLNpJY4mA2DIkdOAo= github.com/go-openapi/spec v0.19.4/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo= github.com/go-openapi/spec v0.19.7 h1:0xWSeMd35y5avQAThZR2PkEuqSosoS5t6gDH4L8n11M= github.com/go-openapi/spec v0.19.7/go.mod h1:Hm2Jr4jv8G1ciIAo+frC/Ft+rR2kQDh8JHKHb3gWUSk= github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.17.2/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= -github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= github.com/go-openapi/strfmt v0.19.2/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= -github.com/go-openapi/strfmt v0.19.3 h1:eRfyY5SkaNJCAwmmMcADjY31ow9+N7MCLW7oRkbsINA= github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU= +github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.0.0-20180405201759-811b1089cde9/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.9 h1:1IxuqvBUU3S2Bi4YC7tlP9SJF1gVpCvqN0T2Qof4azE= +github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= -github.com/go-openapi/validate v0.19.2 h1:ky5l57HjyVRrsJfd2+Ro5Z9PjGuKbsmftwyMtk8H7js= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= -github.com/go-openapi/validate v0.19.5 h1:QhCBKRYqZR+SKo4gl1lPhPahope8/RLt6EVgY8X80w0= +github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= github.com/go-openapi/validate 
v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-ozzo/ozzo-validation v3.5.0+incompatible/go.mod h1:gsEKFIVnabGBt6mXmxK0MoFy+cZoTJY6mu5Ll3LVLBU= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= @@ -528,10 +474,8 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ= -github.com/gobuffalo/envy v1.7.0 h1:GlXgaiBkmrYMHco6t4j7SacKO4XUjvh5pwXh0f4uxXU= github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w= -github.com/gobuffalo/flect v0.1.5 h1:xpKq9ap8MbYfhuPCF0dBH854Gp9CxZjr/IocxELFflo= github.com/gobuffalo/flect v0.1.5/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.0/go.mod h1:W3K3X9ksuZfir8f/LrfVtWmCDQFfayuylOJ7sz/Fj80= github.com/gobuffalo/flect v0.2.1 h1:GPoRjEN0QObosV4XwuoWvSd5uSiL0N3e91/xqyY4crQ= @@ -542,7 +486,6 @@ github.com/gobuffalo/packd v0.3.0/go.mod h1:zC7QkmNkYVGKPw4tHpBQ+ml7W/3tIebgeo1b github.com/gobuffalo/packr v1.30.1/go.mod h1:ljMyFO2EcrnzsHsN99cvbq055Y9OhRrIaviy289eRuk= github.com/gobuffalo/packr/v2 v2.5.1/go.mod h1:8f9c96ITobJlPzI44jj+4tHnEKNt0xXWSVlXRN9X1Iw= github.com/gobuffalo/packr/v2 v2.7.1/go.mod h1:qYEvAazPaVxy7Y7KR0W8qYEE+RymX74kETFqjFoFlOc= -github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= @@ -552,15 +495,12 @@ github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14j github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.0 h1:G8O7TerXerS4F6sx9OV7/nRfJdnXgHZu/S/7F2SN+UE= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang-migrate/migrate/v4 v4.6.2 h1:LDDOHo/q1W5UDj6PbkxdCv7lv9yunyZHXvxuwDkGo3k= github.com/golang-migrate/migrate/v4 v4.6.2/go.mod h1:JYi6reN3+Z734VZ0akNuyOJNcrg45ZL7LDBMW3WGJL0= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= @@ -569,9 +509,7 @@ github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4er github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20180924190550-6f2cf27854a4/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20181024230925-c65c006176ff/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -580,7 +518,6 @@ github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4er github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0 h1:Rd1kQnQu0Hq3qvJppYSG0HtP+f5LPPUiDswTLiEegLg= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -588,9 +525,7 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+ github.com/golang/protobuf v1.0.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= @@ -608,33 +543,23 @@ github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAO github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8= github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree 
v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/cadvisor v0.34.0/go.mod h1:1nql6U13uTHaLYB8rLS5x9IJc2qT6Xd/Tr1sTX6NE48= github.com/google/certificate-transparency-go v1.0.21/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w= github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= -github.com/google/go-github/v32 v32.0.0 h1:q74KVb22spUq0U5HqZ9VCYqQz8YRuOtL/39ZnfwO+NM= github.com/google/go-github/v32 v32.0.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= -github.com/google/go-github/v32 v32.1.0 h1:GWkQOdXqviCPx7Q7Fj+KyPoGm4SwHRh8rheoPhd27II= github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3qBsCizh3q2WCI= -github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/goexpect v0.0.0-20190425035906-112704a48083 h1:3HihP5rfhHoq48iXpwvPVPyUJE1jQWOhiTdQ+lGbozw= github.com/google/goexpect v0.0.0-20190425035906-112704a48083/go.mod h1:qtE5aAEkt0vOSA84DBh8aJsz6riL8ONfqfULY7lBjqc= github.com/google/gofuzz v0.0.0-20150304233714-bbcb9da2d746/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= -github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -648,7 +573,6 @@ github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OI github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v0.0.0-20170306145142-6a5e28554805/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= 
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -662,7 +586,6 @@ github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1a github.com/gophercloud/gophercloud v0.0.0-20190301152420-fca40860790e/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.2.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= -github.com/gophercloud/gophercloud v0.4.0 h1:4iXQnHF7LKOl7ncQsRibnUmfx/unxT3rLAniYRB8kQQ= github.com/gophercloud/gophercloud v0.4.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -673,9 +596,7 @@ github.com/gorilla/handlers v1.4.0/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/ github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.1 h1:Dw4jY2nghMMRsh1ol8dv1axHkDwMQK2DHerMNJsIpJU= github.com/gorilla/mux v1.7.1/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/pat v1.0.1/go.mod h1:YeAe0gNeiNT5hoiZRI4yiOky6jVdNvfO2N6Kav/HmxY= @@ -685,9 +606,7 @@ github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoA github.com/gosuri/uitable v0.0.1/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f h1:ShTPMJQes6tubcjzGMODIVG5hlrCeImaBnZzKF2N8SM= github.com/gregjones/httpcache v0.0.0-20181110185634-c63ab54fda8f/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/gregjones/httpcache v0.0.0-20190203031600-7a902570cb17 h1:prg2TTpTOcJF1jRWL2zSU1FQNgB0STAFNux8GK82y8k= github.com/gregjones/httpcache v0.0.0-20190203031600-7a902570cb17/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= @@ -727,15 +646,14 @@ github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdv github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= 
github.com/hashicorp/golang-lru v0.0.0-20180201235237-0fb14efe8c47/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= @@ -749,25 +667,20 @@ github.com/heketi/rest v0.0.0-20180404230133-aa6a65207413/go.mod h1:BeS3M108VzVl github.com/heketi/tests v0.0.0-20151005000721-f3775cbcefd6/go.mod h1:xGMAM8JLi7UkZt1i4FQeQy0R2T8GLUwQhOP5M1gBhy4= github.com/heketi/utils v0.0.0-20170317161834-435bc5bdfa64/go.mod h1:RYlF4ghFZPPmk2TC5REt5OFwvfb6lzxFWrTWB+qs28s= github.com/helm/helm-2to3 v0.2.0/go.mod h1:jQUVAWB0bM7zNIqKPIfHFzuFSK0kHYovJrjO+hqcvRk= -github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.2.0 h1:yPeWdRnmynF7p+lLYz0H2tthW9lqhMJrQV/U7yy4wX0= github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365 h1:ECW73yc9MY7935nNYXUkK7Dz17YuSUI9yqRqYS8aBww= github.com/iancoleman/strcase v0.0.0-20190422225806-e506e3ef7365/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.0.0-20171009183408-7fe0c75c13ab/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.7/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ= github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.9 h1:UauaLniWCFHWd+Jp9oCEkTBj8VO/9DKg3PV3VCNMDIg= github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/improbable-eng/thanos v0.3.2/go.mod h1:GZewVGILKuJVPNRn7L4Zw+7X96qzFOwj63b22xYGXBE= github.com/inconshreveable/log15 v0.0.0-20200109203555-b30bc20e4fd1/go.mod h1:cOaXtrgN4ScfRrD9Bre7U1thNq5RtJ8ZoP4iXVGRj6o= -github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/influxdata/influxdb v0.0.0-20170331210902-15e594fc09f1/go.mod 
h1:qZna6X/4elxqT3yI9iZYdZrWWdeFOOprn86kgg4+IzY= github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA= @@ -779,21 +692,16 @@ github.com/jimstudt/http-authentication v0.0.0-20140401203705-3eca13d6893a/go.mo github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= -github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -827,17 +735,13 @@ github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ github.com/knz/strtime v0.0.0-20181018220328-af2256ee352c/go.mod h1:4ZxfWkxwtc7dBeifERVVWRy9F9rTU9p0yCDgeCtlius= github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 
h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= @@ -849,8 +753,6 @@ github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1 h1:t5bmB3Y8nCaLA4aFrIpX github.com/kubernetes-csi/external-snapshotter/v2 v2.1.1/go.mod h1:dV5oB3U62KBdlf9ADWkMmjGd3USauqQtwIm2OZb5mqI= github.com/kubevirt/cluster-network-addons-operator v0.44.0 h1:zR5OTl2YcJZOrSEbsoSXFlrRnl65DzNcaoMFmGqrwMs= github.com/kubevirt/cluster-network-addons-operator v0.44.0/go.mod h1:qtYFyYlmjk8egulFv39Sy2Bji2igdM2U2SdRxZFTMss= -github.com/kubevirt/kubevirt-ssp-operator v1.2.1 h1:rwMm1eR0RUKvXLF4F9www1XinD3BsroB7cw84fsNbAQ= -github.com/kubevirt/kubevirt-ssp-operator v1.2.1/go.mod h1:r8NvAzhRVvq7l8rnT5YGFXk9usDtlJ3QB2tttSXqPCc= github.com/kubevirt/vm-import-operator v0.2.5 h1:HAwCn/LEoDh1Xsb7nX3yRZ6+Cf+Q0P348HZNG3Wfwp0= github.com/kubevirt/vm-import-operator v0.2.5/go.mod h1:7Oxg4hMtSaZ5tosh2JMlMh5eobVkXUpn74xh80Uw7gs= github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= @@ -875,29 +777,23 @@ github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H7 github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58= github.com/machacekondra/fakeovirt v0.0.0-20200617055337-1afdfa789aab/go.mod h1:/X5EoPOXC7o1I7kIgywprfQVLInH1VqsqrvpbwddtrE= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180323154445-8b799c424f57/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e h1:hB2xlXdHp/pmPZq0y3QnmWAArdw9PqbmotexnWx/FU8= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.0 h1:aizVhC/NAAcKWb+5QsU1iNOZb4Yws5UO2I+aIprQITM= github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/maorfr/helm-plugin-utils v0.0.0-20181205064038-588190cb5e3b/go.mod h1:p3gwmRSFqbWw6plBpR0sKl3n3vpu8kX70gvCJKMvvCA= -github.com/markbates/inflect v1.0.4 h1:5fh1gzTFhfae06u3hzHYO9xe3l3v3nW5Pwt3naLTP5g= github.com/markbates/inflect v1.0.4/go.mod 
h1:1fR9+pO2KHEO9ZRtto13gDwwZaAKstQzferVeWqbgNs= github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho= github.com/marten-seemann/qtls v0.2.3/go.mod h1:xzjG7avBwGGbdZ8dTGxlBnLArsVKLvwmjgmPuiQEcYk= -github.com/martinlindhe/base36 v1.0.0 h1:eYsumTah144C0A8P1T/AVSUk5ZoLnhfYFM3OGQxB52A= github.com/martinlindhe/base36 v1.0.0/go.mod h1:+AtEs8xrBpCeYgSLoY/aJ6Wf37jtBuR0s35750M27+8= github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.2 h1:/bC9yWikZXAL9uJdulbSfyVNIR3n3trXl+v8+1sx8mU= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA= github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= @@ -905,33 +801,25 @@ github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6I github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8 h1:HLtExJ+uU2HOZ+wI0Tt5DtUDrx8yhUqDcp7fYERX4CE= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= -github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.5/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.12.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
-github.com/maxbrunsfeld/counterfeiter v0.0.0-20181017030959-1aadac120687 h1:fJasMUaV/LYZvzK4bUOj13rNXc4fhVzU0Vu1OlcGUd4= github.com/maxbrunsfeld/counterfeiter v0.0.0-20181017030959-1aadac120687/go.mod h1:aoVsckWnsNzazwF2kmD+bzgdr4GBlbK91zsdivQJ2eU= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.1/go.mod h1:F9YacGpnZbLQMzuPI0rR6op21YvNu/RjL705LJJpM3k= -github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4= -github.com/mfranczy/crd-rest-coverage v0.1.0 h1:235kOdqFQ6Io+hsI/SvHfoYDHkSNEmb7cu6TG24O93Y= github.com/mfranczy/crd-rest-coverage v0.1.0/go.mod h1:yRun7BqJIqQC+myEhfhMvbImzQ5IOE4tAlln5KDIfqQ= github.com/mhenriks/library-go v0.0.0-20200116194830-9fcc1a687a9d/go.mod h1:wnNQErbYinBcgB7TWh5oa1Trpnr3qwXpwWwBg32YLDQ= github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY= @@ -948,29 +836,22 @@ github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tB github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mistifyio/go-zfs v2.1.1+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v0.0.0-20180523094522-3864e76763d9/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed h1:FI2NIv6fpef6BQl2u3IZX/Cj20tfypRF4yd+uaHOMtI= github.com/mitchellh/go-vnc v0.0.0-20150629162542-723ed9867aed/go.mod h1:3rdaFaCv4AyBgu5ALFM0+tSuHrBh6v692nyQe3ikrq0= -github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4= github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo= github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= github.com/mitchellh/hashstructure v0.0.0-20170609045927-2bca23e0e452/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/hashstructure v1.0.0/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/osext 
v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/reflectwalk v1.0.0 h1:9D+8oIskB4VJBN5SFlmc27fSlIBZaov1Wpk/IfikLNY= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309 h1:cvy4lBOYN3gKfKj8Lzz5Q9TfviP+L7koMHY7SvkyTKs= github.com/moby/moby v0.7.3-0.20190826074503-38ab9da00309/go.mod h1:fDXVQ6+S340veQPv35CzDahGBmHsiclFwfEygB/TWMc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -981,9 +862,7 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mohae/deepcopy v0.0.0-20170603005431-491d3605edfb/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20180911141734-db72e6cae808/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= @@ -996,7 +875,6 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m github.com/mvdan/xurls v1.1.0/go.mod h1:tQlNn3BED8bE/15hnSL2HLkDeLWpNPAwtw7wkEq44oU= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/nakagami/firebirdsql v0.0.0-20190310045651-3c02a58cfed8/go.mod h1:86wM1zFnC6/uDBfZGNwB65O+pR2OFi5q/YQaEUid1qA= github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0= @@ -1008,6 +886,7 @@ github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/oklog/oklog v0.0.0-20170918173356-f857583a70c3/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= +github.com/oklog/run v1.1.0/go.mod h1:sVPdnTZT1zYwAJeCMu2Th4T21pA3FPOQRfWjQlk7DVU= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= @@ -1018,14 +897,11 @@ github.com/onsi/ginkgo v1.4.0/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.2/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= -github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1033,26 +909,20 @@ github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.3.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.2-0.20180831124310-ae19f1b56d53/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA= -github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= +github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= +github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= -github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1073,7 +943,6 @@ github.com/openshift/build-machinery-go v0.0.0-20200917070002-f171684f77ab/go.mo github.com/openshift/client-go v0.0.0-20191125132246-f6563a70e19a h1:Otk3CuCAEHiMUr4Er6b+csq4Ar6qilAs9h93tbea+qM= github.com/openshift/client-go v0.0.0-20191125132246-f6563a70e19a/go.mod h1:6rzn+JTr7+WYS2E1TExP4gByoABxMznR6y2SnUIkmxk= github.com/openshift/cluster-network-operator v0.0.0-20200324123637-74e803688dd9/go.mod h1:M9dusM6U0OOmpKjTacoXquDKPhRPu23PvFA/ws8QML0= -github.com/openshift/custom-resource-status v0.0.0-20190822192428-e62f2f3b79f3 h1:XuAys09+XqT5/FjdR23G/UtbBLII89dFe9XIi73EKIQ= github.com/openshift/custom-resource-status v0.0.0-20190822192428-e62f2f3b79f3/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca h1:F1MEnOMwSrTA0YAkO0he9ip9w0JhYzI/iCB2mXmaSPg= github.com/openshift/custom-resource-status v0.0.0-20200602122900-c002fd1547ca/go.mod h1:GDjWl0tX6FNIj82vIxeudWeSx2Ff6nDZ8uJn0ohUFvo= @@ -1089,8 +958,8 @@ github.com/operator-framework/api v0.0.0-20200120235816-80fd2f1a09c9/go.mod h1:S github.com/operator-framework/api v0.1.1/go.mod h1:yzNYR7qyJqRGOOp+bT6Z/iYSbSPNxeh3Si93Gx/3OBY= github.com/operator-framework/api v0.3.4/go.mod h1:TmRmw+8XOUaDPq6SP9gA8cIexNf/Pq8LMFY7YaKQFTs= github.com/operator-framework/api v0.3.7-0.20200528122852-759ca0d84007/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= -github.com/operator-framework/api v0.3.13 h1:Rg+6sdgP7KMOUGNP83s+5gPo7IwTH3mZ85ZFml9SPXY= -github.com/operator-framework/api v0.3.13/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= +github.com/operator-framework/api v0.3.20 h1:2Ks8GXXl/H2sV9ll2iQBUO65ABQ5VuzN3IKEZCJWljo= +github.com/operator-framework/api v0.3.20/go.mod h1:Xbje9x0SHmh0nihE21kpesB38vk3cyxnE6JdDS8Jo1Q= github.com/operator-framework/go-appr v0.0.0-20180917210448-f2aef88446f2/go.mod h1:YNzwUx1i6C4dXWcffyq3yaIb0rh/K8/OvQ4vG0SNlSw= github.com/operator-framework/operator-lib v0.2.0 h1:yrSA8LL3y43/+3AMpuDl8c1jIAw1um+EvQWzwFH1lyc= github.com/operator-framework/operator-lib v0.2.0/go.mod h1:HLw61JTIEeq0YLeVf4dwYx/zt4DmLGZUVWI1y3Lf5Hg= @@ -1098,7 +967,6 @@ github.com/operator-framework/operator-lifecycle-manager v0.0.0-20181023032605-e github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190105193533-81104ffdc4fb/go.mod h1:XMyE4n2opUK4N6L45YGQkXXi8F9fD7XDYFv/CsS6V5I= github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190125151539-1e295784b30a/go.mod h1:vq6TTFvg6ti1Bn6ACsZneZTmjTsURgDD6tQtVDbEgsU= github.com/operator-framework/operator-lifecycle-manager v0.0.0-20190725173916-b56e63a643cc/go.mod h1:N1BVX1etkIJtKcXvVNNUanaKK8d00iFlnOf4qWP8V/Y= -github.com/operator-framework/operator-lifecycle-manager v0.0.0-20191115003340-16619cd27fa5 h1:rjaihxY50c5C+kbQIK4s36R8zxByATYrgRbua4eiG6o= github.com/operator-framework/operator-lifecycle-manager 
v0.0.0-20191115003340-16619cd27fa5/go.mod h1:zL34MNy92LPutBH5gQK+gGhtgTUlZZX03I2G12vWHF4= github.com/operator-framework/operator-marketplace v0.0.0-20190216021216-57300a3ef3ba/go.mod h1:msZSL8pXwzQjB+hU+awVrZQw94IwJi3sNZVD3NoESIs= github.com/operator-framework/operator-marketplace v0.0.0-20190617165322-1cbd32624349/go.mod h1:msZSL8pXwzQjB+hU+awVrZQw94IwJi3sNZVD3NoESIs= @@ -1111,7 +979,6 @@ github.com/operator-framework/operator-registry v1.5.7-0.20200121213444-d8e2ec52 github.com/operator-framework/operator-registry v1.12.1/go.mod h1:rf4b/h77GUv1+geiej2KzGRQr8iBLF4dXNwr5AuGkrQ= github.com/operator-framework/operator-registry v1.12.4/go.mod h1:JChIivJVLE1wRbgIhDFzYQYT9yosa2wd6qiTyMuG5mg= github.com/operator-framework/operator-sdk v0.15.2/go.mod h1:RkC5LpluVONa08ORFIIVCYrEr855xG1/NltRL2jQ8qo= -github.com/operator-framework/operator-sdk v0.18.0 h1:YdtgXvjHu+f0hE/Nzvw9JIU3XvOZyp2Kd2cOLW486rU= github.com/operator-framework/operator-sdk v0.18.0/go.mod h1:xP/DNvnYnIoGK1bLKiD0s7aYZp2fa4AI6t1v3INaoZg= github.com/ostreedev/ostree-go v0.0.0-20190702140239-759a8c1ac913/go.mod h1:J6OG6YJVEWopen4avK3VNQSnALmmjvniMmni/YFYAwc= github.com/otiai10/copy v1.0.1/go.mod h1:8bMCJrAqOtN/d9oyh5HR7HhLQMvcGMpGdwRDYsfOCHc= @@ -1124,15 +991,12 @@ github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT9 github.com/ovirt/go-ovirt v4.3.4+incompatible/go.mod h1:r33ZGjVKCPMiI6hw791/Zx8tNKk0Gn+4VFWbOfyIvZQ= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= -github.com/pborman/uuid v0.0.0-20170612153648-e790cca94e6c/go.mod h1:VyrYX9gd7irzKovcSS6BIIEwPRkP2Wm2m9ufcdFSJ34= github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= github.com/pelletier/go-toml v1.0.1/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/petar/GoLLRB v0.0.0-20130427215148-53be0d36a84c/go.mod h1:HUpKUBZnpzkdx0kD/+Yfuft+uD3zHGtXF/XJB14TUr4= github.com/peterbourgon/diskv v0.0.0-20180312054125-0646ccaebea1/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/peterbourgon/g2s v0.0.0-20170223122336-d4e7ad98afea/go.mod h1:1VcHEd3ro4QMoHfiNl/j7Jkln9+KQuorp0PItHMJYNg= github.com/petermattis/goid v0.0.0-20170504144140-0ded85884ba5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= @@ -1141,7 +1005,6 @@ github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod h1:iIss55rK github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 
h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -1153,29 +1016,25 @@ github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prY github.com/pquerna/ffjson v0.0.0-20180717144149-af8b230fcd20/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20181028064349-e517b90714f7/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= github.com/pquerna/ffjson v0.0.0-20190813045741-dac163c6c0a9/go.mod h1:YARuvh7BUWHNhzDq2OM5tzR2RiCcN2D7sapiKyCel/M= +github.com/prometheus-community/prom-label-proxy v0.1.1-0.20200616110844-0fbfa11fa8f3/go.mod h1:XdjyZg7LCbCC5FADHtpgNp6kQ0W9beXVGfmcvndMj5Y= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= -github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 
h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1188,11 +1047,8 @@ github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8 github.com/prometheus/common v0.3.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0 h1:kRhiuYSXR3+uv2IbVbZhUxK5zVD/2pp3Gd2PpvPkpEo= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= -github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= @@ -1206,12 +1062,9 @@ github.com/prometheus/procfs v0.0.0-20190129233650-316cf8ccfec5/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.3 h1:CTwfnzjQ+8dS6MhHHu4YswVAD99sL2wjPqP+VkURmKE= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5 h1:3+auTFlqw+ZaQYJARz6ArODtkaIwtvBTx3N2NehQlL8= github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8= @@ -1231,17 +1084,14 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.4.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= -github.com/rogpeppe/go-internal v1.5.0 h1:Usqs0/lDK/NqTkvrmKSwA/3XkZAs7ZAW/eLeQ2MVBTw= github.com/rogpeppe/go-internal v1.5.0/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3 h1:lwDYefgiwhjuAuVnMVUYknoF+Yg9CBUykYGvYoPCNnQ= github.com/rubenv/sql-migrate v0.0.0-20191025130928-9355dd04f4b3/go.mod h1:WS0rl9eEliYI8DPnr3TOwz4439pay+qNgzJoVya/DmY= 
github.com/rubenv/sql-migrate v0.0.0-20200212082348-64f95ea68aa3/go.mod h1:rtQlpHw+eR6UrqaS3kX1VYeaCxzCVdimDS7g5Ln4pPc= github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= github.com/rubyist/circuitbreaker v2.2.1+incompatible/go.mod h1:Ycs3JgJADPuzJDwffe12k6BZT8hxVi6lFK+gWYJLN4A= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1257,7 +1107,6 @@ github.com/sclevine/spec v1.0.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24 github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= -github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= @@ -1272,9 +1121,7 @@ github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjM github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0 h1:1N5EYkVAPEywqZRJd7cwnRtCb6xJx7NH3T3WUTF980Q= github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= @@ -1287,32 +1134,24 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= -github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0 h1:yXHLWeravcrgGyFSyCgdYpXQ9dR9c/WED3pg1RhxqEU= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= github.com/stevvooe/resumable v0.0.0-20180830230917-22b14a53ba50/go.mod h1:1pdIZTAHUz+HDKDVZ++5xg/duPlhKAIzw9qy42CWYp4= github.com/storageos/go-api v0.0.0-20180912212459-343b3eff91fc/go.mod h1:ZrLn+e0ZuF3Y65PNF6dIwbJPZqfmtCXxFm9ckv0agOY= @@ -1322,9 +1161,7 @@ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= @@ -1338,12 +1175,10 @@ github.com/thanhpk/randstr v1.0.4/go.mod h1:M/H2P1eNLZzlDwAzpkkkUvoyNNMbzRGhESZu github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= github.com/thecodeteam/goscaleio v0.1.0/go.mod h1:68sdkZAsK8bvEwBlbQnlLS+xU+hvLYM/iQ8KXej1AwM= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod 
h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20171017195756-830351dc03c6/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80 h1:nrZ3ySNYwJbSpD6ce9duiP+QkD3JuLCcWkdaehUS/3Y= github.com/tomnomnom/linkheader v0.0.0-20180905144013-02ca5825eb80/go.mod h1:iFyPdL66DjUD96XmzVL3ZntbzcflLnznH0fr99w5VqE= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= @@ -1369,7 +1204,6 @@ github.com/vmware/govmomi v0.20.1/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59b github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= github.com/vmware/govmomi v0.23.1/go.mod h1:Y+Wq4lst78L85Ge/F8+ORXIWiKYqaro1vhAulACy9Lc= github.com/vmware/vmw-guestinfo v0.0.0-20170707015358-25eff159a728/go.mod h1:x9oS4Wk2s2u4tS29nEaDLdzvuHdB19CvSGJjPgkZJNk= -github.com/voxelbrain/goptions v0.0.0-20180630082107-58cddc247ea2 h1:txplJASvd6b/hrE0s/Ixfpp2cuwH9IO9oZBAN9iYa4A= github.com/voxelbrain/goptions v0.0.0-20180630082107-58cddc247ea2/go.mod h1:DGCIhurYgnLz8J9ga1fMV/fbLDyUvTyrWXVWUIyJon4= github.com/wadey/gocovmerge v0.0.0-20160331181800-b5bfa59ec0ad/go.mod h1:Hy8o65+MXnS6EwGElrSRjUzQDLXreJlzYLlWiHtt8hM= github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -1377,13 +1211,10 @@ github.com/xanzy/go-gitlab v0.15.0/go.mod h1:8zdQa/ri1dfn8eS3Ir1SyfvOKlw7WBJ8DVT github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= github.com/xeipuuv/gojsonpointer v0.0.0-20190809123943-df4f5c81cb3b/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= -github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= -github.com/xeipuuv/gojsonschema v1.1.0 h1:ngVtJC9TY/lg0AA/1k48FYhBrhRoFlEmWzsehpNAaZg= github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xenolf/lego v0.0.0-20160613233155-a9d8cec0e656/go.mod h1:fwiGnfsIjG7OHPfOvgK7Y/Qo6+2Ox0iozjNTkZICKbY= @@ -1414,7 +1245,6 
@@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mI go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -1423,16 +1253,12 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= @@ -1440,13 +1266,11 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEa go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180426230345-b49d69b5da94/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1465,23 +1289,18 @@ golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto 
v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 h1:ydJNl0ENAG67pFbB+9tfhiL2pYqLhfoaZFw/cjLhY4A= golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= -golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad h1:5E5raQxcv+6CZ11RrBYQe5WRbUIWpScjh0kvHZkZIrQ= golang.org/x/crypto v0.0.0-20190927123631-a832865fa7ad/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191028145041-f83a4685e152/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d h1:9FCpayM9Egr1baVnV1SX0H87m+XB0B8S0hAMi99X/3U= golang.org/x/crypto v0.0.0-20200128174031-69ecbb4d6d5d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5 h1:Q7tZBpemrlsc2I7IyODzhtallWRSm4Q0d09pL6XbQtU= golang.org/x/crypto v0.0.0-20200423211502-4bdfaf469ed5/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1503,9 +1322,7 @@ golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= @@ -1551,36 
+1368,27 @@ golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3 h1:6KET3Sqa7fkVfD63QnAM81ZeYg5n4HwApOJkufONnHA= golang.org/x/net v0.0.0-20190930134127-c5a3c61f89f3/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271 h1:N66aaryRB3Ax92gH0v3hp1QYZ3zWWCCUR/j8Ifh45Ss= golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dDLfdtbT/IVn4keq83P0= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM= golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= -golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181105165119-ca4130e427c7/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= @@ -1590,7 +1398,6 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1628,18 +1435,15 @@ golang.org/x/sys v0.0.0-20190602015325-4c4f7f33c9ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190825160603-fb81701db80f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190927073244-c990c680b611 h1:q9u40nxWT5zRClI/uU9dHCiYGottAg6Nzz4YUQyHxdA= golang.org/x/sys v0.0.0-20190927073244-c990c680b611/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1650,30 +1454,23 @@ golang.org/x/sys v0.0.0-20191028164358-195ce5e7f934/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwglRwMkXSBzwVbkOjLLA= -golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5 h1:LfCXLvNmTYH9kEmVgqbnsWfruoXZIrh4YBgqVHtDvw0= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299 h1:DYfZAGf2WMFjMxbgTjaC+2HC7NkNAQs+6Q8b9WEB/F4= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200915050820-6d893a6b696e h1:RGS7MuoO4EeRp68J5OWuANAi5oVYtLRl+3LoD5fkMns= golang.org/x/sys v0.0.0-20200915050820-6d893a6b696e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1682,7 +1479,6 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fq golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20171227012246-e19ae1496984/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -1691,7 +1487,6 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1722,7 +1517,6 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624180213-70d37148ca0c/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db h1:9hRk1xeL9LTT3yX/941DqeBz87XgHAQuj+TbimYJuiw= golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1734,7 +1528,6 @@ golang.org/x/tools v0.0.0-20191004055002-72853e10c5a3/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191018212557-ed542cd5b28a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191030203535-5e247c9ad0a0/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1747,14 +1540,11 @@ golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b 
h1:AFZdJUT7jJYXQEC29hYH/WZkoV7+KhwxQGmdZ19yYoY= golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5 h1:UaoXseXAWUJUcuJ2E2oczJdLxAJXL0lOmVaBl7kuk+I= golang.org/x/tools v0.0.0-20200616195046-dc31b401abb5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898 h1:/atklqdjdhuosWIl6AIbOeHJjicWYPqR9bpxqxYG2pA= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1763,7 +1553,6 @@ gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3m gomodules.xyz/jsonpatch/v3 v3.0.1/go.mod h1:CBhndykehEwTOlEfnsfJwvkFQbSN8YZFr9M+cIHAJto= gomodules.xyz/orderedmap v0.1.0/go.mod h1:g9/TPUCm1t2gwD3j3zfV8uylyYhVdCNSi+xCEIu7yTU= gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485/go.mod h1:2ltnJ7xHfj0zHS40VVPYEAAMTa3ZGguvHGBSJeRWqE0= -gonum.org/v1/gonum v0.0.0-20190710053202-4340aa3071a0 h1:2qZ38BsejXrhuetzb8UxucqrWDZKjypFSZA82hLCpZ4= gonum.org/v1/gonum v0.0.0-20190710053202-4340aa3071a0/go.mod h1:03dgh78c4UvU1WksguQ/lvJQXbezKQGJSrwwRq5MraQ= gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -1781,7 +1570,6 @@ google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/ google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.5.0 h1:KxkO13IPW4Lslp2bz+KHP2E3gtFlrIGNThxkZQ3g+4c= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= @@ -1797,7 +1585,6 @@ google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= -google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod 
h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1805,12 +1592,9 @@ google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvx google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20191220175831-5c49e3ecc1c1 h1:PlscBL5CvF+v1mNR82G+i4kACGq2JQvKDnNq7LSS65o= google.golang.org/genproto v0.0.0-20191220175831-5c49e3ecc1c1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI= google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31 h1:Of4QP8bfRqzDROen6+s2j/p0jCPgzvQRd9nHiactfn4= google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1823,16 +1607,12 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.0 h1:AzbTB6ux+okLTzP8Ru1Xs41C303zdcfEht7MQnYJt5A= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0 h1:vb/1TCsVn3DcJlQ0Gs1yB1pKI6Do2/QNwxdKqmc/b0s= google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1842,7 +1622,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= @@ -1852,22 +1631,17 @@ gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/fsnotify/fsnotify.v1 v1.3.1/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE= gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/imdario/mergo.v0 v0.3.7/go.mod h1:9qPP6AGrlC1G2PTNXko614FwGZvorN7MiBU0Eppok+U= gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= @@ -1925,7 +1699,6 @@ k8s.io/apiextensions-apiserver v0.18.6 h1:vDlk7cyFsDyfwn2rNAO2DbmUbvXy5yT5GE3rrq k8s.io/apiextensions-apiserver v0.18.6/go.mod h1:lv89S7fUysXjLZO7ke783xOwVTm6lKizADfvUM/SS/M= k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/apiserver v0.18.6 h1:HcWwcOfhj4Yv6y2igP4ZUuovyPjVLGoZcG0Tsph4Mxo= k8s.io/apiserver v0.18.6/go.mod h1:Zt2XvTHuaZjBz6EFYzpp+X4hTmgWGy8AthNVnTdm3Wg= k8s.io/autoscaler v0.0.0-20190607113959-1b4f1855cb8e/go.mod h1:QEXezc9uKPT91dwqhSJq3GNI3B1HxFRQHiku9kmrsSA= k8s.io/cli-runtime v0.18.6/go.mod h1:+G/WTNqHgUv636e5y7rhOQ7epUbRXnwmPnhOhD6t9uM= @@ -1952,7 +1725,6 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc k8s.io/klog v0.1.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= k8s.io/klog v0.4.0/go.mod 
h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= @@ -1967,7 +1739,6 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLy k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-proxy v0.18.6/go.mod h1:r3ScLxYTuskh8l2dDfAPdrFK3QnWIMsZI/+Bq5kkmWc= k8s.io/kube-scheduler v0.18.6/go.mod h1:J+GApeR/QkU6eYonXir0i7+rcUVWzZPZbNHqjq4FpoQ= -k8s.io/kube-state-metrics v1.7.2 h1:6vdtgXrrRRMSgnyDmgua+qvgCYv954JNfxXAtDkeLVQ= k8s.io/kube-state-metrics v1.7.2/go.mod h1:U2Y6DRi07sS85rmVPmBFlmv+2peBcL8IWGjM+IjYA/E= k8s.io/kubectl v0.18.6/go.mod h1:3TLzFOrF9h4mlRPAvdNkDbs5NWspN4e0EnPnEB41CGo= k8s.io/kubelet v0.18.6/go.mod h1:5e0PJYialWMWZgsYWJqI6zVW58y+MaQvmOQwEGFF4Xc= @@ -1982,17 +1753,16 @@ k8s.io/metrics v0.18.6/go.mod h1:iAwGeabusQNO3duHDM7BBExTUB8L+iq8PM7N9EtQw6g= k8s.io/repo-infra v0.0.0-20181204233714-00fe14e3d1a3/go.mod h1:+G1xBfZDfVFsm1Tj/HNCvg4QqWx8rJ2Fxpqr1rqp/gQ= k8s.io/sample-apiserver v0.18.6/go.mod h1:NSRGjwumFclVpq8zewaqGVwiyIR7DQbLAE6wQZ0uljI= k8s.io/utils v0.0.0-20190308190857-21c4ce38f2a7/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= -k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190712204705-3dccf664f023/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191010214722-8d271d903fe4/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19 h1:7Nu2dTj82c6IaWvL7hImJzcXoTPz1MsSCH7r+0m6rfo= k8s.io/utils v0.0.0-20200619165400-6e3d28b6ed19/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= -k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc h1:GiXZzevctVRRBh56shqcqB9s9ReWMU6GTsFyE2RCFJQ= k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +k8s.io/utils v0.0.0-20200821003339-5e75c0163111 h1:AChSIFe1D4vQ5XkklbH491v1ONSmnt8fnb235DsAw1U= +k8s.io/utils v0.0.0-20200821003339-5e75c0163111/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= kubevirt.io/client-go v0.36.0 h1:AT5oxHfGkDZnXlrXnKWFZaULC65zOFT8FGlxfUsQyl0= kubevirt.io/client-go v0.36.0/go.mod h1:ZR+pS7E7iROhKVSGO4y6PCMGy/iEYzMAUEo4Ky0/I6A= kubevirt.io/containerized-data-importer v1.23.1/go.mod h1:2nNKnigVFQAwGq2v5/92+e758nKYacb03uSkHM14mX0= @@ -2008,6 +1778,8 @@ kubevirt.io/kubevirt v0.36.0 h1:yp73WPfBtRotwL7l4WIb7siI72EjNOEC00MPEAazj6I= kubevirt.io/kubevirt v0.36.0/go.mod h1:3/gOZWe65FBRRg7gwbQ0cZuRIVHZNnRmFAdOcYtZ13c= kubevirt.io/qe-tools v0.1.6 h1:S6z9CATmgV2/z9CWetij++Rhu7l/Z4ObZqerLdNMo0Y= kubevirt.io/qe-tools v0.1.6/go.mod h1:PJyH/YXC4W0AmxfheDmXWMbLNsMSboVGXKpMAwfKzVE= +kubevirt.io/ssp-operator v0.0.0-20201204192040-4623f45d34ea h1:OL0Akfn8Oud2qwOyejlTt127L2mfyWS49VQc5vGT7OM= +kubevirt.io/ssp-operator v0.0.0-20201204192040-4623f45d34ea/go.mod h1:dPFo0xpTeTTdqiyX27mYhz4DYt1TL3ZApbA+by26RLs= libvirt.org/libvirt-go v6.5.0+incompatible/go.mod h1:CPoljLoiC2aEw+62g1rZXl2oXAJaNsrq4YCSmJOELek= 
libvirt.org/libvirt-go-xml v6.6.0+incompatible/go.mod h1:FL+H1+hKNWDdkKQGGS4sGCZJ3pGWcjt6VbxZvPlQJkY= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= @@ -2017,15 +1789,12 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= sigs.k8s.io/controller-runtime v0.1.10/go.mod h1:HFAYoOh6XMV+jKF1UjFwrknPbowfyHEHHRdJMf2jMX8= -sigs.k8s.io/controller-runtime v0.2.0/go.mod h1:ZHqrRDZi3f6BzONcvlUxkqCKgwasGk5FZrnSv9TVZF4= sigs.k8s.io/controller-runtime v0.4.0/go.mod h1:ApC79lpY3PHW9xj/w9pj+lYkLgwAAUZwfXkME1Lajns= sigs.k8s.io/controller-runtime v0.6.0/go.mod h1:CpYf5pdNY/B352A1TFLAS2JVSlnGQ5O2cftPHndTroo= sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A= -sigs.k8s.io/controller-runtime v0.6.2 h1:jkAnfdTYBpFwlmBn3pS5HFO06SfxvnTZ1p5PeEF/zAA= sigs.k8s.io/controller-runtime v0.6.2/go.mod h1:vhcq/rlnENJ09SIRp3EveTaZ0yqH526hjf9iJdbUJ/E= sigs.k8s.io/controller-runtime v0.6.3 h1:SBbr+inLPEKhvlJtrvDcwIpm+uhDvp63Bl72xYJtoOE= sigs.k8s.io/controller-runtime v0.6.3/go.mod h1:WlZNXcM0++oyaQt4B7C2lEE5JYRs8vJUzRP4N4JpdAY= -sigs.k8s.io/controller-tools v0.2.4 h1:la1h46EzElvWefWLqfsXrnsO3lZjpkI0asTpX6h8PLA= sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA= sigs.k8s.io/controller-tools v0.3.0/go.mod h1:enhtKGfxZD1GFEoMgP8Fdbu+uKQ/cq1/WGJhdVChfvI= sigs.k8s.io/controller-tools v0.4.0 h1:9zIdrc6q9RKke8+DnVPVBVZ+cfF9L0TwM01cxNnklYo= @@ -2036,7 +1805,6 @@ sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2 h1:9r5DY45e sigs.k8s.io/structured-merge-diff v0.0.0-20190302045857-e85c7b244fd2/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U= sigs.k8s.io/testing_frameworks v0.1.2/go.mod h1:ToQrwSC3s8Xf/lADdZp3Mktcql9CG0UAmdJG9th5i0w= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= diff --git a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go index 9699e85903..e5ae14fbb2 100644 --- a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go +++ b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/register.go @@ -53,6 +53,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ServiceMonitorList{}, &PodMonitor{}, &PodMonitorList{}, + &Probe{}, + &ProbeList{}, &Alertmanager{}, &AlertmanagerList{}, &PrometheusRule{}, diff --git a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go index baf7d3be69..f649cfaa63 100644 --- a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go +++ b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/thanos_types.go @@ -58,7 +58,7 @@ type ThanosRulerList struct { // +k8s:openapi-gen=true type ThanosRulerSpec struct { // PodMetadata 
contains Labels and Annotations gets propagated to the thanos ruler pods. - PodMetadata *PodMeta `json:"podMetadata,omitempty"` + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Thanos container image URL. Image string `json:"image,omitempty"` // An optional list of references to secrets in the same namespace @@ -123,6 +123,10 @@ type ThanosRulerSpec struct { // and metric that is user created. The label value will always be the namespace of the object that is // being created. EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` + // PrometheusRulesExcludedFromEnforce - list of Prometheus rules to be excluded from enforcing + // of adding namespace labels. Works only if enforcedNamespaceLabel set to true. + // Make sure both ruleNamespace and ruleName are set for each pair + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` // Log level for ThanosRuler to be configured with. LogLevel string `json:"logLevel,omitempty"` // Log format for ThanosRuler to be configured with. diff --git a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go index f25a7dc6ed..5f34a33e4f 100644 --- a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go +++ b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/types.go @@ -43,26 +43,11 @@ const ( PrometheusRuleKind = "PrometheusRule" PrometheusRuleName = "prometheusrules" PrometheusRuleKindKey = "prometheusrule" -) -// PodMeta is a subset of k8s.io/apimachinery/pkg/apis/meta/v1/ObjectMeta which only -// includes fields applicable to the generated stateful set pod template of the -// custom resource types. -type PodMeta struct { - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://kubernetes.io/docs/user-guide/labels - // +optional - Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` - - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations - // +optional - Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` -} + ProbesKind = "Probe" + ProbeName = "probes" + ProbeKindKey = "probe" +) // Prometheus defines a Prometheus deployment. // +genclient @@ -99,25 +84,37 @@ type PrometheusList struct { // +k8s:openapi-gen=true type PrometheusSpec struct { // PodMetadata configures Labels and Annotations which are propagated to the prometheus pods. - PodMetadata *PodMeta `json:"podMetadata,omitempty"` - // ServiceMonitors to be selected for target discovery. + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` + // ServiceMonitors to be selected for target discovery. *Deprecated:* if + // neither this nor podMonitorSelector are specified, configuration is + // unmanaged. ServiceMonitorSelector *metav1.LabelSelector `json:"serviceMonitorSelector,omitempty"` // Namespaces to be selected for ServiceMonitor discovery. If nil, only // check own namespace. 
ServiceMonitorNamespaceSelector *metav1.LabelSelector `json:"serviceMonitorNamespaceSelector,omitempty"` // *Experimental* PodMonitors to be selected for target discovery. + // *Deprecated:* if neither this nor serviceMonitorSelector are specified, + // configuration is unmanaged. PodMonitorSelector *metav1.LabelSelector `json:"podMonitorSelector,omitempty"` // Namespaces to be selected for PodMonitor discovery. If nil, only // check own namespace. PodMonitorNamespaceSelector *metav1.LabelSelector `json:"podMonitorNamespaceSelector,omitempty"` + // *Experimental* Probes to be selected for target discovery. + ProbeSelector *metav1.LabelSelector `json:"probeSelector,omitempty"` + // *Experimental* Namespaces to be selected for Probe discovery. If nil, only check own namespace. + ProbeNamespaceSelector *metav1.LabelSelector `json:"probeNamespaceSelector,omitempty"` // Version of Prometheus to be deployed. Version string `json:"version,omitempty"` // Tag of Prometheus container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. + // Deprecated: use 'image' instead. The image tag can be specified + // as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Prometheus container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. + // Deprecated: use 'image' instead. The image digest can be specified + // as part of the image URL. SHA string `json:"sha,omitempty"` // When a Prometheus deployment is paused, no actions except for deletion // will be performed on the underlying objects. @@ -128,6 +125,7 @@ type PrometheusSpec struct { // configured. Image *string `json:"image,omitempty"` // Base image to use for a Prometheus deployment. + // Deprecated: use 'image' instead BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries @@ -159,6 +157,8 @@ type PrometheusSpec struct { LogFormat string `json:"logFormat,omitempty"` // Interval between consecutive scrapes. ScrapeInterval string `json:"scrapeInterval,omitempty"` + // Number of seconds to wait for target to respond before erroring. + ScrapeTimeout string `json:"scrapeTimeout,omitempty"` // Interval between consecutive evaluations. EvaluationInterval string `json:"evaluationInterval,omitempty"` // /--rules.*/ command-line arguments. @@ -321,6 +321,10 @@ type PrometheusSpec struct { // and metric that is user created. The label value will always be the namespace of the object that is // being created. EnforcedNamespaceLabel string `json:"enforcedNamespaceLabel,omitempty"` + // PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing + // of adding namespace labels. Works only if enforcedNamespaceLabel set to true. + // Make sure both ruleNamespace and ruleName are set for each pair + PrometheusRulesExcludedFromEnforce []PrometheusRuleExcludeConfig `json:"prometheusRulesExcludedFromEnforce,omitempty"` // QueryLogFile specifies the file to which PromQL queries are logged. // Note that this location must be writable, and can be persisted using an attached volume. // Alternatively, the location can be set to a stdout location such as `/dev/stdout` to log @@ -328,6 +332,25 @@ type PrometheusSpec struct { // This is only available in versions of Prometheus >= 2.16.0. 
// For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/) QueryLogFile string `json:"queryLogFile,omitempty"` + // EnforcedSampleLimit defines global limit on number of scraped samples + // that will be accepted. This overrides any SampleLimit set per + // ServiceMonitor or/and PodMonitor. It is meant to be used by admins to + // enforce the SampleLimit to keep overall number of samples/series under + // the desired limit. + // Note that if SampleLimit is lower that value will be taken instead. + EnforcedSampleLimit *uint64 `json:"enforcedSampleLimit,omitempty"` + // AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. + // This is still experimental in Prometheus so it may change in any upcoming release. + AllowOverlappingBlocks bool `json:"allowOverlappingBlocks,omitempty"` +} + +// PrometheusRuleExcludeConfig enables users to configure excluded PrometheusRule names and their namespaces +// to be ignored while enforcing namespace label for alerts and metrics. +type PrometheusRuleExcludeConfig struct { + // RuleNamespace - namespace of excluded rule + RuleNamespace string `json:"ruleNamespace"` + // RuleNamespace - name of excluded rule + RuleName string `json:"ruleName"` } // ArbitraryFSAccessThroughSMsConfig enables users to configure, whether @@ -382,7 +405,54 @@ type StorageSpec struct { // info: https://kubernetes.io/docs/concepts/storage/volumes/#emptydir EmptyDir *v1.EmptyDirVolumeSource `json:"emptyDir,omitempty"` // A PVC spec to be used by the Prometheus StatefulSets. - VolumeClaimTemplate v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` + VolumeClaimTemplate EmbeddedPersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"` +} + +// EmbeddedPersistentVolumeClaim is an embedded version of k8s.io/api/core/v1.PersistentVolumeClaim. +// It contains TypeMeta and a reduced ObjectMeta. +type EmbeddedPersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + + // EmbeddedMetadata contains metadata relevant to an EmbeddedResource. + EmbeddedObjectMetadata `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec v1.PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Status v1.PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// EmbeddedObjectMetadata contains a subset of the fields included in k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta +// Only fields which are relevant to embedded resources are included. +type EmbeddedObjectMetadata struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. 
+ // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` } // QuerySpec defines the query command line flags when starting Prometheus. @@ -410,12 +480,17 @@ type ThanosSpec struct { Version *string `json:"version,omitempty"` // Tag of Thanos sidecar container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. + // Deprecated: use 'image' instead. The image tag can be specified + // as part of the image URL. Tag *string `json:"tag,omitempty"` // SHA of Thanos container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. + // Deprecated: use 'image' instead. The image digest can be specified + // as part of the image URL. SHA *string `json:"sha,omitempty"` // Thanos base image if other than default. + // Deprecated: use 'image' instead BaseImage *string `json:"baseImage,omitempty"` // Resources defines the resource requirements for the Thanos sidecar. // If not provided, no requests/limits will be set @@ -436,6 +511,8 @@ type ThanosSpec struct { LogLevel string `json:"logLevel,omitempty"` // LogFormat for Thanos sidecar to be configured with. LogFormat string `json:"logFormat,omitempty"` + // MinTime for Thanos sidecar to be configured with. Option can be a constant time in RFC3339 format or time duration relative to current time, such as -1d or 2h45m. Valid duration units are ms, s, m, h, d, w, y. + MinTime string `json:"minTime,omitempty"` } // RemoteWriteSpec defines the remote_write configuration for prometheus. @@ -710,6 +787,81 @@ type PodMetricsEndpoint struct { ProxyURL *string `json:"proxyUrl,omitempty"` } +// Probe defines monitoring for a set of static targets or ingresses. +// +genclient +// +k8s:openapi-gen=true +type Probe struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + // Specification of desired Ingress selection for target discovery by Prometheus. + Spec ProbeSpec `json:"spec"` +} + +// ProbeSpec contains specification parameters for a Probe. +// +k8s:openapi-gen=true +type ProbeSpec struct { + // The job name assigned to scraped metrics by default. + JobName string `json:"jobName,omitempty"` + // Specification for the prober to use for probing targets. + // The prober.URL parameter is required. Targets cannot be probed if left empty. + ProberSpec ProberSpec `json:"prober,omitempty"` + // The module to use for probing specifying how to probe the target. 
+ // Example module configuring in the blackbox exporter: + // https://github.com/prometheus/blackbox_exporter/blob/master/example.yml + Module string `json:"module,omitempty"` + // Targets defines a set of static and/or dynamically discovered targets to be probed using the prober. + Targets ProbeTargets `json:"targets,omitempty"` + // Interval at which targets are probed using the configured prober. + // If not specified Prometheus' global scrape interval is used. + Interval string `json:"interval,omitempty"` + // Timeout for scraping metrics from the Prometheus exporter. + ScrapeTimeout string `json:"scrapeTimeout,omitempty"` +} + +// ProbeTargets defines a set of static and dynamically discovered targets for the prober. +// +k8s:openapi-gen=true +type ProbeTargets struct { + // StaticConfig defines static targets which are considers for probing. + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#static_config. + StaticConfig *ProbeTargetStaticConfig `json:"staticConfig,omitempty"` + // Ingress defines the set of dynamically discovered ingress objects which hosts are considered for probing. + Ingress *ProbeTargetIngress `json:"ingress,omitempty"` +} + +// ProbeTargetStaticConfig defines the set of static targets considered for probing. +// +k8s:openapi-gen=true +type ProbeTargetStaticConfig struct { + // Targets is a list of URLs to probe using the configured prober. + Targets []string `json:"static,omitempty"` + // Labels assigned to all metrics scraped from the targets. + Labels map[string]string `json:"labels,omitempty"` +} + +// ProbeTargetIngress defines the set of Ingress objects considered for probing. +// +k8s:openapi-gen=true +type ProbeTargetIngress struct { + // Select Ingress objects by labels. + Selector metav1.LabelSelector `json:"selector,omitempty"` + // Select Ingress objects by namespace. + NamespaceSelector NamespaceSelector `json:"namespaceSelector,omitempty"` + // RelabelConfigs to apply to samples before ingestion. + // More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config + RelabelConfigs []*RelabelConfig `json:"relabelingConfigs,omitempty"` +} + +// ProberSpec contains specification parameters for the Prober used for probing. +// +k8s:openapi-gen=true +type ProberSpec struct { + // Mandatory URL of the prober. + URL string `json:"url"` + // HTTP scheme to use for scraping. + // Defaults to `http`. + Scheme string `json:"scheme,omitempty"` + // Path to collect metrics from. + // Defaults to `/probe`. + Path string `json:"path,omitempty"` +} + // BasicAuth allow an endpoint to authenticate over basic authentication // More info: https://prometheus.io/docs/operating/configuration/#endpoints // +k8s:openapi-gen=true @@ -834,6 +986,17 @@ type PodMonitorList struct { Items []*PodMonitor `json:"items"` } +// ProbeList is a list of Probes. +// +k8s:openapi-gen=true +type ProbeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata + metav1.ListMeta `json:"metadata,omitempty"` + // List of Probes + Items []*Probe `json:"items"` +} + // PrometheusRuleList is a list of PrometheusRules. // +k8s:openapi-gen=true type PrometheusRuleList struct { @@ -912,7 +1075,7 @@ type Alertmanager struct { // +k8s:openapi-gen=true type AlertmanagerSpec struct { // PodMetadata configures Labels and Annotations which are propagated to the alertmanager pods. 
- PodMetadata *PodMeta `json:"podMetadata,omitempty"` + PodMetadata *EmbeddedObjectMetadata `json:"podMetadata,omitempty"` // Image if specified has precedence over baseImage, tag and sha // combinations. Specifying the version is still necessary to ensure the // Prometheus Operator knows what version of Alertmanager is being @@ -922,12 +1085,17 @@ type AlertmanagerSpec struct { Version string `json:"version,omitempty"` // Tag of Alertmanager container image to be deployed. Defaults to the value of `version`. // Version is ignored if Tag is set. + // Deprecated: use 'image' instead. The image tag can be specified + // as part of the image URL. Tag string `json:"tag,omitempty"` // SHA of Alertmanager container image to be deployed. Defaults to the value of `version`. // Similar to a tag, but the SHA explicitly deploys an immutable container image. // Version and Tag are ignored if SHA is set. + // Deprecated: use 'image' instead. The image digest can be specified + // as part of the image URL. SHA string `json:"sha,omitempty"` // Base image that is used to deploy pods, without tag. + // Deprecated: use 'image' instead BaseImage string `json:"baseImage,omitempty"` // An optional list of references to secrets in the same namespace // to use for pulling prometheus and alertmanager images from registries @@ -1012,6 +1180,10 @@ type AlertmanagerSpec struct { PriorityClassName string `json:"priorityClassName,omitempty"` // AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. AdditionalPeers []string `json:"additionalPeers,omitempty"` + // ClusterAdvertiseAddress is the explicit address to advertise in cluster. + // Needs to be provided for non RFC1918 [1] (public) addresses. + // [1] RFC1918: https://tools.ietf.org/html/rfc1918 + ClusterAdvertiseAddress string `json:"clusterAdvertiseAddress,omitempty"` // Port name used for the pods and governing service. // This defaults to web PortName string `json:"portName,omitempty"` @@ -1123,6 +1295,16 @@ func (l *PodMonitorList) DeepCopyObject() runtime.Object { return l.DeepCopy() } +// DeepCopyObject implements the runtime.Object interface. +func (l *Probe) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + +// DeepCopyObject implements the runtime.Object interface. +func (l *ProbeList) DeepCopyObject() runtime.Object { + return l.DeepCopy() +} + // DeepCopyObject implements the runtime.Object interface. func (f *PrometheusRule) DeepCopyObject() runtime.Object { return f.DeepCopy() diff --git a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go index 65ad1226f2..83f4d927f9 100644 --- a/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go +++ b/vendor/github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1/zz_generated.deepcopy.go @@ -144,7 +144,7 @@ func (in *AlertmanagerSpec) DeepCopyInto(out *AlertmanagerSpec) { *out = *in if in.PodMetadata != nil { in, out := &in.PodMetadata, &out.PodMetadata - *out = new(PodMeta) + *out = new(EmbeddedObjectMetadata) (*in).DeepCopyInto(*out) } if in.Image != nil { @@ -294,6 +294,54 @@ func (in *BasicAuth) DeepCopy() *BasicAuth { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) { + *out = *in + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedObjectMetadata. +func (in *EmbeddedObjectMetadata) DeepCopy() *EmbeddedObjectMetadata { + if in == nil { + return nil + } + out := new(EmbeddedObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmbeddedPersistentVolumeClaim) DeepCopyInto(out *EmbeddedPersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.EmbeddedObjectMetadata.DeepCopyInto(&out.EmbeddedObjectMetadata) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmbeddedPersistentVolumeClaim. +func (in *EmbeddedPersistentVolumeClaim) DeepCopy() *EmbeddedPersistentVolumeClaim { + if in == nil { + return nil + } + out := new(EmbeddedPersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Endpoint) DeepCopyInto(out *Endpoint) { *out = *in @@ -392,35 +440,6 @@ func (in *NamespaceSelector) DeepCopy() *NamespaceSelector { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodMeta) DeepCopyInto(out *PodMeta) { - *out = *in - if in.Labels != nil { - in, out := &in.Labels, &out.Labels - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Annotations != nil { - in, out := &in.Annotations, &out.Annotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMeta. -func (in *PodMeta) DeepCopy() *PodMeta { - if in == nil { - return nil - } - out := new(PodMeta) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PodMetricsEndpoint) DeepCopyInto(out *PodMetricsEndpoint) { *out = *in @@ -563,6 +582,164 @@ func (in *PodMonitorSpec) DeepCopy() *PodMonitorSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ProbeList) DeepCopyInto(out *ProbeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*Probe, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeList. +func (in *ProbeList) DeepCopy() *ProbeList { + if in == nil { + return nil + } + out := new(ProbeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeSpec) DeepCopyInto(out *ProbeSpec) { + *out = *in + out.ProberSpec = in.ProberSpec + in.Targets.DeepCopyInto(&out.Targets) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeSpec. +func (in *ProbeSpec) DeepCopy() *ProbeSpec { + if in == nil { + return nil + } + out := new(ProbeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargetIngress) DeepCopyInto(out *ProbeTargetIngress) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + in.NamespaceSelector.DeepCopyInto(&out.NamespaceSelector) + if in.RelabelConfigs != nil { + in, out := &in.RelabelConfigs, &out.RelabelConfigs + *out = make([]*RelabelConfig, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(RelabelConfig) + (*in).DeepCopyInto(*out) + } + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetIngress. +func (in *ProbeTargetIngress) DeepCopy() *ProbeTargetIngress { + if in == nil { + return nil + } + out := new(ProbeTargetIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargetStaticConfig) DeepCopyInto(out *ProbeTargetStaticConfig) { + *out = *in + if in.Targets != nil { + in, out := &in.Targets, &out.Targets + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargetStaticConfig. +func (in *ProbeTargetStaticConfig) DeepCopy() *ProbeTargetStaticConfig { + if in == nil { + return nil + } + out := new(ProbeTargetStaticConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProbeTargets) DeepCopyInto(out *ProbeTargets) { + *out = *in + if in.StaticConfig != nil { + in, out := &in.StaticConfig, &out.StaticConfig + *out = new(ProbeTargetStaticConfig) + (*in).DeepCopyInto(*out) + } + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = new(ProbeTargetIngress) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProbeTargets. 
+func (in *ProbeTargets) DeepCopy() *ProbeTargets { + if in == nil { + return nil + } + out := new(ProbeTargets) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProberSpec) DeepCopyInto(out *ProberSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProberSpec. +func (in *ProberSpec) DeepCopy() *ProberSpec { + if in == nil { + return nil + } + out := new(ProberSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Prometheus) DeepCopyInto(out *Prometheus) { *out = *in @@ -632,6 +809,21 @@ func (in *PrometheusRule) DeepCopy() *PrometheusRule { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PrometheusRuleExcludeConfig) DeepCopyInto(out *PrometheusRuleExcludeConfig) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusRuleExcludeConfig. +func (in *PrometheusRuleExcludeConfig) DeepCopy() *PrometheusRuleExcludeConfig { + if in == nil { + return nil + } + out := new(PrometheusRuleExcludeConfig) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *PrometheusRuleList) DeepCopyInto(out *PrometheusRuleList) { *out = *in @@ -687,7 +879,7 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { *out = *in if in.PodMetadata != nil { in, out := &in.PodMetadata, &out.PodMetadata - *out = new(PodMeta) + *out = new(EmbeddedObjectMetadata) (*in).DeepCopyInto(*out) } if in.ServiceMonitorSelector != nil { @@ -710,6 +902,16 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.ProbeSelector != nil { + in, out := &in.ProbeSelector, &out.ProbeSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ProbeNamespaceSelector != nil { + in, out := &in.ProbeNamespaceSelector, &out.ProbeNamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } if in.Image != nil { in, out := &in.Image, &out.Image *out = new(string) @@ -876,6 +1078,16 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) { (*in).DeepCopyInto(*out) } out.ArbitraryFSAccessThroughSMs = in.ArbitraryFSAccessThroughSMs + if in.PrometheusRulesExcludedFromEnforce != nil { + in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce + *out = make([]PrometheusRuleExcludeConfig, len(*in)) + copy(*out, *in) + } + if in.EnforcedSampleLimit != nil { + in, out := &in.EnforcedSampleLimit, &out.EnforcedSampleLimit + *out = new(uint64) + **out = **in + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PrometheusSpec. 
@@ -1359,7 +1571,7 @@ func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) { *out = *in if in.PodMetadata != nil { in, out := &in.PodMetadata, &out.PodMetadata - *out = new(PodMeta) + *out = new(EmbeddedObjectMetadata) (*in).DeepCopyInto(*out) } if in.ImagePullSecrets != nil { @@ -1444,6 +1656,11 @@ func (in *ThanosRulerSpec) DeepCopyInto(out *ThanosRulerSpec) { *out = new(metav1.LabelSelector) (*in).DeepCopyInto(*out) } + if in.PrometheusRulesExcludedFromEnforce != nil { + in, out := &in.PrometheusRulesExcludedFromEnforce, &out.PrometheusRulesExcludedFromEnforce + *out = make([]PrometheusRuleExcludeConfig, len(*in)) + copy(*out, *in) + } if in.Containers != nil { in, out := &in.Containers, &out.Containers *out = make([]corev1.Container, len(*in)) diff --git a/vendor/github.com/go-openapi/swag/.travis.yml b/vendor/github.com/go-openapi/swag/.travis.yml index aa26d8763a..f1a3f80b35 100644 --- a/vendor/github.com/go-openapi/swag/.travis.yml +++ b/vendor/github.com/go-openapi/swag/.travis.yml @@ -1,12 +1,10 @@ after_success: - bash <(curl -s https://codecov.io/bash) go: -- 1.11.x -- 1.12.x +- 1.13.x +- 1.14.x install: - GO111MODULE=off go get -u gotest.tools/gotestsum -env: -- GO111MODULE=on language: go notifications: slack: diff --git a/vendor/github.com/go-openapi/swag/convert.go b/vendor/github.com/go-openapi/swag/convert.go index 7da35c316e..fc085aeb8e 100644 --- a/vendor/github.com/go-openapi/swag/convert.go +++ b/vendor/github.com/go-openapi/swag/convert.go @@ -88,7 +88,7 @@ func ConvertFloat64(str string) (float64, error) { return strconv.ParseFloat(str, 64) } -// ConvertInt8 turn a string into int8 boolean +// ConvertInt8 turn a string into an int8 func ConvertInt8(str string) (int8, error) { i, err := strconv.ParseInt(str, 10, 8) if err != nil { @@ -97,7 +97,7 @@ func ConvertInt8(str string) (int8, error) { return int8(i), nil } -// ConvertInt16 turn a string into a int16 +// ConvertInt16 turn a string into an int16 func ConvertInt16(str string) (int16, error) { i, err := strconv.ParseInt(str, 10, 16) if err != nil { @@ -106,7 +106,7 @@ func ConvertInt16(str string) (int16, error) { return int16(i), nil } -// ConvertInt32 turn a string into a int32 +// ConvertInt32 turn a string into an int32 func ConvertInt32(str string) (int32, error) { i, err := strconv.ParseInt(str, 10, 32) if err != nil { @@ -115,12 +115,12 @@ func ConvertInt32(str string) (int32, error) { return int32(i), nil } -// ConvertInt64 turn a string into a int64 +// ConvertInt64 turn a string into an int64 func ConvertInt64(str string) (int64, error) { return strconv.ParseInt(str, 10, 64) } -// ConvertUint8 turn a string into a uint8 +// ConvertUint8 turn a string into an uint8 func ConvertUint8(str string) (uint8, error) { i, err := strconv.ParseUint(str, 10, 8) if err != nil { @@ -129,7 +129,7 @@ func ConvertUint8(str string) (uint8, error) { return uint8(i), nil } -// ConvertUint16 turn a string into a uint16 +// ConvertUint16 turn a string into an uint16 func ConvertUint16(str string) (uint16, error) { i, err := strconv.ParseUint(str, 10, 16) if err != nil { @@ -138,7 +138,7 @@ func ConvertUint16(str string) (uint16, error) { return uint16(i), nil } -// ConvertUint32 turn a string into a uint32 +// ConvertUint32 turn a string into an uint32 func ConvertUint32(str string) (uint32, error) { i, err := strconv.ParseUint(str, 10, 32) if err != nil { @@ -147,7 +147,7 @@ func ConvertUint32(str string) (uint32, error) { return uint32(i), nil } -// ConvertUint64 turn a string into a uint64 
+// ConvertUint64 turn a string into an uint64 func ConvertUint64(str string) (uint64, error) { return strconv.ParseUint(str, 10, 64) } diff --git a/vendor/github.com/go-openapi/swag/convert_types.go b/vendor/github.com/go-openapi/swag/convert_types.go index c95e4e78bd..c49cc473a8 100644 --- a/vendor/github.com/go-openapi/swag/convert_types.go +++ b/vendor/github.com/go-openapi/swag/convert_types.go @@ -181,12 +181,12 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } -// Int32 returns a pointer to of the int64 value passed in. +// Int32 returns a pointer to of the int32 value passed in. func Int32(v int32) *int32 { return &v } -// Int32Value returns the value of the int64 pointer passed in or +// Int32Value returns the value of the int32 pointer passed in or // 0 if the pointer is nil. func Int32Value(v *int32) int32 { if v != nil { @@ -195,7 +195,7 @@ func Int32Value(v *int32) int32 { return 0 } -// Int32Slice converts a slice of int64 values into a slice of +// Int32Slice converts a slice of int32 values into a slice of // int32 pointers func Int32Slice(src []int32) []*int32 { dst := make([]*int32, len(src)) @@ -299,13 +299,80 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } -// Uint returns a pouinter to of the uint value passed in. +// Uint16 returns a pointer to of the uint16 value passed in. +func Uint16(v uint16) *uint16 { + return &v +} + +// Uint16Value returns the value of the uint16 pointer passed in or +// 0 if the pointer is nil. +func Uint16Value(v *uint16) uint16 { + if v != nil { + return *v + } + + return 0 +} + +// Uint16Slice converts a slice of uint16 values into a slice of +// uint16 pointers +func Uint16Slice(src []uint16) []*uint16 { + dst := make([]*uint16, len(src)) + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Uint16ValueSlice converts a slice of uint16 pointers into a slice of +// uint16 values +func Uint16ValueSlice(src []*uint16) []uint16 { + dst := make([]uint16, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Uint16Map converts a string map of uint16 values into a string +// map of uint16 pointers +func Uint16Map(src map[string]uint16) map[string]*uint16 { + dst := make(map[string]*uint16) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Uint16ValueMap converts a string map of uint16 pointers into a string +// map of uint16 values +func Uint16ValueMap(src map[string]*uint16) map[string]uint16 { + dst := make(map[string]uint16) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + +// Uint returns a pointer to of the uint value passed in. func Uint(v uint) *uint { return &v } -// UintValue returns the value of the uint pouinter passed in or -// 0 if the pouinter is nil. +// UintValue returns the value of the uint pointer passed in or +// 0 if the pointer is nil. 
func UintValue(v *uint) uint { if v != nil { return *v @@ -313,8 +380,8 @@ func UintValue(v *uint) uint { return 0 } -// UintSlice converts a slice of uint values uinto a slice of -// uint pouinters +// UintSlice converts a slice of uint values into a slice of +// uint pointers func UintSlice(src []uint) []*uint { dst := make([]*uint, len(src)) for i := 0; i < len(src); i++ { @@ -323,7 +390,7 @@ func UintSlice(src []uint) []*uint { return dst } -// UintValueSlice converts a slice of uint pouinters uinto a slice of +// UintValueSlice converts a slice of uint pointers into a slice of // uint values func UintValueSlice(src []*uint) []uint { dst := make([]uint, len(src)) @@ -335,8 +402,8 @@ func UintValueSlice(src []*uint) []uint { return dst } -// UintMap converts a string map of uint values uinto a string -// map of uint pouinters +// UintMap converts a string map of uint values into a string +// map of uint pointers func UintMap(src map[string]uint) map[string]*uint { dst := make(map[string]*uint) for k, val := range src { @@ -346,7 +413,7 @@ func UintMap(src map[string]uint) map[string]*uint { return dst } -// UintValueMap converts a string map of uint pouinters uinto a string +// UintValueMap converts a string map of uint pointers into a string // map of uint values func UintValueMap(src map[string]*uint) map[string]uint { dst := make(map[string]uint) @@ -358,13 +425,13 @@ func UintValueMap(src map[string]*uint) map[string]uint { return dst } -// Uint32 returns a pouinter to of the uint64 value passed in. +// Uint32 returns a pointer to of the uint32 value passed in. func Uint32(v uint32) *uint32 { return &v } -// Uint32Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. +// Uint32Value returns the value of the uint32 pointer passed in or +// 0 if the pointer is nil. func Uint32Value(v *uint32) uint32 { if v != nil { return *v @@ -372,8 +439,8 @@ func Uint32Value(v *uint32) uint32 { return 0 } -// Uint32Slice converts a slice of uint64 values uinto a slice of -// uint32 pouinters +// Uint32Slice converts a slice of uint32 values into a slice of +// uint32 pointers func Uint32Slice(src []uint32) []*uint32 { dst := make([]*uint32, len(src)) for i := 0; i < len(src); i++ { @@ -382,7 +449,7 @@ func Uint32Slice(src []uint32) []*uint32 { return dst } -// Uint32ValueSlice converts a slice of uint32 pouinters uinto a slice of +// Uint32ValueSlice converts a slice of uint32 pointers into a slice of // uint32 values func Uint32ValueSlice(src []*uint32) []uint32 { dst := make([]uint32, len(src)) @@ -394,8 +461,8 @@ func Uint32ValueSlice(src []*uint32) []uint32 { return dst } -// Uint32Map converts a string map of uint32 values uinto a string -// map of uint32 pouinters +// Uint32Map converts a string map of uint32 values into a string +// map of uint32 pointers func Uint32Map(src map[string]uint32) map[string]*uint32 { dst := make(map[string]*uint32) for k, val := range src { @@ -405,7 +472,7 @@ func Uint32Map(src map[string]uint32) map[string]*uint32 { return dst } -// Uint32ValueMap converts a string map of uint32 pouinters uinto a string +// Uint32ValueMap converts a string map of uint32 pointers into a string // map of uint32 values func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { dst := make(map[string]uint32) @@ -417,13 +484,13 @@ func Uint32ValueMap(src map[string]*uint32) map[string]uint32 { return dst } -// Uint64 returns a pouinter to of the uint64 value passed in. +// Uint64 returns a pointer to of the uint64 value passed in. 
func Uint64(v uint64) *uint64 { return &v } -// Uint64Value returns the value of the uint64 pouinter passed in or -// 0 if the pouinter is nil. +// Uint64Value returns the value of the uint64 pointer passed in or +// 0 if the pointer is nil. func Uint64Value(v *uint64) uint64 { if v != nil { return *v @@ -431,8 +498,8 @@ func Uint64Value(v *uint64) uint64 { return 0 } -// Uint64Slice converts a slice of uint64 values uinto a slice of -// uint64 pouinters +// Uint64Slice converts a slice of uint64 values into a slice of +// uint64 pointers func Uint64Slice(src []uint64) []*uint64 { dst := make([]*uint64, len(src)) for i := 0; i < len(src); i++ { @@ -441,7 +508,7 @@ func Uint64Slice(src []uint64) []*uint64 { return dst } -// Uint64ValueSlice converts a slice of uint64 pouinters uinto a slice of +// Uint64ValueSlice converts a slice of uint64 pointers into a slice of // uint64 values func Uint64ValueSlice(src []*uint64) []uint64 { dst := make([]uint64, len(src)) @@ -453,8 +520,8 @@ func Uint64ValueSlice(src []*uint64) []uint64 { return dst } -// Uint64Map converts a string map of uint64 values uinto a string -// map of uint64 pouinters +// Uint64Map converts a string map of uint64 values into a string +// map of uint64 pointers func Uint64Map(src map[string]uint64) map[string]*uint64 { dst := make(map[string]*uint64) for k, val := range src { @@ -464,7 +531,7 @@ func Uint64Map(src map[string]uint64) map[string]*uint64 { return dst } -// Uint64ValueMap converts a string map of uint64 pouinters uinto a string +// Uint64ValueMap converts a string map of uint64 pointers into a string // map of uint64 values func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { dst := make(map[string]uint64) @@ -476,6 +543,74 @@ func Uint64ValueMap(src map[string]*uint64) map[string]uint64 { return dst } +// Float32 returns a pointer to of the float32 value passed in. +func Float32(v float32) *float32 { + return &v +} + +// Float32Value returns the value of the float32 pointer passed in or +// 0 if the pointer is nil. +func Float32Value(v *float32) float32 { + if v != nil { + return *v + } + + return 0 +} + +// Float32Slice converts a slice of float32 values into a slice of +// float32 pointers +func Float32Slice(src []float32) []*float32 { + dst := make([]*float32, len(src)) + + for i := 0; i < len(src); i++ { + dst[i] = &(src[i]) + } + + return dst +} + +// Float32ValueSlice converts a slice of float32 pointers into a slice of +// float32 values +func Float32ValueSlice(src []*float32) []float32 { + dst := make([]float32, len(src)) + + for i := 0; i < len(src); i++ { + if src[i] != nil { + dst[i] = *(src[i]) + } + } + + return dst +} + +// Float32Map converts a string map of float32 values into a string +// map of float32 pointers +func Float32Map(src map[string]float32) map[string]*float32 { + dst := make(map[string]*float32) + + for k, val := range src { + v := val + dst[k] = &v + } + + return dst +} + +// Float32ValueMap converts a string map of float32 pointers into a string +// map of float32 values +func Float32ValueMap(src map[string]*float32) map[string]float32 { + dst := make(map[string]float32) + + for k, val := range src { + if val != nil { + dst[k] = *val + } + } + + return dst +} + // Float64 returns a pointer to of the float64 value passed in. 
func Float64(v float64) *float64 { return &v diff --git a/vendor/github.com/go-openapi/swag/go.mod b/vendor/github.com/go-openapi/swag/go.mod index 15bbb08222..4aef463e42 100644 --- a/vendor/github.com/go-openapi/swag/go.mod +++ b/vendor/github.com/go-openapi/swag/go.mod @@ -6,9 +6,11 @@ require ( github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 github.com/stretchr/testify v1.3.0 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 v2.2.4 ) replace github.com/golang/lint => golang.org/x/lint v0.0.0-20190409202823-959b441ac422 replace sourcegraph.com/sourcegraph/go-diff => github.com/sourcegraph/go-diff v0.5.1 + +go 1.13 diff --git a/vendor/github.com/go-openapi/swag/go.sum b/vendor/github.com/go-openapi/swag/go.sum index 33469f54ac..e8a80bacf0 100644 --- a/vendor/github.com/go-openapi/swag/go.sum +++ b/vendor/github.com/go-openapi/swag/go.sum @@ -16,5 +16,5 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index edf93d84c6..7e9902ca31 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -51,7 +51,7 @@ type ejUnmarshaler interface { UnmarshalEasyJSON(w *jlexer.Lexer) } -// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaller +// WriteJSON writes json data, prefers finding an appropriate interface to short-circuit the marshaler // so it takes the fastest option available. func WriteJSON(data interface{}) ([]byte, error) { if d, ok := data.(ejMarshaler); ok { @@ -65,8 +65,8 @@ func WriteJSON(data interface{}) ([]byte, error) { return json.Marshal(data) } -// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaller -// so it takes the fastes option available +// ReadJSON reads json data, prefers finding an appropriate interface to short-circuit the unmarshaler +// so it takes the fastest option available func ReadJSON(data []byte, value interface{}) error { trimmedData := bytes.Trim(data, "\x00") if d, ok := value.(ejUnmarshaler); ok { @@ -189,7 +189,7 @@ func FromDynamicJSON(data, target interface{}) error { return json.Unmarshal(b, target) } -// NameProvider represents an object capabale of translating from go property names +// NameProvider represents an object capable of translating from go property names // to json property names // This type is thread-safe. 
type NameProvider struct { diff --git a/vendor/github.com/go-openapi/swag/loading.go b/vendor/github.com/go-openapi/swag/loading.go index 70f4fb361c..04160b89ba 100644 --- a/vendor/github.com/go-openapi/swag/loading.go +++ b/vendor/github.com/go-openapi/swag/loading.go @@ -27,6 +27,15 @@ import ( // LoadHTTPTimeout the default timeout for load requests var LoadHTTPTimeout = 30 * time.Second +// LoadHTTPBasicAuthUsername the username to use when load requests require basic auth +var LoadHTTPBasicAuthUsername = "" + +// LoadHTTPBasicAuthPassword the password to use when load requests require basic auth +var LoadHTTPBasicAuthPassword = "" + +// LoadHTTPCustomHeaders an optional collection of custom HTTP headers for load requests +var LoadHTTPCustomHeaders = map[string]string{} + // LoadFromFileOrHTTP loads the bytes from a file or a remote http server based on the path passed in func LoadFromFileOrHTTP(path string) ([]byte, error) { return LoadStrategy(path, ioutil.ReadFile, loadHTTPBytes(LoadHTTPTimeout))(path) @@ -59,6 +68,15 @@ func loadHTTPBytes(timeout time.Duration) func(path string) ([]byte, error) { if err != nil { return nil, err } + + if LoadHTTPBasicAuthUsername != "" && LoadHTTPBasicAuthPassword != "" { + req.SetBasicAuth(LoadHTTPBasicAuthUsername, LoadHTTPBasicAuthPassword) + } + + for key, val := range LoadHTTPCustomHeaders { + req.Header.Set(key, val) + } + resp, err := client.Do(req) defer func() { if resp != nil { diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/addtoscheme_kubevirt_v1.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/addtoscheme_kubevirt_v1.go deleted file mode 100644 index 48cd053df8..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/addtoscheme_kubevirt_v1.go +++ /dev/null @@ -1,10 +0,0 @@ -package apis - -import ( - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" -) - -func init() { - // Register the types with the Scheme so the components can map objects to GroupVersionKinds and back - AddToSchemes = append(AddToSchemes, v1.SchemeBuilder.AddToScheme) -} diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/apis.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/apis.go deleted file mode 100644 index 07dc961644..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/apis.go +++ /dev/null @@ -1,13 +0,0 @@ -package apis - -import ( - "k8s.io/apimachinery/pkg/runtime" -) - -// AddToSchemes may be used to add all resources defined in the project to a Scheme -var AddToSchemes runtime.SchemeBuilder - -// AddToScheme adds all Resources to the Scheme -func AddToScheme(s *runtime.Scheme) error { - return AddToSchemes.AddToScheme(s) -} diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/doc.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/doc.go deleted file mode 100644 index 41c6ef9ebe..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -// +k8s:deepcopy-gen=package,register -// +k8s:defaulter-gen=TypeMeta -// +groupName=ssp.kubevirt.io -package v1 diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/register.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/register.go deleted file mode 100644 index 2ef740bb2c..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/register.go 
+++ /dev/null @@ -1,17 +0,0 @@ -// Package v1 contains API Schema definitions for the kubevirt v1 API group -// +k8s:deepcopy-gen=package,register -// +groupName=ssp.kubevirt.io -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" - "sigs.k8s.io/controller-runtime/pkg/runtime/scheme" -) - -var ( - // SchemeGroupVersion is group version used to register these objects - SchemeGroupVersion = schema.GroupVersion{Group: "ssp.kubevirt.io", Version: "v1"} - - // SchemeBuilder is used to add go types to the GroupVersionKind scheme - SchemeBuilder = &scheme.Builder{GroupVersion: SchemeGroupVersion} -) diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/types.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/types.go deleted file mode 100644 index 8bb93f4270..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/types.go +++ /dev/null @@ -1,193 +0,0 @@ -package v1 - -import ( - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// ConfigStatus defines the observed state of Config -type ConfigStatus struct { - // The version of the deployed operator - OperatorVersion string `json:"operatorVersion,omitempty"` - - // The version of the deployed operands - ObservedVersion string `json:"observedVersion,omitempty"` - - // The desired version of the deployed operands - TargetVersion string `json:"targetVersion,omitempty"` - - // Reported states of the controller - Conditions []conditionsv1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` - - // Containers used in the current deployment - Containers []Container `json:"containers,omitempty"` -} - -// Defines a container -type Container struct { - // Container namespace - Namespace string `json:"namespace"` - - // Parent kind - ParentKind string `json:"parentKind"` - - // Parent image - ParentName string `json:"parentName"` - - // Container name - Name string `json:"name"` - - // Image path - Image string `json:"image"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kvct -// KubevirtCommonTemplatesBundle defines the CommonTemplates CR -type KubevirtCommonTemplatesBundle struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec contains the configuration of Common Templates - Spec VersionSpec `json:"spec,omitempty"` - - // Status holds the current status of Common Templates - Status ConfigStatus `json:"status,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kvnl -// KubevirtNodeLabellerBundle defines the NodeLabeller CR -type KubevirtNodeLabellerBundle struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec contains the configuration of NodeLabeller - Spec ComponentSpec `json:"spec,omitempty"` - - // Status holds the current status of NodeLabeller - Status ConfigStatus `json:"status,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kvtv -// KubevirtTemplateValidator defines the 
TemplateValidator CR -type KubevirtTemplateValidator struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec contains the configuration of TemplateValidator - Spec TemplateValidatorSpec `json:"spec,omitempty"` - - // Status holds the current status of TemplateValidator - Status ConfigStatus `json:"status,omitempty"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:openapi-gen=true -// +kubebuilder:subresource:status -// +kubebuilder:resource:shortName=kvma -// KubevirtMetricsAggregation defines the MetricsAggregation CR -type KubevirtMetricsAggregation struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty"` - - // Spec contains the configuration of MetricsAggregation - Spec VersionSpec `json:"spec,omitempty"` - - // Status holds the current status of MetricsAggregation - Status ConfigStatus `json:"status,omitempty"` -} - -// Defines the version of the operand -type VersionSpec struct { - // Defines the version of the operand - Version string `json:"version,omitempty"` -} - -// Defines the configuration of the NodeLabeller -type ComponentSpec struct { - // Defines the version of the NodeLabeller - Version string `json:"version,omitempty"` - - // Define the node affinity for NodeLabeller pods - Affinity v1.Affinity `json:"affinity,omitempty"` - - // Define node selector labels for NodeLabeller pods - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // Define tolerations for NodeLabeller pods - Tolerations []v1.Toleration `json:"tolerations,omitempty"` -} - -// Defines the configuration of Template Validator -type TemplateValidatorSpec struct { - // Defines the version of TemplateValidaotr - Version string `json:"version,omitempty"` - - // Defines the desired number of replicas for TemplateValidator - TemplateValidatorReplicas int `json:"templateValidatorReplicas,omitempty"` - - // Define the node affinity for TemplateValidator pods - Affinity v1.Affinity `json:"affinity,omitempty"` - - // Define node selector labels for TemplateValidator - NodeSelector map[string]string `json:"nodeSelector,omitempty"` - - // Define tolerations for TemplateValidator - Tolerations []v1.Toleration `json:"tolerations,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type KubevirtCommonTemplatesBundleList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []KubevirtCommonTemplatesBundle `json:"items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type KubevirtNodeLabellerBundleList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []KubevirtNodeLabellerBundle `json:"items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type KubevirtTemplateValidatorList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []KubevirtTemplateValidator `json:"items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type KubevirtMetricsAggregationList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty"` - - Items []KubevirtMetricsAggregation `json:"items"` -} - -func init() { - SchemeBuilder.Register(&KubevirtCommonTemplatesBundle{}, &KubevirtCommonTemplatesBundleList{}) - 
SchemeBuilder.Register(&KubevirtNodeLabellerBundle{}, &KubevirtNodeLabellerBundleList{}) - SchemeBuilder.Register(&KubevirtTemplateValidator{}, &KubevirtTemplateValidatorList{}) - SchemeBuilder.Register(&KubevirtMetricsAggregation{}, &KubevirtMetricsAggregationList{}) -} diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.deepcopy.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.deepcopy.go deleted file mode 100644 index 6912d026b5..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,362 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentSpec) DeepCopyInto(out *ComponentSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentSpec. -func (in *ComponentSpec) DeepCopy() *ComponentSpec { - if in == nil { - return nil - } - out := new(ComponentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigStatus) DeepCopyInto(out *ConfigStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]conditionsv1.Condition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigStatus. -func (in *ConfigStatus) DeepCopy() *ConfigStatus { - if in == nil { - return nil - } - out := new(ConfigStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Container) DeepCopyInto(out *Container) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. -func (in *Container) DeepCopy() *Container { - if in == nil { - return nil - } - out := new(Container) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubevirtCommonTemplatesBundle) DeepCopyInto(out *KubevirtCommonTemplatesBundle) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCommonTemplatesBundle. -func (in *KubevirtCommonTemplatesBundle) DeepCopy() *KubevirtCommonTemplatesBundle { - if in == nil { - return nil - } - out := new(KubevirtCommonTemplatesBundle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtCommonTemplatesBundle) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtCommonTemplatesBundleList) DeepCopyInto(out *KubevirtCommonTemplatesBundleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubevirtCommonTemplatesBundle, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtCommonTemplatesBundleList. -func (in *KubevirtCommonTemplatesBundleList) DeepCopy() *KubevirtCommonTemplatesBundleList { - if in == nil { - return nil - } - out := new(KubevirtCommonTemplatesBundleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtCommonTemplatesBundleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtMetricsAggregation) DeepCopyInto(out *KubevirtMetricsAggregation) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtMetricsAggregation. -func (in *KubevirtMetricsAggregation) DeepCopy() *KubevirtMetricsAggregation { - if in == nil { - return nil - } - out := new(KubevirtMetricsAggregation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtMetricsAggregation) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtMetricsAggregationList) DeepCopyInto(out *KubevirtMetricsAggregationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubevirtMetricsAggregation, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtMetricsAggregationList. 
-func (in *KubevirtMetricsAggregationList) DeepCopy() *KubevirtMetricsAggregationList { - if in == nil { - return nil - } - out := new(KubevirtMetricsAggregationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtMetricsAggregationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtNodeLabellerBundle) DeepCopyInto(out *KubevirtNodeLabellerBundle) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodeLabellerBundle. -func (in *KubevirtNodeLabellerBundle) DeepCopy() *KubevirtNodeLabellerBundle { - if in == nil { - return nil - } - out := new(KubevirtNodeLabellerBundle) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtNodeLabellerBundle) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtNodeLabellerBundleList) DeepCopyInto(out *KubevirtNodeLabellerBundleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubevirtNodeLabellerBundle, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtNodeLabellerBundleList. -func (in *KubevirtNodeLabellerBundleList) DeepCopy() *KubevirtNodeLabellerBundleList { - if in == nil { - return nil - } - out := new(KubevirtNodeLabellerBundleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtNodeLabellerBundleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubevirtTemplateValidator) DeepCopyInto(out *KubevirtTemplateValidator) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtTemplateValidator. -func (in *KubevirtTemplateValidator) DeepCopy() *KubevirtTemplateValidator { - if in == nil { - return nil - } - out := new(KubevirtTemplateValidator) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtTemplateValidator) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubevirtTemplateValidatorList) DeepCopyInto(out *KubevirtTemplateValidatorList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KubevirtTemplateValidator, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubevirtTemplateValidatorList. -func (in *KubevirtTemplateValidatorList) DeepCopy() *KubevirtTemplateValidatorList { - if in == nil { - return nil - } - out := new(KubevirtTemplateValidatorList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubevirtTemplateValidatorList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TemplateValidatorSpec) DeepCopyInto(out *TemplateValidatorSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateValidatorSpec. -func (in *TemplateValidatorSpec) DeepCopy() *TemplateValidatorSpec { - if in == nil { - return nil - } - out := new(TemplateValidatorSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VersionSpec) DeepCopyInto(out *VersionSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionSpec. -func (in *VersionSpec) DeepCopy() *VersionSpec { - if in == nil { - return nil - } - out := new(VersionSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.openapi.go b/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.openapi.go deleted file mode 100644 index f32badffdf..0000000000 --- a/vendor/github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1/zz_generated.openapi.go +++ /dev/null @@ -1,191 +0,0 @@ -// +build !ignore_autogenerated - -// This file was autogenerated by openapi-gen. Do not edit it manually! 
- -package v1 - -import ( - spec "github.com/go-openapi/spec" - common "k8s.io/kube-openapi/pkg/common" -) - -func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { - return map[string]common.OpenAPIDefinition{ - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.KubevirtCommonTemplatesBundle": schema_pkg_apis_kubevirt_v1_KubevirtCommonTemplatesBundle(ref), - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.KubevirtMetricsAggregation": schema_pkg_apis_kubevirt_v1_KubevirtMetricsAggregation(ref), - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.KubevirtNodeLabellerBundle": schema_pkg_apis_kubevirt_v1_KubevirtNodeLabellerBundle(ref), - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.KubevirtTemplateValidator": schema_pkg_apis_kubevirt_v1_KubevirtTemplateValidator(ref), - } -} - -func schema_pkg_apis_kubevirt_v1_KubevirtCommonTemplatesBundle(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.VersionSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus", "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.VersionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_kubevirt_v1_KubevirtMetricsAggregation(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.VersionSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus", "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.VersionSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_kubevirt_v1_KubevirtNodeLabellerBundle(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ComponentSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ComponentSpec", "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_pkg_apis_kubevirt_v1_KubevirtTemplateValidator(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.TemplateValidatorSpec"), - }, - }, - "status": { - SchemaProps: spec.SchemaProps{ - Ref: ref("github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.ConfigStatus", "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1.TemplateValidatorSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} diff --git a/vendor/github.com/onsi/gomega/.travis.yml b/vendor/github.com/onsi/gomega/.travis.yml index e250be4ac2..348e3014c6 100644 --- a/vendor/github.com/onsi/gomega/.travis.yml +++ b/vendor/github.com/onsi/gomega/.travis.yml @@ -1,4 +1,7 @@ language: go +arch: + - amd64 + - ppc64le go: - 1.14.x diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index 9409533942..0f1765d84d 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,10 @@ +## 1.10.4 + +### Fixes +- update golang net library to more recent version without vulnerability (#406) [817a8b9] +- Correct spelling: alloted -> allotted (#403) [0bae715] +- fix a panic in MessageWithDiff with long message (#402) [ea06b9b] + ## 1.10.3 ### Fixes diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index fae25adceb..e59d7d75b6 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -105,7 +105,13 @@ func MessageWithDiff(actual, message, expected string) string { tabLength := 4 spaceFromMessageToActual := tabLength + len(": ") - len(message) - padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + + paddingCount := spaceFromMessageToActual + spacesBeforeFormattedMismatch + if paddingCount < 0 { + return Message(formattedActual, message, formattedExpected) + } + + padding := strings.Repeat(" ", paddingCount) + "|" return Message(formattedActual, message+padding, formattedExpected) } diff --git a/vendor/github.com/onsi/gomega/go.mod b/vendor/github.com/onsi/gomega/go.mod index 0a80d5ec3d..02b99ab604 100644 --- a/vendor/github.com/onsi/gomega/go.mod +++ b/vendor/github.com/onsi/gomega/go.mod @@ -5,7 +5,7 @@ go 1.14 require ( github.com/golang/protobuf v1.4.2 github.com/onsi/ginkgo v1.12.1 - golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 + golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 gopkg.in/yaml.v2 v2.3.0 ) diff --git a/vendor/github.com/onsi/gomega/go.sum b/vendor/github.com/onsi/gomega/go.sum index c54e9b88e9..fc230153bf 100644 --- a/vendor/github.com/onsi/gomega/go.sum +++ b/vendor/github.com/onsi/gomega/go.sum @@ -31,6 +31,8 @@ golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7 h1:AeiKBIuRw3UomYXSbLy0Mc2dD golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0 h1:wBouT66WTYFXdxfVdz9sVWARVd/2vfGcmI45D2gj45M= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= +golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= @@ -42,6 +44,7 @@ golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e h1:N7DeIrjYszNmSW409R3frPPwg golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 4af1a8c013..a8529f1ca6 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.10.3" +const GOMEGA_VERSION = "1.10.4" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/vendor/github.com/operator-framework/api/LICENSE b/vendor/github.com/operator-framework/api/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/github.com/operator-framework/api/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go index d1b403abed..0c251cd492 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/clusterserviceversion_types.go @@ -12,6 +12,7 @@ import ( rbac "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/util/intstr" "github.com/operator-framework/api/pkg/lib/version" ) @@ -161,16 +162,22 @@ const ( ValidatingAdmissionWebhook WebhookAdmissionType = "ValidatingAdmissionWebhook" // MutatingAdmissionWebhook is for mutating admission webhooks MutatingAdmissionWebhook WebhookAdmissionType = "MutatingAdmissionWebhook" + // ConversionWebhook is for conversion webhooks + ConversionWebhook WebhookAdmissionType = "ConversionWebhook" ) // WebhookDescription provides details to OLM about required webhooks // +k8s:openapi-gen=true type WebhookDescription struct { GenerateName string `json:"generateName"` - // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook - Type WebhookAdmissionType `json:"type"` - DeploymentName string `json:"deploymentName,omitempty"` + // +kubebuilder:validation:Enum=ValidatingAdmissionWebhook;MutatingAdmissionWebhook;ConversionWebhook + Type WebhookAdmissionType `json:"type"` + DeploymentName string `json:"deploymentName,omitempty"` + // +kubebuilder:validation:Maximum=65535 + // +kubebuilder:validation:Minimum=1 + // +kubebuilder:default=443 ContainerPort int32 `json:"containerPort,omitempty"` + TargetPort *intstr.IntOrString `json:"targetPort,omitempty"` Rules []admissionregistrationv1.RuleWithOperations `json:"rules,omitempty"` FailurePolicy *admissionregistrationv1.FailurePolicyType `json:"failurePolicy,omitempty"` MatchPolicy *admissionregistrationv1.MatchPolicyType `json:"matchPolicy,omitempty"` diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go index 5c86f797b5..d739333d89 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/installplan_types.go @@ -262,6 +262,9 @@ type BundleLookup struct { // Conditions represents the overall state of a BundleLookup. // +optional Conditions []BundleLookupCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type"` + // The effective properties of the unpacked bundle. + // +optional + Properties string `json:"properties,omitempty"` } // GetCondition returns the BundleLookupCondition of the given type if it exists in the BundleLookup's Conditions. 
diff --git a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go index a177222951..e25e2bde1f 100644 --- a/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/github.com/operator-framework/api/pkg/operators/v1alpha1/zz_generated.deepcopy.go @@ -27,6 +27,7 @@ import ( "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -1362,6 +1363,11 @@ func (in *UpdateStrategy) DeepCopy() *UpdateStrategy { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookDescription) DeepCopyInto(out *WebhookDescription) { *out = *in + if in.TargetPort != nil { + in, out := &in.TargetPort, &out.TargetPort + *out = new(intstr.IntOrString) + **out = **in + } if in.Rules != nil { in, out := &in.Rules, &out.Rules *out = make([]admissionregistrationv1.RuleWithOperations, len(*in)) diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 8b129b7941..7688d72c39 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2632,7 +2632,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body func (s bodyWriterState) cancel() { if s.timer != nil { - s.timer.Stop() + if s.timer.Stop() { + s.resc <- nil + } } } diff --git a/vendor/k8s.io/utils/pointer/pointer.go b/vendor/k8s.io/utils/pointer/pointer.go index 5365a11365..0a55a844ee 100644 --- a/vendor/k8s.io/utils/pointer/pointer.go +++ b/vendor/k8s.io/utils/pointer/pointer.go @@ -51,14 +51,23 @@ func Int32Ptr(i int32) *int32 { return &i } +// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil, +// else returns def. +func Int32PtrDerefOr(ptr *int32, def int32) int32 { + if ptr != nil { + return *ptr + } + return def +} + // Int64Ptr returns a pointer to an int64 func Int64Ptr(i int64) *int64 { return &i } -// Int32PtrDerefOr dereference the int32 ptr and returns it if not nil, +// Int64PtrDerefOr dereference the int64 ptr and returns it if not nil, // else returns def. -func Int32PtrDerefOr(ptr *int32, def int32) int32 { +func Int64PtrDerefOr(ptr *int64, def int64) int64 { if ptr != nil { return *ptr } @@ -70,17 +79,53 @@ func BoolPtr(b bool) *bool { return &b } +// BoolPtrDerefOr dereference the bool ptr and returns it if not nil, +// else returns def. +func BoolPtrDerefOr(ptr *bool, def bool) bool { + if ptr != nil { + return *ptr + } + return def +} + // StringPtr returns a pointer to the passed string. func StringPtr(s string) *string { return &s } +// StringPtrDerefOr dereference the string ptr and returns it if not nil, +// else returns def. +func StringPtrDerefOr(ptr *string, def string) string { + if ptr != nil { + return *ptr + } + return def +} + // Float32Ptr returns a pointer to the passed float32. func Float32Ptr(i float32) *float32 { return &i } +// Float32PtrDerefOr dereference the float32 ptr and returns it if not nil, +// else returns def. +func Float32PtrDerefOr(ptr *float32, def float32) float32 { + if ptr != nil { + return *ptr + } + return def +} + // Float64Ptr returns a pointer to the passed float64. 
func Float64Ptr(i float64) *float64 { return &i } + +// Float64PtrDerefOr dereference the float64 ptr and returns it if not nil, +// else returns def. +func Float64PtrDerefOr(ptr *float64, def float64) float64 { + if ptr != nil { + return *ptr + } + return def +} diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index 7ae41672e1..2af4967ca0 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -197,7 +197,7 @@ func (t *Trace) logTrace() { } // if any step took more than it's share of the total allowed time, it deserves a higher log level - buffer.WriteString(fmt.Sprintf("(%v) (total time: %vms):", t.startTime.Format("02-Jan-2006 15:04:00.000"), totalTime.Milliseconds())) + buffer.WriteString(fmt.Sprintf("(%v) (total time: %vms):", t.startTime.Format("02-Jan-2006 15:04:05.000"), totalTime.Milliseconds())) stepThreshold := t.calculateStepThreshold() t.writeTraceSteps(&buffer, fmt.Sprintf("\nTrace[%d]: ", traceNum), stepThreshold) buffer.WriteString(fmt.Sprintf("\nTrace[%d]: [%v] [%v] END\n", traceNum, t.endTime.Sub(t.startTime), totalTime)) diff --git a/vendor/kubevirt.io/ssp-operator/LICENSE b/vendor/kubevirt.io/ssp-operator/LICENSE new file mode 100644 index 0000000000..c0dda21b64 --- /dev/null +++ b/vendor/kubevirt.io/ssp-operator/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 The SSP Operator Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/kubevirt.io/ssp-operator/api/v1beta1/groupversion_info.go b/vendor/kubevirt.io/ssp-operator/api/v1beta1/groupversion_info.go new file mode 100644 index 0000000000..6a35a63df7 --- /dev/null +++ b/vendor/kubevirt.io/ssp-operator/api/v1beta1/groupversion_info.go @@ -0,0 +1,36 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Package v1beta1 contains API Schema definitions for the ssp v1beta1 API group +// +kubebuilder:object:generate=true +// +groupName=ssp.kubevirt.io +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/scheme" +) + +var ( + // GroupVersion is group version used to register these objects + GroupVersion = schema.GroupVersion{Group: "ssp.kubevirt.io", Version: "v1beta1"} + + // SchemeBuilder is used to add go types to the GroupVersionKind scheme + SchemeBuilder = &scheme.Builder{GroupVersion: GroupVersion} + + // AddToScheme adds the types in this group-version to the given scheme. + AddToScheme = SchemeBuilder.AddToScheme +) diff --git a/vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_types.go b/vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_types.go new file mode 100644 index 0000000000..a53c786fd1 --- /dev/null +++ b/vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_types.go @@ -0,0 +1,86 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + lifecycleapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" +) + +type TemplateValidator struct { + // Replicas is the number of replicas of the template validator pod + //+kubebuilder:validation:Minimum=0 + //+kubebuilder:default=2 + Replicas *int32 `json:"replicas,omitempty"` + + // Placement describes the node scheduling configuration + Placement *lifecycleapi.NodePlacement `json:"placement,omitempty"` +} + +type CommonTemplates struct { + // Namespace is the k8s namespace where CommonTemplates should be installed + //+kubebuilder:validation:MaxLength=63 + //+kubebuilder:validation:Pattern=^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + Namespace string `json:"namespace"` +} + +type NodeLabeller struct { + // Placement describes the node scheduling configuration + Placement *lifecycleapi.NodePlacement `json:"placement,omitempty"` +} + +// SSPSpec defines the desired state of SSP +type SSPSpec struct { + // TemplateValidator is configuration of the template validator operand + TemplateValidator TemplateValidator `json:"templateValidator,omitempty"` + + // CommonTemplates is the configuration of the common templates operand + CommonTemplates CommonTemplates `json:"commonTemplates"` + + // NodeLabeller is configuration of the node-labeller operand + NodeLabeller NodeLabeller `json:"nodeLabeller,omitempty"` +} + +// SSPStatus defines the observed state of SSP +type SSPStatus struct { + lifecycleapi.Status `json:",inline"` +} + +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status + +// SSP is the Schema for the ssps API +type SSP struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec SSPSpec `json:"spec,omitempty"` + Status SSPStatus `json:"status,omitempty"` +} + +// +kubebuilder:object:root=true + +// SSPList contains a list of SSP +type SSPList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []SSP `json:"items"` +} 
+ +func init() { + SchemeBuilder.Register(&SSP{}, &SSPList{}) +} diff --git a/vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_webhook.go b/vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_webhook.go new file mode 100644 index 0000000000..54bda35695 --- /dev/null +++ b/vendor/kubevirt.io/ssp-operator/api/v1beta1/ssp_webhook.go @@ -0,0 +1,93 @@ +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "context" + "fmt" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/webhook" +) + +// log is for logging in this package. +var ssplog = logf.Log.WithName("ssp-resource") +var clt client.Client + +func (r *SSP) SetupWebhookWithManager(mgr ctrl.Manager) error { + clt = mgr.GetClient() + return ctrl.NewWebhookManagedBy(mgr). + For(r). + Complete() +} + +// +kubebuilder:webhook:verbs=create;update,path=/validate-ssp-kubevirt-io-v1beta1-ssp,mutating=false,failurePolicy=fail,groups=ssp.kubevirt.io,resources=ssps,versions=v1beta1,name=vssp.kb.io,webhookVersions=v1beta1,sideEffects=None + +var _ webhook.Validator = &SSP{} + +// ValidateCreate implements webhook.Validator so a webhook will be registered for the type +func (r *SSP) ValidateCreate() error { + var ssps SSPList + + // Check if no other SSP resources are present in the cluster + ssplog.Info("validate create", "name", r.Name) + err := clt.List(context.TODO(), &ssps, &client.ListOptions{}) + if err != nil { + return fmt.Errorf("could not list SSPs for validation, please try again: %v", err) + } + if len(ssps.Items) > 0 { + return fmt.Errorf("creation failed, an SSP CR already exists in namespace %v: %v", ssps.Items[0].ObjectMeta.Namespace, ssps.Items[0].ObjectMeta.Name) + } + + // Check if the common templates namespace exists + namespaceName := r.Spec.CommonTemplates.Namespace + var namespace v1.Namespace + err = clt.Get(context.TODO(), client.ObjectKey{Name: namespaceName}, &namespace) + if err != nil { + return fmt.Errorf("creation failed, the configured namespace for common templates does not exist: %v", namespaceName) + } + + return nil +} + +// ValidateUpdate implements webhook.Validator so a webhook will be registered for the type +func (r *SSP) ValidateUpdate(old runtime.Object) error { + ssplog.Info("validate update", "name", r.Name) + + oldSsp := old.(*SSP) + if r.Spec.CommonTemplates.Namespace != oldSsp.Spec.CommonTemplates.Namespace { + return fmt.Errorf("commonTemplates.namespace cannot be changed. 
Attempting to change from: %v to %v", + oldSsp.Spec.CommonTemplates.Namespace, + r.Spec.CommonTemplates.Namespace) + } + + return nil +} + +// ValidateDelete implements webhook.Validator so a webhook will be registered for the type +func (r *SSP) ValidateDelete() error { + return nil +} + +// Forces the value of clt, to be used in unit tests +func setClientForWebhook(c client.Client) { + clt = c +} diff --git a/vendor/kubevirt.io/ssp-operator/api/v1beta1/zz_generated.deepcopy.go b/vendor/kubevirt.io/ssp-operator/api/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000000..21ad88949c --- /dev/null +++ b/vendor/kubevirt.io/ssp-operator/api/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,176 @@ +// +build !ignore_autogenerated + +/* + + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by controller-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CommonTemplates) DeepCopyInto(out *CommonTemplates) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CommonTemplates. +func (in *CommonTemplates) DeepCopy() *CommonTemplates { + if in == nil { + return nil + } + out := new(CommonTemplates) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeLabeller) DeepCopyInto(out *NodeLabeller) { + *out = *in + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeLabeller. +func (in *NodeLabeller) DeepCopy() *NodeLabeller { + if in == nil { + return nil + } + out := new(NodeLabeller) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSP) DeepCopyInto(out *SSP) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSP. +func (in *SSP) DeepCopy() *SSP { + if in == nil { + return nil + } + out := new(SSP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SSP) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SSPList) DeepCopyInto(out *SSPList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]SSP, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSPList. +func (in *SSPList) DeepCopy() *SSPList { + if in == nil { + return nil + } + out := new(SSPList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SSPList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSPSpec) DeepCopyInto(out *SSPSpec) { + *out = *in + in.TemplateValidator.DeepCopyInto(&out.TemplateValidator) + out.CommonTemplates = in.CommonTemplates + in.NodeLabeller.DeepCopyInto(&out.NodeLabeller) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSPSpec. +func (in *SSPSpec) DeepCopy() *SSPSpec { + if in == nil { + return nil + } + out := new(SSPSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SSPStatus) DeepCopyInto(out *SSPStatus) { + *out = *in + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SSPStatus. +func (in *SSPStatus) DeepCopy() *SSPStatus { + if in == nil { + return nil + } + out := new(SSPStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TemplateValidator) DeepCopyInto(out *TemplateValidator) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Placement != nil { + in, out := &in.Placement, &out.Placement + *out = (*in).DeepCopy() + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TemplateValidator. 
+func (in *TemplateValidator) DeepCopy() *TemplateValidator { + if in == nil { + return nil + } + out := new(TemplateValidator) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 5ba680d25a..f37c9f0958 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -11,7 +11,7 @@ github.com/beorn7/perks/quantile github.com/blang/semver # github.com/cespare/xxhash/v2 v2.1.1 github.com/cespare/xxhash/v2 -# github.com/coreos/prometheus-operator v0.38.1-0.20200424145508-7e176fda06cc +# github.com/coreos/prometheus-operator v0.41.1 ## explicit github.com/coreos/prometheus-operator/pkg/apis/monitoring github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1 @@ -28,7 +28,6 @@ github.com/fsnotify/fsnotify ## explicit github.com/ghodss/yaml # github.com/go-kit/kit v0.10.0 => github.com/go-kit/kit v0.3.0 -## explicit github.com/go-kit/kit/log # github.com/go-logfmt/logfmt v0.5.0 ## explicit @@ -46,7 +45,7 @@ github.com/go-openapi/jsonreference # github.com/go-openapi/spec v0.19.7 ## explicit github.com/go-openapi/spec -# github.com/go-openapi/swag v0.19.5 +# github.com/go-openapi/swag v0.19.9 github.com/go-openapi/swag # github.com/go-stack/stack v1.8.0 github.com/go-stack/stack @@ -100,10 +99,6 @@ github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperat github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1 github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1alpha1 github.com/kubevirt/cluster-network-addons-operator/pkg/names -# github.com/kubevirt/kubevirt-ssp-operator v1.2.1 -## explicit -github.com/kubevirt/kubevirt-ssp-operator/pkg/apis -github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1 # github.com/kubevirt/vm-import-operator v0.2.5 ## explicit github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1 @@ -145,7 +140,7 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.10.3 +# github.com/onsi/gomega v1.10.4 ## explicit github.com/onsi/gomega github.com/onsi/gomega/format @@ -169,7 +164,7 @@ github.com/openshift/api/security/v1 github.com/openshift/custom-resource-status/conditions/v1 github.com/openshift/custom-resource-status/objectreferences/v1 github.com/openshift/custom-resource-status/testlib -# github.com/operator-framework/api v0.3.13 +# github.com/operator-framework/api v0.3.20 ## explicit github.com/operator-framework/api/pkg/lib/version github.com/operator-framework/api/pkg/operators @@ -218,8 +213,7 @@ go.uber.org/zap/zapcore golang.org/x/crypto/ssh/terminal # golang.org/x/mod v0.2.0 golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20201110031124-69a78807bb2b -## explicit +# golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb golang.org/x/net/context golang.org/x/net/context/ctxhttp golang.org/x/net/html @@ -512,7 +506,7 @@ k8s.io/klog/v2 ## explicit k8s.io/kube-openapi/pkg/common k8s.io/kube-openapi/pkg/util/proto -# k8s.io/utils v0.0.0-20200720150651-0bdb4ca86cbc +# k8s.io/utils v0.0.0-20200821003339-5e75c0163111 k8s.io/utils/buffer k8s.io/utils/integer k8s.io/utils/pointer @@ -533,6 +527,9 @@ kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api # kubevirt.io/kubevirt v0.36.0 ## explicit 
kubevirt.io/kubevirt/pkg/virt-config +# kubevirt.io/ssp-operator v0.0.0-20201204192040-4623f45d34ea +## explicit +kubevirt.io/ssp-operator/api/v1beta1 # sigs.k8s.io/controller-runtime v0.6.3 ## explicit sigs.k8s.io/controller-runtime From faff562b5e8572e2f3d531a393198b22c04de97f Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Wed, 25 Nov 2020 20:27:12 +0200 Subject: [PATCH 03/19] Switch to use the new SSP operator Signed-off-by: Zvi Cahana --- cmd/hyperconverged-cluster-operator/main.go | 8 +- .../hco/v1alpha1/hyperconverged_resources.go | 11 - .../hyperconverged_controller.go | 7 +- pkg/controller/operands/operandHandler.go | 7 +- pkg/controller/operands/ssp.go | 521 ++++-------------- 5 files changed, 114 insertions(+), 440 deletions(-) diff --git a/cmd/hyperconverged-cluster-operator/main.go b/cmd/hyperconverged-cluster-operator/main.go index 60d67212da..91ebe45884 100644 --- a/cmd/hyperconverged-cluster-operator/main.go +++ b/cmd/hyperconverged-cluster-operator/main.go @@ -11,7 +11,6 @@ import ( "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" "github.com/spf13/pflag" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client/config" "sigs.k8s.io/controller-runtime/pkg/healthz" @@ -23,17 +22,18 @@ import ( networkaddons "github.com/kubevirt/cluster-network-addons-operator/pkg/apis" hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspopv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" openshiftconfigv1 "github.com/openshift/api/config/v1" consolev1 "github.com/openshift/api/console/v1" csvv1alpha1 "github.com/operator-framework/api/pkg/operators/v1alpha1" admissionregistrationv1 "k8s.io/api/admissionregistration/v1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" apiruntime "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" logf "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -149,15 +149,15 @@ func main() { apis.AddToScheme, cdiv1beta1.AddToScheme, networkaddons.AddToScheme, - sspopv1.AddToScheme, + sspv1beta1.AddToScheme, csvv1alpha1.AddToScheme, vmimportv1beta1.AddToScheme, admissionregistrationv1.AddToScheme, consolev1.AddToScheme, openshiftconfigv1.AddToScheme, monitoringv1.AddToScheme, - extv1.AddToScheme, consolev1.AddToScheme, + apiextensionsv1.AddToScheme, } { if err := f(mgr.GetScheme()); err != nil { log.Error(err, "Failed to add to scheme") diff --git a/pkg/apis/hco/v1alpha1/hyperconverged_resources.go b/pkg/apis/hco/v1alpha1/hyperconverged_resources.go index 5ed6df71ef..6f8d16ac37 100644 --- a/pkg/apis/hco/v1alpha1/hyperconverged_resources.go +++ b/pkg/apis/hco/v1alpha1/hyperconverged_resources.go @@ -5,7 +5,6 @@ import ( networkaddonsv1alpha1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1alpha1" networkaddonsnames "github.com/kubevirt/cluster-network-addons-operator/pkg/names" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspv1 
"github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" schedulingv1 "k8s.io/api/scheduling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kubevirtv1 "kubevirt.io/client-go/api/v1" @@ -75,16 +74,6 @@ func (r *HyperConverged) NewNetworkAddons(opts ...string) *networkaddonsv1alpha1 } } -func (r *HyperConverged) NewKubeVirtCommonTemplateBundle(opts ...string) *sspv1.KubevirtCommonTemplatesBundle { - return &sspv1.KubevirtCommonTemplatesBundle{ - ObjectMeta: metav1.ObjectMeta{ - Name: "common-templates-" + r.Name, - Labels: r.getLabels(), - Namespace: r.getNamespace(hcoutil.OpenshiftNamespace, opts), - }, - } -} - func (r *HyperConverged) NewKubeVirtPriorityClass() *schedulingv1.PriorityClass { return &schedulingv1.PriorityClass{ TypeMeta: metav1.TypeMeta{ diff --git a/pkg/controller/hyperconverged/hyperconverged_controller.go b/pkg/controller/hyperconverged/hyperconverged_controller.go index f0674e42bc..a9797ba07a 100644 --- a/pkg/controller/hyperconverged/hyperconverged_controller.go +++ b/pkg/controller/hyperconverged/hyperconverged_controller.go @@ -30,10 +30,10 @@ import ( hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util/predicate" version "github.com/kubevirt/hyperconverged-cluster-operator/version" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" kubevirtv1 "kubevirt.io/client-go/api/v1" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" ) var ( @@ -125,10 +125,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler, ci hcoutil.ClusterInfo) er &kubevirtv1.KubeVirt{}, &cdiv1beta1.CDI{}, &networkaddonsv1.NetworkAddonsConfig{}, - &sspv1.KubevirtCommonTemplatesBundle{}, - &sspv1.KubevirtNodeLabellerBundle{}, - &sspv1.KubevirtTemplateValidator{}, - &sspv1.KubevirtMetricsAggregation{}, + &sspv1beta1.SSP{}, &schedulingv1.PriorityClass{}, &vmimportv1beta1.VMImportConfig{}, } diff --git a/pkg/controller/operands/operandHandler.go b/pkg/controller/operands/operandHandler.go index a682ad9ba5..5cb647a479 100644 --- a/pkg/controller/operands/operandHandler.go +++ b/pkg/controller/operands/operandHandler.go @@ -57,10 +57,7 @@ func NewOperandHandler(client client.Client, scheme *runtime.Scheme, isOpenshift if isOpenshiftCluster { operands = append(operands, []Operand{ - newCommonTemplateBundleHandler(client, scheme), - newNodeLabellerBundleHandler(client, scheme), - newTemplateValidatorHandler(client, scheme), - newMetricsAggregationHandler(client, scheme), + newSspHandler(client, scheme), (*genericOperand)(newMetricsServiceHandler(client, scheme)), (*genericOperand)(newMetricsServiceMonitorHandler(client, scheme)), (*genericOperand)(newMonitoringPrometheusRuleHandler(client, scheme)), @@ -137,7 +134,7 @@ func (h OperandHandler) EnsureDeleted(req *common.HcoRequest) error { NewKubeVirt(req.Instance), NewCDI(req.Instance), NewNetworkAddons(req.Instance), - NewKubeVirtCommonTemplateBundle(req.Instance), + NewSSP(req.Instance), NewConsoleCLIDownload(req.Instance), NewVMImportForCR(req.Instance), } diff --git a/pkg/controller/operands/ssp.go b/pkg/controller/operands/ssp.go index 370381ad83..eac8e7bf88 100644 --- a/pkg/controller/operands/ssp.go +++ b/pkg/controller/operands/ssp.go @@ -3,498 +3,189 @@ package operands import ( "errors" "fmt" - conditionsv1 
"github.com/openshift/custom-resource-status/conditions/v1" "reflect" + "sync" + + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" - objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" - corev1 "k8s.io/api/core/v1" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/tools/reference" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" ) const ( - commonTemplatesBundleOldCrdName = "kubevirtcommontemplatesbundles.kubevirt.io" - metricsAggregationOldCrdName = "kubevirtmetricsaggregations.kubevirt.io" - nodeLabellerBundlesOldCrdName = "kubevirtnodelabellerbundles.kubevirt.io" - templateValidatorsOldCrdName = "kubevirttemplatevalidators.kubevirt.io" + // This is initially set to 2 replicas, to maintain the behavior of the previous SSP operator. + // After SSP implements its defaulting webhook, we can change this to 0 replicas, + // and let the webhook set the default. + defaultTemplateValidatorReplicas = 2 + + defaultCommonTemplatesNamespace = hcoutil.OpenshiftNamespace ) -type sspOperand struct { +type sspHandler struct { genericOperand - shouldRemoveOldCrd bool - oldCrdName string -} -func (handler *sspOperand) ensure(req *common.HcoRequest) *EnsureResult { - res := handler.genericOperand.ensure(req) - if handler.shouldRemoveOldCrd && (!req.UpgradeMode || res.Updated) { - if removeCrd(handler.Client, req, handler.oldCrdName) { - handler.shouldRemoveOldCrd = false - } - } - - return res + crdsToRemove []string } -// ************* KubeVirt Common Template Bundle ************* -type commonTemplateBundleHandler sspOperand - -func newCommonTemplateBundleHandler(clt client.Client, scheme *runtime.Scheme) *commonTemplateBundleHandler { - return &commonTemplateBundleHandler{ +func newSspHandler(Client client.Client, Scheme *runtime.Scheme) *sspHandler { + return &sspHandler{ genericOperand: genericOperand{ - Client: clt, - Scheme: scheme, - crType: "KubeVirtCommonTemplatesBundle", - // Previous versions used to have HCO-operator (namespace: kubevirt-hyperconverged) - // as the owner of kvCTB (namespace: OpenshiftNamespace). - // It's not legal, so remove that. 
- removeExistingOwner: true, + Client: Client, + Scheme: Scheme, + crType: "SSP", isCr: true, + removeExistingOwner: false, setControllerReference: false, - hooks: &commonTemplateBundleHooks{}, + hooks: &sspHooks{}, }, - shouldRemoveOldCrd: true, - oldCrdName: commonTemplatesBundleOldCrdName, - } -} - -type commonTemplateBundleHooks struct{} -func (h commonTemplateBundleHooks) getFullCr(hc *hcov1beta1.HyperConverged) runtime.Object { - return NewKubeVirtCommonTemplateBundle(hc) -} -func (h commonTemplateBundleHooks) getEmptyCr() runtime.Object { - return &sspv1.KubevirtCommonTemplatesBundle{} -} -func (h commonTemplateBundleHooks) validate() error { return nil } -func (h commonTemplateBundleHooks) postFound(*common.HcoRequest, runtime.Object) error { return nil } -func (h commonTemplateBundleHooks) getConditions(cr runtime.Object) []conditionsv1.Condition { - return cr.(*sspv1.KubevirtCommonTemplatesBundle).Status.Conditions -} -func (h commonTemplateBundleHooks) checkComponentVersion(cr runtime.Object) bool { - found := cr.(*sspv1.KubevirtCommonTemplatesBundle) - return checkComponentVersion(hcoutil.SspVersionEnvV, found.Status.ObservedVersion) -} -func (h commonTemplateBundleHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta { - return &cr.(*sspv1.KubevirtCommonTemplatesBundle).ObjectMeta -} - -func (h *commonTemplateBundleHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) { - kvCTB, ok1 := required.(*sspv1.KubevirtCommonTemplatesBundle) - found, ok2 := exists.(*sspv1.KubevirtCommonTemplatesBundle) - if !ok1 || !ok2 { - return false, false, errors.New("can't convert to Kubevirt Common Templates Bundle") - } - if !reflect.DeepEqual(kvCTB.Spec, found.Spec) { - if req.HCOTriggered { - req.Logger.Info("Updating existing KubeVirt Common Templates Bundle's Spec to new opinionated values") - } else { - req.Logger.Info("Reconciling an externally updated KubeVirt Common Templates Bundle's Spec to its opinionated values") - } - kvCTB.Spec.DeepCopyInto(&found.Spec) - err := Client.Update(req.Ctx, found) - if err != nil { - return false, false, err - } - return true, !req.HCOTriggered, nil - } - return false, false, nil -} - -func (h commonTemplateBundleHandler) Ensure(req *common.HcoRequest) *EnsureResult { - handler := sspOperand(h) - return handler.ensure(req) -} - -func NewKubeVirtCommonTemplateBundle(hc *hcov1beta1.HyperConverged, opts ...string) *sspv1.KubevirtCommonTemplatesBundle { - return &sspv1.KubevirtCommonTemplatesBundle{ - ObjectMeta: metav1.ObjectMeta{ - Name: "common-templates-" + hc.Name, - Labels: getLabels(hc), - Namespace: getNamespace(hcoutil.OpenshiftNamespace, opts), + crdsToRemove: []string{ + // These are the 2nd generation SSP CRDs, + // where the group name has been changed to "ssp.kubevirt.io" + "kubevirtcommontemplatesbundles.ssp.kubevirt.io", + "kubevirtmetricsaggregations.ssp.kubevirt.io", + "kubevirtnodelabellerbundles.ssp.kubevirt.io", + "kubevirttemplatevalidators.ssp.kubevirt.io", + + // These are the original SSP CRDs, with the group name "kubevirt.io". + // We attempt to remove these too, for upgrades from an older version. 
+ "kubevirtcommontemplatesbundles.kubevirt.io", + "kubevirtmetricsaggregations.kubevirt.io", + "kubevirtnodelabellerbundles.kubevirt.io", + "kubevirttemplatevalidators.kubevirt.io", }, } } -// ************* KubeVirt Node Labeller Bundle ************* -type nodeLabellerBundleHandler sspOperand +func (handler *sspHandler) ensure(req *common.HcoRequest) *EnsureResult { + res := handler.genericOperand.ensure(req) -func newNodeLabellerBundleHandler(clt client.Client, scheme *runtime.Scheme) *nodeLabellerBundleHandler { - return &nodeLabellerBundleHandler{ - genericOperand: genericOperand{ - Client: clt, - Scheme: scheme, - crType: "KubeVirtNodeLabellerBundle", - removeExistingOwner: false, - isCr: true, - setControllerReference: true, - hooks: &nodeLabellerBundleHooks{}, - }, - shouldRemoveOldCrd: true, - oldCrdName: nodeLabellerBundlesOldCrdName, + // Attempt to remove old CRDs + if len(handler.crdsToRemove) > 0 && (!req.UpgradeMode || res.UpgradeDone) { + unremovedCRDs := removeCRDs(handler.Client, req, handler.crdsToRemove) + handler.crdsToRemove = unremovedCRDs } + + return res } -type nodeLabellerBundleHooks struct{} +type sspHooks struct{} -func (h nodeLabellerBundleHooks) getFullCr(hc *hcov1beta1.HyperConverged) runtime.Object { - return NewKubeVirtNodeLabellerBundleForCR(hc, hc.Namespace) -} -func (h nodeLabellerBundleHooks) getEmptyCr() runtime.Object { - return &sspv1.KubevirtNodeLabellerBundle{} +func (h sspHooks) getFullCr(hc *hcov1beta1.HyperConverged) runtime.Object { + return NewSSP(hc) } -func (h nodeLabellerBundleHooks) validate() error { return nil } -func (h nodeLabellerBundleHooks) postFound(_ *common.HcoRequest, _ runtime.Object) error { return nil } -func (h nodeLabellerBundleHooks) getConditions(cr runtime.Object) []conditionsv1.Condition { - return cr.(*sspv1.KubevirtNodeLabellerBundle).Status.Conditions +func (h sspHooks) getEmptyCr() runtime.Object { return &sspv1beta1.SSP{} } +func (h sspHooks) validate() error { return nil } +func (h sspHooks) postFound(*common.HcoRequest, runtime.Object) error { return nil } +func (h sspHooks) getConditions(cr runtime.Object) []conditionsv1.Condition { + return cr.(*sspv1beta1.SSP).Status.Conditions } -func (h nodeLabellerBundleHooks) checkComponentVersion(cr runtime.Object) bool { - found := cr.(*sspv1.KubevirtNodeLabellerBundle) +func (h sspHooks) checkComponentVersion(cr runtime.Object) bool { + found := cr.(*sspv1beta1.SSP) return checkComponentVersion(hcoutil.SspVersionEnvV, found.Status.ObservedVersion) } -func (h nodeLabellerBundleHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta { - return &cr.(*sspv1.KubevirtNodeLabellerBundle).ObjectMeta +func (h sspHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta { + return &cr.(*sspv1beta1.SSP).ObjectMeta } -func (h *nodeLabellerBundleHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) { - kvNLB, ok1 := required.(*sspv1.KubevirtNodeLabellerBundle) - found, ok2 := exists.(*sspv1.KubevirtNodeLabellerBundle) +func (h *sspHooks) updateCr(req *common.HcoRequest, client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) { + ssp, ok1 := required.(*sspv1beta1.SSP) + found, ok2 := exists.(*sspv1beta1.SSP) if !ok1 || !ok2 { - return false, false, errors.New("can't convert to KubeVirt Node Labeller Bundle") + return false, false, errors.New("can't convert to SSP") } - if !reflect.DeepEqual(kvNLB.Spec, found.Spec) { + if !reflect.DeepEqual(found.Spec, ssp.Spec) { if 
req.HCOTriggered { - req.Logger.Info("Updating existing KubeVirt Node Labeller Bundle's Spec to new opinionated values") + req.Logger.Info("Updating existing SSP's Spec to new opinionated values") } else { - req.Logger.Info("Reconciling an externally updated KubeVirt Node Labeller Bundle's Spec to its opinionated values") + req.Logger.Info("Reconciling an externally updated SSP's Spec to its opinionated values") } - kvNLB.Spec.DeepCopyInto(&found.Spec) - err := Client.Update(req.Ctx, found) + ssp.Spec.DeepCopyInto(&found.Spec) + err := client.Update(req.Ctx, found) if err != nil { return false, false, err } - return true, !req.HCOTriggered, nil } return false, false, nil } -func (h nodeLabellerBundleHandler) Ensure(req *common.HcoRequest) *EnsureResult { - handler := sspOperand(h) - return handler.ensure(req) -} - -func NewKubeVirtNodeLabellerBundleForCR(cr *hcov1beta1.HyperConverged, namespace string) *sspv1.KubevirtNodeLabellerBundle { - labels := map[string]string{ - hcoutil.AppLabel: cr.Name, - } - - spec := sspv1.ComponentSpec{ - // UseKVM: isKVMAvailable(), - } - - if cr.Spec.Workloads.NodePlacement != nil { - if cr.Spec.Workloads.NodePlacement.Affinity != nil { - cr.Spec.Workloads.NodePlacement.Affinity.DeepCopyInto(&spec.Affinity) - } - - if cr.Spec.Workloads.NodePlacement.NodeSelector != nil { - spec.NodeSelector = make(map[string]string) - for k, v := range cr.Spec.Workloads.NodePlacement.NodeSelector { - spec.NodeSelector[k] = v - } - } - - for _, hcoTolr := range cr.Spec.Workloads.NodePlacement.Tolerations { - nlbTolr := corev1.Toleration{} - hcoTolr.DeepCopyInto(&nlbTolr) - spec.Tolerations = append(spec.Tolerations, nlbTolr) - } - } +func NewSSP(hc *hcov1beta1.HyperConverged, opts ...string) *sspv1beta1.SSP { + replicas := int32(defaultTemplateValidatorReplicas) - return &sspv1.KubevirtNodeLabellerBundle{ - ObjectMeta: metav1.ObjectMeta{ - Name: "node-labeller-" + cr.Name, - Labels: labels, - Namespace: namespace, + spec := sspv1beta1.SSPSpec{ + TemplateValidator: sspv1beta1.TemplateValidator{ + Replicas: &replicas, }, - Spec: spec, - } -} - -// ************* KubeVirt Template Validator ************* -type templateValidatorHandler sspOperand - -func newTemplateValidatorHandler(clt client.Client, scheme *runtime.Scheme) *templateValidatorHandler { - return &templateValidatorHandler{ - genericOperand: genericOperand{ - Client: clt, - Scheme: scheme, - crType: "KubevirtTemplateValidator", - removeExistingOwner: false, - isCr: true, - setControllerReference: true, - hooks: &templateValidatorHooks{}, + CommonTemplates: sspv1beta1.CommonTemplates{ + Namespace: defaultCommonTemplatesNamespace, }, - shouldRemoveOldCrd: true, - oldCrdName: templateValidatorsOldCrdName, - } -} - -type templateValidatorHooks struct{} - -func (h templateValidatorHooks) getFullCr(hc *hcov1beta1.HyperConverged) runtime.Object { - return NewKubeVirtTemplateValidatorForCR(hc, hc.Namespace) -} -func (h templateValidatorHooks) getEmptyCr() runtime.Object { - return &sspv1.KubevirtTemplateValidator{} -} -func (h templateValidatorHooks) validate() error { return nil } -func (h templateValidatorHooks) postFound(_ *common.HcoRequest, _ runtime.Object) error { return nil } -func (h templateValidatorHooks) getConditions(cr runtime.Object) []conditionsv1.Condition { - return cr.(*sspv1.KubevirtTemplateValidator).Status.Conditions -} -func (h templateValidatorHooks) checkComponentVersion(cr runtime.Object) bool { - found := cr.(*sspv1.KubevirtTemplateValidator) - return checkComponentVersion(hcoutil.SspVersionEnvV, 
found.Status.ObservedVersion) -} -func (h templateValidatorHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta { - return &cr.(*sspv1.KubevirtTemplateValidator).ObjectMeta -} - -func (h *templateValidatorHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) { - kvTV, ok1 := required.(*sspv1.KubevirtTemplateValidator) - found, ok2 := exists.(*sspv1.KubevirtTemplateValidator) - if !ok1 || !ok2 { - return false, false, errors.New("can't convert to KubeVirt Template Validator") + // NodeLabeller field is explicitly initialized to its zero-value, + // in order to future-proof against bugs if SSP changes it to a pointer type, + // causing nil pointer dereferences in the DeepCopyInto() below. + NodeLabeller: sspv1beta1.NodeLabeller{}, } - if !reflect.DeepEqual(kvTV.Spec, found.Spec) { - if req.HCOTriggered { - req.Logger.Info("Updating existing KubeVirt Template Validator's Spec to new opinionated values") - } else { - req.Logger.Info("Reconciling an externally updated KubeVirt Template Validator's Spec to its opinionated values") - } - kvTV.Spec.DeepCopyInto(&found.Spec) - err := Client.Update(req.Ctx, found) - if err != nil { - return false, false, err - } - return true, !req.HCOTriggered, nil + if hc.Spec.Infra.NodePlacement != nil { + spec.TemplateValidator.Placement = hc.Spec.Infra.NodePlacement.DeepCopy() } - return false, false, nil -} -func (h templateValidatorHandler) Ensure(req *common.HcoRequest) *EnsureResult { - handler := sspOperand(h) - return handler.ensure(req) -} - -func NewKubeVirtTemplateValidatorForCR(cr *hcov1beta1.HyperConverged, namespace string) *sspv1.KubevirtTemplateValidator { - labels := map[string]string{ - hcoutil.AppLabel: cr.Name, - } - - spec := sspv1.TemplateValidatorSpec{} - if cr.Spec.Infra.NodePlacement != nil { - if cr.Spec.Infra.NodePlacement.Affinity != nil { - cr.Spec.Infra.NodePlacement.Affinity.DeepCopyInto(&spec.Affinity) - } - - if cr.Spec.Infra.NodePlacement.NodeSelector != nil { - spec.NodeSelector = make(map[string]string) - for k, v := range cr.Spec.Infra.NodePlacement.NodeSelector { - spec.NodeSelector[k] = v - } - } - - for _, hcoTolr := range cr.Spec.Infra.NodePlacement.Tolerations { - tvTolr := corev1.Toleration{} - hcoTolr.DeepCopyInto(&tvTolr) - spec.Tolerations = append(spec.Tolerations, tvTolr) - } + if hc.Spec.Workloads.NodePlacement != nil { + spec.NodeLabeller.Placement = hc.Spec.Workloads.NodePlacement.DeepCopy() } - return &sspv1.KubevirtTemplateValidator{ + return &sspv1beta1.SSP{ ObjectMeta: metav1.ObjectMeta{ - Name: "template-validator-" + cr.Name, - Labels: labels, - Namespace: namespace, + Name: "ssp-" + hc.Name, + Labels: getLabels(hc), + Namespace: getNamespace(hc.Namespace, opts), }, Spec: spec, } } -// ************* KubeVirt Metrics Aggregation ************* -type metricsAggregationHandler sspOperand - -func newMetricsAggregationHandler(clt client.Client, scheme *runtime.Scheme) *metricsAggregationHandler { - return &metricsAggregationHandler{ - genericOperand: genericOperand{ - Client: clt, - Scheme: scheme, - crType: "KubevirtMetricsAggregation", - removeExistingOwner: false, - isCr: true, - setControllerReference: true, - hooks: &metricsAggregationHooks{}, - }, - shouldRemoveOldCrd: true, - oldCrdName: metricsAggregationOldCrdName, - } -} +// returns a slice of CRD names that weren't successfully removed +func removeCRDs(clt client.Client, req *common.HcoRequest, crdNames []string) []string { + unremovedCRDs := make([]string, 0, len(crdNames))
-type metricsAggregationHooks struct{} + // The deletion is performed concurrently for all CRDs. + var mutex sync.Mutex + var wg sync.WaitGroup + wg.Add(len(crdNames)) -func (h metricsAggregationHooks) getFullCr(hc *hcov1beta1.HyperConverged) runtime.Object { - return NewKubeVirtMetricsAggregationForCR(hc, hc.Namespace) -} -func (h metricsAggregationHooks) getEmptyCr() runtime.Object { - return &sspv1.KubevirtMetricsAggregation{} -} -func (h metricsAggregationHooks) validate() error { return nil } -func (h metricsAggregationHooks) postFound(_ *common.HcoRequest, _ runtime.Object) error { return nil } -func (h metricsAggregationHooks) getConditions(cr runtime.Object) []conditionsv1.Condition { - return cr.(*sspv1.KubevirtMetricsAggregation).Status.Conditions -} -func (h metricsAggregationHooks) checkComponentVersion(cr runtime.Object) bool { - found := cr.(*sspv1.KubevirtMetricsAggregation) - return checkComponentVersion(hcoutil.SspVersionEnvV, found.Status.ObservedVersion) -} -func (h metricsAggregationHooks) getObjectMeta(cr runtime.Object) *metav1.ObjectMeta { - return &cr.(*sspv1.KubevirtMetricsAggregation).ObjectMeta -} + for _, crdName := range crdNames { + go func(crdName string) { + removed := removeCRD(clt, req, crdName) -func (h *metricsAggregationHooks) updateCr(req *common.HcoRequest, Client client.Client, exists runtime.Object, required runtime.Object) (bool, bool, error) { - kubevirtMetricsAggregation, ok1 := required.(*sspv1.KubevirtMetricsAggregation) - found, ok2 := exists.(*sspv1.KubevirtMetricsAggregation) - if !ok1 || !ok2 { - return false, false, errors.New("can't convert to KubeVirt Metrics Aggregation") - } + // If removal failed for some reason, we'll retry in the next reconciliation loop. + if !removed { + mutex.Lock() + defer mutex.Unlock() - if !reflect.DeepEqual(kubevirtMetricsAggregation.Spec, found.Spec) { - if req.HCOTriggered { - req.Logger.Info("Updating existing KubeVirt Template Validator's Spec to new opinionated values") - } else { - req.Logger.Info("Reconciling an externally updated KubeVirt Template Validator's Spec to its opinionated values") - } - kubevirtMetricsAggregation.Spec.DeepCopyInto(&found.Spec) - err := Client.Update(req.Ctx, found) - if err != nil { - return false, false, err - } - return true, !req.HCOTriggered, nil - } - return false, false, nil -} - -func (h *metricsAggregationHandler) Ensure(req *common.HcoRequest) *EnsureResult { - kubevirtMetricsAggregation := NewKubeVirtMetricsAggregationForCR(req.Instance, req.Namespace) - res := NewEnsureResult(kubevirtMetricsAggregation) - // todo if !r.clusterInfo.IsOpenshift() { // SSP operators Only supported in OpenShift. Ignore in K8s. 
- // return res.SetUpgradeDone(true) - //} - - err := controllerutil.SetControllerReference(req.Instance, kubevirtMetricsAggregation, h.Scheme) - if err != nil { - return res.Error(err) - } - - key, err := client.ObjectKeyFromObject(kubevirtMetricsAggregation) - if err != nil { - req.Logger.Error(err, "Failed to get object key for KubeVirt Metrics Aggregation") - } - - res.SetName(key.Name) - found := &sspv1.KubevirtMetricsAggregation{} - - err = h.Client.Get(req.Ctx, key, found) - if err != nil { - if apierrors.IsNotFound(err) { - req.Logger.Info("Creating KubeVirt Metrics Aggregation") - err = h.Client.Create(req.Ctx, kubevirtMetricsAggregation) - if err == nil { - return res.SetCreated() + unremovedCRDs = append(unremovedCRDs, crdName) } - } - return res.Error(err) - } - - req.Logger.Info("KubeVirt Metrics Aggregation already exists", "metrics.Namespace", found.Namespace, "metrics.Name", found.Name) - - if !reflect.DeepEqual(kubevirtMetricsAggregation.Spec, found.Spec) { - overwritten := false - if req.HCOTriggered { - req.Logger.Info("Updating existing KubeVirt Metrics Aggregation's Spec to new opinionated values") - } else { - req.Logger.Info("Reconciling an externally updated KubeVirt Metrics Aggregation's Spec to its opinionated values") - overwritten = true - } - kubevirtMetricsAggregation.Spec.DeepCopyInto(&found.Spec) - err = h.Client.Update(req.Ctx, found) - if err != nil { - return res.Error(err) - } - if overwritten { - res.SetOverwritten() - } - return res.SetUpdated() - } - // Add it to the list of RelatedObjects if found - objectRef, err := reference.GetReference(h.Scheme, found) - if err != nil { - return res.Error(err) - } - objectreferencesv1.SetObjectReference(&req.Instance.Status.RelatedObjects, *objectRef) - - isReady := handleComponentConditions(req, "KubeVirtMetricsAggregation", found.Status.Conditions) - upgradeInProgress := false - if isReady { - upgradeInProgress = req.UpgradeMode && checkComponentVersion(hcoutil.SspVersionEnvV, found.Status.ObservedVersion) - if (upgradeInProgress || !req.UpgradeMode) && h.shouldRemoveOldCrd { - if removeCrd(h.Client, req, metricsAggregationOldCrdName) { - h.shouldRemoveOldCrd = false - } - } + wg.Done() + }(crdName) } - return res.SetUpgradeDone(req.ComponentUpgradeInProgress && upgradeInProgress) -} + wg.Wait() -func NewKubeVirtMetricsAggregationForCR(cr *hcov1beta1.HyperConverged, namespace string) *sspv1.KubevirtMetricsAggregation { - labels := map[string]string{ - hcoutil.AppLabel: cr.Name, - } - return &sspv1.KubevirtMetricsAggregation{ - ObjectMeta: metav1.ObjectMeta{ - Name: "metrics-aggregation-" + cr.Name, - Labels: labels, - Namespace: namespace, - }, - } + return unremovedCRDs } -// ************* Common Methods ************* - -// return true if not found or if deletion succeeded -func removeCrd(clt client.Client, req *common.HcoRequest, crdName string) bool { - found := &unstructured.Unstructured{ - Object: map[string]interface{}{ - "kind": "CustomResourceDefinition", - "apiVersion": "apiextensions.k8s.io/v1", - }, - } - key := client.ObjectKey{Namespace: req.Namespace, Name: crdName} +// returns true if not found or if deletion succeeded, and false otherwise. 
+func removeCRD(clt client.Client, req *common.HcoRequest, crdName string) bool { + found := &apiextensionsv1.CustomResourceDefinition{} + key := client.ObjectKey{Namespace: hcoutil.UndefinedNamespace, Name: crdName} err := clt.Get(req.Ctx, key, found) if err != nil { if !apierrors.IsNotFound(err) { From 5552db0013f955f5aa7851623de872a57a296da2 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Wed, 25 Nov 2020 20:32:00 +0200 Subject: [PATCH 04/19] Unit tests for new SSP operator Signed-off-by: Zvi Cahana --- pkg/controller/commonTestUtils/testUtils.go | 11 +- .../hyperconverged_controller_test.go | 77 +- .../hyperconverged/testUtils_test.go | 35 +- pkg/controller/operands/ssp_test.go | 1137 ++++++----------- 4 files changed, 409 insertions(+), 851 deletions(-) diff --git a/pkg/controller/commonTestUtils/testUtils.go b/pkg/controller/commonTestUtils/testUtils.go index 4327162ac3..5f5e900ca1 100644 --- a/pkg/controller/commonTestUtils/testUtils.go +++ b/pkg/controller/commonTestUtils/testUtils.go @@ -4,14 +4,13 @@ import ( "context" monitoringv1 "github.com/coreos/prometheus-operator/pkg/apis/monitoring/v1" + networkaddons "github.com/kubevirt/cluster-network-addons-operator/pkg/apis" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis" - sspopv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" consolev1 "github.com/openshift/api/console/v1" - extv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" - "k8s.io/apimachinery/pkg/runtime" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" @@ -19,7 +18,9 @@ import ( hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes/scheme" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" @@ -111,11 +112,11 @@ func GetScheme() *runtime.Scheme { apis.AddToScheme, cdiv1beta1.AddToScheme, networkaddons.AddToScheme, - sspopv1.AddToScheme, + sspv1beta1.AddToScheme, vmimportv1beta1.AddToScheme, consolev1.AddToScheme, monitoringv1.AddToScheme, - extv1.AddToScheme, + apiextensionsv1.AddToScheme, } { Expect(f(testScheme)).To(BeNil()) } diff --git a/pkg/controller/hyperconverged/hyperconverged_controller_test.go b/pkg/controller/hyperconverged/hyperconverged_controller_test.go index 8d3b23f6d5..45551d4c49 100644 --- a/pkg/controller/hyperconverged/hyperconverged_controller_test.go +++ b/pkg/controller/hyperconverged/hyperconverged_controller_test.go @@ -190,14 +190,10 @@ var _ = Describe("HyperconvergedController", func() { expectedCDI.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/cdis/%s", expectedCDI.Namespace, expectedCDI.Name) expectedCNA := operands.NewNetworkAddons(hco) expectedCNA.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/cnas/%s", expectedCNA.Namespace, expectedCNA.Name) - expectedKVCTB := operands.NewKubeVirtCommonTemplateBundle(hco) - expectedKVCTB.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/ctbs/%s", expectedKVCTB.Namespace, expectedKVCTB.Name) - expectedKVNLB := operands.NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - 
expectedKVNLB.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/nlb/%s", expectedKVNLB.Namespace, expectedKVNLB.Name) - expectedKVTV := operands.NewKubeVirtTemplateValidatorForCR(hco, namespace) - expectedKVTV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/tv/%s", expectedKVTV.Namespace, expectedKVTV.Name) + expectedSSP := operands.NewSSP(hco) + expectedSSP.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/ctbs/%s", expectedSSP.Namespace, expectedSSP.Name) // Add all of the objects to the client - cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKVStorageRole, expectedKVStorageRoleBinding, expectedKV, expectedCDI, expectedCNA, expectedKVCTB, expectedKVNLB, expectedKVTV}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKVStorageRole, expectedKVStorageRoleBinding, expectedKV, expectedCDI, expectedCNA, expectedSSP}) r := initReconciler(cl) // Do the reconcile @@ -219,24 +215,24 @@ var _ = Describe("HyperconvergedController", func() { Reason: reconcileCompleted, Message: reconcileCompletedMessage, }))) - // Why Template validator? Because it is the last to be checked, so the last missing overwrites everything + // Why SSP? Because it is the last to be checked, so the last missing overwrites everything Expect(foundResource.Status.Conditions).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ Type: conditionsv1.ConditionAvailable, Status: corev1.ConditionFalse, - Reason: "KubevirtTemplateValidatorConditions", - Message: "KubevirtTemplateValidator resource has no conditions", + Reason: "SSPConditions", + Message: "SSP resource has no conditions", }))) Expect(foundResource.Status.Conditions).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ Type: conditionsv1.ConditionProgressing, Status: corev1.ConditionTrue, - Reason: "KubevirtTemplateValidatorConditions", - Message: "KubevirtTemplateValidator resource has no conditions", + Reason: "SSPConditions", + Message: "SSP resource has no conditions", }))) Expect(foundResource.Status.Conditions).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ Type: conditionsv1.ConditionUpgradeable, Status: corev1.ConditionFalse, - Reason: "KubevirtTemplateValidatorConditions", - Message: "KubevirtTemplateValidator resource has no conditions", + Reason: "SSPConditions", + Message: "SSP resource has no conditions", }))) }) @@ -316,17 +312,11 @@ var _ = Describe("HyperconvergedController", func() { Status: corev1.ConditionFalse, }, } - expectedKVCTB := operands.NewKubeVirtCommonTemplateBundle(hco) - expectedKVCTB.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/ctbs/%s", expectedKVCTB.Namespace, expectedKVCTB.Name) - expectedKVCTB.Status.Conditions = getGenericCompletedConditions() - expectedKVNLB := operands.NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - expectedKVNLB.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/nlb/%s", expectedKVNLB.Namespace, expectedKVNLB.Name) - expectedKVNLB.Status.Conditions = getGenericCompletedConditions() - expectedKVTV := operands.NewKubeVirtTemplateValidatorForCR(hco, namespace) - expectedKVTV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/tv/%s", expectedKVTV.Namespace, expectedKVTV.Name) - expectedKVTV.Status.Conditions = getGenericCompletedConditions() + expectedSSP := operands.NewSSP(hco) + expectedSSP.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/ctbs/%s", expectedSSP.Namespace, 
expectedSSP.Name) + expectedSSP.Status.Conditions = getGenericCompletedConditions() // Add all of the objects to the client - cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKV, expectedCDI, expectedCNA, expectedKVCTB, expectedKVNLB, expectedKVTV}) + cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedKVConfig, expectedKVStorageConfig, expectedKV, expectedCDI, expectedCNA, expectedSSP}) r := initReconciler(cl) // Do the reconcile @@ -528,40 +518,14 @@ var _ = Describe("HyperconvergedController", func() { Expect(requeue).To(BeFalse()) checkAvailability(foundResource, corev1.ConditionTrue) - origConds = expected.kvCtb.Status.Conditions - expected.kvCtb.Status.Conditions = expected.cdi.Status.Conditions[1:] + origConds = expected.ssp.Status.Conditions + expected.ssp.Status.Conditions = expected.cdi.Status.Conditions[1:] cl = expected.initClient() foundResource, requeue = doReconcile(cl, expected.hco) Expect(requeue).To(BeFalse()) checkAvailability(foundResource, corev1.ConditionFalse) - expected.kvCtb.Status.Conditions = origConds - cl = expected.initClient() - foundResource, requeue = doReconcile(cl, expected.hco) - Expect(requeue).To(BeFalse()) - checkAvailability(foundResource, corev1.ConditionTrue) - - origConds = expected.kvNlb.Status.Conditions - expected.kvNlb.Status.Conditions = expected.cdi.Status.Conditions[1:] - cl = expected.initClient() - foundResource, requeue = doReconcile(cl, expected.hco) - Expect(requeue).To(BeFalse()) - checkAvailability(foundResource, corev1.ConditionFalse) - - expected.kvNlb.Status.Conditions = origConds - cl = expected.initClient() - foundResource, requeue = doReconcile(cl, expected.hco) - Expect(requeue).To(BeFalse()) - checkAvailability(foundResource, corev1.ConditionTrue) - - origConds = expected.kvTv.Status.Conditions - expected.kvTv.Status.Conditions = expected.cdi.Status.Conditions[1:] - cl = expected.initClient() - foundResource, requeue = doReconcile(cl, expected.hco) - Expect(requeue).To(BeFalse()) - checkAvailability(foundResource, corev1.ConditionFalse) - - expected.kvTv.Status.Conditions = origConds + expected.ssp.Status.Conditions = origConds cl = expected.initClient() foundResource, requeue = doReconcile(cl, expected.hco) Expect(requeue).To(BeFalse()) @@ -586,7 +550,7 @@ var _ = Describe("HyperconvergedController", func() { ).To(BeNil()) Expect(foundResource.Status.RelatedObjects).ToNot(BeNil()) - Expect(len(foundResource.Status.RelatedObjects)).Should(Equal(18)) + Expect(len(foundResource.Status.RelatedObjects)).Should(Equal(15)) Expect(foundResource.ObjectMeta.Finalizers).Should(Equal([]string{FinalizerName})) // Now, delete HCO @@ -715,10 +679,7 @@ var _ = Describe("HyperconvergedController", func() { os.Setenv(hcoutil.VMImportEnvV, newComponentVersion) os.Setenv(hcoutil.SspVersionEnvV, newComponentVersion) - expected.kvCtb.Status.ObservedVersion = newComponentVersion - expected.kvNlb.Status.ObservedVersion = newComponentVersion - expected.kvTv.Status.ObservedVersion = newComponentVersion - expected.kvMtAg.Status.ObservedVersion = newComponentVersion + expected.ssp.Status.ObservedVersion = newComponentVersion os.Setenv(hcoutil.HcoKvIoVersionName, newVersion) diff --git a/pkg/controller/hyperconverged/testUtils_test.go b/pkg/controller/hyperconverged/testUtils_test.go index 544400f503..83c65f2e3d 100644 --- a/pkg/controller/hyperconverged/testUtils_test.go +++ b/pkg/controller/hyperconverged/testUtils_test.go @@ -12,7 +12,6 @@ import ( 
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" "github.com/kubevirt/hyperconverged-cluster-operator/version" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" corev1 "k8s.io/api/core/v1" @@ -22,6 +21,8 @@ import ( "k8s.io/apimachinery/pkg/runtime" kubevirtv1 "kubevirt.io/client-go/api/v1" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" @@ -65,11 +66,8 @@ type BasicExpected struct { kv *kubevirtv1.KubeVirt cdi *cdiv1beta1.CDI cna *networkaddonsv1.NetworkAddonsConfig - kvCtb *sspv1.KubevirtCommonTemplatesBundle - kvNlb *sspv1.KubevirtNodeLabellerBundle - kvTv *sspv1.KubevirtTemplateValidator + ssp *sspv1beta1.SSP vmi *vmimportv1beta1.VMImportConfig - kvMtAg *sspv1.KubevirtMetricsAggregation imsConfig *corev1.ConfigMap mService *corev1.Service serviceMonitor *monitoringv1.ServiceMonitor @@ -87,11 +85,8 @@ func (be BasicExpected) toArray() []runtime.Object { be.kv, be.cdi, be.cna, - be.kvCtb, - be.kvNlb, - be.kvTv, + be.ssp, be.vmi, - be.kvMtAg, be.imsConfig, be.mService, be.serviceMonitor, @@ -178,30 +173,16 @@ func getBasicDeployment() *BasicExpected { expectedCNA.Status.Conditions = getGenericCompletedConditions() res.cna = expectedCNA - expectedKVCTB := operands.NewKubeVirtCommonTemplateBundle(hco) - expectedKVCTB.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/ctbs/%s", expectedKVCTB.Namespace, expectedKVCTB.Name) - expectedKVCTB.Status.Conditions = getGenericCompletedConditions() - res.kvCtb = expectedKVCTB - - expectedKVNLB := operands.NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - expectedKVNLB.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/nlb/%s", expectedKVNLB.Namespace, expectedKVNLB.Name) - expectedKVNLB.Status.Conditions = getGenericCompletedConditions() - res.kvNlb = expectedKVNLB - - expectedKVTV := operands.NewKubeVirtTemplateValidatorForCR(hco, namespace) - expectedKVTV.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/tv/%s", expectedKVTV.Namespace, expectedKVTV.Name) - expectedKVTV.Status.Conditions = getGenericCompletedConditions() - res.kvTv = expectedKVTV + expectedSSP := operands.NewSSP(hco) + expectedSSP.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/ctbs/%s", expectedSSP.Namespace, expectedSSP.Name) + expectedSSP.Status.Conditions = getGenericCompletedConditions() + res.ssp = expectedSSP expectedVMI := operands.NewVMImportForCR(hco) expectedVMI.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/vmimportconfigs/%s", expectedVMI.Namespace, expectedVMI.Name) expectedVMI.Status.Conditions = getGenericCompletedConditions() res.vmi = expectedVMI - kvMtAg := operands.NewKubeVirtMetricsAggregationForCR(hco, namespace) - kvMtAg.Status.Conditions = getGenericCompletedConditions() - res.kvMtAg = kvMtAg - res.imsConfig = operands.NewIMSConfigForCR(hco, namespace) res.imsConfig.Data["v2v-conversion-image"] = commonTestUtils.Conversion_image res.imsConfig.Data["kubevirt-vmware-image"] = commonTestUtils.Vmware_image diff --git a/pkg/controller/operands/ssp_test.go b/pkg/controller/operands/ssp_test.go index 46567b30a7..69c6a8f72c 100644 --- a/pkg/controller/operands/ssp_test.go +++ 
b/pkg/controller/operands/ssp_test.go @@ -7,18 +7,25 @@ import ( "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" - . "github.com/onsi/ginkgo" - . "github.com/onsi/gomega" + conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + lifecycleapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" + "os" + corev1 "k8s.io/api/core/v1" + apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/reference" + + . "github.com/onsi/ginkgo" + . "github.com/onsi/gomega" ) var _ = Describe("SSP Operands", func() { - Context("KubeVirtCommonTemplatesBundle", func() { + Context("SSP", func() { var hco *hcov1beta1.HyperConverged var req *common.HcoRequest @@ -28,9 +35,9 @@ var _ = Describe("SSP Operands", func() { }) It("should create if not present", func() { - expectedResource := NewKubeVirtCommonTemplateBundle(hco) + expectedResource := NewSSP(hco) cl := commonTestUtils.InitClient([]runtime.Object{}) - handler := newCommonTemplateBundleHandler(cl, commonTestUtils.GetScheme()) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) res := handler.ensure(req) Expect(res.Created).To(BeTrue()) Expect(res.Updated).To(BeFalse()) @@ -38,7 +45,7 @@ var _ = Describe("SSP Operands", func() { Expect(res.UpgradeDone).To(BeFalse()) Expect(res.Err).To(BeNil()) - foundResource := &sspv1.KubevirtCommonTemplatesBundle{} + foundResource := &sspv1beta1.SSP{} Expect( cl.Get(context.TODO(), types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, @@ -50,10 +57,10 @@ var _ = Describe("SSP Operands", func() { }) It("should find if present", func() { - expectedResource := NewKubeVirtCommonTemplateBundle(hco) + expectedResource := NewSSP(hco) expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) - handler := newCommonTemplateBundleHandler(cl, commonTestUtils.GetScheme()) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) res := handler.ensure(req) Expect(res.Created).To(BeFalse()) Expect(res.Updated).To(BeFalse()) @@ -70,14 +77,21 @@ var _ = Describe("SSP Operands", func() { }) It("should reconcile to default", func() { - existingResource := NewKubeVirtCommonTemplateBundle(hco) + expectedResource := NewSSP(hco) + existingResource := expectedResource.DeepCopy() existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name) - existingResource.Spec.Version = "Non default value" + replicas := int32(defaultTemplateValidatorReplicas * 2) // non-default value + existingResource.Spec.TemplateValidator.Replicas = &replicas + existingResource.Spec.CommonTemplates.Namespace = "foobar" + existingResource.Spec.NodeLabeller.Placement = &lifecycleapi.NodePlacement{ + NodeSelector: map[string]string{"foo": "bar"}, + } + req.HCOTriggered = false // mock a reconciliation triggered by a change in NewKubeVirtCommonTemplateBundle CR cl := 
commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newCommonTemplateBundleHandler(cl, commonTestUtils.GetScheme()) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) res := handler.ensure(req) Expect(res.Created).To(BeFalse()) Expect(res.Updated).To(BeTrue()) @@ -85,807 +99,408 @@ var _ = Describe("SSP Operands", func() { Expect(res.UpgradeDone).To(BeFalse()) Expect(res.Err).To(BeNil()) - foundResource := &sspv1.KubevirtCommonTemplatesBundle{} + foundResource := &sspv1beta1.SSP{} Expect( cl.Get(context.TODO(), types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, foundResource), ).To(BeNil()) - Expect(foundResource.Spec.Version).To(BeEmpty()) + Expect(foundResource.Spec).To(Equal(expectedResource.Spec)) }) - // TODO: add tests to ensure that HCO properly propagates NodePlacement from its CR - - // TODO: temporary avoid checking conditions on KubevirtCommonTemplatesBundle because it's currently - // broken on k8s. Revert this when we will be able to fix it - /* - It("should handle conditions", func() { - expectedResource := newKubeVirtCommonTemplateBundleForCR(hco, OpenshiftNamespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - expectedResource.Status.Conditions = []conditionsv1.Condition{ - conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - } - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - Expect(r.ensureKubeVirtCommonTemplateBundle(req)).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "KubevirtCommonTemplatesBundleNotAvailable", - Message: "KubevirtCommonTemplatesBundle is not available: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "KubevirtCommonTemplatesBundleProgressing", - Message: "KubevirtCommonTemplatesBundle is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "KubevirtCommonTemplatesBundleProgressing", - Message: "KubevirtCommonTemplatesBundle is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "KubevirtCommonTemplatesBundleDegraded", - Message: "KubevirtCommonTemplatesBundle is degraded: Bar", - }))) + Context("NodeLabeller node placement", func() { + + It("should add node placement if missing", func() { + 
existingResource := NewSSP(hco, commonTestUtils.Namespace) + + hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.NodeLabeller.Placement).To(BeZero()) + Expect(*foundResource.Spec.NodeLabeller.Placement).To(Equal(*hco.Spec.Workloads.NodePlacement)) + Expect(foundResource.Spec.TemplateValidator.Placement).To(BeZero()) + Expect(req.Conditions).To(BeEmpty()) }) - */ - }) - Context("KubeVirtNodeLabellerBundle", func() { - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest + It("should remove node placement if missing in HCO CR", func() { - BeforeEach(func() { - hco = commonTestUtils.NewHco() - req = commonTestUtils.NewReq(hco) - }) + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + existingResource := NewSSP(hcoNodePlacement, commonTestUtils.Namespace) - It("should create if not present", func() { - expectedResource := NewKubeVirtNodeLabellerBundleForCR(hco, commonTestUtils.Namespace) - cl := commonTestUtils.InitClient([]runtime.Object{}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeTrue()) - Expect(res.Updated).To(BeFalse()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) - foundResource := &sspv1.KubevirtNodeLabellerBundle{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) - It("should find if present", func() { - expectedResource := NewKubeVirtNodeLabellerBundleForCR(hco, commonTestUtils.Namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeFalse()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - 
Expect(res.Err).To(BeNil()) + Expect(existingResource.Spec.NodeLabeller.Placement).ToNot(BeZero()) + Expect(foundResource.Spec.NodeLabeller.Placement).To(BeZero()) - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(handler.Scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - }) - - It("should reconcile to default", func() { - existingResource := NewKubeVirtNodeLabellerBundleForCR(hco, commonTestUtils.Namespace) - existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name) - - existingResource.Spec.Version = "Non default value" - req.HCOTriggered = false // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeTrue()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtNodeLabellerBundle{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Spec.Version).To(BeEmpty()) - }) - - It("should add node placement if missing in KubeVirtNodeLabellerBundle", func() { - existingResource := NewKubeVirtNodeLabellerBundleForCR(hco, commonTestUtils.Namespace) - - hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtNodeLabellerBundle{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Affinity.NodeAffinity).To(BeNil()) - Expect(existingResource.Spec.Affinity.PodAffinity).To(BeNil()) - Expect(existingResource.Spec.Affinity.PodAntiAffinity).To(BeNil()) - Expect(foundResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.NodeSelector["key1"]).Should(Equal("value1")) - Expect(foundResource.Spec.NodeSelector["key2"]).Should(Equal("value2")) + Expect(req.Conditions).To(BeEmpty()) + }) - Expect(foundResource.Spec.Tolerations).Should(Equal(hco.Spec.Workloads.NodePlacement.Tolerations)) + It("should modify node placement according to HCO CR", func() { + + hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + existingResource := NewSSP(hco, commonTestUtils.Namespace) + + // now, modify HCO's node placement + seconds3 := int64(3) + hco.Spec.Workloads.NodePlacement.Tolerations = append(hco.Spec.Workloads.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + }) + hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" + + cl := 
commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.NodeLabeller.Placement.Affinity.NodeAffinity).ToNot(BeZero()) + Expect(existingResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(2)) + Expect(existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("value1")) + + Expect(foundResource.Spec.NodeLabeller.Placement.Affinity.NodeAffinity).ToNot(BeNil()) + Expect(foundResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(3)) + Expect(foundResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("something else")) + + Expect(req.Conditions).To(BeEmpty()) + }) - Expect(req.Conditions).To(BeEmpty()) + It("should overwrite node placement if directly set on SSP CR", func() { + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + existingResource := NewSSP(hco, commonTestUtils.Namespace) + + // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR + req.HCOTriggered = false + + // now, modify NodeLabeller node placement + seconds3 := int64(3) + existingResource.Spec.NodeLabeller.Placement.Tolerations = append(hco.Spec.Workloads.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + }) + existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"] = "BADvalue1" + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(3)) + Expect(existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("BADvalue1")) + + Expect(foundResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(2)) + Expect(foundResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("value1")) + + Expect(req.Conditions).To(BeEmpty()) + }) }) - It("should remove node placement if missing in HCO CR", func() { + Context("TemplateValidator node placement", func() { + + It("should add node placement if missing ", func() { + existingResource := NewSSP(hco, commonTestUtils.Namespace) + + hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + foundResource := 
&sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.TemplateValidator.Placement).To(BeZero()) + Expect(*foundResource.Spec.TemplateValidator.Placement).To(Equal(*hco.Spec.Infra.NodePlacement)) + Expect(foundResource.Spec.NodeLabeller.Placement).To(BeZero()) + Expect(req.Conditions).To(BeEmpty()) + }) - hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - existingResource := NewKubeVirtNodeLabellerBundleForCR(hcoNodePlacement, commonTestUtils.Namespace) + It("should remove node placement if missing in HCO CR", func() { - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) + hcoNodePlacement := commonTestUtils.NewHco() + hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + existingResource := NewSSP(hcoNodePlacement, commonTestUtils.Namespace) - foundResource := &sspv1.KubevirtNodeLabellerBundle{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.Affinity.NodeAffinity).To(BeNil()) - - Expect(req.Conditions).To(BeEmpty()) - }) + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) - It("should modify node placement according to HCO CR", func() { + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) - hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - existingResource := NewKubeVirtNodeLabellerBundleForCR(hco, commonTestUtils.Namespace) + Expect(existingResource.Spec.TemplateValidator.Placement).ToNot(BeZero()) + Expect(foundResource.Spec.TemplateValidator.Placement).To(BeZero()) - // now, modify HCO's node placement - seconds3 := int64(3) - hco.Spec.Workloads.NodePlacement.Tolerations = append(hco.Spec.Workloads.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + Expect(req.Conditions).To(BeEmpty()) }) - hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) + It("should modify node placement according to HCO CR", func() { + + hco.Spec.Infra.NodePlacement = 
commonTestUtils.NewHyperConvergedConfig() + existingResource := NewSSP(hco, commonTestUtils.Namespace) + + // now, modify HCO's node placement + seconds3 := int64(3) + hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + }) + hco.Spec.Infra.NodePlacement.NodeSelector["key1"] = "something else" + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.TemplateValidator.Placement.Affinity.NodeAffinity).ToNot(BeZero()) + Expect(existingResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(2)) + Expect(existingResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("value1")) + + Expect(foundResource.Spec.TemplateValidator.Placement.Affinity.NodeAffinity).ToNot(BeNil()) + Expect(foundResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(3)) + Expect(foundResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("something else")) + + Expect(req.Conditions).To(BeEmpty()) + }) - foundResource := &sspv1.KubevirtNodeLabellerBundle{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) + It("should overwrite node placement if directly set on SSP CR", func() { + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + existingResource := NewSSP(hco, commonTestUtils.Namespace) + + // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR + req.HCOTriggered = false + + // now, modify NodeLabeller node placement + seconds3 := int64(3) + existingResource.Spec.TemplateValidator.Placement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + }) + existingResource.Spec.TemplateValidator.Placement.NodeSelector["key1"] = "BADvalue1" + + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Updated).To(BeTrue()) + Expect(res.Overwritten).To(BeTrue()) + Expect(res.Err).To(BeNil()) + + foundResource := &sspv1beta1.SSP{} + Expect( + cl.Get(context.TODO(), + types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, + foundResource), + ).To(BeNil()) + + Expect(existingResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(3)) + Expect(existingResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("BADvalue1")) + + Expect(foundResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(2)) + Expect(foundResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("value1")) + + Expect(req.Conditions).To(BeEmpty()) + }) + 
}) - Expect(existingResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(existingResource.Spec.Tolerations).To(HaveLen(2)) - Expect(existingResource.Spec.NodeSelector["key1"]).Should(Equal("value1")) + Context("SSP Upgrade", func() { - Expect(foundResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.Tolerations).To(HaveLen(3)) - Expect(foundResource.Spec.NodeSelector["key1"]).Should(Equal("something else")) + It("shouldn't remove old CRDs if upgrade isn't done", func() { + oldCrds := oldSSPCrdsAsObjects() + cl := commonTestUtils.InitClient(oldCrds) - Expect(req.Conditions).To(BeEmpty()) - }) + // Simulate ongoing upgrade + req.SetUpgradeMode(true) - It("should overwrite node placement if directly set on NewKubeVirtNodeLabellerBundle CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - existingResource := NewKubeVirtNodeLabellerBundleForCR(hco, commonTestUtils.Namespace) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) - // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR - req.HCOTriggered = false + Expect(res.Created).To(BeTrue()) + Expect(res.Updated).To(BeFalse()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) - // now, modify VMImport node placement - seconds3 := int64(3) - existingResource.Spec.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + foundCrds := apiextensionsv1.CustomResourceDefinitionList{} + Expect(cl.List(context.TODO(), &foundCrds)).To(BeNil()) + Expect(foundCrds.Items).To(HaveLen(len(oldCrds))) }) - existingResource.Spec.NodeSelector["key1"] = "BADvalue1" + It("should remove old CRDs if general upgrade is done", func() { + oldCrds := oldSSPCrdsAsObjects() + cl := commonTestUtils.InitClient(oldCrds) - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newNodeLabellerBundleHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeTrue()) - Expect(res.Err).To(BeNil()) + // Simulate no upgrade + req.SetUpgradeMode(false) - foundResource := &sspv1.KubevirtNodeLabellerBundle{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) - Expect(existingResource.Spec.Tolerations).To(HaveLen(3)) - Expect(existingResource.Spec.NodeSelector["key1"]).Should(Equal("BADvalue1")) + Expect(res.Created).To(BeTrue()) + Expect(res.Updated).To(BeFalse()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) - Expect(foundResource.Spec.Tolerations).To(HaveLen(2)) - Expect(foundResource.Spec.NodeSelector["key1"]).Should(Equal("value1")) - - Expect(req.Conditions).To(BeEmpty()) - }) + foundCrds := apiextensionsv1.CustomResourceDefinitionList{} + Expect(cl.List(context.TODO(), &foundCrds)).To(BeNil()) + Expect(foundCrds.Items).To(BeEmpty()) + }) - // TODO: temporary avoid checking conditions on 
KubevirtNodeLabellerBundle because it's currently - // broken on k8s. Revert this when we will be able to fix it - /* - It("should handle conditions", func() { - expectedResource := NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - expectedResource.Status.Conditions = []conditionsv1.Condition{ - conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Foo", - Message: "Bar", + It("should remove old CRDs if SSP upgrade is done", func() { + existingResource := NewSSP(hco, commonTestUtils.Namespace) + existingResource.Status.Conditions = []conditionsv1.Condition{ + { + Type: conditionsv1.ConditionAvailable, + Status: corev1.ConditionTrue, }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", + { + Type: conditionsv1.ConditionDegraded, + Status: corev1.ConditionFalse, }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", + { + Type: conditionsv1.ConditionProgressing, + Status: corev1.ConditionFalse, }, } - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - Expect(r.ensureKubeVirtNodeLabellerBundle(req)).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "KubevirtNodeLabellerBundleNotAvailable", - Message: "KubevirtNodeLabellerBundle is not available: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "KubevirtNodeLabellerBundleProgressing", - Message: "KubevirtNodeLabellerBundle is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "KubevirtNodeLabellerBundleProgressing", - Message: "KubevirtNodeLabellerBundle is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "KubevirtNodeLabellerBundleDegraded", - Message: "KubevirtNodeLabellerBundle is degraded: Bar", - }))) - }) - */ - - //It("should request KVM without any extra setting", func() { - // os.Unsetenv("KVM_EMULATION") - // - // expectedResource := NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - // Expect(expectedResource.Spec.UseKVM).To(BeTrue()) - //}) - // - //It("should not request KVM if emulation requested", func() { - // err := os.Setenv("KVM_EMULATION", "true") - // Expect(err).NotTo(HaveOccurred()) - // defer os.Unsetenv("KVM_EMULATION") - // - // expectedResource := NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - // Expect(expectedResource.Spec.UseKVM).To(BeFalse()) - //}) - - //It("should request KVM if emulation value not set", func() { - // 
err := os.Setenv("KVM_EMULATION", "") - // Expect(err).NotTo(HaveOccurred()) - // defer os.Unsetenv("KVM_EMULATION") - // - // expectedResource := NewKubeVirtNodeLabellerBundleForCR(hco, namespace) - // Expect(expectedResource.Spec.UseKVM).To(BeTrue()) - //}) - }) - - Context("KubeVirtTemplateValidator", func() { - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - BeforeEach(func() { - hco = commonTestUtils.NewHco() - req = commonTestUtils.NewReq(hco) - }) - - It("should create if not present", func() { - expectedResource := NewKubeVirtTemplateValidatorForCR(hco, commonTestUtils.Namespace) - cl := commonTestUtils.InitClient([]runtime.Object{}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeTrue()) - Expect(res.Updated).To(BeFalse()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtTemplateValidator{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) - - It("should find if present", func() { - expectedResource := NewKubeVirtTemplateValidatorForCR(hco, commonTestUtils.Namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeFalse()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(handler.Scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - }) - - It("should reconcile to default", func() { - existingResource := NewKubeVirtTemplateValidatorForCR(hco, commonTestUtils.Namespace) - existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name) - - existingResource.Spec.TemplateValidatorReplicas = 5 // set non-default value - req.HCOTriggered = false // mock a reconciliation triggered by a change in NewKubeVirtTemplateValidator CR - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeTrue()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtTemplateValidator{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Spec.TemplateValidatorReplicas).To(BeZero()) - }) - - It("should add node placement if missing in KubeVirtTemplateValidator", func() { - 
existingResource := NewKubeVirtTemplateValidatorForCR(hco, commonTestUtils.Namespace) - hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + // Set the expected SSP version that indicates upgrade complete. + // Note: the value doesn't really matter, even when we move beyond 2.6 + const expectedSSPVersion = "2.6" + os.Setenv(hcoutil.SspVersionEnvV, expectedSSPVersion) + existingResource.Status.ObservedVersion = expectedSSPVersion - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) + oldCrds := oldSSPCrdsAsObjects() + objects := append(oldCrds, existingResource) + cl := commonTestUtils.InitClient(objects) - foundResource := &sspv1.KubevirtTemplateValidator{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) + // Simulate ongoing upgrade + req.SetUpgradeMode(true) - Expect(existingResource.Spec.Affinity.NodeAffinity).To(BeNil()) - Expect(existingResource.Spec.Affinity.PodAffinity).To(BeNil()) - Expect(existingResource.Spec.Affinity.PodAntiAffinity).To(BeNil()) - Expect(foundResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.NodeSelector["key1"]).Should(Equal("value1")) - Expect(foundResource.Spec.NodeSelector["key2"]).Should(Equal("value2")) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) - Expect(foundResource.Spec.Tolerations).Should(Equal(hco.Spec.Infra.NodePlacement.Tolerations)) + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeFalse()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeTrue()) + Expect(res.Err).To(BeNil()) - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should remove node placement if missing in HCO CR", func() { - - hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - existingResource := NewKubeVirtTemplateValidatorForCR(hcoNodePlacement, commonTestUtils.Namespace) - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtTemplateValidator{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.Affinity.NodeAffinity).To(BeNil()) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should modify node placement according to HCO CR", func() { - - hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - existingResource := NewKubeVirtTemplateValidatorForCR(hco, commonTestUtils.Namespace) - - // now, modify HCO's node placement - seconds3 := int64(3) - hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", 
Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + foundCrds := apiextensionsv1.CustomResourceDefinitionList{} + Expect(cl.List(context.TODO(), &foundCrds)).To(BeNil()) + Expect(foundCrds.Items).To(BeEmpty()) }) - hco.Spec.Infra.NodePlacement.NodeSelector["key1"] = "something else" - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtTemplateValidator{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(existingResource.Spec.Tolerations).To(HaveLen(2)) - Expect(existingResource.Spec.NodeSelector["key1"]).Should(Equal("value1")) - - Expect(foundResource.Spec.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.Tolerations).To(HaveLen(3)) - Expect(foundResource.Spec.NodeSelector["key1"]).Should(Equal("something else")) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should overwrite node placement if directly set on NewKubeVirtTemplateValidator CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - existingResource := NewKubeVirtTemplateValidatorForCR(hco, commonTestUtils.Namespace) - - // mock a reconciliation triggered by a change in NewKubeVirtTemplateValidator CR - req.HCOTriggered = false - - // now, modify NewKubeVirtTemplateValidator node placement - seconds3 := int64(3) - existingResource.Spec.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, - }) - - existingResource.Spec.NodeSelector["key1"] = "BADvalue1" - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newTemplateValidatorHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtTemplateValidator{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.Tolerations).To(HaveLen(3)) - Expect(existingResource.Spec.NodeSelector["key1"]).Should(Equal("BADvalue1")) - - Expect(foundResource.Spec.Tolerations).To(HaveLen(2)) - Expect(foundResource.Spec.NodeSelector["key1"]).Should(Equal("value1")) - - Expect(req.Conditions).To(BeEmpty()) }) - - // TODO: temporary avoid checking conditions on KubevirtTemplateValidator because it's currently - // broken on k8s. 
Revert this when we will be able to fix it - /*It("should handle conditions", func() { - expectedResource := newKubeVirtTemplateValidatorForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - expectedResource.Status.Conditions = []conditionsv1.Condition{ - conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - } - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - Expect(r.ensureKubeVirtTemplateValidator(req)).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "KubevirtTemplateValidatorNotAvailable", - Message: "KubevirtTemplateValidator is not available: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "KubevirtTemplateValidatorProgressing", - Message: "KubevirtTemplateValidator is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "KubevirtTemplateValidatorProgressing", - Message: "KubevirtTemplateValidator is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "KubevirtTemplateValidatorDegraded", - Message: "KubevirtTemplateValidator is degraded: Bar", - }))) - })*/ - }) - - Context("KubeVirtMetricsAggregation", func() { - var hco *hcov1beta1.HyperConverged - var req *common.HcoRequest - - BeforeEach(func() { - hco = commonTestUtils.NewHco() - req = commonTestUtils.NewReq(hco) - }) - - It("should create if not present", func() { - expectedResource := NewKubeVirtMetricsAggregationForCR(hco, commonTestUtils.Namespace) - cl := commonTestUtils.InitClient([]runtime.Object{}) - handler := newMetricsAggregationHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeTrue()) - Expect(res.Updated).To(BeFalse()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtMetricsAggregation{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: expectedResource.Name, Namespace: expectedResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Name).To(Equal(expectedResource.Name)) - Expect(foundResource.Labels).Should(HaveKeyWithValue(hcoutil.AppLabel, commonTestUtils.Name)) - Expect(foundResource.Namespace).To(Equal(expectedResource.Namespace)) - }) - - 
It("should find if present", func() { - expectedResource := NewKubeVirtMetricsAggregationForCR(hco, commonTestUtils.Namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - cl := commonTestUtils.InitClient([]runtime.Object{hco, expectedResource}) - handler := newMetricsAggregationHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeFalse()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(handler.Scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - }) - - It("should reconcile to default", func() { - existingResource := NewKubeVirtMetricsAggregationForCR(hco, commonTestUtils.Namespace) - existingResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", existingResource.Namespace, existingResource.Name) - - existingResource.Spec.Version = "non-default value" - req.HCOTriggered = false // mock a reconciliation triggered by a change in NewKubeVirtMetricsAggregation CR - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newMetricsAggregationHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeTrue()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1.KubevirtMetricsAggregation{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - Expect(foundResource.Spec.Version).To(BeEmpty()) - }) - - // TODO: add tests to ensure that HCO properly propagates NodePlacement from its CR - - // TODO: temporary avoid checking conditions on KubevirtTemplateValidator because it's currently - // broken on k8s. 
Revert this when we will be able to fix it - /*It("should handle conditions", func() { - expectedResource := newKubeVirtTemplateValidatorForCR(hco, namespace) - expectedResource.ObjectMeta.SelfLink = fmt.Sprintf("/apis/v1/namespaces/%s/dummies/%s", expectedResource.Namespace, expectedResource.Name) - expectedResource.Status.Conditions = []conditionsv1.Condition{ - conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "Foo", - Message: "Bar", - }, - } - cl := initClient([]runtime.Object{hco, expectedResource}) - r := initReconciler(cl) - Expect(r.ensureKubeVirtTemplateValidator(req)).To(BeNil()) - - // Check HCO's status - Expect(hco.Status.RelatedObjects).To(Not(BeNil())) - objectRef, err := reference.GetReference(r.scheme, expectedResource) - Expect(err).To(BeNil()) - // ObjectReference should have been added - Expect(hco.Status.RelatedObjects).To(ContainElement(*objectRef)) - // Check conditions - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionAvailable, - Status: corev1.ConditionFalse, - Reason: "KubevirtTemplateValidatorNotAvailable", - Message: "KubevirtTemplateValidator is not available: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionProgressing, - Status: corev1.ConditionTrue, - Reason: "KubevirtTemplateValidatorProgressing", - Message: "KubevirtTemplateValidator is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionUpgradeable, - Status: corev1.ConditionFalse, - Reason: "KubevirtTemplateValidatorProgressing", - Message: "KubevirtTemplateValidator is progressing: Bar", - }))) - Expect(req.Conditions[]).To(ContainElement(testlib.RepresentCondition(conditionsv1.Condition{ - Type: conditionsv1.ConditionDegraded, - Status: corev1.ConditionTrue, - Reason: "KubevirtTemplateValidatorDegraded", - Message: "KubevirtTemplateValidator is degraded: Bar", - }))) - })*/ }) }) + +func oldSSPCrds() []*apiextensionsv1.CustomResourceDefinition { + names := []string{ + "kubevirtcommontemplatesbundles.ssp.kubevirt.io", + "kubevirtmetricsaggregations.ssp.kubevirt.io", + "kubevirtnodelabellerbundles.ssp.kubevirt.io", + "kubevirttemplatevalidators.ssp.kubevirt.io", + "kubevirtcommontemplatesbundles.kubevirt.io", + "kubevirtmetricsaggregations.kubevirt.io", + "kubevirtnodelabellerbundles.kubevirt.io", + "kubevirttemplatevalidators.kubevirt.io", + } + + crds := make([]*apiextensionsv1.CustomResourceDefinition, 0, len(names)) + for _, name := range names { + crd := &apiextensionsv1.CustomResourceDefinition{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + } + crds = append(crds, crd) + } + + return crds +} + +func oldSSPCrdsAsObjects() []runtime.Object { + crds := oldSSPCrds() + objs := make([]runtime.Object, 0, len(crds)) + for _, crd := range crds { + objs = append(objs, crd) + } + + return objs +} From f5805261500a203a7f7468cec451f90182aa9b5e Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Wed, 25 Nov 2020 22:06:45 +0200 Subject: [PATCH 05/19] Align webhook with new SSP operator Signed-off-by: Zvi Cahana --- 
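Series note: the three hunks below touch the webhook's scheme registration in main.go, the dry-run update dispatch in pkg/webhooks/webhooks.go, and the webhook tests. The net effect is that the webhook no longer builds and dry-run-updates four separate ssp.kubevirt.io/v1 CRs (KubevirtCommonTemplatesBundle, KubevirtNodeLabellerBundle, KubevirtTemplateValidator, KubevirtMetricsAggregation); it handles the single SSP CR from kubevirt.io/ssp-operator api/v1beta1 instead. A minimal, self-contained Go sketch of that dispatch shape follows. The type names, the updateOperandSpec helper, and the string Spec fields are illustrative stand-ins only, not the operator's real API; the real code builds the required object with operands.NewSSP(hc) and copies it with Spec.DeepCopyInto, as shown in the webhooks.go hunk.

package main

import "fmt"

// Stand-in CR types; the real operands carry full spec structs, not strings.
type NetworkAddonsConfig struct{ Spec string }
type SSP struct{ Spec string }

// updateOperandSpec mirrors the shape of the webhook's type switch: derive the
// required spec from the HyperConverged CR and copy it onto the existing object
// before issuing a dry-run update.
func updateOperandSpec(existing interface{}, requiredSpec string) error {
	switch cr := existing.(type) {
	case *NetworkAddonsConfig:
		cr.Spec = requiredSpec
	case *SSP:
		// A single SSP case now covers what used to be four ssp.kubevirt.io/v1 cases.
		cr.Spec = requiredSpec
	default:
		return fmt.Errorf("unhandled operand type %T", existing)
	}
	return nil
}

func main() {
	ssp := &SSP{}
	if err := updateOperandSpec(ssp, "spec-derived-from-hco"); err != nil {
		panic(err)
	}
	fmt.Println(ssp.Spec)
}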
cmd/hyperconverged-cluster-webhook/main.go | 4 +- pkg/webhooks/webhooks.go | 23 +-- pkg/webhooks/webhooks_test.go | 208 ++------------------- 3 files changed, 23 insertions(+), 212 deletions(-) diff --git a/cmd/hyperconverged-cluster-webhook/main.go b/cmd/hyperconverged-cluster-webhook/main.go index f825b31f9e..67fd3c9465 100644 --- a/cmd/hyperconverged-cluster-webhook/main.go +++ b/cmd/hyperconverged-cluster-webhook/main.go @@ -18,7 +18,6 @@ import ( networkaddons "github.com/kubevirt/cluster-network-addons-operator/pkg/apis" hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspopv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" openshiftconfigv1 "github.com/openshift/api/config/v1" consolev1 "github.com/openshift/api/console/v1" @@ -28,6 +27,7 @@ import ( apiruntime "k8s.io/apimachinery/pkg/runtime" _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" logf "sigs.k8s.io/controller-runtime/pkg/log" ) @@ -139,7 +139,7 @@ func main() { apis.AddToScheme, cdiv1beta1.AddToScheme, networkaddons.AddToScheme, - sspopv1.AddToScheme, + sspv1beta1.AddToScheme, csvv1alpha1.AddToScheme, vmimportv1beta1.AddToScheme, admissionregistrationv1.AddToScheme, diff --git a/pkg/webhooks/webhooks.go b/pkg/webhooks/webhooks.go index 180600ca84..a9025dbced 100644 --- a/pkg/webhooks/webhooks.go +++ b/pkg/webhooks/webhooks.go @@ -8,7 +8,6 @@ import ( "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -16,6 +15,7 @@ import ( "k8s.io/apimachinery/pkg/runtime" kubevirtv1 "kubevirt.io/client-go/api/v1" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" "reflect" "sigs.k8s.io/controller-runtime/pkg/client" "sync" @@ -68,10 +68,7 @@ func (wh WebhookHandler) ValidateUpdate(requested *v1beta1.HyperConverged, exist operands.NewKubeVirt(requested), operands.NewCDI(requested), operands.NewNetworkAddons(requested), - operands.NewKubeVirtCommonTemplateBundle(requested), - operands.NewKubeVirtNodeLabellerBundleForCR(requested, requested.Namespace), - operands.NewKubeVirtTemplateValidatorForCR(requested, requested.Namespace), - operands.NewKubeVirtMetricsAggregationForCR(requested, requested.Namespace), + operands.NewSSP(requested), operands.NewVMImportForCR(requested), } @@ -131,20 +128,8 @@ func (wh WebhookHandler) updateOperatorCr(ctx context.Context, hc *v1beta1.Hyper required := operands.NewNetworkAddons(hc) required.Spec.DeepCopyInto(&existing.Spec) - case *sspv1.KubevirtCommonTemplatesBundle: - required := operands.NewKubeVirtCommonTemplateBundle(hc) - required.Spec.DeepCopyInto(&existing.Spec) - - case *sspv1.KubevirtNodeLabellerBundle: - required := operands.NewKubeVirtNodeLabellerBundleForCR(hc, hc.Namespace) - required.Spec.DeepCopyInto(&existing.Spec) - - case *sspv1.KubevirtTemplateValidator: - required := 
operands.NewKubeVirtTemplateValidatorForCR(hc, hc.Namespace) - required.Spec.DeepCopyInto(&existing.Spec) - - case *sspv1.KubevirtMetricsAggregation: - required := operands.NewKubeVirtMetricsAggregationForCR(hc, hc.Namespace) + case *sspv1beta1.SSP: + required := operands.NewSSP(hc) required.Spec.DeepCopyInto(&existing.Spec) case *vmimportv1beta1.VMImportConfig: diff --git a/pkg/webhooks/webhooks_test.go b/pkg/webhooks/webhooks_test.go index d126536364..a8bf6cbb2a 100644 --- a/pkg/webhooks/webhooks_test.go +++ b/pkg/webhooks/webhooks_test.go @@ -7,8 +7,6 @@ import ( networkaddonsv1 "github.com/kubevirt/cluster-network-addons-operator/pkg/apis/networkaddonsoperator/v1" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/operands" - sspopv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis" - sspv1 "github.com/kubevirt/kubevirt-ssp-operator/pkg/apis/kubevirt/v1" vmimportv1beta1 "github.com/kubevirt/vm-import-operator/pkg/apis/v2v/v1beta1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -17,6 +15,7 @@ import ( kubevirtv1 "kubevirt.io/client-go/api/v1" cdiv1beta1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1beta1" sdkapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" "os" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "testing" @@ -51,7 +50,7 @@ var _ = Describe("webhooks handler", func() { cdiv1beta1.AddToScheme, kubevirtv1.AddToScheme, networkaddons.AddToScheme, - sspopv1.AddToScheme, + sspv1beta1.AddToScheme, vmimportv1beta1.AddToScheme, } { Expect(f(s)).To(BeNil()) @@ -265,11 +264,11 @@ var _ = Describe("webhooks handler", func() { Expect(err).Should(Equal(ErrFakeNetworkError)) }) - It("should return error if KubeVirtCommonTemplateBundle CR is missing", func() { + It("should return error if SSP CR is missing", func() { hco := &v1beta1.HyperConverged{} ctx := context.TODO() cli := getFakeClient(s, hco) - Expect(cli.Delete(ctx, operands.NewKubeVirtCommonTemplateBundle(hco))).To(BeNil()) + Expect(cli.Delete(ctx, operands.NewSSP(hco))).To(BeNil()) wh := &WebhookHandler{} wh.Init(logger, cli, HcoValidNamespace) @@ -289,7 +288,7 @@ var _ = Describe("webhooks handler", func() { Expect(apierrors.IsNotFound(err)).To(BeTrue()) }) - It("should return error if dry-run update of KubeVirtCommonTemplateBundle CR returns error", func() { + It("should return error if dry-run update of SSP CR returns error", func() { hco := &v1beta1.HyperConverged{ Spec: v1beta1.HyperConvergedSpec{ Infra: v1beta1.HyperConvergedConfig{ @@ -301,7 +300,7 @@ var _ = Describe("webhooks handler", func() { }, } c := getFakeClient(s, hco) - cli := errorClient{c, kubevirtCommonTemplateBundleUpdateFailure} + cli := errorClient{c, sspUpdateFailure} wh := &WebhookHandler{} wh.Init(logger, cli, HcoValidNamespace) @@ -312,162 +311,10 @@ var _ = Describe("webhooks handler", func() { err := wh.ValidateUpdate(newHco, hco) Expect(err).NotTo(BeNil()) - Expect(err).Should(Equal(ErrFakeCommonTemplateBundleError)) + Expect(err).Should(Equal(ErrFakeSspError)) }) - It("should return error if KubeVirtNodeLabellerBundle CR is missing", func() { - hco := &v1beta1.HyperConverged{} - ctx := context.TODO() - cli := getFakeClient(s, hco) - Expect(cli.Delete(ctx, operands.NewKubeVirtNodeLabellerBundleForCR(hco, hco.Namespace))).To(BeNil()) - wh := &WebhookHandler{} - wh.Init(logger, cli, HcoValidNamespace) - - newHco := 
&v1beta1.HyperConverged{ - Spec: v1beta1.HyperConvergedSpec{ - Infra: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - Workloads: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - }, - } - - err := wh.ValidateUpdate(newHco, hco) - Expect(err).NotTo(BeNil()) - Expect(apierrors.IsNotFound(err)).To(BeTrue()) - }) - - It("should return error if dry-run update of KubeVirtNodeLabellerBundle CR returns error", func() { - hco := &v1beta1.HyperConverged{ - Spec: v1beta1.HyperConvergedSpec{ - Infra: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - Workloads: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - }, - } - c := getFakeClient(s, hco) - cli := errorClient{c, kubevirtNodeLabellerBundleUpdateFailure} - wh := &WebhookHandler{} - wh.Init(logger, cli, HcoValidNamespace) - - newHco := &v1beta1.HyperConverged{} - hco.DeepCopyInto(newHco) - // change something in workloads to trigger dry-run update - newHco.Spec.Workloads.NodePlacement.NodeSelector["a change"] = "Something else" - - err := wh.ValidateUpdate(newHco, hco) - Expect(err).NotTo(BeNil()) - Expect(err).Should(Equal(ErrFakeNodeLabellerBundleError)) - - }) - - It("should return error if KubeVirtTemplateValidator CR is missing", func() { - hco := &v1beta1.HyperConverged{} - ctx := context.TODO() - cli := getFakeClient(s, hco) - Expect(cli.Delete(ctx, operands.NewKubeVirtTemplateValidatorForCR(hco, hco.Namespace))).To(BeNil()) - wh := &WebhookHandler{} - wh.Init(logger, cli, HcoValidNamespace) - - newHco := &v1beta1.HyperConverged{ - Spec: v1beta1.HyperConvergedSpec{ - Infra: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - Workloads: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - }, - } - - err := wh.ValidateUpdate(newHco, hco) - Expect(err).NotTo(BeNil()) - Expect(apierrors.IsNotFound(err)).To(BeTrue()) - }) - - It("should return error if dry-run update of KubeVirtTemplateValidator CR returns error", func() { - hco := &v1beta1.HyperConverged{ - Spec: v1beta1.HyperConvergedSpec{ - Infra: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - Workloads: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - }, - } - c := getFakeClient(s, hco) - cli := errorClient{c, kubevirtTemplateValidatorUpdateFailure} - wh := &WebhookHandler{} - wh.Init(logger, cli, HcoValidNamespace) - - newHco := &v1beta1.HyperConverged{} - hco.DeepCopyInto(newHco) - // change something in workloads to trigger dry-run update - newHco.Spec.Workloads.NodePlacement.NodeSelector["a change"] = "Something else" - - err := wh.ValidateUpdate(newHco, hco) - Expect(err).NotTo(BeNil()) - Expect(err).Should(Equal(ErrFakeTemplateValidatorError)) - - }) - - It("should return error if NewKubeVirtMetricsAggregation CR is missing", func() { - hco := &v1beta1.HyperConverged{} - ctx := context.TODO() - cli := getFakeClient(s, hco) - Expect(cli.Delete(ctx, operands.NewKubeVirtMetricsAggregationForCR(hco, hco.Namespace))).To(BeNil()) - wh := &WebhookHandler{} - wh.Init(logger, cli, HcoValidNamespace) - - newHco := &v1beta1.HyperConverged{ - Spec: v1beta1.HyperConvergedSpec{ - Infra: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - Workloads: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - }, - } - - err := wh.ValidateUpdate(newHco, hco) - Expect(err).NotTo(BeNil()) - 
Expect(apierrors.IsNotFound(err)).To(BeTrue()) - }) - - It("should return error if dry-run update of NewKubeVirtMetricsAggregation CR returns error", func() { - hco := &v1beta1.HyperConverged{ - Spec: v1beta1.HyperConvergedSpec{ - Infra: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - Workloads: v1beta1.HyperConvergedConfig{ - NodePlacement: newHyperConvergedConfig(), - }, - }, - } - c := getFakeClient(s, hco) - cli := errorClient{c, kubevirtMetricsAggregationUpdateFailure} - wh := &WebhookHandler{} - wh.Init(logger, cli, HcoValidNamespace) - - newHco := &v1beta1.HyperConverged{} - hco.DeepCopyInto(newHco) - // change something in workloads to trigger dry-run update - newHco.Spec.Workloads.NodePlacement.NodeSelector["a change"] = "Something else" - - err := wh.ValidateUpdate(newHco, hco) - Expect(err).NotTo(BeNil()) - Expect(err).Should(Equal(ErrFakeMetricsAggregationError)) - }) - It("should return error if VMImport CR is missing", func() { hco := &v1beta1.HyperConverged{} ctx := context.TODO() @@ -644,10 +491,7 @@ func getFakeClient(s *runtime.Scheme, hco *v1beta1.HyperConverged) client.Client operands.NewKubeVirt(hco), operands.NewCDI(hco), operands.NewNetworkAddons(hco), - operands.NewKubeVirtCommonTemplateBundle(hco), - operands.NewKubeVirtNodeLabellerBundleForCR(hco, hco.Namespace), - operands.NewKubeVirtTemplateValidatorForCR(hco, hco.Namespace), - operands.NewKubeVirtMetricsAggregationForCR(hco, hco.Namespace), + operands.NewSSP(hco), operands.NewVMImportForCR(hco)) } @@ -658,10 +502,7 @@ const ( kvUpdateFailure cdiUpdateFailure networkUpdateFailure - kubevirtCommonTemplateBundleUpdateFailure - kubevirtNodeLabellerBundleUpdateFailure - kubevirtTemplateValidatorUpdateFailure - kubevirtMetricsAggregationUpdateFailure + sspUpdateFailure vmImportUpdateFailure timeoutError ) @@ -672,14 +513,11 @@ type errorClient struct { } var ( - ErrFakeKvError = errors.New("fake KubeVirt error") - ErrFakeCdiError = errors.New("fake CDI error") - ErrFakeNetworkError = errors.New("fake Network error") - ErrFakeCommonTemplateBundleError = errors.New("fake CommonTemplateBundle error") - ErrFakeNodeLabellerBundleError = errors.New("fake NodeLabellerBundle error") - ErrFakeTemplateValidatorError = errors.New("fake TemplateValidator error") - ErrFakeMetricsAggregationError = errors.New("fake MetricsAggregation error") - ErrFakeVMImportError = errors.New("fake VMImport error") + ErrFakeKvError = errors.New("fake KubeVirt error") + ErrFakeCdiError = errors.New("fake CDI error") + ErrFakeNetworkError = errors.New("fake Network error") + ErrFakeSspError = errors.New("fake SSP error") + ErrFakeVMImportError = errors.New("fake VMImport error") ) func (ec errorClient) Update(ctx context.Context, obj runtime.Object, opts ...client.UpdateOption) error { @@ -696,21 +534,9 @@ func (ec errorClient) Update(ctx context.Context, obj runtime.Object, opts ...cl if ec.failure == networkUpdateFailure { return ErrFakeNetworkError } - case *sspv1.KubevirtCommonTemplatesBundle: - if ec.failure == kubevirtCommonTemplateBundleUpdateFailure { - return ErrFakeCommonTemplateBundleError - } - case *sspv1.KubevirtNodeLabellerBundle: - if ec.failure == kubevirtNodeLabellerBundleUpdateFailure { - return ErrFakeNodeLabellerBundleError - } - case *sspv1.KubevirtTemplateValidator: - if ec.failure == kubevirtTemplateValidatorUpdateFailure { - return ErrFakeTemplateValidatorError - } - case *sspv1.KubevirtMetricsAggregation: - if ec.failure == kubevirtMetricsAggregationUpdateFailure { - return 
ErrFakeMetricsAggregationError + case *sspv1beta1.SSP: + if ec.failure == sspUpdateFailure { + return ErrFakeSspError } case *vmimportv1beta1.VMImportConfig: if ec.failure == vmImportUpdateFailure { From 7e549287eec3e3d9a01174eb9386aac78fe419f8 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Wed, 9 Dec 2020 16:53:06 +0200 Subject: [PATCH 06/19] Update CSV generation for new SSP Signed-off-by: Zvi Cahana --- deploy/cluster_role.yaml | 248 +++-- deploy/cluster_role_binding.yaml | 24 +- .../scheduling-scale-performance00.crd.yaml | 852 ++++++++++++++++-- .../scheduling-scale-performance01.crd.yaml | 114 --- .../scheduling-scale-performance02.crd.yaml | 750 --------------- .../scheduling-scale-performance03.crd.yaml | 753 ---------------- deploy/images.csv | 2 +- deploy/images.env | 4 +- ...operator.v1.3.0.clusterserviceversion.yaml | 349 ++++--- .../scheduling-scale-performance00.crd.yaml | 852 ++++++++++++++++-- .../scheduling-scale-performance01.crd.yaml | 114 --- .../scheduling-scale-performance02.crd.yaml | 750 --------------- .../scheduling-scale-performance03.crd.yaml | 753 ---------------- ...operator.v1.3.0.clusterserviceversion.yaml | 349 ++++--- .../scheduling-scale-performance00.crd.yaml | 852 ++++++++++++++++-- .../scheduling-scale-performance01.crd.yaml | 114 --- .../scheduling-scale-performance02.crd.yaml | 750 --------------- .../scheduling-scale-performance03.crd.yaml | 753 ---------------- deploy/operator.yaml | 60 +- deploy/service_account.yaml | 8 +- hack/build-manifests.sh | 3 +- hack/config | 2 +- 22 files changed, 3122 insertions(+), 5334 deletions(-) delete mode 100644 deploy/crds/scheduling-scale-performance01.crd.yaml delete mode 100644 deploy/crds/scheduling-scale-performance02.crd.yaml delete mode 100644 deploy/crds/scheduling-scale-performance03.crd.yaml delete mode 100644 deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml delete mode 100644 deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml delete mode 100644 deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml delete mode 100644 deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml delete mode 100644 deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml delete mode 100644 deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml diff --git a/deploy/cluster_role.yaml b/deploy/cluster_role.yaml index f5003c8c07..1eea3a204e 100644 --- a/deploy/cluster_role.yaml +++ b/deploy/cluster_role.yaml @@ -65,6 +65,41 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role +metadata: + labels: + name: ssp-operator + name: ssp-operator +rules: +- apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete +- apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch +- apiGroups: + - "" + resources: + - events + verbs: + - create + - patch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role metadata: labels: name: cdi-operator @@ -1009,181 +1044,280 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole metadata: labels: - name: kubevirt-ssp-operator - name: kubevirt-ssp-operator + name: ssp-operator + name: ssp-operator rules: - apiGroups: - - kubevirt.io - - ssp.kubevirt.io - - template.openshift.io + - admissionregistration.k8s.io resources: - - '*' + - validatingwebhookconfigurations verbs: - 
create + - delete - get - list - patch - update - watch +- apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list +- apiGroups: + - apps + resources: + - daemonsets + verbs: + - create - delete + - get + - list + - patch + - update + - watch +- apiGroups: + - apps + resources: + - deployments + verbs: + - create + - delete + - get + - list + - patch + - update + - watch - apiGroups: - cdi.kubevirt.io resources: - datavolumes - - datavolumes/source verbs: - create + - delete - get - list - patch - update - watch - - delete - apiGroups: - - monitoring.coreos.com + - cdi.kubevirt.io resources: - - prometheusrules + - datavolumes/source + verbs: + - create +- apiGroups: + - "" + resources: + - configmaps + - serviceaccounts verbs: - create + - delete - get - list - patch + - update - watch +- apiGroups: + - "" + resources: + - namespaces + verbs: + - create - delete + - get + - list + - patch + - update + - watch - apiGroups: - - monitoring.coreos.com + - "" resources: - - servicemonitors + - nodes verbs: - get - - create + - patch + - update - apiGroups: - - rbac.authorization.k8s.io + - "" resources: - - clusterroles + - persistentvolumeclaims verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - - rbac.authorization.k8s.io + - "" resources: - - clusterrolebindings + - persistentvolumeclaims/status + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - serviceaccounts + - services verbs: - create + - delete - get - list + - patch + - update - watch +- apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: - - roles + - clusterrolebindings + - clusterroles verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: + - clusterroles - rolebindings + - roles verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - - extensions - - apps + - security.openshift.io resources: - - deployments - - deployments/finalizers - - replicasets - - daemonsets + - securitycontextconstraints verbs: - create - - update + - delete - get - list - patch + - update - watch - - delete - apiGroups: - - "" + - security.openshift.io + resourceNames: + - privileged resources: - - serviceaccounts - - configmaps - - persistentvolumeclaims - - services - - services/finalizers + - securitycontextconstraints + verbs: + - use +- apiGroups: + - ssp.kubevirt.io + resources: + - kubevirtcommontemplatesbundles verbs: - create - - update + - delete - get - - patch - list + - patch + - update - watch - - delete - apiGroups: - - "" + - ssp.kubevirt.io resources: - - nodes + - kubevirtmetricsaggregations verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - "" + - ssp.kubevirt.io resources: - - pods - - persistentvolumeclaims/status + - kubevirtnodelabellerbundles verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - - "" + - ssp.kubevirt.io resources: - - namespaces + - kubevirttemplatevalidators verbs: - create + - delete - get - list - patch + - update - watch - apiGroups: - - admissionregistration.k8s.io + - ssp.kubevirt.io resources: - - validatingwebhookconfigurations + - ssps verbs: - create + - delete - get - list - patch + - update - watch - apiGroups: - - security.openshift.io + - ssp.kubevirt.io resources: - - 
securitycontextconstraints + - ssps/finalizers + verbs: + - update +- apiGroups: + - ssp.kubevirt.io + resources: + - ssps/status verbs: - get - - list - - create - - watch - patch + - update - apiGroups: - - security.openshift.io - resourceNames: - - privileged + - template.openshift.io resources: - - securitycontextconstraints + - templates verbs: - - use + - create + - delete + - get + - list + - patch + - update + - watch --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRole diff --git a/deploy/cluster_role_binding.yaml b/deploy/cluster_role_binding.yaml index 9ba6655f4c..8968242ffc 100644 --- a/deploy/cluster_role_binding.yaml +++ b/deploy/cluster_role_binding.yaml @@ -33,6 +33,22 @@ subjects: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding +metadata: + labels: + name: ssp-operator + name: ssp-operator + namespace: kubevirt-hyperconverged +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: ssp-operator +subjects: +- kind: ServiceAccount + name: ssp-operator + namespace: kubevirt-hyperconverged +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding metadata: labels: name: cdi-operator @@ -112,15 +128,15 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding metadata: labels: - name: kubevirt-ssp-operator - name: kubevirt-ssp-operator + name: ssp-operator + name: ssp-operator roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole - name: kubevirt-ssp-operator + name: ssp-operator subjects: - kind: ServiceAccount - name: kubevirt-ssp-operator + name: ssp-operator namespace: kubevirt-hyperconverged --- apiVersion: rbac.authorization.k8s.io/v1 diff --git a/deploy/crds/scheduling-scale-performance00.crd.yaml b/deploy/crds/scheduling-scale-performance00.crd.yaml index aa1dd67dd8..f8bd4f2c14 100644 --- a/deploy/crds/scheduling-scale-performance00.crd.yaml +++ b/deploy/crds/scheduling-scale-performance00.crd.yaml @@ -2,50 +2,815 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kubevirtcommontemplatesbundles.ssp.kubevirt.io + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + name: ssps.ssp.kubevirt.io spec: group: ssp.kubevirt.io names: - kind: KubevirtCommonTemplatesBundle - listKind: KubevirtCommonTemplatesBundleList - plural: kubevirtcommontemplatesbundles - shortNames: - - kvct - singular: kubevirtcommontemplatesbundle + kind: SSP + listKind: SSPList + plural: ssps + singular: ssp scope: Namespaced versions: - - name: v1 + - name: v1beta1 schema: openAPIV3Schema: - description: KubevirtCommonTemplatesBundle defines the CommonTemplates CR + description: SSP is the Schema for the ssps API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec contains the configuration of Common Templates + description: SSPSpec defines the desired state of SSP properties: - version: - description: Defines the version of the operand - type: string + commonTemplates: + description: CommonTemplates is the configuration of the common templates operand + properties: + namespace: + description: Namespace is the k8s namespace where CommonTemplates should be installed + maxLength: 63 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - namespace + type: object + nodeLabeller: + description: NodeLabeller is configuration of the node-labeller operand + properties: + placement: + description: Placement describes the node scheduling configuration + properties: + affinity: + description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector' + type: object + tolerations: + description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. 
Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + templateValidator: + description: TemplateValidator is configuration of the template validator operand + properties: + placement: + description: Placement describes the node scheduling configuration + properties: + affinity: + description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. 
+ type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. 
+ type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector' + type: object + tolerations: + description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + default: 2 + description: Replicas is the number of replicas of the template validator pod + format: int32 + minimum: 0 + type: integer + type: object + required: + - commonTemplates type: object status: - description: Status holds the current status of Common Templates + description: SSPStatus defines the observed state of SSP properties: conditions: - description: Reported states of the controller + description: A list of current conditions of the resource items: - description: Condition represents the state of the operator's reconciliation - functionality. + description: Condition represents the state of the operator's reconciliation functionality. 
properties: lastHeartbeatTime: format: date-time @@ -60,50 +825,24 @@ spec: status: type: string type: - description: ConditionType is the state of the operator's reconciliation - functionality. + description: ConditionType is the state of the operator's reconciliation functionality. type: string required: - status - type type: object type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array observedVersion: - description: The version of the deployed operands + description: The observed version of the resource type: string operatorVersion: - description: The version of the deployed operator + description: The version of the resource as defined by the operator + type: string + phase: + description: Phase is the current phase of the deployment type: string targetVersion: - description: The desired version of the deployed operands + description: The desired version of the resource type: string type: object type: object @@ -111,4 +850,3 @@ spec: storage: true subresources: status: {} - preserveUnknownFields: false diff --git a/deploy/crds/scheduling-scale-performance01.crd.yaml b/deploy/crds/scheduling-scale-performance01.crd.yaml deleted file mode 100644 index 329beb039c..0000000000 --- a/deploy/crds/scheduling-scale-performance01.crd.yaml +++ /dev/null @@ -1,114 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirtmetricsaggregations.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtMetricsAggregation - listKind: KubevirtMetricsAggregationList - plural: kubevirtmetricsaggregations - shortNames: - - kvma - singular: kubevirtmetricsaggregation - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtMetricsAggregation defines the MetricsAggregation CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of MetricsAggregation - properties: - version: - description: Defines the version of the operand - type: string - type: object - status: - description: Status holds the current status of MetricsAggregation - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. 
- properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. - type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/crds/scheduling-scale-performance02.crd.yaml b/deploy/crds/scheduling-scale-performance02.crd.yaml deleted file mode 100644 index d9ab4670f3..0000000000 --- a/deploy/crds/scheduling-scale-performance02.crd.yaml +++ /dev/null @@ -1,750 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirtnodelabellerbundles.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtNodeLabellerBundle - listKind: KubevirtNodeLabellerBundleList - plural: kubevirtnodelabellerbundles - shortNames: - - kvnl - singular: kubevirtnodelabellerbundle - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtNodeLabellerBundle defines the NodeLabeller CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of NodeLabeller - properties: - affinity: - description: Define the node affinity for NodeLabeller pods - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. 
for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. 
The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. 
- items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. 
Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. 
This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: Define node selector labels for NodeLabeller pods - type: object - tolerations: - description: Define tolerations for NodeLabeller pods - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - version: - description: Defines the version of the NodeLabeller - type: string - type: object - status: - description: Status holds the current status of NodeLabeller - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/crds/scheduling-scale-performance03.crd.yaml b/deploy/crds/scheduling-scale-performance03.crd.yaml deleted file mode 100644 index 5215df1afb..0000000000 --- a/deploy/crds/scheduling-scale-performance03.crd.yaml +++ /dev/null @@ -1,753 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirttemplatevalidators.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtTemplateValidator - listKind: KubevirtTemplateValidatorList - plural: kubevirttemplatevalidators - shortNames: - - kvtv - singular: kubevirttemplatevalidator - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtTemplateValidator defines the TemplateValidator CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of TemplateValidator - properties: - affinity: - description: Define the node affinity for TemplateValidator pods - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. - items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). 
- A null preferred scheduling term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. 
Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. 
If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: Define node selector labels for TemplateValidator - type: object - templateValidatorReplicas: - description: Defines the desired number of replicas for TemplateValidator - type: integer - tolerations: - description: Define tolerations for TemplateValidator - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - version: - description: Defines the version of TemplateValidaotr - type: string - type: object - status: - description: Status holds the current status of TemplateValidator - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/images.csv b/deploy/images.csv index 22b133ef63..d75eac6628 100644 --- a/deploy/images.csv +++ b/deploy/images.csv @@ -5,7 +5,7 @@ KUBEVIRT_CONTROLLER_IMAGE,docker.io/kubevirt/virt-controller,KUBEVIRT_VERSION,f1 KUBEVIRT_LAUNCHER_IMAGE,docker.io/kubevirt/virt-launcher,KUBEVIRT_VERSION,1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12 KUBEVIRT_HANDLER_IMAGE,docker.io/kubevirt/virt-handler,KUBEVIRT_VERSION,10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236 CNA_OPERATOR_IMAGE,quay.io/kubevirt/cluster-network-addons-operator,NETWORK_ADDONS_VERSION,38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d -SSP_OPERATOR_IMAGE,quay.io/fromani/kubevirt-ssp-operator-container,SSP_VERSION,13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c +SSP_OPERATOR_IMAGE,quay.io/kubevirt/ssp-operator,SSP_VERSION,efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 CDI_OPERATOR_IMAGE,docker.io/kubevirt/cdi-operator,CDI_VERSION,8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711 CDI_CONTROLLER_IMAGE,docker.io/kubevirt/cdi-controller,CDI_VERSION,bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc CDI_APISERVER_IMAGE,docker.io/kubevirt/cdi-apiserver,CDI_VERSION,02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813 diff --git a/deploy/images.env b/deploy/images.env index 1e9933d612..2555484be1 100755 --- a/deploy/images.env +++ b/deploy/images.env @@ -4,7 +4,7 @@ KUBEVIRT_CONTROLLER_IMAGE=docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89 KUBEVIRT_LAUNCHER_IMAGE=docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12 KUBEVIRT_HANDLER_IMAGE=docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236 CNA_OPERATOR_IMAGE=quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d -SSP_OPERATOR_IMAGE=quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c +SSP_OPERATOR_IMAGE=quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 CDI_OPERATOR_IMAGE=docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711 CDI_CONTROLLER_IMAGE=docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc 
CDI_APISERVER_IMAGE=docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813 @@ -22,4 +22,4 @@ HCO_WEBHOOK_IMAGE=quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594 NMO_IMAGE=quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a CONVERSION_IMAGE=quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815 VMWARE_IMAGE=quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 -DIGEST_LIST=docker.io/kubevirt/virt-operator@sha256:4c33eaab42033c84310b62e146274f95a55699235c59846c8cec3009a91a723f,docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21,docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5,docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12,docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236,quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d,quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c,docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711,docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc,docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813,docker.io/kubevirt/cdi-cloner@sha256:c8946ef116d4d2fccf08b895faf07d9bb85f76d315e4380416261db9970306f1,docker.io/kubevirt/cdi-importer@sha256:d16dd224def1713a51278f6f164a04c7e9b38364b794f14d18c2d8d14ab04eb3,docker.io/kubevirt/cdi-uploadproxy@sha256:9e5ae41e3da07c30ac9a0326c89f2d64021abfc5d74ee6c4b750d6915f02eeb5,docker.io/kubevirt/cdi-uploadserver@sha256:3915ba0501fa0e3ee9ae178d6617d0c1ac5f34a83243f723421c42de99e705e5,quay.io/kubevirt/hostpath-provisioner-operator@sha256:a51e9b075a60600244757386f5894b314170543edb1d7f4738f4860a19602072,quay.io/kubevirt/hostpath-provisioner@sha256:3838d8e713d2e85a807a9c302501b25c248ba3f3c1602fe50480e6510de43e11,quay.io/kubevirt/vm-import-operator@sha256:74fc74dab0671ef1098e69872e47bcb6f85a40b4b18a1e23fd6d3cfc36dfee32,quay.io/kubevirt/vm-import-controller@sha256:481f4a493a66d1310734ac135e8dbaa5dc01c9d93f6e9ecc9326b81c1c08dbfe,quay.io/kubevirt/vm-import-virtv2v@sha256:97caccb965d771afefd901c71381b6c1126e4177b477d47f2ca5ca57c5b06593,quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da,quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594403627756bae3b4d6579967819fc544ced3c371d971a090a99fe0a9e9,quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a,quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815,quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 \ No newline at end of file 
+DIGEST_LIST=docker.io/kubevirt/virt-operator@sha256:4c33eaab42033c84310b62e146274f95a55699235c59846c8cec3009a91a723f,docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21,docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5,docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12,docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236,quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d,quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941,docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711,docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc,docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813,docker.io/kubevirt/cdi-cloner@sha256:c8946ef116d4d2fccf08b895faf07d9bb85f76d315e4380416261db9970306f1,docker.io/kubevirt/cdi-importer@sha256:d16dd224def1713a51278f6f164a04c7e9b38364b794f14d18c2d8d14ab04eb3,docker.io/kubevirt/cdi-uploadproxy@sha256:9e5ae41e3da07c30ac9a0326c89f2d64021abfc5d74ee6c4b750d6915f02eeb5,docker.io/kubevirt/cdi-uploadserver@sha256:3915ba0501fa0e3ee9ae178d6617d0c1ac5f34a83243f723421c42de99e705e5,quay.io/kubevirt/hostpath-provisioner-operator@sha256:a51e9b075a60600244757386f5894b314170543edb1d7f4738f4860a19602072,quay.io/kubevirt/hostpath-provisioner@sha256:3838d8e713d2e85a807a9c302501b25c248ba3f3c1602fe50480e6510de43e11,quay.io/kubevirt/vm-import-operator@sha256:74fc74dab0671ef1098e69872e47bcb6f85a40b4b18a1e23fd6d3cfc36dfee32,quay.io/kubevirt/vm-import-controller@sha256:481f4a493a66d1310734ac135e8dbaa5dc01c9d93f6e9ecc9326b81c1c08dbfe,quay.io/kubevirt/vm-import-virtv2v@sha256:97caccb965d771afefd901c71381b6c1126e4177b477d47f2ca5ca57c5b06593,quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da,quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594403627756bae3b4d6579967819fc544ced3c371d971a090a99fe0a9e9,quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a,quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815,quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index c851c79491..2ef98dd1ea 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -3,7 +3,7 @@ apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: annotations: - alm-examples: 
'[{"apiVersion":"hco.kubevirt.io/v1beta1","kind":"HyperConverged","metadata":{"annotations":{"deployOVS":"false"},"name":"kubevirt-hyperconverged","namespace":"kubevirt-hyperconverged"},"spec":{"BareMetalPlatform":false}},{"apiVersion":"networkaddonsoperator.network.kubevirt.io/v1","kind":"NetworkAddonsConfig","metadata":{"name":"cluster"},"spec":{"imagePullPolicy":"IfNotPresent","kubeMacPool":{"rangeEnd":"FD:FF:FF:FF:FF:FF","rangeStart":"02:00:00:00:00:00"},"linuxBridge":{},"macvtap":{},"multus":{},"nmstate":{},"ovs":{}}},{"apiVersion":"kubevirt.io/v1alpha3","kind":"KubeVirt","metadata":{"name":"kubevirt","namespace":"kubevirt"},"spec":{"imagePullPolicy":"Always"}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtTemplateValidator","metadata":{"name":"kubevirt-template-validator","namespace":"kubevirt"},"spec":{"templateValidatorReplicas":2}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtCommonTemplatesBundle","metadata":{"name":"kubevirt-common-template-bundle","namespace":"kubevirt"}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtMetricsAggregation","metadata":{"name":"kubevirt-metrics-aggregation","namespace":"kubevirt"}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtNodeLabellerBundle","metadata":{"name":"kubevirt-node-labeller-bundle","namespace":"kubevirt"}},{"apiVersion":"cdi.kubevirt.io/v1beta1","kind":"CDI","metadata":{"name":"cdi","namespace":"cdi"},"spec":{"imagePullPolicy":"IfNotPresent"}},{"apiVersion":"nodemaintenance.kubevirt.io/v1beta1","kind":"NodeMaintenance","metadata":{"name":"nodemaintenance-example"},"spec":{"nodeName":"node02","reason":"Test node maintenance"}},{"apiVersion":"hostpathprovisioner.kubevirt.io/v1beta1","kind":"HostPathProvisioner","metadata":{"name":"hostpath-provisioner"},"spec":{"imagePullPolicy":"IfNotPresent","pathConfig":{"path":"/var/hpvolumes","useNamingPrefix":false}}},{"apiVersion":"v2v.kubevirt.io/v1beta1","kind":"VMImportConfig","metadata":{"name":"vm-import-operator-config"},"spec":{"imagePullPolicy":"IfNotPresent"}}]' + alm-examples: '[{"apiVersion":"hco.kubevirt.io/v1beta1","kind":"HyperConverged","metadata":{"annotations":{"deployOVS":"false"},"name":"kubevirt-hyperconverged","namespace":"kubevirt-hyperconverged"},"spec":{"BareMetalPlatform":false}},{"apiVersion":"networkaddonsoperator.network.kubevirt.io/v1","kind":"NetworkAddonsConfig","metadata":{"name":"cluster"},"spec":{"imagePullPolicy":"IfNotPresent","kubeMacPool":{"rangeEnd":"FD:FF:FF:FF:FF:FF","rangeStart":"02:00:00:00:00:00"},"linuxBridge":{},"macvtap":{},"multus":{},"nmstate":{},"ovs":{}}},{"apiVersion":"kubevirt.io/v1alpha3","kind":"KubeVirt","metadata":{"name":"kubevirt","namespace":"kubevirt"},"spec":{"imagePullPolicy":"Always"}},{"apiVersion":"ssp.kubevirt.io/v1beta1","kind":"SSP","metadata":{"name":"ssp-sample","namespace":"kubevirt"},"spec":{"commonTemplates":{"namespace":"kubevirt"},"templateValidator":{"replicas":2}}},{"apiVersion":"cdi.kubevirt.io/v1beta1","kind":"CDI","metadata":{"name":"cdi","namespace":"cdi"},"spec":{"imagePullPolicy":"IfNotPresent"}},{"apiVersion":"nodemaintenance.kubevirt.io/v1beta1","kind":"NodeMaintenance","metadata":{"name":"nodemaintenance-example"},"spec":{"nodeName":"node02","reason":"Test node 
maintenance"}},{"apiVersion":"hostpathprovisioner.kubevirt.io/v1beta1","kind":"HostPathProvisioner","metadata":{"name":"hostpath-provisioner"},"spec":{"imagePullPolicy":"IfNotPresent","pathConfig":{"path":"/var/hpvolumes","useNamingPrefix":false}}},{"apiVersion":"v2v.kubevirt.io/v1beta1","kind":"VMImportConfig","metadata":{"name":"vm-import-operator-config"},"spec":{"imagePullPolicy":"IfNotPresent"}}]' capabilities: Full Lifecycle categories: OpenShift Optional certified: "false" @@ -76,7 +76,7 @@ metadata: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt). operatorframework.io/initialization-resource: '{"apiVersion":"hco.kubevirt.io/v1beta1","kind":"HyperConverged","metadata":{"annotations":{"deployOVS":"false"},"name":"kubevirt-hyperconverged","namespace":"kubevirt-hyperconverged"},"spec":{"BareMetalPlatform":false}}' operatorframework.io/suggested-namespace: kubevirt-hyperconverged - operators.operatorframework.io/internal-objects: '["v2vvmwares.v2v.kubevirt.io","ovirtproviders.v2v.kubevirt.io","networkaddonsconfigs.networkaddonsoperator.network.kubevirt.io","kubevirts.kubevirt.io","kubevirtcommontemplatesbundles.ssp.kubevirt.io","kubevirtmetricsaggregations.ssp.kubevirt.io","kubevirtnodelabellerbundles.ssp.kubevirt.io","kubevirttemplatevalidators.ssp.kubevirt.io","cdis.cdi.kubevirt.io","nodemaintenances.nodemaintenance.kubevirt.io","vmimportconfigs.v2v.kubevirt.io"]' + operators.operatorframework.io/internal-objects: '["v2vvmwares.v2v.kubevirt.io","ovirtproviders.v2v.kubevirt.io","networkaddonsconfigs.networkaddonsoperator.network.kubevirt.io","kubevirts.kubevirt.io","ssps.ssp.kubevirt.io","cdis.cdi.kubevirt.io","nodemaintenances.nodemaintenance.kubevirt.io","vmimportconfigs.v2v.kubevirt.io"]' repository: https://github.com/kubevirt/hyperconverged-cluster-operator support: "false" name: kubevirt-hyperconverged-operator.v1.3.0 @@ -146,26 +146,11 @@ spec: kind: KubeVirt name: kubevirts.kubevirt.io version: v1alpha3 - - description: Represents a deployment of the predefined VM templates - displayName: KubeVirt common templates - kind: KubevirtCommonTemplatesBundle - name: kubevirtcommontemplatesbundles.ssp.kubevirt.io - version: v1 - - description: Provide aggregation rules for core kubevirt metrics - displayName: KubeVirt Metric Aggregation - kind: KubevirtMetricsAggregation - name: kubevirtmetricsaggregations.ssp.kubevirt.io - version: v1 - - description: Represents a deployment of Node labeller component - displayName: KubeVirt Node labeller - kind: KubevirtNodeLabellerBundle - name: kubevirtnodelabellerbundles.ssp.kubevirt.io - version: v1 - - description: Represents a deployment of admission control webhook to validate the KubeVirt templates - displayName: KubeVirt Template Validator admission webhook - kind: KubevirtTemplateValidator - name: kubevirttemplatevalidators.ssp.kubevirt.io - version: v1 + - description: SSP is the Schema for the ssps API + displayName: SSP + kind: SSP + name: ssps.ssp.kubevirt.io + version: v1beta1 - description: Represents a CDI deployment displayName: CDI deployment kind: CDI @@ -1088,178 +1073,277 @@ spec: serviceAccountName: kubevirt-operator - rules: - apiGroups: - - kubevirt.io - - ssp.kubevirt.io - - template.openshift.io + - admissionregistration.k8s.io resources: - - '*' + - validatingwebhookconfigurations verbs: - create + - delete - get - list - patch - update - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - apiGroups: + - apps + resources: + - 
daemonsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + verbs: + - create - delete + - get + - list + - patch + - update + - watch - apiGroups: - cdi.kubevirt.io resources: - datavolumes - - datavolumes/source verbs: - create + - delete - get - list - patch - update - watch - - delete - apiGroups: - - monitoring.coreos.com + - cdi.kubevirt.io resources: - - prometheusrules + - datavolumes/source verbs: - create + - apiGroups: + - "" + resources: + - configmaps + - serviceaccounts + verbs: + - create + - delete - get - list - patch + - update - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - create - delete + - get + - list + - patch + - update + - watch - apiGroups: - - monitoring.coreos.com + - "" resources: - - servicemonitors + - nodes verbs: - get - - create + - patch + - update - apiGroups: - - rbac.authorization.k8s.io + - "" resources: - - clusterroles + - persistentvolumeclaims verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - - rbac.authorization.k8s.io + - "" resources: - - clusterrolebindings + - persistentvolumeclaims/status + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + - services verbs: - create + - delete - get - list + - patch + - update - watch + - apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: - - roles + - clusterrolebindings + - clusterroles verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: + - clusterroles - rolebindings + - roles verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - - extensions - - apps + - security.openshift.io resources: - - deployments - - deployments/finalizers - - replicasets - - daemonsets + - securitycontextconstraints verbs: - create - - update + - delete - get - list - patch + - update - watch - - delete - apiGroups: - - "" + - security.openshift.io + resourceNames: + - privileged resources: - - serviceaccounts - - configmaps - - persistentvolumeclaims - - services - - services/finalizers + - securitycontextconstraints + verbs: + - use + - apiGroups: + - ssp.kubevirt.io + resources: + - kubevirtcommontemplatesbundles verbs: - create - - update + - delete - get - - patch - list + - patch + - update - watch - - delete - apiGroups: - - "" + - ssp.kubevirt.io resources: - - nodes + - kubevirtmetricsaggregations verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - "" + - ssp.kubevirt.io resources: - - pods - - persistentvolumeclaims/status + - kubevirtnodelabellerbundles verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - - "" + - ssp.kubevirt.io resources: - - namespaces + - kubevirttemplatevalidators verbs: - create + - delete - get - list - patch + - update - watch - apiGroups: - - admissionregistration.k8s.io + - ssp.kubevirt.io resources: - - validatingwebhookconfigurations + - ssps verbs: - create + - delete - get - list - patch + - update - watch - apiGroups: - - security.openshift.io + - ssp.kubevirt.io resources: - - securitycontextconstraints + - ssps/finalizers + verbs: + - update + - apiGroups: + - ssp.kubevirt.io + resources: + - ssps/status verbs: - get - - list - - create - - watch - patch + - update 
- apiGroups: - - security.openshift.io - resourceNames: - - privileged + - template.openshift.io resources: - - securitycontextconstraints + - templates verbs: - - use - serviceAccountName: kubevirt-ssp-operator + - create + - delete + - get + - list + - patch + - update + - watch + serviceAccountName: ssp-operator - rules: - apiGroups: - rbac.authorization.k8s.io @@ -1747,7 +1831,7 @@ spec: - name: NETWORK_ADDONS_VERSION value: v0.44.0 - name: SSP_VERSION - value: v1.2.1 + value: v0.1.0-rc.1 - name: NMO_VERSION value: v0.7.0 - name: HPPO_VERSION @@ -1972,45 +2056,49 @@ spec: secret: optional: true secretName: kubevirt-operator-certs - - name: kubevirt-ssp-operator + - name: ssp-operator spec: replicas: 1 selector: matchLabels: - name: kubevirt-ssp-operator + control-plane: ssp-operator strategy: {} template: metadata: labels: - name: kubevirt-ssp-operator + control-plane: ssp-operator spec: containers: - - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_REFERENCE - value: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - - name: WATCH_NAMESPACE - - name: KVM_INFO_TAG - - name: VALIDATOR_TAG - - name: VIRT_LAUNCHER_TAG - - name: NODE_LABELLER_TAG - - name: CPU_PLUGIN_TAG - - name: IMAGE_NAME_PREFIX - - name: OPERATOR_NAME - value: kubevirt-ssp-operator + - args: + - --enable-leader-election + command: + - /manager + env: + - name: KVM_IMAGE + - name: VALIDATOR_IMAGE + - name: VIRT_LAUNCHER_IMAGE + - name: NODE_LABELLER_IMAGE + - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION - value: v1.2.1 - image: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - imagePullPolicy: IfNotPresent - name: kubevirt-ssp-operator + value: v0.1.0-rc.1 + image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + name: manager ports: - - containerPort: 60000 - name: metrics + - containerPort: 9443 + name: webhook-server + protocol: TCP resources: {} - serviceAccountName: kubevirt-ssp-operator + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + serviceAccountName: ssp-operator + terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2230,6 +2318,35 @@ spec: - patch - delete serviceAccountName: kubevirt-operator + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: ssp-operator - rules: - apiGroups: - rbac.authorization.k8s.io @@ -2373,8 +2490,6 @@ spec: name: kubemacpool - image: quay.io/nmstate/kubernetes-nmstate-handler@sha256:d5155de6aa6cf97c2feb5f4171384e63ce957362a66d47ec14982cbabca5169c name: kubernetes-nmstate-handler - - image: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - name: kubevirt-ssp-operator-container - image: quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815 name: kubevirt-v2v-conversion - image: 
quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 @@ -2389,6 +2504,8 @@ spec: name: ovs-cni-marker - image: quay.io/kubevirt/ovs-cni-plugin@sha256:d43d34ed4b1bd0b107c2049d21e33f9f870c36e5bf6dc1d80ab567271735c8da name: ovs-cni-plugin + - image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + name: ssp-operator - image: docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21 name: virt-api - image: docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5 @@ -2458,6 +2575,24 @@ spec: timeoutSeconds: 30 type: MutatingAdmissionWebhook webhookPath: /mutate-ns-hco-kubevirt-io + - admissionReviewVersions: + - v1beta1 + deploymentName: ssp-operator + failurePolicy: Fail + generateName: vssp.kb.io + rules: + - apiGroups: + - ssp.kubevirt.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ssps + sideEffects: None + type: ValidatingAdmissionWebhook + webhookPath: /validate-ssp-kubevirt-io-v1beta1-ssp - admissionReviewVersions: - v1beta1 containerPort: 8443 diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml index aa1dd67dd8..f8bd4f2c14 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml @@ -2,50 +2,815 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kubevirtcommontemplatesbundles.ssp.kubevirt.io + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + name: ssps.ssp.kubevirt.io spec: group: ssp.kubevirt.io names: - kind: KubevirtCommonTemplatesBundle - listKind: KubevirtCommonTemplatesBundleList - plural: kubevirtcommontemplatesbundles - shortNames: - - kvct - singular: kubevirtcommontemplatesbundle + kind: SSP + listKind: SSPList + plural: ssps + singular: ssp scope: Namespaced versions: - - name: v1 + - name: v1beta1 schema: openAPIV3Schema: - description: KubevirtCommonTemplatesBundle defines the CommonTemplates CR + description: SSP is the Schema for the ssps API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec contains the configuration of Common Templates + description: SSPSpec defines the desired state of SSP properties: - version: - description: Defines the version of the operand - type: string + commonTemplates: + description: CommonTemplates is the configuration of the common templates operand + properties: + namespace: + description: Namespace is the k8s namespace where CommonTemplates should be installed + maxLength: 63 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - namespace + type: object + nodeLabeller: + description: NodeLabeller is configuration of the node-labeller operand + properties: + placement: + description: Placement describes the node scheduling configuration + properties: + affinity: + description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector' + type: object + tolerations: + description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + templateValidator: + description: TemplateValidator is configuration of the template validator operand + properties: + placement: + description: Placement describes the node scheduling configuration + properties: + affinity: + description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector' + type: object + tolerations: + description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. 
Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + default: 2 + description: Replicas is the number of replicas of the template validator pod + format: int32 + minimum: 0 + type: integer + type: object + required: + - commonTemplates type: object status: - description: Status holds the current status of Common Templates + description: SSPStatus defines the observed state of SSP properties: conditions: - description: Reported states of the controller + description: A list of current conditions of the resource items: - description: Condition represents the state of the operator's reconciliation - functionality. + description: Condition represents the state of the operator's reconciliation functionality. properties: lastHeartbeatTime: format: date-time @@ -60,50 +825,24 @@ spec: status: type: string type: - description: ConditionType is the state of the operator's reconciliation - functionality. + description: ConditionType is the state of the operator's reconciliation functionality. 
type: string required: - status - type type: object type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array observedVersion: - description: The version of the deployed operands + description: The observed version of the resource type: string operatorVersion: - description: The version of the deployed operator + description: The version of the resource as defined by the operator + type: string + phase: + description: Phase is the current phase of the deployment type: string targetVersion: - description: The desired version of the deployed operands + description: The desired version of the resource type: string type: object type: object @@ -111,4 +850,3 @@ spec: storage: true subresources: status: {} - preserveUnknownFields: false diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml deleted file mode 100644 index 329beb039c..0000000000 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml +++ /dev/null @@ -1,114 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirtmetricsaggregations.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtMetricsAggregation - listKind: KubevirtMetricsAggregationList - plural: kubevirtmetricsaggregations - shortNames: - - kvma - singular: kubevirtmetricsaggregation - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtMetricsAggregation defines the MetricsAggregation CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of MetricsAggregation - properties: - version: - description: Defines the version of the operand - type: string - type: object - status: - description: Status holds the current status of MetricsAggregation - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml deleted file mode 100644 index d9ab4670f3..0000000000 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml +++ /dev/null @@ -1,750 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirtnodelabellerbundles.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtNodeLabellerBundle - listKind: KubevirtNodeLabellerBundleList - plural: kubevirtnodelabellerbundles - shortNames: - - kvnl - singular: kubevirtnodelabellerbundle - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtNodeLabellerBundle defines the NodeLabeller CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of NodeLabeller - properties: - affinity: - description: Define the node affinity for NodeLabeller pods - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. 
- items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. 
- properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. 
Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: Define node selector labels for NodeLabeller pods - type: object - tolerations: - description: Define tolerations for NodeLabeller pods - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - version: - description: Defines the version of the NodeLabeller - type: string - type: object - status: - description: Status holds the current status of NodeLabeller - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml deleted file mode 100644 index 5215df1afb..0000000000 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml +++ /dev/null @@ -1,753 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirttemplatevalidators.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtTemplateValidator - listKind: KubevirtTemplateValidatorList - plural: kubevirttemplatevalidators - shortNames: - - kvtv - singular: kubevirttemplatevalidator - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtTemplateValidator defines the TemplateValidator CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of TemplateValidator - properties: - affinity: - description: Define the node affinity for TemplateValidator pods - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. 
- items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. 
- properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. 
Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: Define node selector labels for TemplateValidator - type: object - templateValidatorReplicas: - description: Defines the desired number of replicas for TemplateValidator - type: integer - tolerations: - description: Define tolerations for TemplateValidator - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - version: - description: Defines the version of TemplateValidator - type: string - type: object - status: - description: Status holds the current status of TemplateValidator - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality.
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index 7a46207ff7..aa72782cc3 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -3,7 +3,7 @@ apiVersion: operators.coreos.com/v1alpha1 kind: ClusterServiceVersion metadata: annotations: - alm-examples: '[{"apiVersion":"hco.kubevirt.io/v1beta1","kind":"HyperConverged","metadata":{"annotations":{"deployOVS":"false"},"name":"kubevirt-hyperconverged","namespace":"kubevirt-hyperconverged"},"spec":{"BareMetalPlatform":false}},{"apiVersion":"networkaddonsoperator.network.kubevirt.io/v1","kind":"NetworkAddonsConfig","metadata":{"name":"cluster"},"spec":{"imagePullPolicy":"IfNotPresent","kubeMacPool":{"rangeEnd":"FD:FF:FF:FF:FF:FF","rangeStart":"02:00:00:00:00:00"},"linuxBridge":{},"macvtap":{},"multus":{},"nmstate":{},"ovs":{}}},{"apiVersion":"kubevirt.io/v1alpha3","kind":"KubeVirt","metadata":{"name":"kubevirt","namespace":"kubevirt"},"spec":{"imagePullPolicy":"Always"}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtTemplateValidator","metadata":{"name":"kubevirt-template-validator","namespace":"kubevirt"},"spec":{"templateValidatorReplicas":2}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtCommonTemplatesBundle","metadata":{"name":"kubevirt-common-template-bundle","namespace":"kubevirt"}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtMetricsAggregation","metadata":{"name":"kubevirt-metrics-aggregation","namespace":"kubevirt"}},{"apiVersion":"ssp.kubevirt.io/v1","kind":"KubevirtNodeLabellerBundle","metadata":{"name":"kubevirt-node-labeller-bundle","namespace":"kubevirt"}},{"apiVersion":"cdi.kubevirt.io/v1beta1","kind":"CDI","metadata":{"name":"cdi","namespace":"cdi"},"spec":{"imagePullPolicy":"IfNotPresent"}},{"apiVersion":"nodemaintenance.kubevirt.io/v1beta1","kind":"NodeMaintenance","metadata":{"name":"nodemaintenance-example"},"spec":{"nodeName":"node02","reason":"Test node 
maintenance"}},{"apiVersion":"hostpathprovisioner.kubevirt.io/v1beta1","kind":"HostPathProvisioner","metadata":{"name":"hostpath-provisioner"},"spec":{"imagePullPolicy":"IfNotPresent","pathConfig":{"path":"/var/hpvolumes","useNamingPrefix":false}}},{"apiVersion":"v2v.kubevirt.io/v1beta1","kind":"VMImportConfig","metadata":{"name":"vm-import-operator-config"},"spec":{"imagePullPolicy":"IfNotPresent"}}]' + alm-examples: '[{"apiVersion":"hco.kubevirt.io/v1beta1","kind":"HyperConverged","metadata":{"annotations":{"deployOVS":"false"},"name":"kubevirt-hyperconverged","namespace":"kubevirt-hyperconverged"},"spec":{"BareMetalPlatform":false}},{"apiVersion":"networkaddonsoperator.network.kubevirt.io/v1","kind":"NetworkAddonsConfig","metadata":{"name":"cluster"},"spec":{"imagePullPolicy":"IfNotPresent","kubeMacPool":{"rangeEnd":"FD:FF:FF:FF:FF:FF","rangeStart":"02:00:00:00:00:00"},"linuxBridge":{},"macvtap":{},"multus":{},"nmstate":{},"ovs":{}}},{"apiVersion":"kubevirt.io/v1alpha3","kind":"KubeVirt","metadata":{"name":"kubevirt","namespace":"kubevirt"},"spec":{"imagePullPolicy":"Always"}},{"apiVersion":"ssp.kubevirt.io/v1beta1","kind":"SSP","metadata":{"name":"ssp-sample","namespace":"kubevirt"},"spec":{"commonTemplates":{"namespace":"kubevirt"},"templateValidator":{"replicas":2}}},{"apiVersion":"cdi.kubevirt.io/v1beta1","kind":"CDI","metadata":{"name":"cdi","namespace":"cdi"},"spec":{"imagePullPolicy":"IfNotPresent"}},{"apiVersion":"nodemaintenance.kubevirt.io/v1beta1","kind":"NodeMaintenance","metadata":{"name":"nodemaintenance-example"},"spec":{"nodeName":"node02","reason":"Test node maintenance"}},{"apiVersion":"hostpathprovisioner.kubevirt.io/v1beta1","kind":"HostPathProvisioner","metadata":{"name":"hostpath-provisioner"},"spec":{"imagePullPolicy":"IfNotPresent","pathConfig":{"path":"/var/hpvolumes","useNamingPrefix":false}}},{"apiVersion":"v2v.kubevirt.io/v1beta1","kind":"VMImportConfig","metadata":{"name":"vm-import-operator-config"},"spec":{"imagePullPolicy":"IfNotPresent"}}]' capabilities: Full Lifecycle categories: OpenShift Optional certified: "false" @@ -76,7 +76,7 @@ metadata: [Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0.txt). 
operatorframework.io/initialization-resource: '{"apiVersion":"hco.kubevirt.io/v1beta1","kind":"HyperConverged","metadata":{"annotations":{"deployOVS":"false"},"name":"kubevirt-hyperconverged","namespace":"kubevirt-hyperconverged"},"spec":{"BareMetalPlatform":false}}' operatorframework.io/suggested-namespace: kubevirt-hyperconverged - operators.operatorframework.io/internal-objects: '["v2vvmwares.v2v.kubevirt.io","ovirtproviders.v2v.kubevirt.io","networkaddonsconfigs.networkaddonsoperator.network.kubevirt.io","kubevirts.kubevirt.io","kubevirtcommontemplatesbundles.ssp.kubevirt.io","kubevirtmetricsaggregations.ssp.kubevirt.io","kubevirtnodelabellerbundles.ssp.kubevirt.io","kubevirttemplatevalidators.ssp.kubevirt.io","cdis.cdi.kubevirt.io","nodemaintenances.nodemaintenance.kubevirt.io","vmimportconfigs.v2v.kubevirt.io"]' + operators.operatorframework.io/internal-objects: '["v2vvmwares.v2v.kubevirt.io","ovirtproviders.v2v.kubevirt.io","networkaddonsconfigs.networkaddonsoperator.network.kubevirt.io","kubevirts.kubevirt.io","ssps.ssp.kubevirt.io","cdis.cdi.kubevirt.io","nodemaintenances.nodemaintenance.kubevirt.io","vmimportconfigs.v2v.kubevirt.io"]' repository: https://github.com/kubevirt/hyperconverged-cluster-operator support: "false" name: kubevirt-hyperconverged-operator.v1.3.0 @@ -146,26 +146,11 @@ spec: kind: KubeVirt name: kubevirts.kubevirt.io version: v1alpha3 - - description: Represents a deployment of the predefined VM templates - displayName: KubeVirt common templates - kind: KubevirtCommonTemplatesBundle - name: kubevirtcommontemplatesbundles.ssp.kubevirt.io - version: v1 - - description: Provide aggregation rules for core kubevirt metrics - displayName: KubeVirt Metric Aggregation - kind: KubevirtMetricsAggregation - name: kubevirtmetricsaggregations.ssp.kubevirt.io - version: v1 - - description: Represents a deployment of Node labeller component - displayName: KubeVirt Node labeller - kind: KubevirtNodeLabellerBundle - name: kubevirtnodelabellerbundles.ssp.kubevirt.io - version: v1 - - description: Represents a deployment of admission control webhook to validate the KubeVirt templates - displayName: KubeVirt Template Validator admission webhook - kind: KubevirtTemplateValidator - name: kubevirttemplatevalidators.ssp.kubevirt.io - version: v1 + - description: SSP is the Schema for the ssps API + displayName: SSP + kind: SSP + name: ssps.ssp.kubevirt.io + version: v1beta1 - description: Represents a CDI deployment displayName: CDI deployment kind: CDI @@ -1088,178 +1073,277 @@ spec: serviceAccountName: kubevirt-operator - rules: - apiGroups: - - kubevirt.io - - ssp.kubevirt.io - - template.openshift.io + - admissionregistration.k8s.io resources: - - '*' + - validatingwebhookconfigurations verbs: - create + - delete - get - list - patch - update - watch + - apiGroups: + - apiextensions.k8s.io + resources: + - customresourcedefinitions + verbs: + - list + - apiGroups: + - apps + resources: + - daemonsets + verbs: + - create + - delete + - get + - list + - patch + - update + - watch + - apiGroups: + - apps + resources: + - deployments + verbs: + - create - delete + - get + - list + - patch + - update + - watch - apiGroups: - cdi.kubevirt.io resources: - datavolumes - - datavolumes/source verbs: - create + - delete - get - list - patch - update - watch - - delete - apiGroups: - - monitoring.coreos.com + - cdi.kubevirt.io resources: - - prometheusrules + - datavolumes/source verbs: - create + - apiGroups: + - "" + resources: + - configmaps + - serviceaccounts + verbs: + - create + - 
delete - get - list - patch + - update - watch + - apiGroups: + - "" + resources: + - namespaces + verbs: + - create - delete + - get + - list + - patch + - update + - watch - apiGroups: - - monitoring.coreos.com + - "" resources: - - servicemonitors + - nodes verbs: - get - - create + - patch + - update - apiGroups: - - rbac.authorization.k8s.io + - "" resources: - - clusterroles + - persistentvolumeclaims verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - - rbac.authorization.k8s.io + - "" resources: - - clusterrolebindings + - persistentvolumeclaims/status + verbs: + - get + - list + - watch + - apiGroups: + - "" + resources: + - serviceaccounts + - services verbs: - create + - delete - get - list + - patch + - update - watch + - apiGroups: + - monitoring.coreos.com + resources: + - prometheusrules + verbs: + - create + - delete + - get + - list - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: - - roles + - clusterrolebindings + - clusterroles verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - rbac.authorization.k8s.io resources: + - clusterroles - rolebindings + - roles verbs: - create + - delete - get - list - - watch - patch + - update + - watch - apiGroups: - - extensions - - apps + - security.openshift.io resources: - - deployments - - deployments/finalizers - - replicasets - - daemonsets + - securitycontextconstraints verbs: - create - - update + - delete - get - list - patch + - update - watch - - delete - apiGroups: - - "" + - security.openshift.io + resourceNames: + - privileged resources: - - serviceaccounts - - configmaps - - persistentvolumeclaims - - services - - services/finalizers + - securitycontextconstraints + verbs: + - use + - apiGroups: + - ssp.kubevirt.io + resources: + - kubevirtcommontemplatesbundles verbs: - create - - update + - delete - get - - patch - list + - patch + - update - watch - - delete - apiGroups: - - "" + - ssp.kubevirt.io resources: - - nodes + - kubevirtmetricsaggregations verbs: + - create + - delete - get + - list - patch - update + - watch - apiGroups: - - "" + - ssp.kubevirt.io resources: - - pods - - persistentvolumeclaims/status + - kubevirtnodelabellerbundles verbs: + - create + - delete - get - list + - patch + - update - watch - apiGroups: - - "" + - ssp.kubevirt.io resources: - - namespaces + - kubevirttemplatevalidators verbs: - create + - delete - get - list - patch + - update - watch - apiGroups: - - admissionregistration.k8s.io + - ssp.kubevirt.io resources: - - validatingwebhookconfigurations + - ssps verbs: - create + - delete - get - list - patch + - update - watch - apiGroups: - - security.openshift.io + - ssp.kubevirt.io resources: - - securitycontextconstraints + - ssps/finalizers + verbs: + - update + - apiGroups: + - ssp.kubevirt.io + resources: + - ssps/status verbs: - get - - list - - create - - watch - patch + - update - apiGroups: - - security.openshift.io - resourceNames: - - privileged + - template.openshift.io resources: - - securitycontextconstraints + - templates verbs: - - use - serviceAccountName: kubevirt-ssp-operator + - create + - delete + - get + - list + - patch + - update + - watch + serviceAccountName: ssp-operator - rules: - apiGroups: - rbac.authorization.k8s.io @@ -1747,7 +1831,7 @@ spec: - name: NETWORK_ADDONS_VERSION value: v0.44.0 - name: SSP_VERSION - value: v1.2.1 + value: v0.1.0-rc.1 - name: NMO_VERSION value: v0.7.0 - name: HPPO_VERSION @@ -1972,45 +2056,49 @@ spec: secret: 
optional: true secretName: kubevirt-operator-certs - - name: kubevirt-ssp-operator + - name: ssp-operator spec: replicas: 1 selector: matchLabels: - name: kubevirt-ssp-operator + control-plane: ssp-operator strategy: {} template: metadata: labels: - name: kubevirt-ssp-operator + control-plane: ssp-operator spec: containers: - - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_REFERENCE - value: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - - name: WATCH_NAMESPACE - - name: KVM_INFO_TAG - - name: VALIDATOR_TAG - - name: VIRT_LAUNCHER_TAG - - name: NODE_LABELLER_TAG - - name: CPU_PLUGIN_TAG - - name: IMAGE_NAME_PREFIX - - name: OPERATOR_NAME - value: kubevirt-ssp-operator + - args: + - --enable-leader-election + command: + - /manager + env: + - name: KVM_IMAGE + - name: VALIDATOR_IMAGE + - name: VIRT_LAUNCHER_IMAGE + - name: NODE_LABELLER_IMAGE + - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION - value: v1.2.1 - image: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - imagePullPolicy: IfNotPresent - name: kubevirt-ssp-operator + value: v0.1.0-rc.1 + image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + name: manager ports: - - containerPort: 60000 - name: metrics + - containerPort: 9443 + name: webhook-server + protocol: TCP resources: {} - serviceAccountName: kubevirt-ssp-operator + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + serviceAccountName: ssp-operator + terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2230,6 +2318,35 @@ spec: - patch - delete serviceAccountName: kubevirt-operator + - rules: + - apiGroups: + - "" + resources: + - configmaps + verbs: + - get + - list + - watch + - create + - update + - patch + - delete + - apiGroups: + - "" + resources: + - configmaps/status + verbs: + - get + - update + - patch + - apiGroups: + - "" + resources: + - events + verbs: + - create + - patch + serviceAccountName: ssp-operator - rules: - apiGroups: - rbac.authorization.k8s.io @@ -2373,8 +2490,6 @@ spec: name: kubemacpool - image: quay.io/nmstate/kubernetes-nmstate-handler@sha256:d5155de6aa6cf97c2feb5f4171384e63ce957362a66d47ec14982cbabca5169c name: kubernetes-nmstate-handler - - image: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - name: kubevirt-ssp-operator-container - image: quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815 name: kubevirt-v2v-conversion - image: quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 @@ -2389,6 +2504,8 @@ spec: name: ovs-cni-marker - image: quay.io/kubevirt/ovs-cni-plugin@sha256:d43d34ed4b1bd0b107c2049d21e33f9f870c36e5bf6dc1d80ab567271735c8da name: ovs-cni-plugin + - image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + name: ssp-operator - image: docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21 name: virt-api - image: docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5 @@ -2458,6 +2575,24 @@ 
spec: timeoutSeconds: 30 type: MutatingAdmissionWebhook webhookPath: /mutate-ns-hco-kubevirt-io + - admissionReviewVersions: + - v1beta1 + deploymentName: ssp-operator + failurePolicy: Fail + generateName: vssp.kb.io + rules: + - apiGroups: + - ssp.kubevirt.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ssps + sideEffects: None + type: ValidatingAdmissionWebhook + webhookPath: /validate-ssp-kubevirt-io-v1beta1-ssp - admissionReviewVersions: - v1beta1 containerPort: 8443 diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml index aa1dd67dd8..f8bd4f2c14 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance00.crd.yaml @@ -2,50 +2,815 @@ apiVersion: apiextensions.k8s.io/v1 kind: CustomResourceDefinition metadata: - name: kubevirtcommontemplatesbundles.ssp.kubevirt.io + annotations: + controller-gen.kubebuilder.io/version: v0.4.0 + name: ssps.ssp.kubevirt.io spec: group: ssp.kubevirt.io names: - kind: KubevirtCommonTemplatesBundle - listKind: KubevirtCommonTemplatesBundleList - plural: kubevirtcommontemplatesbundles - shortNames: - - kvct - singular: kubevirtcommontemplatesbundle + kind: SSP + listKind: SSPList + plural: ssps + singular: ssp scope: Namespaced versions: - - name: v1 + - name: v1beta1 schema: openAPIV3Schema: - description: KubevirtCommonTemplatesBundle defines the CommonTemplates CR + description: SSP is the Schema for the ssps API properties: apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' type: string kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' type: string metadata: type: object spec: - description: Spec contains the configuration of Common Templates + description: SSPSpec defines the desired state of SSP properties: - version: - description: Defines the version of the operand - type: string + commonTemplates: + description: CommonTemplates is the configuration of the common templates operand + properties: + namespace: + description: Namespace is the k8s namespace where CommonTemplates should be installed + maxLength: 63 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?$ + type: string + required: + - namespace + type: object + nodeLabeller: + description: NodeLabeller is configuration of the node-labeller operand + properties: + placement: + description: Placement describes the node scheduling configuration + properties: + affinity: + description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. 
+ properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. 
+ format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector' + type: object + tolerations: + description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. 
If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + type: object + templateValidator: + description: TemplateValidator is configuration of the template validator operand + properties: + placement: + description: Placement describes the node scheduling configuration + properties: + affinity: + description: affinity enables pod affinity/anti-affinity placement expanding the types of constraints that can be expressed with nodeSelector. affinity is going to be applied to the relevant kind of pods in parallel with nodeSelector See https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#affinity-and-anti-affinity + properties: + nodeAffinity: + description: Describes node affinity scheduling rules for the pod. + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. + items: + description: An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). + properties: + preference: + description: A node selector term, associated with the corresponding weight. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. 
+ items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + weight: + description: Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + format: int32 + type: integer + required: + - preference + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. + properties: + nodeSelectorTerms: + description: Required. A list of node selector terms. The terms are ORed. + items: + description: A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. + properties: + matchExpressions: + description: A list of node selector requirements by node's labels. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchFields: + description: A list of node selector requirements by node's fields. + items: + description: A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: The label key that the selector applies to. + type: string + operator: + description: Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. + type: string + values: + description: An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + type: object + type: array + required: + - nodeSelectorTerms + type: object + type: object + podAffinity: + description: Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. 
+ type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + podAntiAffinity: + description: Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
+ properties: + preferredDuringSchedulingIgnoredDuringExecution: + description: The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. + items: + description: The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) + properties: + podAffinityTerm: + description: Required. A pod affinity term, associated with the corresponding weight. + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + weight: + description: weight associated with matching the corresponding podAffinityTerm, in the range 1-100. + format: int32 + type: integer + required: + - podAffinityTerm + - weight + type: object + type: array + requiredDuringSchedulingIgnoredDuringExecution: + description: If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. + items: + description: Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running + properties: + labelSelector: + description: A label query over a set of resources, in this case pods. + properties: + matchExpressions: + description: matchExpressions is a list of label selector requirements. The requirements are ANDed. + items: + description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values. + properties: + key: + description: key is the label key that the selector applies to. + type: string + operator: + description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist. + type: string + values: + description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch. + items: + type: string + type: array + required: + - key + - operator + type: object + type: array + matchLabels: + additionalProperties: + type: string + description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed. + type: object + type: object + namespaces: + description: namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace" + items: + type: string + type: array + topologyKey: + description: This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed. + type: string + required: + - topologyKey + type: object + type: array + type: object + type: object + nodeSelector: + additionalProperties: + type: string + description: 'nodeSelector is the node selector applied to the relevant kind of pods It specifies a map of key-value pairs: for the pod to be eligible to run on a node, the node must have each of the indicated key-value pairs as labels (it can have additional labels as well). See https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector' + type: object + tolerations: + description: tolerations is a list of tolerations applied to the relevant kind of pods See https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ for more info. These are additional tolerations other than default ones. + items: + description: The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . + properties: + effect: + description: Effect indicates the taint effect to match. 
Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. + type: string + key: + description: Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys. + type: string + operator: + description: Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category. + type: string + tolerationSeconds: + description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. + format: int64 + type: integer + value: + description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. + type: string + type: object + type: array + type: object + replicas: + default: 2 + description: Replicas is the number of replicas of the template validator pod + format: int32 + minimum: 0 + type: integer + type: object + required: + - commonTemplates type: object status: - description: Status holds the current status of Common Templates + description: SSPStatus defines the observed state of SSP properties: conditions: - description: Reported states of the controller + description: A list of current conditions of the resource items: - description: Condition represents the state of the operator's reconciliation - functionality. + description: Condition represents the state of the operator's reconciliation functionality. properties: lastHeartbeatTime: format: date-time @@ -60,50 +825,24 @@ spec: status: type: string type: - description: ConditionType is the state of the operator's reconciliation - functionality. + description: ConditionType is the state of the operator's reconciliation functionality. 
type: string required: - status - type type: object type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array observedVersion: - description: The version of the deployed operands + description: The observed version of the resource type: string operatorVersion: - description: The version of the deployed operator + description: The version of the resource as defined by the operator + type: string + phase: + description: Phase is the current phase of the deployment type: string targetVersion: - description: The desired version of the deployed operands + description: The desired version of the resource type: string type: object type: object @@ -111,4 +850,3 @@ spec: storage: true subresources: status: {} - preserveUnknownFields: false diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml deleted file mode 100644 index 329beb039c..0000000000 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance01.crd.yaml +++ /dev/null @@ -1,114 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirtmetricsaggregations.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtMetricsAggregation - listKind: KubevirtMetricsAggregationList - plural: kubevirtmetricsaggregations - shortNames: - - kvma - singular: kubevirtmetricsaggregation - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtMetricsAggregation defines the MetricsAggregation CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of MetricsAggregation - properties: - version: - description: Defines the version of the operand - type: string - type: object - status: - description: Status holds the current status of MetricsAggregation - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml deleted file mode 100644 index d9ab4670f3..0000000000 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance02.crd.yaml +++ /dev/null @@ -1,750 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirtnodelabellerbundles.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtNodeLabellerBundle - listKind: KubevirtNodeLabellerBundleList - plural: kubevirtnodelabellerbundles - shortNames: - - kvnl - singular: kubevirtnodelabellerbundle - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtNodeLabellerBundle defines the NodeLabeller CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of NodeLabeller - properties: - affinity: - description: Define the node affinity for NodeLabeller pods - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. 
- items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. 
- properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. 
Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: Define node selector labels for NodeLabeller pods - type: object - tolerations: - description: Define tolerations for NodeLabeller pods - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - version: - description: Defines the version of the NodeLabeller - type: string - type: object - status: - description: Status holds the current status of NodeLabeller - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml deleted file mode 100644 index 5215df1afb..0000000000 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/scheduling-scale-performance03.crd.yaml +++ /dev/null @@ -1,753 +0,0 @@ ---- -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: kubevirttemplatevalidators.ssp.kubevirt.io -spec: - group: ssp.kubevirt.io - names: - kind: KubevirtTemplateValidator - listKind: KubevirtTemplateValidatorList - plural: kubevirttemplatevalidators - shortNames: - - kvtv - singular: kubevirttemplatevalidator - scope: Namespaced - versions: - - name: v1 - schema: - openAPIV3Schema: - description: KubevirtTemplateValidator defines the TemplateValidator CR - properties: - apiVersion: - description: 'APIVersion defines the versioned schema of this representation - of an object. Servers should convert recognized schemas to the latest - internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' - type: string - kind: - description: 'Kind is a string value representing the REST resource this - object represents. Servers may infer this from the endpoint the client - submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' - type: string - metadata: - type: object - spec: - description: Spec contains the configuration of TemplateValidator - properties: - affinity: - description: Define the node affinity for TemplateValidator pods - properties: - nodeAffinity: - description: Describes node affinity scheduling rules for the - pod. - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node matches - the corresponding matchExpressions; the node(s) with the - highest sum are the most preferred. 
- items: - description: An empty preferred scheduling term matches - all objects with implicit weight 0 (i.e. it's a no-op). - A null preferred scheduling term matches no objects (i.e. - is also a no-op). - properties: - preference: - description: A node selector term, associated with the - corresponding weight. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - weight: - description: Weight associated with matching the corresponding - nodeSelectorTerm, in the range 1-100. - format: int32 - type: integer - required: - - preference - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to an update), the system may or may not try to - eventually evict the pod from its node. - properties: - nodeSelectorTerms: - description: Required. A list of node selector terms. - The terms are ORed. - items: - description: A null or empty node selector term matches - no objects. The requirements of them are ANDed. The - TopologySelectorTerm type implements a subset of the - NodeSelectorTerm. - properties: - matchExpressions: - description: A list of node selector requirements - by node's labels. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. 
- properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchFields: - description: A list of node selector requirements - by node's fields. - items: - description: A node selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: The label key that the selector - applies to. - type: string - operator: - description: Represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists, DoesNotExist. Gt, and - Lt. - type: string - values: - description: An array of string values. If - the operator is In or NotIn, the values - array must be non-empty. If the operator - is Exists or DoesNotExist, the values array - must be empty. If the operator is Gt or - Lt, the values array must have a single - element, which will be interpreted as an - integer. This array is replaced during a - strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - type: object - type: array - required: - - nodeSelectorTerms - type: object - type: object - podAffinity: - description: Describes pod affinity scheduling rules (e.g. co-locate - this pod in the same node, zone, etc. as some other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the affinity expressions specified by - this field, but it may choose a node that violates one or - more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. 
Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the affinity requirements specified by this - field are not met at scheduling time, the pod will not be - scheduled onto the node. If the affinity requirements specified - by this field cease to be met at some point during pod execution - (e.g. due to a pod label update), the system may or may - not try to eventually evict the pod from its node. When - there are multiple elements, the lists of nodes corresponding - to each podAffinityTerm are intersected, i.e. all terms - must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. 
- items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - podAntiAffinity: - description: Describes pod anti-affinity scheduling rules (e.g. - avoid putting this pod in the same node, zone, etc. as some - other pod(s)). - properties: - preferredDuringSchedulingIgnoredDuringExecution: - description: The scheduler will prefer to schedule pods to - nodes that satisfy the anti-affinity expressions specified - by this field, but it may choose a node that violates one - or more of the expressions. The node that is most preferred - is the one with the greatest sum of weights, i.e. for each - node that meets all of the scheduling requirements (resource - request, requiredDuringScheduling anti-affinity expressions, - etc.), compute a sum by iterating through the elements of - this field and adding "weight" to the sum if the node has - pods which matches the corresponding podAffinityTerm; the - node(s) with the highest sum are the most preferred. - items: - description: The weights of all of the matched WeightedPodAffinityTerm - fields are added per-node to find the most preferred node(s) - properties: - podAffinityTerm: - description: Required. A pod affinity term, associated - with the corresponding weight. - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are - ANDed. - items: - description: A label selector requirement - is a selector that contains values, a key, - and an operator that relates the key and - values. - properties: - key: - description: key is the label key that - the selector applies to. - type: string - operator: - description: operator represents a key's - relationship to a set of values. Valid - operators are In, NotIn, Exists and - DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. - If the operator is Exists or DoesNotExist, - the values array must be empty. This - array is replaced during a strategic - merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. 
A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is - "In", and the values array contains only "value". - The requirements are ANDed. - type: object - type: object - namespaces: - description: namespaces specifies which namespaces - the labelSelector applies to (matches against); - null or empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods - matching the labelSelector in the specified namespaces, - where co-located is defined as running on a node - whose value of the label with key topologyKey - matches that of any node on which any of the selected - pods is running. Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - weight: - description: weight associated with matching the corresponding - podAffinityTerm, in the range 1-100. - format: int32 - type: integer - required: - - podAffinityTerm - - weight - type: object - type: array - requiredDuringSchedulingIgnoredDuringExecution: - description: If the anti-affinity requirements specified by - this field are not met at scheduling time, the pod will - not be scheduled onto the node. If the anti-affinity requirements - specified by this field cease to be met at some point during - pod execution (e.g. due to a pod label update), the system - may or may not try to eventually evict the pod from its - node. When there are multiple elements, the lists of nodes - corresponding to each podAffinityTerm are intersected, i.e. - all terms must be satisfied. - items: - description: Defines a set of pods (namely those matching - the labelSelector relative to the given namespace(s)) - that this pod should be co-located (affinity) or not co-located - (anti-affinity) with, where co-located is defined as running - on a node whose value of the label with key - matches that of any node on which a pod of the set of - pods is running - properties: - labelSelector: - description: A label query over a set of resources, - in this case pods. - properties: - matchExpressions: - description: matchExpressions is a list of label - selector requirements. The requirements are ANDed. - items: - description: A label selector requirement is a - selector that contains values, a key, and an - operator that relates the key and values. - properties: - key: - description: key is the label key that the - selector applies to. - type: string - operator: - description: operator represents a key's relationship - to a set of values. Valid operators are - In, NotIn, Exists and DoesNotExist. - type: string - values: - description: values is an array of string - values. If the operator is In or NotIn, - the values array must be non-empty. If the - operator is Exists or DoesNotExist, the - values array must be empty. This array is - replaced during a strategic merge patch. - items: - type: string - type: array - required: - - key - - operator - type: object - type: array - matchLabels: - additionalProperties: - type: string - description: matchLabels is a map of {key,value} - pairs. A single {key,value} in the matchLabels - map is equivalent to an element of matchExpressions, - whose key field is "key", the operator is "In", - and the values array contains only "value". The - requirements are ANDed. 
- type: object - type: object - namespaces: - description: namespaces specifies which namespaces the - labelSelector applies to (matches against); null or - empty list means "this pod's namespace" - items: - type: string - type: array - topologyKey: - description: This pod should be co-located (affinity) - or not co-located (anti-affinity) with the pods matching - the labelSelector in the specified namespaces, where - co-located is defined as running on a node whose value - of the label with key topologyKey matches that of - any node on which any of the selected pods is running. - Empty topologyKey is not allowed. - type: string - required: - - topologyKey - type: object - type: array - type: object - type: object - nodeSelector: - additionalProperties: - type: string - description: Define node selector labels for TemplateValidator - type: object - templateValidatorReplicas: - description: Defines the desired number of replicas for TemplateValidator - type: integer - tolerations: - description: Define tolerations for TemplateValidator - items: - description: The pod this Toleration is attached to tolerates any - taint that matches the triple using the matching - operator . - properties: - effect: - description: Effect indicates the taint effect to match. Empty - means match all taint effects. When specified, allowed values - are NoSchedule, PreferNoSchedule and NoExecute. - type: string - key: - description: Key is the taint key that the toleration applies - to. Empty means match all taint keys. If the key is empty, - operator must be Exists; this combination means to match all - values and all keys. - type: string - operator: - description: Operator represents a key's relationship to the - value. Valid operators are Exists and Equal. Defaults to Equal. - Exists is equivalent to wildcard for value, so that a pod - can tolerate all taints of a particular category. - type: string - tolerationSeconds: - description: TolerationSeconds represents the period of time - the toleration (which must be of effect NoExecute, otherwise - this field is ignored) tolerates the taint. By default, it - is not set, which means tolerate the taint forever (do not - evict). Zero and negative values will be treated as 0 (evict - immediately) by the system. - format: int64 - type: integer - value: - description: Value is the taint value the toleration matches - to. If the operator is Exists, the value should be empty, - otherwise just a regular string. - type: string - type: object - type: array - version: - description: Defines the version of TemplateValidaotr - type: string - type: object - status: - description: Status holds the current status of TemplateValidator - properties: - conditions: - description: Reported states of the controller - items: - description: Condition represents the state of the operator's reconciliation - functionality. - properties: - lastHeartbeatTime: - format: date-time - type: string - lastTransitionTime: - format: date-time - type: string - message: - type: string - reason: - type: string - status: - type: string - type: - description: ConditionType is the state of the operator's reconciliation - functionality. 
- type: string - required: - - status - - type - type: object - type: array - containers: - description: Containers used in the current deployment - items: - description: Defines a container - properties: - image: - description: Image path - type: string - name: - description: Container name - type: string - namespace: - description: Container namespace - type: string - parentKind: - description: Parent kind - type: string - parentName: - description: Parent image - type: string - required: - - image - - name - - namespace - - parentKind - - parentName - type: object - type: array - observedVersion: - description: The version of the deployed operands - type: string - operatorVersion: - description: The version of the deployed operator - type: string - targetVersion: - description: The desired version of the deployed operands - type: string - type: object - type: object - served: true - storage: true - subresources: - status: {} - preserveUnknownFields: false diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 8f09051b69..20d0689d6b 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -56,7 +56,7 @@ spec: - name: NETWORK_ADDONS_VERSION value: v0.44.0 - name: SSP_VERSION - value: v1.2.1 + value: v0.1.0-rc.1 - name: NMO_VERSION value: v0.7.0 - name: HPPO_VERSION @@ -306,46 +306,50 @@ apiVersion: apps/v1 kind: Deployment metadata: labels: - name: kubevirt-ssp-operator - name: kubevirt-ssp-operator + name: ssp-operator + name: ssp-operator spec: replicas: 1 selector: matchLabels: - name: kubevirt-ssp-operator + control-plane: ssp-operator strategy: {} template: metadata: labels: - name: kubevirt-ssp-operator + control-plane: ssp-operator spec: containers: - - env: - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: IMAGE_REFERENCE - value: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - - name: WATCH_NAMESPACE - - name: KVM_INFO_TAG - - name: VALIDATOR_TAG - - name: VIRT_LAUNCHER_TAG - - name: NODE_LABELLER_TAG - - name: CPU_PLUGIN_TAG - - name: IMAGE_NAME_PREFIX - - name: OPERATOR_NAME - value: kubevirt-ssp-operator + - args: + - --enable-leader-election + command: + - /manager + env: + - name: KVM_IMAGE + - name: VALIDATOR_IMAGE + - name: VIRT_LAUNCHER_IMAGE + - name: NODE_LABELLER_IMAGE + - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION - value: v1.2.1 - image: quay.io/fromani/kubevirt-ssp-operator-container@sha256:13ecfd8bc5779721378cfed69109bcc99392b0dcd589ddd600eb2648de9fce8c - imagePullPolicy: IfNotPresent - name: kubevirt-ssp-operator + value: v0.1.0-rc.1 + image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + name: manager ports: - - containerPort: 60000 - name: metrics + - containerPort: 9443 + name: webhook-server + protocol: TCP resources: {} - serviceAccountName: kubevirt-ssp-operator + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true + serviceAccountName: ssp-operator + terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: ssp-webhook-server-cert --- apiVersion: apps/v1 kind: Deployment diff --git a/deploy/service_account.yaml b/deploy/service_account.yaml index c556f26206..cb2e89b9f3 100644 --- a/deploy/service_account.yaml +++ b/deploy/service_account.yaml @@ -43,16 +43,16 @@ apiVersion: v1 kind: ServiceAccount metadata: labels: - name: kubevirt-ssp-operator - name: kubevirt-ssp-operator + name: 
node-maintenance-operator + name: node-maintenance-operator namespace: kubevirt-hyperconverged --- apiVersion: v1 kind: ServiceAccount metadata: labels: - name: node-maintenance-operator - name: node-maintenance-operator + name: ssp-operator + name: ssp-operator namespace: kubevirt-hyperconverged --- apiVersion: v1 diff --git a/hack/build-manifests.sh b/hack/build-manifests.sh index 37142fb44a..f3afe77af3 100755 --- a/hack/build-manifests.sh +++ b/hack/build-manifests.sh @@ -52,6 +52,7 @@ CRD_DIR="${DEPLOY_DIR}/crds" OLM_DIR="${DEPLOY_DIR}/olm-catalog" CSV_DIR="${OLM_DIR}/kubevirt-hyperconverged/${CSV_VERSION}" DEFAULT_CSV_GENERATOR="/usr/bin/csv-generator" +SSP_CSV_GENERATOR="/csv-generator" INDEX_IMAGE_DIR=${DEPLOY_DIR}/index-image @@ -154,7 +155,7 @@ function create_ssp_csv() { --operator-version=${SSP_VERSION} \ " - gen_csv ${DEFAULT_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs} + gen_csv ${SSP_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs} echo "${operatorName}" } diff --git a/hack/config b/hack/config index a7424b32c1..13e744a4c6 100644 --- a/hack/config +++ b/hack/config @@ -3,7 +3,7 @@ KUBEVIRT_VERSION="v0.36.0" CDI_VERSION="v1.28.0" NETWORK_ADDONS_VERSION="v0.44.0" -SSP_VERSION="v1.2.1" +SSP_VERSION="v0.1.0-rc.1" NMO_VERSION="v0.7.0" HPPO_VERSION="v0.7.0" HPP_VERSION="v0.7.0" From 329158a08441183e3409135fd2d02b0864d6be76 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Tue, 15 Dec 2020 14:04:55 +0200 Subject: [PATCH 07/19] Additional automation updates Signed-off-by: Zvi Cahana --- README.md | 2 +- automation/release-bumper/release-bumper.sh | 2 +- hack/clean.sh | 3 +-- hack/common.sh | 5 ++--- hack/defaults | 8 +++----- hack/deploy.sh | 2 +- hack/operator-test.sh | 2 +- 7 files changed, 10 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 3d71070c25..b35dd50bff 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ A unified operator deploying and controlling [KubeVirt](https://github.com/kubevirt/kubevirt) and several adjacent operators: - [Containerized Data Importer](https://github.com/kubevirt/containerized-data-importer) -- [Scheduling, Scale and Performance](https://github.com/kubevirt/kubevirt-ssp-operator) +- [Scheduling, Scale and Performance](https://github.com/kubevirt/ssp-operator) - [Cluster Network Addons](https://github.com/kubevirt/cluster-network-addons-operator) - [Node Maintenance](https://github.com/kubevirt/node-maintenance-operator) diff --git a/automation/release-bumper/release-bumper.sh b/automation/release-bumper/release-bumper.sh index 3231bb3fe1..a3a8fc25f0 100755 --- a/automation/release-bumper/release-bumper.sh +++ b/automation/release-bumper/release-bumper.sh @@ -73,7 +73,7 @@ function get_updated_versions { ["KUBEVIRT"]="kubevirt/kubevirt" ["CDI"]="kubevirt/containerized-data-importer" ["NETWORK_ADDONS"]="kubevirt/cluster-network-addons-operator" - ["SSP"]="kubevirt/kubevirt-ssp-operator" + ["SSP"]="kubevirt/ssp-operator" ["NMO"]="kubevirt/node-maintenance-operator" ["HPPO"]="kubevirt/hostpath-provisioner-operator" ["HPP"]="kubevirt/hostpath-provisioner" diff --git a/hack/clean.sh b/hack/clean.sh index 3056850533..9c07d819eb 100755 --- a/hack/clean.sh +++ b/hack/clean.sh @@ -41,8 +41,7 @@ source hack/common.sh "${CMD}" delete ns cluster-network-addons-operator --ignore-not-found || true # Delete ssp-operator -"${CMD}" delete -f "${SSP_URL_PREFIX}"/kubevirt-ssp-operator-crd.yaml --ignore-not-found || true -"${CMD}" delete -f 
"${SSP_URL_PREFIX}"/kubevirt-ssp-operator.yaml --ignore-not-found || true +"${CMD}" delete -f "${SSP_OPERATOR_URL}" --ignore-not-found || true # Delete vm-import-operator "${CMD}" delete -f "${VM_IMPORT_URL_PREFIX}"/operator.yaml --ignore-not-found || true diff --git a/hack/common.sh b/hack/common.sh index f487aead75..efe57e1040 100644 --- a/hack/common.sh +++ b/hack/common.sh @@ -24,7 +24,9 @@ source cluster/kubevirtci.sh CDI_OPERATOR_URL="https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator.yaml" KUBEVIRT_OPERATOR_URL="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-operator.yaml" +SSP_OPERATOR_URL="https://github.com/kubevirt/ssp-operator/releases/download/${SSP_VERSION}/ssp-operator.yaml" CNA_URL_PREFIX="https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${NETWORK_ADDONS_VERSION}" +VM_IMPORT_URL_PREFIX="https://github.com/kubevirt/vm-import-operator/releases/download/${VM_IMPORT_VERSION}" mem_size=${KUBEVIRT_MEMORY_SIZE:-5120M} num_nodes=${KUBEVIRT_NUM_NODES:-1} @@ -37,9 +39,6 @@ TEST_PATH="tests/func-tests" TEST_OUT_PATH=${TEST_PATH}/_out JOB_TYPE=${JOB_TYPE:-} -SSP_URL_PREFIX="https://github.com/kubevirt/kubevirt-ssp-operator/releases/download/${SSP_VERSION}" -VM_IMPORT_URL_PREFIX="https://github.com/kubevirt/vm-import-operator/releases/download/${VM_IMPORT_VERSION}" - KUBECTL=$(which kubectl 2> /dev/null) || true if [ -z "${CMD}" ]; then diff --git a/hack/defaults b/hack/defaults index 0ba2dcb757..7ad7886527 100644 --- a/hack/defaults +++ b/hack/defaults @@ -9,7 +9,7 @@ WAIT_TIMEOUT="${WAIT_TIMEOUT:-450s}" CDI_CONTAINER_REGISTRY="${CDI_CONTAINER_REGISTRY:-docker.io/kubevirt}" KUBEVIRT_CONTAINER_REGISTRY="${KUBEVIRT_CONTAINER_REGISTRY:-docker.io/kubevirt}" NETWORK_ADDONS_CONTAINER_REGISTRY="${NETWORK_ADDONS_CONTAINER_REGISTRY:-quay.io/kubevirt}" -SSP_CONTAINER_REGISTRY="${SSP_CONTAINER_REGISTRY:-quay.io/fromani}" +SSP_CONTAINER_REGISTRY="${SSP_CONTAINER_REGISTRY:-quay.io/kubevirt}" CDI_OPERATOR_NAME="${CDI_OPERATOR_NAME:-cdi-operator}" @@ -84,7 +84,7 @@ function network_addons_sed { } function ssp_sed { - sed -i "s| image: \&image quay\.io\/fromani\/kubevirt-ssp-operator-container:${SSP_VERSION}| image: \&image ${SSP_CONTAINER_REGISTRY}\/kubevirt-ssp-operator-container:${SSP_VERSION}|g" ${TEMP_DIR}/operator.yaml + sed -i "s| image: \&image quay\.io\/kubevirt\/ssp-operator:${SSP_VERSION}| image: \&image ${SSP_CONTAINER_REGISTRY}\/ssp-operator:${SSP_VERSION}|g" ${TEMP_DIR}/operator.yaml } function vm_import_sed { @@ -97,12 +97,10 @@ https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_ https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${NETWORK_ADDONS_VERSION}/network-addons-config.crd.yaml https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${NETWORK_ADDONS_VERSION}/namespace.yaml https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${NETWORK_ADDONS_VERSION}/operator.yaml -https://github.com/kubevirt/kubevirt-ssp-operator/releases/download/${SSP_VERSION}/kubevirt-ssp-operator-crd.yaml -https://github.com/kubevirt/kubevirt-ssp-operator/releases/download/${SSP_VERSION}/kubevirt-ssp-operator.yaml +https://github.com/kubevirt/ssp-operator/releases/download/${SSP_VERSION}/ssp-operator.yaml https://github.com/kubevirt/vm-import-operator/releases/download/${VM_IMPORT_VERSION}/operator.yaml" 
OPERATOR_CRS="https://github.com/kubevirt/kubevirt/releases/download/${KUBEVIRT_VERSION}/kubevirt-cr.yaml https://github.com/kubevirt/containerized-data-importer/releases/download/${CDI_VERSION}/cdi-operator-cr.yaml https://github.com/kubevirt/cluster-network-addons-operator/releases/download/${NETWORK_ADDONS_VERSION}/network-addons-config-example.cr.yaml -https://github.com/kubevirt/kubevirt-ssp-operator/releases/download/${SSP_VERSION}/kubevirt-ssp-operator-cr.yaml https://github.com/kubevirt/vm-import-operator/releases/download/${VM_IMPORT_VERSION}/vmimportconfig_cr.yaml" diff --git a/hack/deploy.sh b/hack/deploy.sh index 87857d6c81..b00b568763 100755 --- a/hack/deploy.sh +++ b/hack/deploy.sh @@ -145,7 +145,7 @@ sleep 20 # avoid checking the availability of virt-operator here because it will become available only when # HCO will create its priorityClass and this will happen only when wi will have HCO cr -for op in cdi-operator cluster-network-addons-operator kubevirt-ssp-operator node-maintenance-operator vm-import-operator; do +for op in cdi-operator cluster-network-addons-operator ssp-operator node-maintenance-operator vm-import-operator; do "${CMD}" wait deployment/"${op}" --for=condition=Available --timeout="540s" || CONTAINER_ERRORED+="${op} " done diff --git a/hack/operator-test.sh b/hack/operator-test.sh index 3ad478b437..9c52563eac 100755 --- a/hack/operator-test.sh +++ b/hack/operator-test.sh @@ -43,7 +43,7 @@ sleep 10 VIRT_POD=`oc get pods -n kubevirt | grep virt-operator | head -1 | awk '{ print $1 }'` CDI_POD=`oc get pods -n cdi | grep cdi-operator | head -1 | awk '{ print $1 }'` NETWORK_ADDONS_POD=`oc get pods -n cluster-network-addons-operator | grep cluster-network-addons-operator | head -1 | awk '{ print $1 }'` -SSP_POD=`oc get pods -n kubevirt-hyperconverged | grep kubevirt-ssp-operator | head -1 | awk '{ print $1 }'` +SSP_POD=`oc get pods -n kubevirt-hyperconverged | grep ssp-operator | head -1 | awk '{ print $1 }'` VM_IMPORT_POD=`oc get pods -n kubevirt-hyperconverged | grep vm-import-operator | head -1 | awk '{ print $1 }'` oc wait pod $VIRT_POD --for condition=Ready -n kubevirt --timeout="${WAIT_TIMEOUT}" oc wait pod $CDI_POD --for condition=Ready -n cdi --timeout="${WAIT_TIMEOUT}" From 7a4f75528364f7729f264de3745eaa26171b1fcd Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Tue, 15 Dec 2020 11:27:37 +0200 Subject: [PATCH 08/19] Temporary workaround in SSP CSV: remove certs volume and set webhook port Signed-off-by: Zvi Cahana --- ...onverged-operator.v1.3.0.clusterserviceversion.yaml | 10 +--------- ...onverged-operator.v1.3.0.clusterserviceversion.yaml | 10 +--------- deploy/operator.yaml | 9 --------- hack/build-manifests.sh | 9 +++++++++ 4 files changed, 11 insertions(+), 27 deletions(-) diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index 2ef98dd1ea..dedc2ebe25 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -2088,17 +2088,8 @@ spec: name: webhook-server protocol: TCP resources: {} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 - 
volumes: - - name: cert - secret: - defaultMode: 420 - secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2577,6 +2568,7 @@ spec: webhookPath: /mutate-ns-hco-kubevirt-io - admissionReviewVersions: - v1beta1 + containerPort: 9443 deploymentName: ssp-operator failurePolicy: Fail generateName: vssp.kb.io diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index aa72782cc3..aa55ba3df0 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -2088,17 +2088,8 @@ spec: name: webhook-server protocol: TCP resources: {} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2577,6 +2568,7 @@ spec: webhookPath: /mutate-ns-hco-kubevirt-io - admissionReviewVersions: - v1beta1 + containerPort: 9443 deploymentName: ssp-operator failurePolicy: Fail generateName: vssp.kb.io diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 20d0689d6b..b98a26265a 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -339,17 +339,8 @@ spec: name: webhook-server protocol: TCP resources: {} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: ssp-webhook-server-cert --- apiVersion: apps/v1 kind: Deployment diff --git a/hack/build-manifests.sh b/hack/build-manifests.sh index f3afe77af3..7d458d3281 100755 --- a/hack/build-manifests.sh +++ b/hack/build-manifests.sh @@ -156,6 +156,15 @@ function create_ssp_csv() { " gen_csv ${SSP_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs} + + # Temporary CSV workarounds: + # 1) remove the `certs` volume from ssp-operator pod + # 2) set the containerPort to 9443 for the ssp webhook + sspCsv="${TEMPDIR}/${operatorName}.${CSV_EXT}" + sed -i '/volumes:/,/secretName: ssp-webhook-server-cert/d' ${sspCsv} + sed -i '/volumeMounts:/,/readOnly: true/d' ${sspCsv} + sed -i '/webhookPath: \/validate-ssp-kubevirt-io-v1beta1-ssp/a \ \ \ \ containerPort: 9443' ${sspCsv} + echo "${operatorName}" } From 3c542dfb720bbb806b93668c04a261fc4e808ca5 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Wed, 16 Dec 2020 18:41:44 +0200 Subject: [PATCH 09/19] Remove old SSP CRs from RelatedObjects Signed-off-by: Zvi Cahana --- pkg/controller/operands/ssp.go | 149 ++++++++++++++++++++++------ pkg/controller/operands/ssp_test.go | 123 ++++++++++++++++++++++- 2 files changed, 240 insertions(+), 32 deletions(-) diff --git a/pkg/controller/operands/ssp.go b/pkg/controller/operands/ssp.go index eac8e7bf88..53e948cfbe 100644 --- a/pkg/controller/operands/ssp.go +++ b/pkg/controller/operands/ssp.go @@ -4,19 +4,22 @@ import ( "errors" "fmt" "reflect" + "strings" "sync" - sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" - hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" 
"github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + objectreferencesv1 "github.com/openshift/custom-resource-status/objectreferences/v1" + sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" + corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "sigs.k8s.io/controller-runtime/pkg/client" ) @@ -32,11 +35,19 @@ const ( type sspHandler struct { genericOperand - crdsToRemove []string + // Old SSP CRDs that need to be removed from the cluster. + // Removal of the CRDs also leads to removal of its CRs. + crdsToRemove []schema.GroupKind + + // Old SSP CRs that need to be removed from the list of related objects. + // This list is maintained separately from the list of CRDs to remove, + // so we can retry to delete them upon HCO status update failures, even + // when the CRD itself has been successfully removed. + relatedObjectsToRemove []schema.GroupKind } func newSspHandler(Client client.Client, Scheme *runtime.Scheme) *sspHandler { - return &sspHandler{ + handler := &sspHandler{ genericOperand: genericOperand{ Client: Client, Scheme: Scheme, @@ -47,22 +58,30 @@ func newSspHandler(Client client.Client, Scheme *runtime.Scheme) *sspHandler { hooks: &sspHooks{}, }, - crdsToRemove: []string{ + crdsToRemove: []schema.GroupKind{ // These are the 2nd generation SSP CRDs, // where the group name has been changed to "ssp.kubevirt.io" - "kubevirtcommontemplatesbundles.ssp.kubevirt.io", - "kubevirtmetricsaggregations.ssp.kubevirt.io", - "kubevirtnodelabellerbundles.ssp.kubevirt.io", - "kubevirttemplatevalidators.ssp.kubevirt.io", + {Group: "ssp.kubevirt.io", Kind: "KubevirtCommonTemplatesBundle"}, + {Group: "ssp.kubevirt.io", Kind: "KubevirtNodeLabellerBundle"}, + {Group: "ssp.kubevirt.io", Kind: "KubevirtTemplateValidator"}, + {Group: "ssp.kubevirt.io", Kind: "KubevirtMetricsAggregation"}, // These are the original SSP CRDs, with the group name "kubevirt.io". - // We attempt to remove these too, for upgrades from an older version. - "kubevirtcommontemplatesbundles.kubevirt.io", - "kubevirtmetricsaggregations.kubevirt.io", - "kubevirtnodelabellerbundles.kubevirt.io", - "kubevirttemplatevalidators.kubevirt.io", + // We attempt to remove these too, for upgrades from even older version. + {Group: "kubevirt.io", Kind: "KubevirtCommonTemplatesBundle"}, + {Group: "kubevirt.io", Kind: "KubevirtNodeLabellerBundle"}, + {Group: "kubevirt.io", Kind: "KubevirtTemplateValidator"}, + {Group: "kubevirt.io", Kind: "KubevirtMetricsAggregation"}, }, } + + // The list of related objects to remove is initialized empty; + // Once the corresponding CRD (and hence CR) is removed successfully, + // the CR can be removed from the list of related objects. 
+ handler.relatedObjectsToRemove = make([]schema.GroupKind, 0, len(handler.crdsToRemove)) + + return handler + } func (handler *sspHandler) ensure(req *common.HcoRequest) *EnsureResult { @@ -70,8 +89,23 @@ func (handler *sspHandler) ensure(req *common.HcoRequest) *EnsureResult { // Attempt to remove old CRDs if len(handler.crdsToRemove) > 0 && (!req.UpgradeMode || res.UpgradeDone) { - unremovedCRDs := removeCRDs(handler.Client, req, handler.crdsToRemove) - handler.crdsToRemove = unremovedCRDs + removed, unremoved := removeCRDs(handler.Client, req, handler.crdsToRemove) + + // For CRDs that failed to remove, we'll retry in the next reconciliation loop. + handler.crdsToRemove = unremoved + + // For CRDs that were successfully removed, we can proceed to remove + // their corresponding entries from HCO.Status.RelatedObjects. + handler.relatedObjectsToRemove = append(handler.relatedObjectsToRemove, removed...) + } + + if len(handler.relatedObjectsToRemove) > 0 { + removed := removeRelatedObjects(req, handler.relatedObjectsToRemove) + + // CRDs that were removed from the related objects list are kept around, + // so that the next reconciliation loop can validate that they were actually removed, + // and not lost in some failing status update. + handler.relatedObjectsToRemove = removed } return res @@ -152,38 +186,41 @@ func NewSSP(hc *hcov1beta1.HyperConverged, opts ...string) *sspv1beta1.SSP { } } -// returns a slice of CRD names that weren't successfully removed -func removeCRDs(clt client.Client, req *common.HcoRequest, crdNames []string) []string { - unremovedCRDs := make([]string, 0, len(crdNames)) +// returns a slice of CRDs that were successfully removed or don't exist, and a slice of CRDs that failed to remove. +func removeCRDs(clt client.Client, req *common.HcoRequest, crds []schema.GroupKind) ([]schema.GroupKind, []schema.GroupKind) { + removed := make([]schema.GroupKind, 0, len(crds)) + unremoved := make([]schema.GroupKind, 0, len(crds)) // The deletion is performed concurrently for all CRDs. var mutex sync.Mutex var wg sync.WaitGroup - wg.Add(len(crdNames)) + wg.Add(len(crds)) - for _, crdName := range crdNames { - go func(crdName string) { - removed := removeCRD(clt, req, crdName) + for _, crd := range crds { + go func(crd schema.GroupKind) { + isRemoved := removeCRD(clt, req, crd) - // If removal failed for some reason, we'll retry in the next reconciliation loop. - if !removed { - mutex.Lock() - defer mutex.Unlock() + mutex.Lock() + defer mutex.Unlock() - unremovedCRDs = append(unremovedCRDs, crdName) + if isRemoved { + removed = append(removed, crd) + } else { + unremoved = append(unremoved, crd) } wg.Done() - }(crdName) + }(crd) } wg.Wait() - return unremovedCRDs + return removed, unremoved } // returns true if not found or if deletion succeeded, and false otherwise. -func removeCRD(clt client.Client, req *common.HcoRequest, crdName string) bool { +func removeCRD(clt client.Client, req *common.HcoRequest, crd schema.GroupKind) bool { + crdName := groupKindToCRDName(crd) found := &apiextensionsv1.CustomResourceDefinition{} key := client.ObjectKey{Namespace: hcoutil.UndefinedNamespace, Name: crdName} err := clt.Get(req.Ctx, key, found) @@ -204,3 +241,53 @@ func removeCRD(clt client.Client, req *common.HcoRequest, crdName string) bool { return true } + +// This function creates a CRD name with the form ., +// where is the lowercase plural form of the Kind. 
+// Note that this doesn't use any generic pluralization, so may not +// work for CRDs other than the old SSP CRDs we're trying to handle here. +func groupKindToCRDName(gk schema.GroupKind) string { + plural := strings.ToLower(gk.Kind) + "s" + return fmt.Sprintf("%s.%s", plural, gk.Group) +} + +// Removes CRs of the given CRDs from the list of related objects in HCO status. +// Returns a list of CRDs that were removed from the related objects list. +func removeRelatedObjects(req *common.HcoRequest, crds []schema.GroupKind) []schema.GroupKind { + // Collect CRD GKs into a set + crdSet := make(map[schema.GroupKind]struct{}) + for _, crd := range crds { + crdSet[crd] = struct{}{} + } + + objRefsToRemove := make([]corev1.ObjectReference, 0, len(crds)) + removedCRDs := make([]schema.GroupKind, 0, len(crds)) + + // Find related objects to be removed + for _, objRef := range req.Instance.Status.RelatedObjects { + objGK := objRef.GroupVersionKind().GroupKind() + if _, exist := crdSet[objGK]; exist { + objRefsToRemove = append(objRefsToRemove, objRef) + removedCRDs = append(removedCRDs, objGK) + } + } + + // Remove from related objects + if len(objRefsToRemove) > 0 { + req.StatusDirty = true + for _, objRef := range objRefsToRemove { + err := objectreferencesv1.RemoveObjectReference(&req.Instance.Status.RelatedObjects, objRef) + if err != nil { + // This shouldn't really happen, but... + req.Logger.Error(err, "Failed removing object reference from HCO.Status.RelatedObjects", + "ObjectReference.Name", objRef.Name, + "ObjectReference.Namespace", objRef.Namespace, + "ObjectReference.Kind", objRef.Kind, + "ObjectReference.APIVersion", objRef.APIVersion) + } + } + + } + + return removedCRDs +} diff --git a/pkg/controller/operands/ssp_test.go b/pkg/controller/operands/ssp_test.go index 69c6a8f72c..f9a70daa65 100644 --- a/pkg/controller/operands/ssp_test.go +++ b/pkg/controller/operands/ssp_test.go @@ -3,15 +3,17 @@ package operands import ( "context" "fmt" + "os" + hcov1beta1 "github.com/kubevirt/hyperconverged-cluster-operator/pkg/apis/hco/v1beta1" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/common" "github.com/kubevirt/hyperconverged-cluster-operator/pkg/controller/commonTestUtils" hcoutil "github.com/kubevirt/hyperconverged-cluster-operator/pkg/util" conditionsv1 "github.com/openshift/custom-resource-status/conditions/v1" + v1 "github.com/openshift/custom-resource-status/objectreferences/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" lifecycleapi "kubevirt.io/controller-lifecycle-operator-sdk/pkg/sdk/api" sspv1beta1 "kubevirt.io/ssp-operator/api/v1beta1" - "os" corev1 "k8s.io/api/core/v1" apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" @@ -466,6 +468,79 @@ var _ = Describe("SSP Operands", func() { Expect(foundCrds.Items).To(BeEmpty()) }) + It("should remove old related objects if upgrade is done", func() { + // Simulate no upgrade + req.SetUpgradeMode(false) + + // Initialize RelatedObjects with a bunch of objects + // including old SSP ones. 
+ for _, objRef := range oldSSPRelatedObjects() { + v1.SetObjectReference(&hco.Status.RelatedObjects, objRef) + } + for _, objRef := range otherRelatedObjects() { + v1.SetObjectReference(&hco.Status.RelatedObjects, objRef) + } + + cl := commonTestUtils.InitClient(nil) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + + Expect(res.Created).To(BeTrue()) + Expect(res.Updated).To(BeFalse()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + Expect(hco.Status.RelatedObjects).To(HaveLen(len(otherRelatedObjects()))) + for _, objRef := range oldSSPRelatedObjects() { + Expect(hco.Status.RelatedObjects).ToNot(ContainElement(objRef)) + } + }) + + It("should retry removing old related objects when they fail to be removed from the status", func() { + // Simulate no upgrade + req.SetUpgradeMode(false) + + // Initialize RelatedObjects with a bunch of objects + // including old SSP ones. + for _, objRef := range oldSSPRelatedObjects() { + v1.SetObjectReference(&hco.Status.RelatedObjects, objRef) + } + for _, objRef := range otherRelatedObjects() { + v1.SetObjectReference(&hco.Status.RelatedObjects, objRef) + } + + cl := commonTestUtils.InitClient(nil) + handler := newSspHandler(cl, commonTestUtils.GetScheme()) + res := handler.ensure(req) + + Expect(res.Created).To(BeTrue()) + Expect(res.Updated).To(BeFalse()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // Now simulate "status update failure", + // i.e. related objects aren't removed. + for _, objRef := range oldSSPRelatedObjects() { + v1.SetObjectReference(&hco.Status.RelatedObjects, objRef) + } + + // Simulate another reconciliation cycle + res = handler.ensure(req) + + Expect(res.Created).To(BeFalse()) + Expect(res.Updated).To(BeFalse()) + Expect(res.Overwritten).To(BeFalse()) + Expect(res.UpgradeDone).To(BeFalse()) + Expect(res.Err).To(BeNil()) + + // len+1 because the (new) SSP object is now added to RelatedObjects + Expect(hco.Status.RelatedObjects).To(HaveLen(len(otherRelatedObjects()) + 1)) + for _, objRef := range oldSSPRelatedObjects() { + Expect(hco.Status.RelatedObjects).ToNot(ContainElement(objRef)) + } + }) }) }) }) @@ -504,3 +579,49 @@ func oldSSPCrdsAsObjects() []runtime.Object { return objs } + +func oldSSPRelatedObjects() []corev1.ObjectReference { + return []corev1.ObjectReference{ + { + APIVersion: "ssp.kubevirt.io/v1", + Kind: "KubevirtCommonTemplatesBundle", + Name: "common-templates-kubevirt-hyperconverged", + Namespace: "openshift", + }, + { + APIVersion: "ssp.kubevirt.io/v1", + Kind: "KubevirtNodeLabellerBundle", + Name: "node-labeller-kubevirt-hyperconverged", + Namespace: "kubevirt-hyperconverged", + }, + { + APIVersion: "ssp.kubevirt.io/v1", + Kind: "KubevirtTemplateValidator", + Name: "template-validator-kubevirt-hyperconverged", + Namespace: "kubevirt-hyperconverged", + }, + { + APIVersion: "ssp.kubevirt.io/v1", + Kind: "KubevirtMetricsAggregation", + Name: "metrics-aggregation-kubevirt-hyperconverged", + Namespace: "kubevirt-hyperconverged", + }, + } +} + +func otherRelatedObjects() []corev1.ObjectReference { + return []corev1.ObjectReference{ + { + APIVersion: "kubevirt.io/v1alpha3", + Kind: "Kubevirt", + Name: "kubevirt-kubevirt-hyperconverged", + Namespace: "openshift", + }, + { + APIVersion: "cdi.kubevirt.io/v1beta1", + Kind: "CDI", + Name: "cdi-kubevirt-hyperconverged", + Namespace: "kubevirt-hyperconverged", + }, + } +} From 
0f831f054413a11379785233e496908d591b5654 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Sun, 20 Dec 2020 13:07:43 +0200 Subject: [PATCH 10/19] Merge SSP node placement tests Signed-off-by: Zvi Cahana --- pkg/controller/commonTestUtils/testUtils.go | 32 ++++ pkg/controller/operands/ssp_test.go | 176 ++++---------------- 2 files changed, 67 insertions(+), 141 deletions(-) diff --git a/pkg/controller/commonTestUtils/testUtils.go b/pkg/controller/commonTestUtils/testUtils.go index 5f5e900ca1..e65629b188 100644 --- a/pkg/controller/commonTestUtils/testUtils.go +++ b/pkg/controller/commonTestUtils/testUtils.go @@ -99,6 +99,38 @@ func NewHyperConvergedConfig() *sdkapi.NodePlacement { } } +func NewHyperConvergedConfig2() *sdkapi.NodePlacement { + seconds3, seconds4 := int64(3), int64(4) + return &sdkapi.NodePlacement{ + NodeSelector: map[string]string{ + "key3": "value3", + "key4": "value4", + }, + Affinity: &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + {Key: "key3", Operator: "operator3", Values: []string{"value31, value32"}}, + {Key: "key4", Operator: "operator4", Values: []string{"value41, value42"}}, + }, + MatchFields: []corev1.NodeSelectorRequirement{ + {Key: "key3", Operator: "operator3", Values: []string{"value31, value32"}}, + {Key: "key4", Operator: "operator4", Values: []string{"value41, value42"}}, + }, + }, + }, + }, + }, + }, + Tolerations: []corev1.Toleration{ + {Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3}, + {Key: "key4", Operator: "operator4", Value: "value4", Effect: "effect4", TolerationSeconds: &seconds4}, + }, + } +} + var testScheme *runtime.Scheme func GetScheme() *runtime.Scheme { diff --git a/pkg/controller/operands/ssp_test.go b/pkg/controller/operands/ssp_test.go index f9a70daa65..8f81ad2072 100644 --- a/pkg/controller/operands/ssp_test.go +++ b/pkg/controller/operands/ssp_test.go @@ -110,12 +110,13 @@ var _ = Describe("SSP Operands", func() { Expect(foundResource.Spec).To(Equal(expectedResource.Spec)) }) - Context("NodeLabeller node placement", func() { + Context("Node placement", func() { It("should add node placement if missing", func() { existingResource := NewSSP(hco, commonTestUtils.Namespace) hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig2() cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := newSspHandler(cl, commonTestUtils.GetScheme()) @@ -134,8 +135,9 @@ var _ = Describe("SSP Operands", func() { ).To(BeNil()) Expect(existingResource.Spec.NodeLabeller.Placement).To(BeZero()) + Expect(existingResource.Spec.TemplateValidator.Placement).To(BeZero()) Expect(*foundResource.Spec.NodeLabeller.Placement).To(Equal(*hco.Spec.Workloads.NodePlacement)) - Expect(foundResource.Spec.TemplateValidator.Placement).To(BeZero()) + Expect(*foundResource.Spec.TemplateValidator.Placement).To(Equal(*hco.Spec.Infra.NodePlacement)) Expect(req.Conditions).To(BeEmpty()) }) @@ -143,6 +145,7 @@ var _ = Describe("SSP Operands", func() { hcoNodePlacement := commonTestUtils.NewHco() hcoNodePlacement.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig2() existingResource := NewSSP(hcoNodePlacement, 
commonTestUtils.Namespace) cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) @@ -162,23 +165,31 @@ var _ = Describe("SSP Operands", func() { ).To(BeNil()) Expect(existingResource.Spec.NodeLabeller.Placement).ToNot(BeZero()) + Expect(existingResource.Spec.TemplateValidator.Placement).ToNot(BeZero()) Expect(foundResource.Spec.NodeLabeller.Placement).To(BeZero()) - + Expect(foundResource.Spec.TemplateValidator.Placement).To(BeZero()) Expect(req.Conditions).To(BeEmpty()) }) It("should modify node placement according to HCO CR", func() { hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() + hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig2() existingResource := NewSSP(hco, commonTestUtils.Namespace) // now, modify HCO's node placement - seconds3 := int64(3) + seconds12 := int64(12) hco.Spec.Workloads.NodePlacement.Tolerations = append(hco.Spec.Workloads.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + Key: "key12", Operator: "operator12", Value: "value12", Effect: "effect12", TolerationSeconds: &seconds12, }) hco.Spec.Workloads.NodePlacement.NodeSelector["key1"] = "something else" + seconds34 := int64(34) + hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ + Key: "key34", Operator: "operator34", Value: "value34", Effect: "effect34", TolerationSeconds: &seconds34, + }) + hco.Spec.Infra.NodePlacement.NodeSelector["key3"] = "something entirely else" + cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := newSspHandler(cl, commonTestUtils.GetScheme()) res := handler.ensure(req) @@ -198,162 +209,41 @@ var _ = Describe("SSP Operands", func() { Expect(existingResource.Spec.NodeLabeller.Placement.Affinity.NodeAffinity).ToNot(BeZero()) Expect(existingResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(2)) Expect(existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("value1")) + Expect(existingResource.Spec.TemplateValidator.Placement.Affinity.NodeAffinity).ToNot(BeZero()) + Expect(existingResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(2)) + Expect(existingResource.Spec.TemplateValidator.Placement.NodeSelector["key3"]).Should(Equal("value3")) Expect(foundResource.Spec.NodeLabeller.Placement.Affinity.NodeAffinity).ToNot(BeNil()) Expect(foundResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(3)) Expect(foundResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("something else")) + Expect(foundResource.Spec.TemplateValidator.Placement.Affinity.NodeAffinity).ToNot(BeNil()) + Expect(foundResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(3)) + Expect(foundResource.Spec.TemplateValidator.Placement.NodeSelector["key3"]).Should(Equal("something entirely else")) Expect(req.Conditions).To(BeEmpty()) }) It("should overwrite node placement if directly set on SSP CR", func() { hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig2()} existingResource := NewSSP(hco, commonTestUtils.Namespace) // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR req.HCOTriggered = false // now, modify NodeLabeller node placement - seconds3 := int64(3) + seconds12 := int64(12) 
existingResource.Spec.NodeLabeller.Placement.Tolerations = append(hco.Spec.Workloads.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + Key: "key12", Operator: "operator12", Value: "value12", Effect: "effect12", TolerationSeconds: &seconds12, }) existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"] = "BADvalue1" - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newSspHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeTrue()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1beta1.SSP{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(3)) - Expect(existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("BADvalue1")) - - Expect(foundResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(2)) - Expect(foundResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("value1")) - - Expect(req.Conditions).To(BeEmpty()) - }) - }) - - Context("TemplateValidator node placement", func() { - - It("should add node placement if missing ", func() { - existingResource := NewSSP(hco, commonTestUtils.Namespace) - - hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newSspHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1beta1.SSP{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.TemplateValidator.Placement).To(BeZero()) - Expect(*foundResource.Spec.TemplateValidator.Placement).To(Equal(*hco.Spec.Infra.NodePlacement)) - Expect(foundResource.Spec.NodeLabeller.Placement).To(BeZero()) - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should remove node placement if missing in HCO CR", func() { - - hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - existingResource := NewSSP(hcoNodePlacement, commonTestUtils.Namespace) - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newSspHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1beta1.SSP{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.TemplateValidator.Placement).ToNot(BeZero()) - Expect(foundResource.Spec.TemplateValidator.Placement).To(BeZero()) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should modify node placement according to HCO CR", func() { - - 
hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - existingResource := NewSSP(hco, commonTestUtils.Namespace) - - // now, modify HCO's node placement - seconds3 := int64(3) - hco.Spec.Infra.NodePlacement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, - }) - hco.Spec.Infra.NodePlacement.NodeSelector["key1"] = "something else" - - cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) - handler := newSspHandler(cl, commonTestUtils.GetScheme()) - res := handler.ensure(req) - Expect(res.Created).To(BeFalse()) - Expect(res.Updated).To(BeTrue()) - Expect(res.Overwritten).To(BeFalse()) - Expect(res.UpgradeDone).To(BeFalse()) - Expect(res.Err).To(BeNil()) - - foundResource := &sspv1beta1.SSP{} - Expect( - cl.Get(context.TODO(), - types.NamespacedName{Name: existingResource.Name, Namespace: existingResource.Namespace}, - foundResource), - ).To(BeNil()) - - Expect(existingResource.Spec.TemplateValidator.Placement.Affinity.NodeAffinity).ToNot(BeZero()) - Expect(existingResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(2)) - Expect(existingResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("value1")) - - Expect(foundResource.Spec.TemplateValidator.Placement.Affinity.NodeAffinity).ToNot(BeNil()) - Expect(foundResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(3)) - Expect(foundResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("something else")) - - Expect(req.Conditions).To(BeEmpty()) - }) - - It("should overwrite node placement if directly set on SSP CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - existingResource := NewSSP(hco, commonTestUtils.Namespace) - - // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR - req.HCOTriggered = false - - // now, modify NodeLabeller node placement - seconds3 := int64(3) + // and modify TemplateValidator node placement + seconds34 := int64(34) existingResource.Spec.TemplateValidator.Placement.Tolerations = append(hco.Spec.Infra.NodePlacement.Tolerations, corev1.Toleration{ - Key: "key3", Operator: "operator3", Value: "value3", Effect: "effect3", TolerationSeconds: &seconds3, + Key: "key34", Operator: "operator34", Value: "value34", Effect: "effect34", TolerationSeconds: &seconds34, }) - existingResource.Spec.TemplateValidator.Placement.NodeSelector["key1"] = "BADvalue1" + existingResource.Spec.TemplateValidator.Placement.NodeSelector["key3"] = "BADvalue3" cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := newSspHandler(cl, commonTestUtils.GetScheme()) @@ -370,11 +260,15 @@ var _ = Describe("SSP Operands", func() { foundResource), ).To(BeNil()) + Expect(existingResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(3)) + Expect(existingResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("BADvalue1")) Expect(existingResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(3)) - Expect(existingResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("BADvalue1")) + Expect(existingResource.Spec.TemplateValidator.Placement.NodeSelector["key3"]).Should(Equal("BADvalue3")) + Expect(foundResource.Spec.NodeLabeller.Placement.Tolerations).To(HaveLen(2)) + 
Expect(foundResource.Spec.NodeLabeller.Placement.NodeSelector["key1"]).Should(Equal("value1")) Expect(foundResource.Spec.TemplateValidator.Placement.Tolerations).To(HaveLen(2)) - Expect(foundResource.Spec.TemplateValidator.Placement.NodeSelector["key1"]).Should(Equal("value1")) + Expect(foundResource.Spec.TemplateValidator.Placement.NodeSelector["key3"]).Should(Equal("value3")) Expect(req.Conditions).To(BeEmpty()) }) From ff089d31d6c9f5cba3c62d2a6ffa76a9f743630f Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Sun, 20 Dec 2020 15:33:43 +0200 Subject: [PATCH 11/19] Revert "Temporary workaround in SSP CSV: remove certs volume and set webhook port" This reverts commit c73c124349c89a8213c987de0423f58b9ec0f611. Signed-off-by: Zvi Cahana --- ...onverged-operator.v1.3.0.clusterserviceversion.yaml | 10 +++++++++- ...onverged-operator.v1.3.0.clusterserviceversion.yaml | 10 +++++++++- deploy/operator.yaml | 9 +++++++++ hack/build-manifests.sh | 9 --------- 4 files changed, 27 insertions(+), 11 deletions(-) diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index dedc2ebe25..2ef98dd1ea 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -2088,8 +2088,17 @@ spec: name: webhook-server protocol: TCP resources: {} + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2568,7 +2577,6 @@ spec: webhookPath: /mutate-ns-hco-kubevirt-io - admissionReviewVersions: - v1beta1 - containerPort: 9443 deploymentName: ssp-operator failurePolicy: Fail generateName: vssp.kb.io diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index aa55ba3df0..aa72782cc3 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -2088,8 +2088,17 @@ spec: name: webhook-server protocol: TCP resources: {} + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2568,7 +2577,6 @@ spec: webhookPath: /mutate-ns-hco-kubevirt-io - admissionReviewVersions: - v1beta1 - containerPort: 9443 deploymentName: ssp-operator failurePolicy: Fail generateName: vssp.kb.io diff --git a/deploy/operator.yaml b/deploy/operator.yaml index b98a26265a..20d0689d6b 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -339,8 +339,17 @@ spec: name: webhook-server protocol: TCP resources: {} + volumeMounts: + - mountPath: /tmp/k8s-webhook-server/serving-certs + name: cert + readOnly: true 
serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 + volumes: + - name: cert + secret: + defaultMode: 420 + secretName: ssp-webhook-server-cert --- apiVersion: apps/v1 kind: Deployment diff --git a/hack/build-manifests.sh b/hack/build-manifests.sh index 7d458d3281..f3afe77af3 100755 --- a/hack/build-manifests.sh +++ b/hack/build-manifests.sh @@ -156,15 +156,6 @@ function create_ssp_csv() { " gen_csv ${SSP_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs} - - # Temporary CSV workarounds: - # 1) remove the `certs` volume from ssp-operator pod - # 2) set the containerPort to 9443 for the ssp webhook - sspCsv="${TEMPDIR}/${operatorName}.${CSV_EXT}" - sed -i '/volumes:/,/secretName: ssp-webhook-server-cert/d' ${sspCsv} - sed -i '/volumeMounts:/,/readOnly: true/d' ${sspCsv} - sed -i '/webhookPath: \/validate-ssp-kubevirt-io-v1beta1-ssp/a \ \ \ \ containerPort: 9443' ${sspCsv} - echo "${operatorName}" } From d97ceca29e2f60c7ff07e611926a6d40292dca2a Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Mon, 21 Dec 2020 14:44:01 +0200 Subject: [PATCH 12/19] Bump SSP to v0.1.0-rc.2 Signed-off-by: Zvi Cahana --- deploy/images.csv | 2 +- deploy/images.env | 4 ++-- ...-operator.v1.3.0.clusterserviceversion.yaml | 18 +++++------------- ...-operator.v1.3.0.clusterserviceversion.yaml | 18 +++++------------- deploy/operator.yaml | 15 +++------------ hack/config | 2 +- 6 files changed, 17 insertions(+), 42 deletions(-) diff --git a/deploy/images.csv b/deploy/images.csv index d75eac6628..5b39f4fc67 100644 --- a/deploy/images.csv +++ b/deploy/images.csv @@ -5,7 +5,7 @@ KUBEVIRT_CONTROLLER_IMAGE,docker.io/kubevirt/virt-controller,KUBEVIRT_VERSION,f1 KUBEVIRT_LAUNCHER_IMAGE,docker.io/kubevirt/virt-launcher,KUBEVIRT_VERSION,1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12 KUBEVIRT_HANDLER_IMAGE,docker.io/kubevirt/virt-handler,KUBEVIRT_VERSION,10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236 CNA_OPERATOR_IMAGE,quay.io/kubevirt/cluster-network-addons-operator,NETWORK_ADDONS_VERSION,38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d -SSP_OPERATOR_IMAGE,quay.io/kubevirt/ssp-operator,SSP_VERSION,efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 +SSP_OPERATOR_IMAGE,quay.io/kubevirt/ssp-operator,SSP_VERSION,1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa CDI_OPERATOR_IMAGE,docker.io/kubevirt/cdi-operator,CDI_VERSION,8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711 CDI_CONTROLLER_IMAGE,docker.io/kubevirt/cdi-controller,CDI_VERSION,bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc CDI_APISERVER_IMAGE,docker.io/kubevirt/cdi-apiserver,CDI_VERSION,02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813 diff --git a/deploy/images.env b/deploy/images.env index 2555484be1..e7dd3eebaa 100755 --- a/deploy/images.env +++ b/deploy/images.env @@ -4,7 +4,7 @@ KUBEVIRT_CONTROLLER_IMAGE=docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89 KUBEVIRT_LAUNCHER_IMAGE=docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12 KUBEVIRT_HANDLER_IMAGE=docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236 CNA_OPERATOR_IMAGE=quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d -SSP_OPERATOR_IMAGE=quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 
+SSP_OPERATOR_IMAGE=quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa CDI_OPERATOR_IMAGE=docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711 CDI_CONTROLLER_IMAGE=docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc CDI_APISERVER_IMAGE=docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813 @@ -22,4 +22,4 @@ HCO_WEBHOOK_IMAGE=quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594 NMO_IMAGE=quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a CONVERSION_IMAGE=quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815 VMWARE_IMAGE=quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 -DIGEST_LIST=docker.io/kubevirt/virt-operator@sha256:4c33eaab42033c84310b62e146274f95a55699235c59846c8cec3009a91a723f,docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21,docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5,docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12,docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236,quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d,quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941,docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711,docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc,docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813,docker.io/kubevirt/cdi-cloner@sha256:c8946ef116d4d2fccf08b895faf07d9bb85f76d315e4380416261db9970306f1,docker.io/kubevirt/cdi-importer@sha256:d16dd224def1713a51278f6f164a04c7e9b38364b794f14d18c2d8d14ab04eb3,docker.io/kubevirt/cdi-uploadproxy@sha256:9e5ae41e3da07c30ac9a0326c89f2d64021abfc5d74ee6c4b750d6915f02eeb5,docker.io/kubevirt/cdi-uploadserver@sha256:3915ba0501fa0e3ee9ae178d6617d0c1ac5f34a83243f723421c42de99e705e5,quay.io/kubevirt/hostpath-provisioner-operator@sha256:a51e9b075a60600244757386f5894b314170543edb1d7f4738f4860a19602072,quay.io/kubevirt/hostpath-provisioner@sha256:3838d8e713d2e85a807a9c302501b25c248ba3f3c1602fe50480e6510de43e11,quay.io/kubevirt/vm-import-operator@sha256:74fc74dab0671ef1098e69872e47bcb6f85a40b4b18a1e23fd6d3cfc36dfee32,quay.io/kubevirt/vm-import-controller@sha256:481f4a493a66d1310734ac135e8dbaa5dc01c9d93f6e9ecc9326b81c1c08dbfe,quay.io/kubevirt/vm-import-virtv2v@sha256:97caccb965d771afefd901c71381b6c1126e4177b477d47f2ca5ca57c5b06593,quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da,quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594403627756bae3b4d6579967819fc544ced3c371d971a090a99fe0a9e9,quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a,quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815,quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f
044b00211f273210a9cd371b40746d3c92 +DIGEST_LIST=docker.io/kubevirt/virt-operator@sha256:4c33eaab42033c84310b62e146274f95a55699235c59846c8cec3009a91a723f,docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21,docker.io/kubevirt/virt-controller@sha256:f1fe5d43cd89b2f7d18cb23a14753b260a85f7e862b13529b640d7e8c36e81d5,docker.io/kubevirt/virt-launcher@sha256:1bb248aeb9d0bc66667f6133f8230cc80944b562c3d267cc00a5382954140c12,docker.io/kubevirt/virt-handler@sha256:10e9294253c2037d29a9a33dfeb3b096dbb7ceb5ed2f0fc1662d66535108d236,quay.io/kubevirt/cluster-network-addons-operator@sha256:38dadf3624c30092b67aa74487b3f744c835f9a17f33b4408f1f1bab3a79004d,quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa,docker.io/kubevirt/cdi-operator@sha256:8e9a4d4a819c11e53b9ebc316590aeb0515ffd92a92f0281d8c2d90fd819e711,docker.io/kubevirt/cdi-controller@sha256:bbd22f481ec1dd1eaf63af9888d107c3f67507e578600da8a5429540c78527bc,docker.io/kubevirt/cdi-apiserver@sha256:02d0363e32cd28145a9802cad2c0adc077e67715d55a948c0bc92ae2adf08813,docker.io/kubevirt/cdi-cloner@sha256:c8946ef116d4d2fccf08b895faf07d9bb85f76d315e4380416261db9970306f1,docker.io/kubevirt/cdi-importer@sha256:d16dd224def1713a51278f6f164a04c7e9b38364b794f14d18c2d8d14ab04eb3,docker.io/kubevirt/cdi-uploadproxy@sha256:9e5ae41e3da07c30ac9a0326c89f2d64021abfc5d74ee6c4b750d6915f02eeb5,docker.io/kubevirt/cdi-uploadserver@sha256:3915ba0501fa0e3ee9ae178d6617d0c1ac5f34a83243f723421c42de99e705e5,quay.io/kubevirt/hostpath-provisioner-operator@sha256:a51e9b075a60600244757386f5894b314170543edb1d7f4738f4860a19602072,quay.io/kubevirt/hostpath-provisioner@sha256:3838d8e713d2e85a807a9c302501b25c248ba3f3c1602fe50480e6510de43e11,quay.io/kubevirt/vm-import-operator@sha256:74fc74dab0671ef1098e69872e47bcb6f85a40b4b18a1e23fd6d3cfc36dfee32,quay.io/kubevirt/vm-import-controller@sha256:481f4a493a66d1310734ac135e8dbaa5dc01c9d93f6e9ecc9326b81c1c08dbfe,quay.io/kubevirt/vm-import-virtv2v@sha256:97caccb965d771afefd901c71381b6c1126e4177b477d47f2ca5ca57c5b06593,quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da,quay.io/kubevirt/hyperconverged-cluster-webhook@sha256:e352594403627756bae3b4d6579967819fc544ced3c371d971a090a99fe0a9e9,quay.io/kubevirt/node-maintenance-operator@sha256:71bb8de714dc0de0616050d66405ccb58841930fc1562a61399e1b964a0b678a,quay.io/kubevirt/kubevirt-v2v-conversion@sha256:c620233c71b805004c2cd38927c421b69d99b27cb40af521967e655882b2f815,quay.io/kubevirt/kubevirt-vmware@sha256:ae5ccd98a49ab9e154ce482d2fa73f044b00211f273210a9cd371b40746d3c92 \ No newline at end of file diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index 2ef98dd1ea..e5f5492a79 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -1831,7 +1831,7 @@ spec: - name: NETWORK_ADDONS_VERSION value: v0.44.0 - name: SSP_VERSION - value: v0.1.0-rc.1 + value: v0.1.0-rc.2 - name: NMO_VERSION value: v0.7.0 - name: HPPO_VERSION @@ -2080,25 +2080,16 @@ spec: - name: NODE_LABELLER_IMAGE - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION - value: v0.1.0-rc.1 - image: 
quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + value: v0.1.0-rc.2 + image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: - containerPort: 9443 name: webhook-server protocol: TCP resources: {} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2504,7 +2495,7 @@ spec: name: ovs-cni-marker - image: quay.io/kubevirt/ovs-cni-plugin@sha256:d43d34ed4b1bd0b107c2049d21e33f9f870c36e5bf6dc1d80ab567271735c8da name: ovs-cni-plugin - - image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + - image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: ssp-operator - image: docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21 name: virt-api @@ -2577,6 +2568,7 @@ spec: webhookPath: /mutate-ns-hco-kubevirt-io - admissionReviewVersions: - v1beta1 + containerPort: 9443 deploymentName: ssp-operator failurePolicy: Fail generateName: vssp.kb.io diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index aa72782cc3..a40936b6f4 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -1831,7 +1831,7 @@ spec: - name: NETWORK_ADDONS_VERSION value: v0.44.0 - name: SSP_VERSION - value: v0.1.0-rc.1 + value: v0.1.0-rc.2 - name: NMO_VERSION value: v0.7.0 - name: HPPO_VERSION @@ -2080,25 +2080,16 @@ spec: - name: NODE_LABELLER_IMAGE - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION - value: v0.1.0-rc.1 - image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + value: v0.1.0-rc.2 + image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: - containerPort: 9443 name: webhook-server protocol: TCP resources: {} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: ssp-webhook-server-cert - name: cdi-operator spec: replicas: 1 @@ -2504,7 +2495,7 @@ spec: name: ovs-cni-marker - image: quay.io/kubevirt/ovs-cni-plugin@sha256:d43d34ed4b1bd0b107c2049d21e33f9f870c36e5bf6dc1d80ab567271735c8da name: ovs-cni-plugin - - image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + - image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: ssp-operator - image: docker.io/kubevirt/virt-api@sha256:4d778f63d2f5ecb61d4f6fe8b5ead010836d46d75349f3c3634f5862614e4a21 name: virt-api @@ -2577,6 +2568,7 @@ spec: webhookPath: /mutate-ns-hco-kubevirt-io - admissionReviewVersions: - v1beta1 + containerPort: 9443 deploymentName: 
ssp-operator failurePolicy: Fail generateName: vssp.kb.io diff --git a/deploy/operator.yaml b/deploy/operator.yaml index 20d0689d6b..bf61f790c4 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -56,7 +56,7 @@ spec: - name: NETWORK_ADDONS_VERSION value: v0.44.0 - name: SSP_VERSION - value: v0.1.0-rc.1 + value: v0.1.0-rc.2 - name: NMO_VERSION value: v0.7.0 - name: HPPO_VERSION @@ -331,25 +331,16 @@ spec: - name: NODE_LABELLER_IMAGE - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION - value: v0.1.0-rc.1 - image: quay.io/kubevirt/ssp-operator@sha256:efd9a9cfd67deb53a66464e840e020782ef65814ad37b395804e02ca00498941 + value: v0.1.0-rc.2 + image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: - containerPort: 9443 name: webhook-server protocol: TCP resources: {} - volumeMounts: - - mountPath: /tmp/k8s-webhook-server/serving-certs - name: cert - readOnly: true serviceAccountName: ssp-operator terminationGracePeriodSeconds: 10 - volumes: - - name: cert - secret: - defaultMode: 420 - secretName: ssp-webhook-server-cert --- apiVersion: apps/v1 kind: Deployment diff --git a/hack/config b/hack/config index 13e744a4c6..6160d64a52 100644 --- a/hack/config +++ b/hack/config @@ -3,7 +3,7 @@ KUBEVIRT_VERSION="v0.36.0" CDI_VERSION="v1.28.0" NETWORK_ADDONS_VERSION="v0.44.0" -SSP_VERSION="v0.1.0-rc.1" +SSP_VERSION="v0.1.0-rc.2" NMO_VERSION="v0.7.0" HPPO_VERSION="v0.7.0" HPP_VERSION="v0.7.0" From bcf31730abb45b206eedd00fb826edc1ba58be14 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Tue, 29 Dec 2020 13:27:40 +0200 Subject: [PATCH 13/19] add an upgrade test script to validate old SSP removal Signed-off-by: Zvi Cahana --- hack/check_old_ssp_removed.sh | 83 +++++++++++++++++++++++++++++++++++ hack/upgrade-test.sh | 7 ++- 2 files changed, 88 insertions(+), 2 deletions(-) create mode 100755 hack/check_old_ssp_removed.sh diff --git a/hack/check_old_ssp_removed.sh b/hack/check_old_ssp_removed.sh new file mode 100755 index 0000000000..78fea71b61 --- /dev/null +++ b/hack/check_old_ssp_removed.sh @@ -0,0 +1,83 @@ +#!/bin/bash -e +# +# This file is part of the KubeVirt project +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# Copyright 2019 Red Hat, Inc. +# +# Usage: +# export KUBEVIRT_PROVIDER=okd-4.1 +# make cluster-up +# make upgrade-test +# +# Start deploying the HCO cluster using the latest images shipped +# in quay.io with latest tag: +# - quay.io/kubevirt/hyperconverged-cluster-operator:latest +# - quay.io/kubevirt/hco-container-registry:latest +# +# A new bundle, named 100.0.0, is then created with the content of +# the open PR (this can include new dependent images, new CRDs...). +# A new hco-operator image is created based off of the code in the +# current checkout. +# +# Both the hco-operator image and new registry image is pushed +# to the local registry. +# +# The subscription is checked to verify that it progresses +# to the new version. 
+# +# The hyperconverged-cluster deployment's image is also checked +# to verify that it is updated to the new operator image from +# the local registry. + +# This script checks that old SSP operator resources are removed from the cluster + +OLD_SSP_CRDS=( + "kubevirttemplatevalidators.ssp.kubevirt.io" + "kubevirtcommontemplatesbundles.ssp.kubevirt.io" + "kubevirtnodelabellerbundles.ssp.kubevirt.io" + "kubevirtmetricsaggregations.ssp.kubevirt.io" +) + +OLD_SSP_KINDS=( + "KubevirtTemplateValidator" + "KubevirtCommonTemplatesBundle" + "KubevirtNodeLabellerBundle" + "KubevirtMetricsAggregation" +) + +echo "Check that all old SSP CRDs were removed" + +for CRD in "${OLD_SSP_CRDS[@]}"; do + echo "Check that $CRD CRD was removed" + CRD_RESPONSE=$( $CMD get customresourcedefinition $CRD 2>&1 || true) + if ! grep -q "NotFound" <(echo "$CRD_RESPONSE"); then + echo "expected $CRD CRD to be removed from the cluster" + exit 1 + fi +done + +echo "Check that all old SSP CRs were removed from HCO's related objects" + +RELATED_OBJECTS=$( $CMD get hco $HCO_RESOURCE_NAME -n $HCO_NAMESPACE -o jsonpath="{.status.relatedObjects}") + +for CR in "${OLD_SSP_KINDS[@]}"; do + echo "Check that $CR CR was removed from HCO's related objects" + if grep -q $CR <(echo "$RELATED_OBJECTS"); then + echo "expected $CR CR to be removed from HCO related objects, but found the following entry:" + echo $RELATED_OBJECTS | jq -r ".[] | select(.kind==\"$CR\")" + exit 1 + fi +done + diff --git a/hack/upgrade-test.sh b/hack/upgrade-test.sh index a34f982ab6..35a8dbba71 100755 --- a/hack/upgrade-test.sh +++ b/hack/upgrade-test.sh @@ -41,7 +41,7 @@ # to verify that it is updated to the new operator image from # the local registry. -MAX_STEPS=14 +MAX_STEPS=15 CUR_STEP=1 RELEASE_DELTA="${RELEASE_DELTA:-1}" HCO_DEPLOYMENT_NAME=hco-operator @@ -185,7 +185,7 @@ ${CMD} wait deployment ${HCO_DEPLOYMENT_NAME} ${HCO_WH_DEPLOYMENT_NAME} --for co # Creating a CR immediately after HCO pod started can # cause a connection error "validate-hco.kubevirt.io" webhook. -# Give it a bit of time to cirrectly start the webhook. +# Give it a bit of time to correctly start the webhook. 
sleep 30 CSV=$( ${CMD} get csv -o name -n ${HCO_NAMESPACE}) HCO_API_VERSION=$( ${CMD} get -n ${HCO_NAMESPACE} "${CSV}" -o jsonpath="{ .spec.customresourcedefinitions.owned[?(@.kind=='HyperConverged')].version }") @@ -267,6 +267,9 @@ for hco_pod in $( ${CMD} get pods -n ${HCO_NAMESPACE} -l "name=hyperconverged-cl fi done +Msg "Ensure that old SSP operator resources are removed from the cluster" +./hack/retry 5 30 "CMD=${CMD} HCO_RESOURCE_NAME=${HCO_RESOURCE_NAME} HCO_NAMESPACE=${HCO_NAMESPACE} ./hack/check_old_ssp_removed.sh" + [[ -n ${found_new_running_hco_pod} ]] echo "----- Images after upgrade" From 0318009bdc308cc72947e9fee2877ac42327a8d6 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Mon, 4 Jan 2021 20:08:49 +0200 Subject: [PATCH 14/19] NewHyperConvergedConfig() ==> NewNodePlacement() Signed-off-by: Zvi Cahana --- pkg/controller/commonTestUtils/testUtils.go | 4 ++-- .../hyperconverged_controller_test.go | 8 ++++---- pkg/controller/operands/cdi_test.go | 16 ++++++++-------- pkg/controller/operands/kubevirt_test.go | 16 ++++++++-------- pkg/controller/operands/networkAddons_test.go | 16 ++++++++-------- pkg/controller/operands/ssp_test.go | 16 ++++++++-------- pkg/controller/operands/vmImport_test.go | 16 ++++++++-------- 7 files changed, 46 insertions(+), 46 deletions(-) diff --git a/pkg/controller/commonTestUtils/testUtils.go b/pkg/controller/commonTestUtils/testUtils.go index e65629b188..732441763d 100644 --- a/pkg/controller/commonTestUtils/testUtils.go +++ b/pkg/controller/commonTestUtils/testUtils.go @@ -67,7 +67,7 @@ func NewReq(inst *hcov1beta1.HyperConverged) *common.HcoRequest { } } -func NewHyperConvergedConfig() *sdkapi.NodePlacement { +func NewNodePlacement() *sdkapi.NodePlacement { seconds1, seconds2 := int64(1), int64(2) return &sdkapi.NodePlacement{ NodeSelector: map[string]string{ @@ -99,7 +99,7 @@ func NewHyperConvergedConfig() *sdkapi.NodePlacement { } } -func NewHyperConvergedConfig2() *sdkapi.NodePlacement { +func NewOtherNodePlacement() *sdkapi.NodePlacement { seconds3, seconds4 := int64(3), int64(4) return &sdkapi.NodePlacement{ NodeSelector: map[string]string{ diff --git a/pkg/controller/hyperconverged/hyperconverged_controller_test.go b/pkg/controller/hyperconverged/hyperconverged_controller_test.go index 45551d4c49..c7746fb42a 100644 --- a/pkg/controller/hyperconverged/hyperconverged_controller_test.go +++ b/pkg/controller/hyperconverged/hyperconverged_controller_test.go @@ -366,8 +366,8 @@ var _ = Describe("HyperconvergedController", func() { It("should increment counter when out-of-band change overwritten", func() { hco := commonTestUtils.NewHco() - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := operands.NewKubeVirt(hco, namespace) // now, modify KV's node placement @@ -424,8 +424,8 @@ var _ = Describe("HyperconvergedController", func() { It("should not increment counter when CR was changed by HCO", func() { hco := commonTestUtils.NewHco() - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + 
hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := operands.NewKubeVirt(hco, namespace) // now, modify KV's node placement diff --git a/pkg/controller/operands/cdi_test.go b/pkg/controller/operands/cdi_test.go index aadf7000ff..19cc63cad5 100644 --- a/pkg/controller/operands/cdi_test.go +++ b/pkg/controller/operands/cdi_test.go @@ -112,8 +112,8 @@ var _ = Describe("CDI Operand", func() { It("should add node placement if missing in CDI", func() { existingResource := NewCDI(hco) - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := (*genericOperand)(newCdiHandler(cl, commonTestUtils.GetScheme())) @@ -150,8 +150,8 @@ var _ = Describe("CDI Operand", func() { It("should remove node placement if missing in HCO CR", func() { hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewCDI(hcoNodePlacement) cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) @@ -187,8 +187,8 @@ var _ = Describe("CDI Operand", func() { }) It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewCDI(hco) // now, modify HCO's node placement @@ -224,8 +224,8 @@ var _ = Describe("CDI Operand", func() { }) It("should overwrite node placement if directly set on CDI CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewCDI(hco) // mock a reconciliation triggered by a change in CDI CR diff --git a/pkg/controller/operands/kubevirt_test.go b/pkg/controller/operands/kubevirt_test.go index edbc67f7e5..525633daa4 100644 --- a/pkg/controller/operands/kubevirt_test.go +++ b/pkg/controller/operands/kubevirt_test.go @@ -340,8 +340,8 @@ var _ = 
Describe("KubeVirt Operand", func() { It("should add node placement if missing in KubeVirt", func() { existingResource := NewKubeVirt(hco) - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := (*genericOperand)(newKubevirtHandler(cl, commonTestUtils.GetScheme())) @@ -377,8 +377,8 @@ var _ = Describe("KubeVirt Operand", func() { It("should remove node placement if missing in HCO CR", func() { hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewKubeVirt(hcoNodePlacement) cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) @@ -406,8 +406,8 @@ var _ = Describe("KubeVirt Operand", func() { }) It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewKubeVirt(hco) // now, modify HCO's node placement @@ -452,8 +452,8 @@ var _ = Describe("KubeVirt Operand", func() { }) It("should overwrite node placement if directly set on KV CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewKubeVirt(hco) // mock a reconciliation triggered by a change in KV CR diff --git a/pkg/controller/operands/networkAddons_test.go b/pkg/controller/operands/networkAddons_test.go index 015b19302e..4d55073157 100644 --- a/pkg/controller/operands/networkAddons_test.go +++ b/pkg/controller/operands/networkAddons_test.go @@ -117,8 +117,8 @@ var _ = Describe("CNA Operand", func() { It("should add node placement if missing in CNAO", func() { existingResource := NewNetworkAddons(hco) - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = 
hcov1beta1.HyperConvergedConfig{commonTestUtils.NewNodePlacement()} cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := (*genericOperand)(newCnaHandler(cl, commonTestUtils.GetScheme())) @@ -150,8 +150,8 @@ var _ = Describe("CNA Operand", func() { It("should remove node placement if missing in HCO CR", func() { hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewNetworkAddons(hcoNodePlacement) cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) @@ -176,8 +176,8 @@ var _ = Describe("CNA Operand", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewNetworkAddons(hco) // now, modify HCO's node placement @@ -214,8 +214,8 @@ var _ = Describe("CNA Operand", func() { }) It("should overwrite node placement if directly set on CNAO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewNetworkAddons(hco) // mock a reconciliation triggered by a change in CNAO CR diff --git a/pkg/controller/operands/ssp_test.go b/pkg/controller/operands/ssp_test.go index 8f81ad2072..9ab3a5824b 100644 --- a/pkg/controller/operands/ssp_test.go +++ b/pkg/controller/operands/ssp_test.go @@ -115,8 +115,8 @@ var _ = Describe("SSP Operands", func() { It("should add node placement if missing", func() { existingResource := NewSSP(hco, commonTestUtils.Namespace) - hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig2() + hco.Spec.Workloads.NodePlacement = commonTestUtils.NewNodePlacement() + hco.Spec.Infra.NodePlacement = commonTestUtils.NewOtherNodePlacement() cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := newSspHandler(cl, commonTestUtils.GetScheme()) @@ -144,8 +144,8 @@ var _ = Describe("SSP Operands", func() { It("should remove node placement if missing in HCO CR", func() { hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig2() + hcoNodePlacement.Spec.Workloads.NodePlacement = commonTestUtils.NewNodePlacement() + 
hcoNodePlacement.Spec.Infra.NodePlacement = commonTestUtils.NewOtherNodePlacement() existingResource := NewSSP(hcoNodePlacement, commonTestUtils.Namespace) cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) @@ -173,8 +173,8 @@ var _ = Describe("SSP Operands", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Workloads.NodePlacement = commonTestUtils.NewHyperConvergedConfig() - hco.Spec.Infra.NodePlacement = commonTestUtils.NewHyperConvergedConfig2() + hco.Spec.Workloads.NodePlacement = commonTestUtils.NewNodePlacement() + hco.Spec.Infra.NodePlacement = commonTestUtils.NewOtherNodePlacement() existingResource := NewSSP(hco, commonTestUtils.Namespace) // now, modify HCO's node placement @@ -224,8 +224,8 @@ var _ = Describe("SSP Operands", func() { }) It("should overwrite node placement if directly set on SSP CR", func() { - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig2()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewOtherNodePlacement()} existingResource := NewSSP(hco, commonTestUtils.Namespace) // mock a reconciliation triggered by a change in NewKubeVirtNodeLabellerBundle CR diff --git a/pkg/controller/operands/vmImport_test.go b/pkg/controller/operands/vmImport_test.go index 3468a901bf..34e74ae867 100644 --- a/pkg/controller/operands/vmImport_test.go +++ b/pkg/controller/operands/vmImport_test.go @@ -101,8 +101,8 @@ var _ = Describe("VM-Import", func() { It("should add node placement if missing in VM-Import", func() { existingResource := NewVMImportForCR(hco) - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) handler := (*genericOperand)(newVmImportHandler(cl, commonTestUtils.GetScheme())) @@ -141,8 +141,8 @@ var _ = Describe("VM-Import", func() { It("should remove node placement if missing in HCO CR", func() { hcoNodePlacement := commonTestUtils.NewHco() - hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hcoNodePlacement.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hcoNodePlacement.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewVMImportForCR(hcoNodePlacement) cl := commonTestUtils.InitClient([]runtime.Object{hco, existingResource}) @@ -174,8 +174,8 @@ var _ = Describe("VM-Import", func() { It("should modify node placement according to HCO CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: 
commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewVMImportForCR(hco) // now, modify HCO's node placement @@ -214,8 +214,8 @@ var _ = Describe("VM-Import", func() { }) It("should overwrite node placement if directly set on VMImport CR", func() { - hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} - hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewHyperConvergedConfig()} + hco.Spec.Infra = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} + hco.Spec.Workloads = hcov1beta1.HyperConvergedConfig{NodePlacement: commonTestUtils.NewNodePlacement()} existingResource := NewVMImportForCR(hco) // mock a reconciliation triggered by a change in VMImport CR From 7768ed6935b6ee5f0466b3eddea12051e0b3b1ad Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Mon, 4 Jan 2021 22:25:51 +0200 Subject: [PATCH 15/19] Temporary workaround in SSP CSV: disable webhook Signed-off-by: Zvi Cahana --- ...operator.v1.3.0.clusterserviceversion.yaml | 21 ++--------------- ...operator.v1.3.0.clusterserviceversion.yaml | 23 +++---------------- deploy/operator.yaml | 2 ++ hack/build-manifests.sh | 12 ++++++++++ 4 files changed, 19 insertions(+), 39 deletions(-) diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index e5f5492a79..c643cdd0c3 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -2081,6 +2081,8 @@ spec: - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION value: v0.1.0-rc.2 + - name: ENABLE_WEBHOOKS + value: "false" image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: @@ -2566,25 +2568,6 @@ spec: timeoutSeconds: 30 type: MutatingAdmissionWebhook webhookPath: /mutate-ns-hco-kubevirt-io - - admissionReviewVersions: - - v1beta1 - containerPort: 9443 - deploymentName: ssp-operator - failurePolicy: Fail - generateName: vssp.kb.io - rules: - - apiGroups: - - ssp.kubevirt.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - ssps - sideEffects: None - type: ValidatingAdmissionWebhook - webhookPath: /validate-ssp-kubevirt-io-v1beta1-ssp - admissionReviewVersions: - v1beta1 containerPort: 8443 diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index a40936b6f4..c6561e8f44 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -8,7 +8,7 @@ metadata: categories: OpenShift Optional certified: "false" containerImage: 
quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da - createdAt: "2020-12-24 04:16:53" + createdAt: "2021-01-04 22:22:44" description: |- **HyperConverged Cluster Operator** is an Operator pattern for managing multi-operator products. Specifcally, the HyperConverged Cluster Operator manages the deployment of KubeVirt, @@ -2081,6 +2081,8 @@ spec: - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION value: v0.1.0-rc.2 + - name: ENABLE_WEBHOOKS + value: "false" image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: @@ -2566,25 +2568,6 @@ spec: timeoutSeconds: 30 type: MutatingAdmissionWebhook webhookPath: /mutate-ns-hco-kubevirt-io - - admissionReviewVersions: - - v1beta1 - containerPort: 9443 - deploymentName: ssp-operator - failurePolicy: Fail - generateName: vssp.kb.io - rules: - - apiGroups: - - ssp.kubevirt.io - apiVersions: - - v1beta1 - operations: - - CREATE - - UPDATE - resources: - - ssps - sideEffects: None - type: ValidatingAdmissionWebhook - webhookPath: /validate-ssp-kubevirt-io-v1beta1-ssp - admissionReviewVersions: - v1beta1 containerPort: 8443 diff --git a/deploy/operator.yaml b/deploy/operator.yaml index bf61f790c4..ba73fbc6c3 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -332,6 +332,8 @@ spec: - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION value: v0.1.0-rc.2 + - name: ENABLE_WEBHOOKS + value: "false" image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: diff --git a/hack/build-manifests.sh b/hack/build-manifests.sh index f3afe77af3..23d9d2b45a 100755 --- a/hack/build-manifests.sh +++ b/hack/build-manifests.sh @@ -156,6 +156,18 @@ function create_ssp_csv() { " gen_csv ${SSP_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs} + + # Temporarily remove CSV webhook to workaround BZ#1908596: + # 1. Convert CSV to JSON + # 2. Remove SSP webhook definition + # 3. Set ENABLE_WEBHOOKS=false for the SSP container + # 4. Convert CSV back to YAML + sspCsv="${TEMPDIR}/${operatorName}.${CSV_EXT}" + csvJson=$(kubectl patch -f $sspCsv --local --type json --patch '[]' -o json) + csvJson=$(echo "$csvJson" | jq 'del(.spec.webhookdefinitions[] | select(.deploymentName == "ssp-operator"))') + csvJson=$(echo "$csvJson" | jq '(.spec.install.spec.deployments[] | select(.name == "ssp-operator").spec.template.spec.containers[0].env) += [{ "name": "ENABLE_WEBHOOKS", "value": "false" }]') + echo "$csvJson" | kubectl patch -f - --local --type json --patch '[]' -o yaml > $sspCsv + echo "${operatorName}" } From dd3526d870ec58d1683ccf83f71f02ded9637773 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Tue, 5 Jan 2021 07:13:05 +0200 Subject: [PATCH 16/19] Revert "Temporary workaround in SSP CSV: disable webhook" This reverts commit 7768ed6935b6ee5f0466b3eddea12051e0b3b1ad. 
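An aside on the workaround above: the empty JSON patch passed with --local never touches a cluster, so the two kubectl invocations act purely as YAML/JSON converters around the jq edits. A standalone sketch of the same round-trip, using an illustrative file name rather than the script's temp path:

    # Convert the CSV manifest to JSON locally (the empty patch '[]' is a no-op, used only for format conversion)
    csvJson=$(kubectl patch -f my-csv.yaml --local --type json --patch '[]' -o json)
    # Drop the webhook definition owned by the ssp-operator deployment
    csvJson=$(echo "$csvJson" | jq 'del(.spec.webhookdefinitions[] | select(.deploymentName == "ssp-operator"))')
    # Convert back to YAML with the same no-op patch and write the result over the original file
    echo "$csvJson" | kubectl patch -f - --local --type json --patch '[]' -o yaml > my-csv.yaml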
Signed-off-by: Zvi Cahana --- ...operator.v1.3.0.clusterserviceversion.yaml | 21 +++++++++++++++-- ...operator.v1.3.0.clusterserviceversion.yaml | 23 ++++++++++++++++--- deploy/operator.yaml | 2 -- hack/build-manifests.sh | 12 ---------- 4 files changed, 39 insertions(+), 19 deletions(-) diff --git a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index c643cdd0c3..e5f5492a79 100644 --- a/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/index-image/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -2081,8 +2081,6 @@ spec: - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION value: v0.1.0-rc.2 - - name: ENABLE_WEBHOOKS - value: "false" image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: @@ -2568,6 +2566,25 @@ spec: timeoutSeconds: 30 type: MutatingAdmissionWebhook webhookPath: /mutate-ns-hco-kubevirt-io + - admissionReviewVersions: + - v1beta1 + containerPort: 9443 + deploymentName: ssp-operator + failurePolicy: Fail + generateName: vssp.kb.io + rules: + - apiGroups: + - ssp.kubevirt.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ssps + sideEffects: None + type: ValidatingAdmissionWebhook + webhookPath: /validate-ssp-kubevirt-io-v1beta1-ssp - admissionReviewVersions: - v1beta1 containerPort: 8443 diff --git a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml index c6561e8f44..a40936b6f4 100644 --- a/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml +++ b/deploy/olm-catalog/kubevirt-hyperconverged/1.3.0/kubevirt-hyperconverged-operator.v1.3.0.clusterserviceversion.yaml @@ -8,7 +8,7 @@ metadata: categories: OpenShift Optional certified: "false" containerImage: quay.io/kubevirt/hyperconverged-cluster-operator@sha256:e7356254a1251f1fba682be77c9a90b1f840d6a84ff88492fb1e87ae30dda9da - createdAt: "2021-01-04 22:22:44" + createdAt: "2020-12-24 04:16:53" description: |- **HyperConverged Cluster Operator** is an Operator pattern for managing multi-operator products. 
Specifcally, the HyperConverged Cluster Operator manages the deployment of KubeVirt, @@ -2081,8 +2081,6 @@ spec: - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION value: v0.1.0-rc.2 - - name: ENABLE_WEBHOOKS - value: "false" image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: @@ -2568,6 +2566,25 @@ spec: timeoutSeconds: 30 type: MutatingAdmissionWebhook webhookPath: /mutate-ns-hco-kubevirt-io + - admissionReviewVersions: + - v1beta1 + containerPort: 9443 + deploymentName: ssp-operator + failurePolicy: Fail + generateName: vssp.kb.io + rules: + - apiGroups: + - ssp.kubevirt.io + apiVersions: + - v1beta1 + operations: + - CREATE + - UPDATE + resources: + - ssps + sideEffects: None + type: ValidatingAdmissionWebhook + webhookPath: /validate-ssp-kubevirt-io-v1beta1-ssp - admissionReviewVersions: - v1beta1 containerPort: 8443 diff --git a/deploy/operator.yaml b/deploy/operator.yaml index ba73fbc6c3..bf61f790c4 100644 --- a/deploy/operator.yaml +++ b/deploy/operator.yaml @@ -332,8 +332,6 @@ spec: - name: CPU_PLUGIN_IMAGE - name: OPERATOR_VERSION value: v0.1.0-rc.2 - - name: ENABLE_WEBHOOKS - value: "false" image: quay.io/kubevirt/ssp-operator@sha256:1466407727c1c802b2b1191b1f5a22a0a8933a39bdf68556a2f97607f7c523aa name: manager ports: diff --git a/hack/build-manifests.sh b/hack/build-manifests.sh index 23d9d2b45a..f3afe77af3 100755 --- a/hack/build-manifests.sh +++ b/hack/build-manifests.sh @@ -156,18 +156,6 @@ function create_ssp_csv() { " gen_csv ${SSP_CSV_GENERATOR} ${operatorName} "${SSP_OPERATOR_IMAGE}" ${dumpCRDsArg} ${operatorArgs} - - # Temporarily remove CSV webhook to workaround BZ#1908596: - # 1. Convert CSV to JSON - # 2. Remove SSP webhook definition - # 3. Set ENABLE_WEBHOOKS=false for the SSP container - # 4. Convert CSV back to YAML - sspCsv="${TEMPDIR}/${operatorName}.${CSV_EXT}" - csvJson=$(kubectl patch -f $sspCsv --local --type json --patch '[]' -o json) - csvJson=$(echo "$csvJson" | jq 'del(.spec.webhookdefinitions[] | select(.deploymentName == "ssp-operator"))') - csvJson=$(echo "$csvJson" | jq '(.spec.install.spec.deployments[] | select(.name == "ssp-operator").spec.template.spec.containers[0].env) += [{ "name": "ENABLE_WEBHOOKS", "value": "false" }]') - echo "$csvJson" | kubectl patch -f - --local --type json --patch '[]' -o yaml > $sspCsv - echo "${operatorName}" } From 1566f99902637e31e90b7346d6bd3871124922fc Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Tue, 5 Jan 2021 21:02:12 +0200 Subject: [PATCH 17/19] /hack/retry ==> /hack/retry.sh (doh!) 
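This patch and the next one only adjust calls to hack/retry.sh. Assuming its first argument is the number of attempts and the second the sleep in seconds between attempts (an assumption, not verified against the script itself), the retry budgets work out roughly as sketched below:

    # Assumed signature (not taken from hack/retry.sh): retry.sh <attempts> <sleep-seconds> <command>
    ./hack/retry.sh 5 30 "true"     # up to 5 attempts, 30s apart  -> worst case about 2.5 minutes
    ./hack/retry.sh 90 10 "true"    # up to 90 attempts, 10s apart -> worst case about 15 minutes (was 30 x 10s, about 5 minutes)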
Signed-off-by: Zvi Cahana --- hack/upgrade-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/upgrade-test.sh b/hack/upgrade-test.sh index 35a8dbba71..9f2e58b139 100755 --- a/hack/upgrade-test.sh +++ b/hack/upgrade-test.sh @@ -268,7 +268,7 @@ for hco_pod in $( ${CMD} get pods -n ${HCO_NAMESPACE} -l "name=hyperconverged-cl done Msg "Ensure that old SSP operator resources are removed from the cluster" -./hack/retry 5 30 "CMD=${CMD} HCO_RESOURCE_NAME=${HCO_RESOURCE_NAME} HCO_NAMESPACE=${HCO_NAMESPACE} ./hack/check_old_ssp_removed.sh" +./hack/retry.sh 5 30 "CMD=${CMD} HCO_RESOURCE_NAME=${HCO_RESOURCE_NAME} HCO_NAMESPACE=${HCO_NAMESPACE} ./hack/check_old_ssp_removed.sh" [[ -n ${found_new_running_hco_pod} ]] From 1b84fac56738d478ac56a554afa9ff07beb19d21 Mon Sep 17 00:00:00 2001 From: Zvi Cahana Date: Thu, 7 Jan 2021 10:15:42 +0200 Subject: [PATCH 18/19] Increase timeout waiting for new CSV to report success Signed-off-by: Zvi Cahana --- hack/upgrade-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hack/upgrade-test.sh b/hack/upgrade-test.sh index 9f2e58b139..e8d8903a35 100755 --- a/hack/upgrade-test.sh +++ b/hack/upgrade-test.sh @@ -248,7 +248,7 @@ timeout 20m bash -c 'export CMD="${CMD}";exec ./hack/check-state.sh' # Make sure the CSV is installed properly. Msg "Read the CSV to make sure the deployment is done" -./hack/retry.sh 30 10 "${CMD} get ClusterServiceVersion -n ${HCO_NAMESPACE} kubevirt-hyperconverged-operator.v${TARGET_VERSION} -o jsonpath='{ .status.phase }' | grep 'Succeeded'" +./hack/retry.sh 90 10 "${CMD} get ClusterServiceVersion -n ${HCO_NAMESPACE} kubevirt-hyperconverged-operator.v${TARGET_VERSION} -o jsonpath='{ .status.phase }' | grep 'Succeeded'" echo "----- Pod after upgrade" From eef8d0c08513b24844926ad785a09c61fb12dc9a Mon Sep 17 00:00:00 2001 From: Simone Tiraboschi Date: Thu, 7 Jan 2021 12:19:19 +0100 Subject: [PATCH 19/19] test: tolerate conditions with Unknown status Due to other errors, the reconciliation loop can fail and HCO conditions can temporarily assume Unknown status, which is semantically acceptable. Let's tolerate it. Test code only.
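A minimal sketch of the tolerant check this patch applies, with an illustrative variable in place of the values check-state.sh extracts via sed:

    # Kubernetes conditions may legitimately report True, False or Unknown; anything else is an error.
    STATUS="Unknown"   # illustrative; the script derives this from the HCO CR status
    if [ "$STATUS" != 'True' ] && [ "$STATUS" != 'False' ] && [ "$STATUS" != 'Unknown' ]; then
        echo "Error: condition status not valid: ${STATUS}"
        EXIT_STATUS=1
    fi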
Signed-off-by: Simone Tiraboschi --- hack/check-state.sh | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/hack/check-state.sh b/hack/check-state.sh index c21a73fe32..706ba2141e 100755 --- a/hack/check-state.sh +++ b/hack/check-state.sh @@ -70,10 +70,10 @@ function get_operator_json() { fi if [ "$has_reconcile" != "0" ]; then - # if present: extract APPLICATION_AVAILABLE value : check that it has a valid value (either True or False) + # if present: extract RECONCILE_COMPLETED value : check that it has a valid value (either True, False or Unknown) RECONCILE_COMPLETED=`printf '%s' "$HCO_DATA" | sed -e 's/.*ReconcileComplete: "\([^"]*\)".*$/\1/'` RECONCILE_COMPLETED_DATA=`printf '%s' "$HCO_DATA" | sed -e 's/.*ReconcileComplete: "\([^"]*\)" "\([^"]*\)" "\([^"]*\)".*$/Status: \1 Reason: \2 Message: \3/'` - if [ "$RECONCILE_COMPLETED" != 'True' ] && [ "$RECONCILE_COMPLETED" != 'False' ] && [ "$RECONCILE_COMPLETED" != 'Unknown' ] ; then + if [ "$RECONCILE_COMPLETED" != 'True' ] && [ "$RECONCILE_COMPLETED" != 'False' ] && [ "$RECONCILE_COMPLETED" != 'Unknown' ] ; then echo "Error: ReconcileComplete not valid: $RECONCILE_COMPLETED_DATA" echo "Error: ReconcileComplete not valid: '${RECONCILE_COMPLETED}'" echo "HCO_DATA: $HCO_DATA" @@ -85,10 +85,10 @@ function get_operator_json() { fi fi - # extract APPLICATION_AVAILABLE value : check that it has a valid value (either True or False) + # extract APPLICATION_AVAILABLE value : check that it has a valid value (either True, False or Unknown) APPLICATION_AVAILABLE_DATA=`printf '%s' "$HCO_DATA" | sed -e 's/.*ApplicationAvailable: "\([^"]*\)" "\([^"]*\)" "\([^"]*\)".*$/Status: \1 Reason: \2 Message: \3/'` APPLICATION_AVAILABLE=`printf '%s' "$HCO_DATA" | sed -e 's/.*ApplicationAvailable: "\([^"]*\)".*$/\1/'` - if [ "$APPLICATION_AVAILABLE" != 'True' ] && [ "$APPLICATION_AVAILABLE" != 'False' ]; then + if [ "$APPLICATION_AVAILABLE" != 'True' ] && [ "$APPLICATION_AVAILABLE" != 'False' ] && [ "$APPLICATION_AVAILABLE" != 'Unknown' ] ; then echo "Error: ApplicationAvailable not valid: $APPLICATION_AVAILABLE_DATA" echo "Error: ApplicationAvailable not valid: $APPLICATION_AVAILABLE" EXIT_STATUS=1 @@ -96,10 +96,10 @@ function get_operator_json() { return fi - # extract OPERATION_PROGRESSING value : check that it has a valid value (either True or False) + # extract OPERATION_PROGRESSING value : check that it has a valid value (either True, False or Unknown) OPERATION_PROGRESSING_DATA=`printf '%s' "$HCO_DATA" | sed -e 's/.*ApplicationProgressing: "\([^"]*\)" "\([^"]*\)" "\([^"]*\)".*$/Status: \1 Reason: \2 Message: \3/'` OPERATION_PROGRESSING=`printf '%s' "$HCO_DATA" | sed -e 's/.*ApplicationProgressing: "\([^"]*\)".*$/\1/'` - if [ "$OPERATION_PROGRESSING" != 'True' ] && [ "$OPERATION_PROGRESSING" != 'False' ]; then + if [ "$OPERATION_PROGRESSING" != 'True' ] && [ "$OPERATION_PROGRESSING" != 'False' ] && [ "$OPERATION_PROGRESSING" != 'Unknown' ] ; then echo "Error: OperationProgressing not valid: $OPERATION_PROGRESSING_DATA" echo "Error: OperationProgressing not valid: $OPERATION_PROGRESSING" EXIT_STATUS=1 @@ -107,10 +107,10 @@ function get_operator_json() { return fi - # extract APPLICATION_DEGRADED value : check that it has a valid value (either True or False) + # extract APPLICATION_DEGRADED value : check that it has a valid value (either True, False or Unknown) APPLICATION_DEGRADED_DATA=`printf '%s' "$HCO_DATA" | sed -e 's/.*ApplicationDegraded: "\([^"]*\)" "\([^"]*\)" "\([^"]*\)".*$/Status: \1 Reason: \2 Message: \3/'` 
APPLICATION_DEGRADED=`printf '%s' "$HCO_DATA" | sed -e 's/.*ApplicationDegraded: "\([^"]*\)".*$/\1/'` - if [ "$APPLICATION_DEGRADED" != 'True' ] && [ "$APPLICATION_DEGRADED" != 'False' ]; then + if [ "$APPLICATION_DEGRADED" != 'True' ] && [ "$APPLICATION_DEGRADED" != 'False' ] && [ "$APPLICATION_DEGRADED" != 'Unknown' ] ; then echo "Error: ApplicationDegraded not valid: $APPLICATION_DEGRADED_DATA" echo "Error: ApplicationDegraded not valid: $APPLICATION_DEGRADED" EXIT_STATUS=1