From e6dedf186e6eaf3ed359f343f1f43998f70d66dc Mon Sep 17 00:00:00 2001 From: Jian Qiu Date: Tue, 23 Jan 2024 14:32:49 +0800 Subject: [PATCH] Bump api and addon-framework deps Signed-off-by: Jian Qiu --- ...ter-management.io_clustermanagers.crd.yaml | 63 ++ ...cluster-manager.clusterserviceversion.yaml | 2 +- ...cluster-management.io_clustermanagers.yaml | 63 ++ ...cluster-management.io_klusterlets.crd.yaml | 55 +- .../klusterlet.clusterserviceversion.yaml | 2 +- ...pen-cluster-management.io_klusterlets.yaml | 55 +- go.mod | 6 +- go.sum | 12 +- pkg/common/helpers/clusters.go | 3 +- .../controllers/scheduling/enqueue.go | 5 +- vendor/modules.txt | 10 +- .../pkg/addonfactory/helm_agentaddon.go | 8 +- .../controllers/addonconfig/controller.go | 66 +- .../controllers/agentdeploy/controller.go | 49 +- .../agentdeploy/hosted_hook_sync.go | 46 +- .../controllers/agentdeploy/utils.go | 2 +- .../managementaddonconfig/controller.go | 63 +- .../controllers/registration/controller.go | 66 +- .../controllers/addonconfiguration/graph.go | 45 +- .../api/cluster/v1alpha1/helpers.go | 617 ------------------ .../cluster/v1alpha1/zz_generated.deepcopy.go | 86 +-- .../api/cluster/v1beta1/helpers.go | 274 -------- .../cluster/v1beta1/zz_generated.deepcopy.go | 33 - .../api/cluster/v1beta2/helpers.go | 110 ---- ...cluster-management.io_klusterlets.crd.yaml | 40 +- ...cluster-management.io_klusterlets.crd.yaml | 55 +- ...ter-management.io_clustermanagers.crd.yaml | 63 ++ .../api/operator/v1/funcs_clustermanager.go | 5 + .../api/operator/v1/funcs_klusterlet.go | 5 + .../operator/v1/type_resourcerequirement.go | 28 + .../api/operator/v1/types_clustermanager.go | 5 + .../api/operator/v1/types_klusterlet.go | 23 +- .../api/operator/v1/zz_generated.deepcopy.go | 12 +- .../v1/zz_generated.swagger_doc_generated.go | 11 +- .../pkg/apis/work/v1/builder}/workbuilder.go | 2 +- 35 files changed, 581 insertions(+), 1409 deletions(-) delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go delete mode 100644 vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go create mode 100644 vendor/open-cluster-management.io/api/operator/v1/funcs_clustermanager.go create mode 100644 vendor/open-cluster-management.io/api/operator/v1/funcs_klusterlet.go create mode 100644 vendor/open-cluster-management.io/api/operator/v1/type_resourcerequirement.go rename vendor/open-cluster-management.io/{api/utils/work/v1/workbuilder => sdk-go/pkg/apis/work/v1/builder}/workbuilder.go (99%) diff --git a/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index b57e95f5f..8cc24234b 100644 --- a/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/deploy/cluster-manager/config/crds/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -259,6 +259,69 @@ spec: description: RegistrationImagePullSpec represents the desired image of registration controller/webhook installed on hub. type: string + resourceRequirement: + description: ResourceRequirement specify QoS classes of deployments + managed by clustermanager. It applies to all the containers in the + deployments. 
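The resourceRequirement stanza added below (and mirrored in the klusterlet CRDs later in this patch) exposes the new operator/v1 ResourceRequirement type, which now supports an explicit ResourceRequirement QoS class alongside Default and BestEffort. A minimal consumption sketch; the Go field and constant names are inferred from the CRD text and the added type_resourcerequirement.go, so treat them as assumptions:

package main

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	operatorv1 "open-cluster-management.io/api/operator/v1"
)

// explicitResourceRequirement pins requests/limits on every container
// of the operator-managed deployments, instead of relying on the
// Default or BestEffort QoS classes.
func explicitResourceRequirement() *operatorv1.ResourceRequirement {
	return &operatorv1.ResourceRequirement{
		// Constant name taken from the CRD description below
		// ("when Type is ResourceQosClassResourceRequirement").
		Type: operatorv1.ResourceQosClassResourceRequirement,
		ResourceRequirements: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{
				corev1.ResourceCPU:    resource.MustParse("100m"),
				corev1.ResourceMemory: resource.MustParse("128Mi"),
			},
			Limits: corev1.ResourceList{
				corev1.ResourceMemory: resource.MustParse("256Mi"),
			},
		},
	}
}

The same struct hangs off both ClusterManagerSpec and KlusterletSpec, per the paired CRD updates in this patch.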
+ properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and + limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: + default: Default + enum: + - Default + - BestEffort + - ResourceRequirement + type: string + type: object workConfiguration: description: WorkConfiguration contains the configuration of work properties: diff --git a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml index 247583119..ebafd0d1b 100644 --- a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml +++ b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/cluster-manager.clusterserviceversion.yaml @@ -59,7 +59,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-01-15T16:05:25Z" + createdAt: "2024-01-23T06:30:04Z" description: Manages the installation and upgrade of the ClusterManager. 
operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml index 47c956c68..12dd60949 100644 --- a/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml +++ b/deploy/cluster-manager/olm-catalog/cluster-manager/manifests/operator.open-cluster-management.io_clustermanagers.yaml @@ -259,6 +259,69 @@ spec: description: RegistrationImagePullSpec represents the desired image of registration controller/webhook installed on hub. type: string + resourceRequirement: + description: ResourceRequirement specify QoS classes of deployments + managed by clustermanager. It applies to all the containers in the + deployments. + properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and + limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: + default: Default + enum: + - Default + - BestEffort + - ResourceRequirement + type: string + type: object workConfiguration: description: WorkConfiguration contains the configuration of work properties: diff --git a/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index df8d8324d..ad6e6434b 100644 --- a/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/deploy/klusterlet/config/crds/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -244,14 +244,65 @@ spec: will be used if unspecified. type: string resourceRequirement: - description: ResourceRequirement specify QoS classes of klusterlet - deployment + description: ResourceRequirement specify QoS classes of deployments + managed by klusterlet. It applies to all the containers in the deployments. properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and + limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object type: default: Default enum: - Default - BestEffort + - ResourceRequirement type: string type: object workConfiguration: diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml b/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml index d9f529a9d..8e8b7fa63 100644 --- a/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/klusterlet.clusterserviceversion.yaml @@ -31,7 +31,7 @@ metadata: categories: Integration & Delivery,OpenShift Optional certified: "false" containerImage: quay.io/open-cluster-management/registration-operator:latest - createdAt: "2024-01-15T16:05:25Z" + createdAt: "2024-01-23T06:30:04Z" description: Manages the installation and upgrade of the Klusterlet. operators.operatorframework.io/builder: operator-sdk-v1.32.0 operators.operatorframework.io/project_layout: go.kubebuilder.io/v3 diff --git a/deploy/klusterlet/olm-catalog/klusterlet/manifests/operator.open-cluster-management.io_klusterlets.yaml b/deploy/klusterlet/olm-catalog/klusterlet/manifests/operator.open-cluster-management.io_klusterlets.yaml index 6bf31a7d0..3a476d8ef 100644 --- a/deploy/klusterlet/olm-catalog/klusterlet/manifests/operator.open-cluster-management.io_klusterlets.yaml +++ b/deploy/klusterlet/olm-catalog/klusterlet/manifests/operator.open-cluster-management.io_klusterlets.yaml @@ -244,14 +244,65 @@ spec: will be used if unspecified. type: string resourceRequirement: - description: ResourceRequirement specify QoS classes of klusterlet - deployment + description: ResourceRequirement specify QoS classes of deployments + managed by klusterlet. It applies to all the containers in the deployments. properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and + limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. 
If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object type: default: Default enum: - Default - BestEffort + - ResourceRequirement type: string type: object workConfiguration: diff --git a/go.mod b/go.mod index ecf75ff02..f6dfca837 100644 --- a/go.mod +++ b/go.mod @@ -33,9 +33,9 @@ require ( k8s.io/klog/v2 v2.110.1 k8s.io/kube-aggregator v0.29.0 k8s.io/utils v0.0.0-20240102154912-e7106e64919e - open-cluster-management.io/addon-framework v0.8.1-0.20240119025526-d2afcef1ff66 - open-cluster-management.io/api v0.12.1-0.20240115071352-3d94ce8f3499 - open-cluster-management.io/sdk-go v0.0.0-20240118073603-6da392d39be0 + open-cluster-management.io/addon-framework v0.8.1-0.20240123051722-71f1b13cbb63 + open-cluster-management.io/api v0.12.1-0.20240122084346-e7bd1bd9ea6a + open-cluster-management.io/sdk-go v0.0.0-20240122034348-9793ade2466b sigs.k8s.io/controller-runtime v0.16.2 sigs.k8s.io/kube-storage-version-migrator v0.0.6-0.20230721195810-5c8923c5ff96 ) diff --git a/go.sum b/go.sum index 06990d100..dc0202017 100644 --- a/go.sum +++ b/go.sum @@ -450,12 +450,12 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/A k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ= k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -open-cluster-management.io/addon-framework v0.8.1-0.20240119025526-d2afcef1ff66 h1:d6vkhLLdam7/DLmZ4onPSbsRNHP3Rze0/0OkmjJgVjk= -open-cluster-management.io/addon-framework v0.8.1-0.20240119025526-d2afcef1ff66/go.mod h1:aj97pgpGJ0/LpQzBVtU2oDFqqIiZLOPnsjLKG/sVkFw= -open-cluster-management.io/api v0.12.1-0.20240115071352-3d94ce8f3499 h1:Odh200i57Z9V44eJak98VqdKMCAlj3m9dtnUp8285nE= -open-cluster-management.io/api v0.12.1-0.20240115071352-3d94ce8f3499/go.mod h1:K3Rza3qN/W1+E1a+wbLtFatvdI8UlQWkSqBlpeRHMPw= -open-cluster-management.io/sdk-go v0.0.0-20240118073603-6da392d39be0 h1:FCGRz0ZdCWCq6WebGJyr0XJfAsjEwYFJF6lXY4tBPFM= -open-cluster-management.io/sdk-go v0.0.0-20240118073603-6da392d39be0/go.mod h1:LfUmo1hhhlQdui8CiNO8aUq050dbJTQ2nrE97DRFREs= +open-cluster-management.io/addon-framework v0.8.1-0.20240123051722-71f1b13cbb63 h1:TEzWFMzQUqaCuuJkDEMXzmOKMW7IPdp7QrvmAMb+ktk= +open-cluster-management.io/addon-framework v0.8.1-0.20240123051722-71f1b13cbb63/go.mod h1:SBs6wF0Umzr5/miJb9p8uMaTDbcjphHHQLa76nXnbU8= +open-cluster-management.io/api v0.12.1-0.20240122084346-e7bd1bd9ea6a h1:NjIU3aN4JSJjTotHiOkOCqYaPGG2tNtm7BY/o9uPb8M= +open-cluster-management.io/api v0.12.1-0.20240122084346-e7bd1bd9ea6a/go.mod h1:vOz9InrJq1BDFEI51+OwAyq2M3tjYPY+1cnoQhMhIlE= +open-cluster-management.io/sdk-go v0.0.0-20240122034348-9793ade2466b h1:UH3uy5vv3/VdtHQoWzHWhVFqsbcG9zUk1coY1YgD/uo= +open-cluster-management.io/sdk-go v0.0.0-20240122034348-9793ade2466b/go.mod h1:p3oaf+iu9ghMl4cBJXWXlDnUHVn+QxL90YLTve9bn/k= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0 h1:TgtAeesdhpm2SGwkQasmbeqDo8th5wOBA5h/AjTKA4I= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.28.0/go.mod h1:VHVDI/KrK4fjnV61bE2g3sA7tiETLn8sooImelsCx3Y= sigs.k8s.io/controller-runtime v0.16.2 h1:mwXAVuEk3EQf478PQwQ48zGOXvW27UJc8NHktQVuIPU= diff --git 
a/pkg/common/helpers/clusters.go b/pkg/common/helpers/clusters.go index cc4d4e6ea..a24b74e31 100644 --- a/pkg/common/helpers/clusters.go +++ b/pkg/common/helpers/clusters.go @@ -6,6 +6,7 @@ import ( clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + clustersdkv1beta1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1" ) type PlacementDecisionGetter struct { @@ -19,7 +20,7 @@ func (pdl PlacementDecisionGetter) List(selector labels.Selector, namespace stri // Get added and deleted clusters names func GetClusterChanges(client clusterlister.PlacementDecisionLister, placement *clusterv1beta1.Placement, existingClusters sets.Set[string]) (sets.Set[string], sets.Set[string], error) { - pdtracker := clusterv1beta1.NewPlacementDecisionClustersTracker( + pdtracker := clustersdkv1beta1.NewPlacementDecisionClustersTracker( placement, PlacementDecisionGetter{Client: client}, existingClusters) return pdtracker.GetClusterChanges() diff --git a/pkg/placement/controllers/scheduling/enqueue.go b/pkg/placement/controllers/scheduling/enqueue.go index 63f4c62d1..e9d53a9fe 100644 --- a/pkg/placement/controllers/scheduling/enqueue.go +++ b/pkg/placement/controllers/scheduling/enqueue.go @@ -18,6 +18,7 @@ import ( clusterapiv1 "open-cluster-management.io/api/cluster/v1" clusterapiv1beta1 "open-cluster-management.io/api/cluster/v1beta1" clusterapiv1beta2 "open-cluster-management.io/api/cluster/v1beta2" + clustersdkv1beta2 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2" ) const ( @@ -144,7 +145,7 @@ func (e *enqueuer) enqueueCluster(obj interface{}) { return } - clusterSets, err := clusterapiv1beta2.GetClusterSetsOfCluster(cluster, e.clusterSetLister) + clusterSets, err := clustersdkv1beta2.GetClusterSetsOfCluster(cluster, e.clusterSetLister) if err != nil { e.logger.V(4).Error(err, "Unable to get clusterSets of cluster", "clusterName", cluster.GetName()) return @@ -184,7 +185,7 @@ func (e *enqueuer) enqueuePlacementScore(obj interface{}) { e.logger.V(4).Error(err, "Unable to get cluster", "clusterNamespace", namespace) } - clusterSets, err := clusterapiv1beta2.GetClusterSetsOfCluster(cluster, e.clusterSetLister) + clusterSets, err := clustersdkv1beta2.GetClusterSetsOfCluster(cluster, e.clusterSetLister) if err != nil { e.logger.V(4).Error(err, "Unable to get clusterSets of cluster", "clusterName", cluster.GetName()) return diff --git a/vendor/modules.txt b/vendor/modules.txt index 4357f2aad..0407f5f2b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1487,8 +1487,8 @@ k8s.io/utils/pointer k8s.io/utils/ptr k8s.io/utils/strings/slices k8s.io/utils/trace -# open-cluster-management.io/addon-framework v0.8.1-0.20240119025526-d2afcef1ff66 -## explicit; go 1.20 +# open-cluster-management.io/addon-framework v0.8.1-0.20240123051722-71f1b13cbb63 +## explicit; go 1.21 open-cluster-management.io/addon-framework/pkg/addonfactory open-cluster-management.io/addon-framework/pkg/addonmanager open-cluster-management.io/addon-framework/pkg/addonmanager/addontesting @@ -1507,7 +1507,7 @@ open-cluster-management.io/addon-framework/pkg/index open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration open-cluster-management.io/addon-framework/pkg/manager/controllers/addonowner open-cluster-management.io/addon-framework/pkg/utils -# open-cluster-management.io/api v0.12.1-0.20240115071352-3d94ce8f3499 +# open-cluster-management.io/api v0.12.1-0.20240122084346-e7bd1bd9ea6a ## 
explicit; go 1.20 open-cluster-management.io/api/addon/v1alpha1 open-cluster-management.io/api/client/addon/clientset/versioned @@ -1575,16 +1575,16 @@ open-cluster-management.io/api/feature open-cluster-management.io/api/operator/v1 open-cluster-management.io/api/utils/work/v1/utils open-cluster-management.io/api/utils/work/v1/workapplier -open-cluster-management.io/api/utils/work/v1/workbuilder open-cluster-management.io/api/utils/work/v1/workvalidator open-cluster-management.io/api/work/v1 open-cluster-management.io/api/work/v1alpha1 -# open-cluster-management.io/sdk-go v0.0.0-20240118073603-6da392d39be0 +# open-cluster-management.io/sdk-go v0.0.0-20240122034348-9793ade2466b ## explicit; go 1.21 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1 open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta2 open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier +open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder open-cluster-management.io/sdk-go/pkg/apis/work/v1/validator open-cluster-management.io/sdk-go/pkg/cloudevents/generic open-cluster-management.io/sdk-go/pkg/cloudevents/generic/options diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go index 708ef3efe..c8e8f9917 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonfactory/helm_agentaddon.go @@ -151,7 +151,7 @@ func (a *HelmAgentAddon) getValues( defaultValues, err := a.getDefaultValues(cluster, addon) if err != nil { - klog.Error("failed to get defaultValue. err:%v", err) + klog.Errorf("failed to get defaultValue. err:%v", err) return nil, err } overrideValues = MergeValues(overrideValues, defaultValues) @@ -171,7 +171,7 @@ func (a *HelmAgentAddon) getValues( builtinValues, err := a.getBuiltinValues(cluster, addon) if err != nil { - klog.Error("failed to get builtinValue. err:%v", err) + klog.Errorf("failed to get builtinValue. 
err:%v", err) return nil, err } @@ -213,7 +213,7 @@ func (a *HelmAgentAddon) getBuiltinValues( helmBuiltinValues, err := JsonStructToValues(builtinValues) if err != nil { - klog.Error("failed to convert builtinValues to values %v.err:%v", builtinValues, err) + klog.Errorf("failed to convert builtinValues to values %v.err:%v", builtinValues, err) return nil, err } return helmBuiltinValues, nil @@ -237,7 +237,7 @@ func (a *HelmAgentAddon) getDefaultValues( helmDefaultValues, err := JsonStructToValues(defaultValues) if err != nil { - klog.Error("failed to convert defaultValues to values %v.err:%v", defaultValues, err) + klog.Errorf("failed to convert defaultValues to values %v.err:%v", defaultValues, err) return nil, err } return helmDefaultValues, nil diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go index 0c108435c..5172d6b8f 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/addonconfig/controller.go @@ -2,31 +2,25 @@ package addonconfig import ( "context" - "encoding/json" "fmt" - jsonpatch "github.com/evanphx/json-patch" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/dynamic/dynamiclister" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" + "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" + "open-cluster-management.io/addon-framework/pkg/index" + "open-cluster-management.io/addon-framework/pkg/utils" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" - - "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" - "open-cluster-management.io/addon-framework/pkg/index" - "open-cluster-management.io/addon-framework/pkg/utils" + "open-cluster-management.io/sdk-go/pkg/patcher" ) const ( @@ -166,7 +160,13 @@ func (c *addonConfigController) sync(ctx context.Context, syncCtx factory.SyncCo return err } - return c.patchConfigReferences(ctx, addon, addonCopy) + addonPatcher := patcher.NewPatcher[ + *addonapiv1alpha1.ManagedClusterAddOn, + addonapiv1alpha1.ManagedClusterAddOnSpec, + addonapiv1alpha1.ManagedClusterAddOnStatus](c.addonClient.AddonV1alpha1().ManagedClusterAddOns(addonNamespace)) + + _, err = addonPatcher.PatchStatus(ctx, addonCopy, addonCopy.Status, addon.Status) + return err } func (c *addonConfigController) updateConfigSpecHashAndGenerations(addon *addonapiv1alpha1.ManagedClusterAddOn) error { @@ -230,47 +230,3 @@ func (c *addonConfigController) updateConfigSpecHashAndGenerations(addon *addona return nil } - -func (c *addonConfigController) patchConfigReferences(ctx context.Context, old, new *addonapiv1alpha1.ManagedClusterAddOn) error { - if 
equality.Semantic.DeepEqual(new.Status.ConfigReferences, old.Status.ConfigReferences) { - return nil - } - - oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ - Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - ConfigReferences: old.Status.ConfigReferences, - }, - }) - if err != nil { - return err - } - - newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ - ObjectMeta: metav1.ObjectMeta{ - UID: new.UID, - ResourceVersion: new.ResourceVersion, - }, - Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - ConfigReferences: new.Status.ConfigReferences, - }, - }) - if err != nil { - return err - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) - } - - klog.V(4).Infof("Patching addon %s/%s config reference with %s", new.Namespace, new.Name, string(patchBytes)) - _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( - ctx, - new.Name, - types.MergePatchType, - patchBytes, - metav1.PatchOptions{}, - "status", - ) - return err -} diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go index d33e81bb8..930a0afaf 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/controller.go @@ -2,17 +2,14 @@ package agentdeploy import ( "context" - "encoding/json" "fmt" "strings" - jsonpatch "github.com/evanphx/json-patch" "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" errorsutil "k8s.io/apimachinery/pkg/util/errors" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/tools/cache" @@ -27,9 +24,10 @@ import ( workv1client "open-cluster-management.io/api/client/work/clientset/versioned" workinformers "open-cluster-management.io/api/client/work/informers/externalversions/work/v1" clusterv1 "open-cluster-management.io/api/cluster/v1" - "open-cluster-management.io/api/utils/work/v1/workapplier" - "open-cluster-management.io/api/utils/work/v1/workbuilder" workapiv1 "open-cluster-management.io/api/work/v1" + workapplier "open-cluster-management.io/sdk-go/pkg/apis/work/v1/applier" + workbuilder "open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder" + "open-cluster-management.io/sdk-go/pkg/patcher" "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" @@ -309,43 +307,12 @@ func (c *addonDeployController) updateAddon(ctx context.Context, new, old *addon return err } - if equality.Semantic.DeepEqual(new.Status.HealthCheck, old.Status.HealthCheck) && - equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) { - return nil - } - - oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ - Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - HealthCheck: old.Status.HealthCheck, - Conditions: old.Status.Conditions, - }, - }) - if err != nil { - return err - } - - newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ - ObjectMeta: metav1.ObjectMeta{ - UID: new.UID, - ResourceVersion: new.ResourceVersion, - 
}, - Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - HealthCheck: new.Status.HealthCheck, - Conditions: new.Status.Conditions, - }, - }) - if err != nil { - return err - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) - } + addonPatcher := patcher.NewPatcher[ + *addonapiv1alpha1.ManagedClusterAddOn, + addonapiv1alpha1.ManagedClusterAddOnSpec, + addonapiv1alpha1.ManagedClusterAddOnStatus](c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace)) - klog.V(2).Infof("Patching addon %s/%s condition with %s", new.Namespace, new.Name, string(patchBytes)) - _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( - ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") + _, err := addonPatcher.PatchStatus(ctx, new, new.Status, old.Status) return err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go index 9f628166b..98b14f80a 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/hosted_hook_sync.go @@ -80,20 +80,33 @@ func (s *hostedHookSyncer) sync(ctx context.Context, return addon, nil } - // will deploy the pre-delete hook manifestWork when the addon is deleting if addon.DeletionTimestamp.IsZero() { addonAddFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) return addon, nil } - // the hook work is completed if there is no HostingPreDeleteHookFinalizer when the addon is deleting. if !addonHasFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { return addon, nil } - hookWork, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, hookWork, addon) - if err != nil { - return addon, err + // apply the pre-delete hook manifestWork when the addon is deleting and HookManifestCompleted condition is not true. + // there are 2 cases: + // 1. the HookManifestCompleted condition is false. + // 2. there is no this condition. + if !meta.IsStatusConditionTrue(addon.Status.Conditions, addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted) { + hookWork, err = s.applyWork(ctx, addonapiv1alpha1.ManagedClusterAddOnHostingManifestApplied, hookWork, addon) + if err != nil { + return addon, err + } + } else { + // cleanup is safe here since there is no case which HookManifestCompleted condition is changed from true to false. 
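Stepping back from the hook-sync details for a moment: the updateAddon hunk above is the recurring refactor of this bump. Every controller that previously built status merge patches by hand (a DeepEqual guard, json.Marshal of old/new, jsonpatch.CreateMergePatch, then Patch on the status subresource) now delegates to sdk-go's generic patcher. A sketch of the pattern, using only the signatures visible in this diff (NewPatcher[T, Spec, Status] over a typed client, and PatchStatus(ctx, obj, newStatus, oldStatus) returning a changed flag and an error):

package main

import (
	"context"

	addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1"
	addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned"
	"open-cluster-management.io/sdk-go/pkg/patcher"
)

// patchAddonStatus issues a status-only merge patch when newStatus
// differs from oldStatus; the patcher owns the DeepEqual short-circuit
// and the marshal/patch plumbing the controllers used to carry.
func patchAddonStatus(ctx context.Context, client addonv1alpha1client.Interface,
	addon *addonapiv1alpha1.ManagedClusterAddOn,
	oldStatus addonapiv1alpha1.ManagedClusterAddOnStatus) error {
	p := patcher.NewPatcher[
		*addonapiv1alpha1.ManagedClusterAddOn,
		addonapiv1alpha1.ManagedClusterAddOnSpec,
		addonapiv1alpha1.ManagedClusterAddOnStatus](
		client.AddonV1alpha1().ManagedClusterAddOns(addon.Namespace))
	_, err := p.PatchStatus(ctx, addon, addon.Status, oldStatus)
	return err
}

The registration and managementaddonconfig controllers below repeat this construction, swapping in the ClusterManagementAddOn type parameters and client getter where needed.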
+ if err = s.cleanupHookWork(ctx, addon); err != nil { + return addon, err + } + if addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { + return addon, err + } + return addon, nil } // TODO: will surface more message here @@ -104,25 +117,16 @@ func (s *hostedHookSyncer) sync(ctx context.Context, Reason: "HookManifestIsCompleted", Message: fmt.Sprintf("hook manifestWork %v is completed.", hookWork.Name), }) - - if err = s.cleanupHookWork(ctx, addon); err != nil { - return addon, err - } - if addonRemoveFinalizer(addon, addonapiv1alpha1.AddonHostingPreDeleteHookFinalizer) { - return addon, err - } - return addon, nil + } else { + meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ + Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, + Status: metav1.ConditionFalse, + Reason: "HookManifestIsNotCompleted", + Message: fmt.Sprintf("hook manifestWork %v is not completed.", hookWork.Name), + }) } - meta.SetStatusCondition(&addon.Status.Conditions, metav1.Condition{ - Type: addonapiv1alpha1.ManagedClusterAddOnHookManifestCompleted, - Status: metav1.ConditionFalse, - Reason: "HookManifestIsNotCompleted", - Message: fmt.Sprintf("hook manifestWork %v is not completed.", hookWork.Name), - }) - return addon, nil - } // cleanupHookWork will delete the hosting pre-delete hook manifestWork and remove the finalizer, diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go index ad830fdef..a258369ea 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/agentdeploy/utils.go @@ -11,8 +11,8 @@ import ( "k8s.io/klog/v2" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterv1 "open-cluster-management.io/api/cluster/v1" - "open-cluster-management.io/api/utils/work/v1/workbuilder" workapiv1 "open-cluster-management.io/api/work/v1" + workbuilder "open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder" "open-cluster-management.io/addon-framework/pkg/addonmanager/constants" "open-cluster-management.io/addon-framework/pkg/agent" diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go index d508372ca..300caff89 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/managementaddonconfig/controller.go @@ -2,27 +2,22 @@ package managementaddonconfig import ( "context" - "encoding/json" "fmt" - jsonpatch "github.com/evanphx/json-patch" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/dynamic/dynamicinformer" "k8s.io/client-go/dynamic/dynamiclister" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" - "k8s.io/klog/v2" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" 
addonv1alpha1client "open-cluster-management.io/api/client/addon/clientset/versioned" addoninformerv1alpha1 "open-cluster-management.io/api/client/addon/informers/externalversions/addon/v1alpha1" addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" + "open-cluster-management.io/sdk-go/pkg/patcher" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" "open-cluster-management.io/addon-framework/pkg/index" @@ -44,6 +39,9 @@ type clusterManagementAddonConfigController struct { queue workqueue.RateLimitingInterface addonFilterFunc factory.EventFilterFunc configGVRs map[schema.GroupVersionResource]bool + addonPatcher patcher.Patcher[*addonapiv1alpha1.ClusterManagementAddOn, + addonapiv1alpha1.ClusterManagementAddOnSpec, + addonapiv1alpha1.ClusterManagementAddOnStatus] } func NewManagementAddonConfigController( @@ -63,6 +61,9 @@ func NewManagementAddonConfigController( queue: syncCtx.Queue(), addonFilterFunc: addonFilterFunc, configGVRs: configGVRs, + addonPatcher: patcher.NewPatcher[*addonapiv1alpha1.ClusterManagementAddOn, + addonapiv1alpha1.ClusterManagementAddOnSpec, + addonapiv1alpha1.ClusterManagementAddOnStatus](addonClient.AddonV1alpha1().ClusterManagementAddOns()), } configInformers := c.buildConfigInformers(configInformerFactory, configGVRs) @@ -153,7 +154,8 @@ func (c *clusterManagementAddonConfigController) sync(ctx context.Context, syncC return err } - return c.patchConfigReferences(ctx, cma, cmaCopy) + _, err = c.addonPatcher.PatchStatus(ctx, cmaCopy, cmaCopy.Status, cma.Status) + return err } func (c *clusterManagementAddonConfigController) updateConfigSpecHash(cma *addonapiv1alpha1.ClusterManagementAddOn) error { @@ -201,53 +203,6 @@ func (c *clusterManagementAddonConfigController) updateConfigSpecHash(cma *addon return nil } -func (c *clusterManagementAddonConfigController) patchConfigReferences(ctx context.Context, old, new *addonapiv1alpha1.ClusterManagementAddOn) error { - if equality.Semantic.DeepEqual(new.Status.DefaultConfigReferences, old.Status.DefaultConfigReferences) && - equality.Semantic.DeepEqual(new.Status.InstallProgressions, old.Status.InstallProgressions) { - return nil - } - - oldData, err := json.Marshal(&addonapiv1alpha1.ClusterManagementAddOn{ - Status: addonapiv1alpha1.ClusterManagementAddOnStatus{ - DefaultConfigReferences: old.Status.DefaultConfigReferences, - InstallProgressions: old.Status.InstallProgressions, - }, - }) - if err != nil { - return err - } - - newData, err := json.Marshal(&addonapiv1alpha1.ClusterManagementAddOn{ - ObjectMeta: metav1.ObjectMeta{ - UID: new.UID, - ResourceVersion: new.ResourceVersion, - }, - Status: addonapiv1alpha1.ClusterManagementAddOnStatus{ - DefaultConfigReferences: new.Status.DefaultConfigReferences, - InstallProgressions: new.Status.InstallProgressions, - }, - }) - if err != nil { - return err - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) - } - - klog.V(4).Infof("Patching addon %s/%s config reference with %s", new.Namespace, new.Name, string(patchBytes)) - _, err = c.addonClient.AddonV1alpha1().ClusterManagementAddOns().Patch( - ctx, - new.Name, - types.MergePatchType, - patchBytes, - metav1.PatchOptions{}, - "status", - ) - return err -} - func (c *clusterManagementAddonConfigController) getConfigSpecHash(gr addonapiv1alpha1.ConfigGroupResource, cr addonapiv1alpha1.ConfigReferent) (string, error) { lister, ok := 
c.configListers[schema.GroupResource{Group: gr.Group, Resource: gr.Resource}] diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go index 28a0c6016..650be3931 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/addonmanager/controllers/registration/controller.go @@ -2,16 +2,12 @@ package registration import ( "context" - "encoding/json" "fmt" - jsonpatch "github.com/evanphx/json-patch" - "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/cache" "k8s.io/klog/v2" addonapiv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" @@ -20,6 +16,7 @@ import ( addonlisterv1alpha1 "open-cluster-management.io/api/client/addon/listers/addon/v1alpha1" clusterinformers "open-cluster-management.io/api/client/cluster/informers/externalversions/cluster/v1" clusterlister "open-cluster-management.io/api/client/cluster/listers/cluster/v1" + "open-cluster-management.io/sdk-go/pkg/patcher" "open-cluster-management.io/addon-framework/pkg/agent" "open-cluster-management.io/addon-framework/pkg/basecontroller/factory" @@ -112,6 +109,11 @@ func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory. } managedClusterAddonCopy.Status.SupportedConfigs = supportedConfigs + addonPatcher := patcher.NewPatcher[ + *addonapiv1alpha1.ManagedClusterAddOn, + addonapiv1alpha1.ManagedClusterAddOnSpec, + addonapiv1alpha1.ManagedClusterAddOnStatus](c.addonClient.AddonV1alpha1().ManagedClusterAddOns(clusterName)) + registrationOption := agentAddon.GetAgentAddonOptions().Registration if registrationOption == nil { meta.SetStatusCondition(&managedClusterAddonCopy.Status.Conditions, metav1.Condition{ @@ -120,7 +122,8 @@ func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory. Reason: addonapiv1alpha1.RegistrationAppliedNilRegistration, Message: "Registration of the addon agent is configured", }) - return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) + _, err = addonPatcher.PatchStatus(ctx, managedClusterAddonCopy, managedClusterAddonCopy.Status, managedClusterAddon.Status) + return err } if registrationOption.PermissionConfig != nil { @@ -132,7 +135,8 @@ func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory. Reason: addonapiv1alpha1.RegistrationAppliedSetPermissionFailed, Message: fmt.Sprintf("Failed to set permission for hub agent: %v", err), }) - if patchErr := c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon); patchErr != nil { + if _, patchErr := addonPatcher.PatchStatus( + ctx, managedClusterAddonCopy, managedClusterAddonCopy.Status, managedClusterAddon.Status); patchErr != nil { return patchErr } return err @@ -146,7 +150,8 @@ func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory. 
Reason: addonapiv1alpha1.RegistrationAppliedNilRegistration, Message: "Registration of the addon agent is configured", }) - return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) + _, err = addonPatcher.PatchStatus(ctx, managedClusterAddonCopy, managedClusterAddonCopy.Status, managedClusterAddon.Status) + return err } configs := registrationOption.CSRConfigurations(managedCluster) @@ -174,52 +179,7 @@ func (c *addonRegistrationController) sync(ctx context.Context, syncCtx factory. Message: "Registration of the addon agent is configured", }) - return c.patchAddonStatus(ctx, managedClusterAddonCopy, managedClusterAddon) -} - -func (c *addonRegistrationController) patchAddonStatus(ctx context.Context, new, old *addonapiv1alpha1.ManagedClusterAddOn) error { - if equality.Semantic.DeepEqual(new.Status.Registrations, old.Status.Registrations) && - equality.Semantic.DeepEqual(new.Status.Conditions, old.Status.Conditions) && - equality.Semantic.DeepEqual(new.Status.SupportedConfigs, old.Status.SupportedConfigs) && - new.Status.Namespace == old.Status.Namespace { - return nil - } - - oldData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ - Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - Registrations: old.Status.Registrations, - Namespace: old.Status.Namespace, - SupportedConfigs: old.Status.SupportedConfigs, - Conditions: old.Status.Conditions, - }, - }) - if err != nil { - return err - } - - newData, err := json.Marshal(&addonapiv1alpha1.ManagedClusterAddOn{ - ObjectMeta: metav1.ObjectMeta{ - UID: new.UID, - ResourceVersion: new.ResourceVersion, - }, - Status: addonapiv1alpha1.ManagedClusterAddOnStatus{ - Registrations: new.Status.Registrations, - Namespace: new.Status.Namespace, - SupportedConfigs: new.Status.SupportedConfigs, - Conditions: new.Status.Conditions, - }, - }) - if err != nil { - return err - } - - patchBytes, err := jsonpatch.CreateMergePatch(oldData, newData) - if err != nil { - return fmt.Errorf("failed to create patch for addon %s: %w", new.Name, err) - } + _, err = addonPatcher.PatchStatus(ctx, managedClusterAddonCopy, managedClusterAddonCopy.Status, managedClusterAddon.Status) - klog.V(2).Infof("Patching addon %s/%s status with %s", new.Namespace, new.Name, string(patchBytes)) - _, err = c.addonClient.AddonV1alpha1().ManagedClusterAddOns(new.Namespace).Patch( - ctx, new.Name, types.MergePatchType, patchBytes, metav1.PatchOptions{}, "status") return err } diff --git a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go index 3bccd522a..d0b381e25 100644 --- a/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go +++ b/vendor/open-cluster-management.io/addon-framework/pkg/manager/controllers/addonconfiguration/graph.go @@ -11,7 +11,8 @@ import ( addonv1alpha1 "open-cluster-management.io/api/addon/v1alpha1" clusterlisterv1beta1 "open-cluster-management.io/api/client/cluster/listers/cluster/v1beta1" clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1" - clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" + clusterv1sdkalpha1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1" + clustersdkv1beta1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1" ) // configurationTree is a 2 level snapshot tree on the configuration of addons @@ -27,9 +28,9 @@ type configurationGraph struct { // installStrategyNode 
is a node in configurationGraph defined by a install strategy type installStrategyNode struct { placementRef addonv1alpha1.PlacementRef - pdTracker *clusterv1beta1.PlacementDecisionClustersTracker + pdTracker *clustersdkv1beta1.PlacementDecisionClustersTracker rolloutStrategy clusterv1alpha1.RolloutStrategy - rolloutResult clusterv1alpha1.RolloutResult + rolloutResult clusterv1sdkalpha1.RolloutResult desiredConfigs addonConfigMap // children keeps a map of addons node as the children of this node children map[string]*addonNode @@ -41,18 +42,18 @@ type installStrategyNode struct { type addonNode struct { desiredConfigs addonConfigMap mca *addonv1alpha1.ManagedClusterAddOn - status *clusterv1alpha1.ClusterRolloutStatus + status *clusterv1sdkalpha1.ClusterRolloutStatus } type addonConfigMap map[addonv1alpha1.ConfigGroupResource]addonv1alpha1.ConfigReference // set addon rollout status func (n *addonNode) setRolloutStatus() { - n.status = &clusterv1alpha1.ClusterRolloutStatus{ClusterName: n.mca.Namespace} + n.status = &clusterv1sdkalpha1.ClusterRolloutStatus{ClusterName: n.mca.Namespace} // desired configs doesn't match actual configs, set to ToApply if len(n.mca.Status.ConfigReferences) != len(n.desiredConfigs) { - n.status.Status = clusterv1alpha1.ToApply + n.status.Status = clusterv1sdkalpha1.ToApply return } @@ -68,30 +69,30 @@ func (n *addonNode) setRolloutStatus() { if desired, ok := n.desiredConfigs[actual.ConfigGroupResource]; ok { // desired config spec hash doesn't match actual, set to ToApply if !equality.Semantic.DeepEqual(desired.DesiredConfig, actual.DesiredConfig) { - n.status.Status = clusterv1alpha1.ToApply + n.status.Status = clusterv1sdkalpha1.ToApply return // desired config spec hash matches actual, but last applied config spec hash doesn't match actual } else if !equality.Semantic.DeepEqual(actual.LastAppliedConfig, actual.DesiredConfig) { switch progressingCond.Reason { case addonv1alpha1.ProgressingReasonInstallFailed, addonv1alpha1.ProgressingReasonUpgradeFailed: - n.status.Status = clusterv1alpha1.Failed + n.status.Status = clusterv1sdkalpha1.Failed n.status.LastTransitionTime = &progressingCond.LastTransitionTime case addonv1alpha1.ProgressingReasonInstalling, addonv1alpha1.ProgressingReasonUpgrading: - n.status.Status = clusterv1alpha1.Progressing + n.status.Status = clusterv1sdkalpha1.Progressing n.status.LastTransitionTime = &progressingCond.LastTransitionTime default: - n.status.Status = clusterv1alpha1.Progressing + n.status.Status = clusterv1sdkalpha1.Progressing } return } } else { - n.status.Status = clusterv1alpha1.ToApply + n.status.Status = clusterv1sdkalpha1.ToApply return } } // succeed - n.status.Status = clusterv1alpha1.Succeeded + n.status.Status = clusterv1sdkalpha1.Succeeded if progressingCond.Reason == addonv1alpha1.ProgressingReasonInstallSucceed || progressingCond.Reason == addonv1alpha1.ProgressingReasonUpgradeSucceed { n.status.LastTransitionTime = &progressingCond.LastTransitionTime } @@ -172,7 +173,7 @@ func (g *configurationGraph) addPlacementNode( } // new decision tracker - pdTracker := clusterv1beta1.NewPlacementDecisionClustersTracker(placement, placementDecisionGetter, nil) + pdTracker := clustersdkv1beta1.NewPlacementDecisionClustersTracker(placement, placementDecisionGetter, nil) // refresh and get existing decision clusters err = pdTracker.Refresh() @@ -307,26 +308,26 @@ func (n *installStrategyNode) addNode(addon *addonv1alpha1.ManagedClusterAddOn) func (n *installStrategyNode) generateRolloutResult() error { if n.placementRef.Name 
== "" { // default addons - rolloutResult := clusterv1alpha1.RolloutResult{} - rolloutResult.ClustersToRollout = []clusterv1alpha1.ClusterRolloutStatus{} + rolloutResult := clusterv1sdkalpha1.RolloutResult{} + rolloutResult.ClustersToRollout = []clusterv1sdkalpha1.ClusterRolloutStatus{} for name, addon := range n.children { if addon.status == nil { return fmt.Errorf("failed to get rollout status on cluster %v", name) } - if addon.status.Status != clusterv1alpha1.Succeeded { + if addon.status.Status != clusterv1sdkalpha1.Succeeded { rolloutResult.ClustersToRollout = append(rolloutResult.ClustersToRollout, *addon.status) } } n.rolloutResult = rolloutResult } else { // placement addons - rolloutHandler, err := clusterv1alpha1.NewRolloutHandler(n.pdTracker, getClusterRolloutStatus) + rolloutHandler, err := clusterv1sdkalpha1.NewRolloutHandler(n.pdTracker, getClusterRolloutStatus) if err != nil { return err } // get existing addons - existingRolloutClusters := []clusterv1alpha1.ClusterRolloutStatus{} + existingRolloutClusters := []clusterv1sdkalpha1.ClusterRolloutStatus{} for name, addon := range n.children { clsRolloutStatus, err := getClusterRolloutStatus(name, addon) if err != nil { @@ -373,7 +374,7 @@ func (n *installStrategyNode) getAddonsToUpdate() []*addonNode { func (n *installStrategyNode) countAddonUpgradeSucceed() int { count := 0 for _, addon := range n.children { - if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1alpha1.Succeeded { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1sdkalpha1.Succeeded { count += 1 } } @@ -383,7 +384,7 @@ func (n *installStrategyNode) countAddonUpgradeSucceed() int { func (n *installStrategyNode) countAddonUpgrading() int { count := 0 for _, addon := range n.children { - if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1alpha1.Progressing { + if desiredConfigsEqual(addon.desiredConfigs, n.desiredConfigs) && addon.status.Status == clusterv1sdkalpha1.Progressing { count += 1 } } @@ -394,9 +395,9 @@ func (n *installStrategyNode) countAddonTimeOut() int { return len(n.rolloutResult.ClustersTimeOut) } -func getClusterRolloutStatus(clusterName string, addonNode *addonNode) (clusterv1alpha1.ClusterRolloutStatus, error) { +func getClusterRolloutStatus(clusterName string, addonNode *addonNode) (clusterv1sdkalpha1.ClusterRolloutStatus, error) { if addonNode.status == nil { - return clusterv1alpha1.ClusterRolloutStatus{}, fmt.Errorf("failed to get rollout status on cluster %v", clusterName) + return clusterv1sdkalpha1.ClusterRolloutStatus{}, fmt.Errorf("failed to get rollout status on cluster %v", clusterName) } return *addonNode.status, nil } diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go deleted file mode 100644 index fb5398194..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/helpers.go +++ /dev/null @@ -1,617 +0,0 @@ -package v1alpha1 - -import ( - "fmt" - "math" - "regexp" - "sort" - "strconv" - "strings" - "time" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" - "k8s.io/utils/clock" - clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1" -) - -var RolloutClock = clock.Clock(clock.RealClock{}) -var maxTimeDuration = time.Duration(math.MaxInt64) - -// RolloutStatus represents the status of a rollout operation. 
-type RolloutStatus int - -const ( - // ToApply indicates that the resource's desired status has not been applied yet. - ToApply RolloutStatus = iota - // Progressing indicates that the resource's desired status is applied and last applied status is not updated. - Progressing - // Succeeded indicates that the resource's desired status is applied and last applied status is successful. - Succeeded - // Failed indicates that the resource's desired status is applied and last applied status has failed. - Failed - // TimeOut indicates that the rollout status is progressing or failed and the status remains - // for longer than the timeout, resulting in a timeout status. - TimeOut - // Skip indicates that the rollout should be skipped on this cluster. - Skip -) - -// ClusterRolloutStatus holds the rollout status information for a cluster. -type ClusterRolloutStatus struct { - // cluster name - ClusterName string - // GroupKey represents the cluster group key (optional field). - GroupKey clusterv1beta1.GroupKey - // Status is the required field indicating the rollout status. - Status RolloutStatus - // LastTransitionTime is the last transition time of the rollout status (optional field). - // Used to calculate timeout for progressing and failed status and minimum success time (i.e. soak - // time) for succeeded status. - LastTransitionTime *metav1.Time - // TimeOutTime is the timeout time when the status is progressing or failed (optional field). - TimeOutTime *metav1.Time -} - -// RolloutResult contains list of clusters that are timeOut, removed and required to rollOut. A -// boolean is also provided signaling that the rollout may be shortened due to the number of failed -// clusters exceeding the MaxFailure threshold. -type RolloutResult struct { - // ClustersToRollout is a slice of ClusterRolloutStatus that will be rolled out. - ClustersToRollout []ClusterRolloutStatus - // ClustersTimeOut is a slice of ClusterRolloutStatus that are timeout. - ClustersTimeOut []ClusterRolloutStatus - // ClustersRemoved is a slice of ClusterRolloutStatus that are removed. - ClustersRemoved []ClusterRolloutStatus - // MaxFailureBreach is a boolean signaling whether the rollout was cut short because of failed clusters. - MaxFailureBreach bool - // RecheckAfter is the time duration to recheck the rollout status. - RecheckAfter *time.Duration -} - -// ClusterRolloutStatusFunc defines a function that return the rollout status for a given workload. -// +k8s:deepcopy-gen=false -type ClusterRolloutStatusFunc[T any] func(clusterName string, workload T) (ClusterRolloutStatus, error) - -// The RolloutHandler required workload type (interface/struct) to be assigned to the generic type. -// The custom implementation of the ClusterRolloutStatusFunc is required to use the RolloutHandler. -// +k8s:deepcopy-gen=false -type RolloutHandler[T any] struct { - // placement decision tracker - pdTracker *clusterv1beta1.PlacementDecisionClustersTracker - statusFunc ClusterRolloutStatusFunc[T] -} - -// NewRolloutHandler creates a new RolloutHandler with the given workload type. -func NewRolloutHandler[T any](pdTracker *clusterv1beta1.PlacementDecisionClustersTracker, statusFunc ClusterRolloutStatusFunc[T]) (*RolloutHandler[T], error) { - if pdTracker == nil { - return nil, fmt.Errorf("invalid placement decision tracker %v", pdTracker) - } - - return &RolloutHandler[T]{pdTracker: pdTracker, statusFunc: statusFunc}, nil -} - -// The inputs are a RolloutStrategy and existingClusterRolloutStatus list. 
-// The existing ClusterRolloutStatus list should be created using the ClusterRolloutStatusFunc to determine the current workload rollout status. -// The existing ClusterRolloutStatus list should contain all the current workloads rollout status such as ToApply, Progressing, Succeeded, -// Failed, TimeOut and Skip in order to determine the added, removed, timeout clusters and next clusters to rollout. -// -// Return the actual RolloutStrategy that take effect and a RolloutResult contain list of ClusterToRollout, ClustersTimeout and ClusterRemoved. -func (r *RolloutHandler[T]) GetRolloutCluster(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) { - switch rolloutStrategy.Type { - case All: - return r.getRolloutAllClusters(rolloutStrategy, existingClusterStatus) - case Progressive: - return r.getProgressiveClusters(rolloutStrategy, existingClusterStatus) - case ProgressivePerGroup: - return r.getProgressivePerGroupClusters(rolloutStrategy, existingClusterStatus) - default: - return nil, RolloutResult{}, fmt.Errorf("incorrect rollout strategy type %v", rolloutStrategy.Type) - } -} - -func (r *RolloutHandler[T]) getRolloutAllClusters(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) { - // Prepare the rollout strategy - strategy := RolloutStrategy{Type: All} - strategy.All = rolloutStrategy.All.DeepCopy() - if strategy.All == nil { - strategy.All = &RolloutAll{} - } - - // Parse timeout for the rollout - failureTimeout, err := parseTimeout(strategy.All.ProgressDeadline) - if err != nil { - return &strategy, RolloutResult{}, err - } - - allClusterGroups := r.pdTracker.ExistingClusterGroupsBesides() - allClusters := allClusterGroups.GetClusters().UnsortedList() - - // Check for removed Clusters - currentClusterStatus, removedClusterStatus := r.getRemovedClusters(allClusterGroups, existingClusterStatus) - rolloutResult := progressivePerCluster(allClusterGroups, len(allClusters), len(allClusters), time.Duration(0), failureTimeout, currentClusterStatus) - rolloutResult.ClustersRemoved = removedClusterStatus - - return &strategy, rolloutResult, nil -} - -func (r *RolloutHandler[T]) getProgressiveClusters(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) { - // Prepare the rollout strategy - strategy := RolloutStrategy{Type: Progressive} - strategy.Progressive = rolloutStrategy.Progressive.DeepCopy() - if strategy.Progressive == nil { - strategy.Progressive = &RolloutProgressive{} - } - minSuccessTime := strategy.Progressive.MinSuccessTime.Duration - - // Parse timeout for non-mandatory decision groups - failureTimeout, err := parseTimeout(strategy.Progressive.ProgressDeadline) - if err != nil { - return &strategy, RolloutResult{}, err - } - - // Check for removed clusters - clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() - currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) - - // Parse maximum failure threshold for continuing the rollout, defaulting to zero - maxFailures, err := calculateRolloutSize(strategy.Progressive.MaxFailures, len(clusterGroups.GetClusters()), 0) - if err != nil { - return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxFailures: %w", err) - } - - // Upgrade mandatory decision groups first - groupKeys := 
decisionGroupsToGroupKeys(strategy.Progressive.MandatoryDecisionGroups.MandatoryDecisionGroups) - clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) - - // Perform progressive rollOut for mandatory decision groups first, tolerating no failures - if len(clusterGroups) > 0 { - rolloutResult := progressivePerGroup( - clusterGroups, intstr.FromInt32(0), minSuccessTime, failureTimeout, currentClusterStatus, - ) - if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { - rolloutResult.ClustersRemoved = removedClusterStatus - return &strategy, rolloutResult, nil - } - } - - // Calculate the size of progressive rollOut - // If the MaxConcurrency not defined, total clusters length is considered as maxConcurrency. - clusterGroups = r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) - rolloutSize, err := calculateRolloutSize(strategy.Progressive.MaxConcurrency, len(clusterGroups.GetClusters()), len(clusterGroups.GetClusters())) - if err != nil { - return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxConcurrency: %w", err) - } - - // Rollout the remaining clusters - rolloutResult := progressivePerCluster(clusterGroups, rolloutSize, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus) - rolloutResult.ClustersRemoved = removedClusterStatus - - return &strategy, rolloutResult, nil -} - -func (r *RolloutHandler[T]) getProgressivePerGroupClusters(rolloutStrategy RolloutStrategy, existingClusterStatus []ClusterRolloutStatus) (*RolloutStrategy, RolloutResult, error) { - // Prepare the rollout strategy - strategy := RolloutStrategy{Type: ProgressivePerGroup} - strategy.ProgressivePerGroup = rolloutStrategy.ProgressivePerGroup.DeepCopy() - if strategy.ProgressivePerGroup == nil { - strategy.ProgressivePerGroup = &RolloutProgressivePerGroup{} - } - minSuccessTime := strategy.ProgressivePerGroup.MinSuccessTime.Duration - maxFailures := strategy.ProgressivePerGroup.MaxFailures - - // Parse timeout for non-mandatory decision groups - failureTimeout, err := parseTimeout(strategy.ProgressivePerGroup.ProgressDeadline) - if err != nil { - return &strategy, RolloutResult{}, err - } - - // Check format of MaxFailures--this value will be re-parsed and used in progressivePerGroup() - err = parseRolloutSize(maxFailures) - if err != nil { - return &strategy, RolloutResult{}, fmt.Errorf("failed to parse the provided maxFailures: %w", err) - } - - // Check for removed Clusters - clusterGroups := r.pdTracker.ExistingClusterGroupsBesides() - currentClusterStatus, removedClusterStatus := r.getRemovedClusters(clusterGroups, existingClusterStatus) - - // Upgrade mandatory decision groups first - mandatoryDecisionGroups := strategy.ProgressivePerGroup.MandatoryDecisionGroups.MandatoryDecisionGroups - groupKeys := decisionGroupsToGroupKeys(mandatoryDecisionGroups) - clusterGroups = r.pdTracker.ExistingClusterGroups(groupKeys...) - - // Perform progressive rollout per group for mandatory decision groups first, tolerating no failures - if len(clusterGroups) > 0 { - rolloutResult := progressivePerGroup(clusterGroups, intstr.FromInt32(0), minSuccessTime, failureTimeout, currentClusterStatus) - - if len(rolloutResult.ClustersToRollout) > 0 || len(rolloutResult.ClustersTimeOut) > 0 { - rolloutResult.ClustersRemoved = removedClusterStatus - return &strategy, rolloutResult, nil - } - } - - // RollOut the rest of the decision groups - restClusterGroups := r.pdTracker.ExistingClusterGroupsBesides(groupKeys...) 
- - // Perform progressive rollout per group for the remaining decision groups - rolloutResult := progressivePerGroup(restClusterGroups, maxFailures, minSuccessTime, failureTimeout, currentClusterStatus) - rolloutResult.ClustersRemoved = removedClusterStatus - - return &strategy, rolloutResult, nil -} - -func (r *RolloutHandler[T]) getRemovedClusters(clusterGroupsMap clusterv1beta1.ClusterGroupsMap, existingClusterStatus []ClusterRolloutStatus) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { - var currentClusterStatus, removedClusterStatus []ClusterRolloutStatus - - clusters := clusterGroupsMap.GetClusters().UnsortedList() - for _, clusterStatus := range existingClusterStatus { - exist := false - for _, cluster := range clusters { - if clusterStatus.ClusterName == cluster { - exist = true - currentClusterStatus = append(currentClusterStatus, clusterStatus) - break - } - } - - if !exist { - removedClusterStatus = append(removedClusterStatus, clusterStatus) - } - } - return currentClusterStatus, removedClusterStatus -} - -// progressivePerCluster parses the rollout status for the given clusters and returns the rollout -// result. It sorts the clusters alphabetically in order to determine the rollout groupings and the -// rollout group size is determined by the MaxConcurrency setting. -func progressivePerCluster( - clusterGroupsMap clusterv1beta1.ClusterGroupsMap, - rolloutSize int, - maxFailures int, - minSuccessTime time.Duration, - timeout time.Duration, - existingClusterStatus []ClusterRolloutStatus, -) RolloutResult { - var rolloutClusters, timeoutClusters []ClusterRolloutStatus - existingClusters := make(map[string]bool) - failureCount := 0 - failureBreach := false - - // Sort existing cluster status for consistency in case ToApply was determined by the workload applier - sort.Slice(existingClusterStatus, func(i, j int) bool { - return existingClusterStatus[i].ClusterName < existingClusterStatus[j].ClusterName - }) - - // Collect existing cluster status and determine any TimeOut statuses - for _, status := range existingClusterStatus { - if status.ClusterName == "" { - continue - } - - existingClusters[status.ClusterName] = true - - // If there was a breach of MaxFailures, only handle clusters that have already had workload applied - if !failureBreach || failureBreach && status.Status != ToApply { - // For progress per cluster, the length of existing `rolloutClusters` will be compared with the - // target rollout size to determine whether to return or not first. - // The timeoutClusters, as well as failed clusters will be counted into failureCount, the next rollout - // will stop if failureCount > maxFailures. - rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters) - } - - // Keep track of TimeOut or Failed clusters and check total against MaxFailures - if status.Status == TimeOut || status.Status == Failed { - failureCount++ - - failureBreach = failureCount > maxFailures - } - - // Return if the list of exsiting rollout clusters has reached the target rollout size - if len(rolloutClusters) >= rolloutSize { - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, - MaxFailureBreach: failureBreach, - RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), - } - } - } - - // Return if the exsiting rollout clusters maxFailures is breached. 
- if failureBreach { - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, - MaxFailureBreach: failureBreach, - RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), - } - } - - clusters := clusterGroupsMap.GetClusters().UnsortedList() - clusterToGroupKey := clusterGroupsMap.ClusterToGroupKey() - - // Sort the clusters in alphabetical order to ensure consistency. - sort.Strings(clusters) - - // Amend clusters to the rollout up to the rollout size - for _, cluster := range clusters { - if existingClusters[cluster] { - continue - } - - // For clusters without a rollout status, set the status to ToApply - status := ClusterRolloutStatus{ - ClusterName: cluster, - Status: ToApply, - GroupKey: clusterToGroupKey[cluster], - } - rolloutClusters = append(rolloutClusters, status) - - // Return if the list of rollout clusters has reached the target rollout size - if len(rolloutClusters) >= rolloutSize { - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, - RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), - } - } - } - - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, - RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), - } -} - -func progressivePerGroup( - clusterGroupsMap clusterv1beta1.ClusterGroupsMap, - maxFailures intstr.IntOrString, - minSuccessTime time.Duration, - timeout time.Duration, - existingClusterStatus []ClusterRolloutStatus, -) RolloutResult { - var rolloutClusters, timeoutClusters []ClusterRolloutStatus - existingClusters := make(map[string]RolloutStatus) - - // Collect existing cluster status and determine any TimeOut statuses - for _, status := range existingClusterStatus { - if status.ClusterName == "" { - continue - } - - // ToApply will be reconsidered in the decisionGroups iteration. - if status.Status != ToApply { - // For progress per group, the existing rollout clusters and timeout clusters status will be recored in existingClusters first, - // then go through group by group. 
- rolloutClusters, timeoutClusters = determineRolloutStatus(&status, minSuccessTime, timeout, rolloutClusters, timeoutClusters) - existingClusters[status.ClusterName] = status.Status - } - } - - totalFailureCount := 0 - failureBreach := false - clusterGroupKeys := clusterGroupsMap.GetOrderedGroupKeys() - for _, key := range clusterGroupKeys { - groupFailureCount := 0 - if subclusters, ok := clusterGroupsMap[key]; ok { - // Calculate the max failure threshold for the group--the returned error was checked - // previously, so it's ignored here - maxGroupFailures, _ := calculateRolloutSize(maxFailures, len(subclusters), 0) - // Iterate through clusters in the group - clusters := subclusters.UnsortedList() - sort.Strings(clusters) - for _, cluster := range clusters { - if status, ok := existingClusters[cluster]; ok { - // Keep track of TimeOut or Failed clusters and check total against MaxFailures - if status == TimeOut || status == Failed { - groupFailureCount++ - - failureBreach = groupFailureCount > maxGroupFailures - } - - continue - } - - status := ClusterRolloutStatus{ - ClusterName: cluster, - Status: ToApply, - GroupKey: key, - } - rolloutClusters = append(rolloutClusters, status) - } - - totalFailureCount += groupFailureCount - - // As it is perGroup, return if there are clusters to rollOut that aren't - // Failed/Timeout, or there was a breach of the MaxFailure configuration - if len(rolloutClusters)-totalFailureCount > 0 || failureBreach { - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, - MaxFailureBreach: failureBreach, - RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), - } - } - } - } - - return RolloutResult{ - ClustersToRollout: rolloutClusters, - ClustersTimeOut: timeoutClusters, - MaxFailureBreach: failureBreach, - RecheckAfter: minRecheckAfter(rolloutClusters, minSuccessTime), - } -} - -// determineRolloutStatus checks whether a cluster should continue its rollout based on its current -// status and timeout. The function updates the cluster status and appends it to the expected slice. -// Nothing is done for TimeOut or Skip statuses. -// -// The minSuccessTime parameter is utilized for handling succeeded clusters that are still within -// the configured soak time, in which case the cluster will be returned as a rolloutCluster. -// -// The timeout parameter is utilized for handling progressing and failed statuses and any other -// unknown status: -// 1. If timeout is set to None (maxTimeDuration), the function will append the clusterStatus to -// the rollOut Clusters. -// 2. If timeout is set to 0, the function append the clusterStatus to the timeOut clusters. -func determineRolloutStatus( - status *ClusterRolloutStatus, - minSuccessTime time.Duration, - timeout time.Duration, - rolloutClusters []ClusterRolloutStatus, - timeoutClusters []ClusterRolloutStatus, -) ([]ClusterRolloutStatus, []ClusterRolloutStatus) { - - switch status.Status { - case ToApply: - rolloutClusters = append(rolloutClusters, *status) - case Succeeded: - // If the cluster succeeded but is still within the MinSuccessTime (i.e. 
"soak" time), - // still add it to the list of rolloutClusters - minSuccessTimeTime := getTimeOutTime(status.LastTransitionTime, minSuccessTime) - if RolloutClock.Now().Before(minSuccessTimeTime.Time) { - rolloutClusters = append(rolloutClusters, *status) - } - - return rolloutClusters, timeoutClusters - case TimeOut, Skip: - return rolloutClusters, timeoutClusters - default: // For progressing, failed, or unknown status. - timeOutTime := getTimeOutTime(status.LastTransitionTime, timeout) - status.TimeOutTime = timeOutTime - // check if current time is before the timeout time - if timeOutTime == nil || RolloutClock.Now().Before(timeOutTime.Time) { - rolloutClusters = append(rolloutClusters, *status) - } else { - status.Status = TimeOut - timeoutClusters = append(timeoutClusters, *status) - } - } - - return rolloutClusters, timeoutClusters -} - -// getTimeOutTime calculates the timeout time given a start time and duration, instantiating the -// RolloutClock if a start time isn't provided. -func getTimeOutTime(startTime *metav1.Time, timeout time.Duration) *metav1.Time { - var timeoutTime time.Time - // if timeout is not set (default to maxTimeDuration), the timeout time should not be set - if timeout == maxTimeDuration { - return nil - } - if startTime == nil { - timeoutTime = RolloutClock.Now().Add(timeout) - } else { - timeoutTime = startTime.Add(timeout) - } - return &metav1.Time{Time: timeoutTime} -} - -// calculateRolloutSize calculates the maximum portion from a total number of clusters by parsing a -// maximum threshold value that can be either a quantity or a percent, returning an error if the -// threshold can't be parsed to either of those. -func calculateRolloutSize(maxThreshold intstr.IntOrString, total int, defaultThreshold int) (int, error) { - length := defaultThreshold - - // Verify the format of the IntOrString value - err := parseRolloutSize(maxThreshold) - if err != nil { - return length, err - } - - // Calculate the rollout size--errors are ignored because - // they were handled in parseRolloutSize() previously - switch maxThreshold.Type { - case intstr.Int: - length = maxThreshold.IntValue() - case intstr.String: - str := maxThreshold.StrVal - f, _ := strconv.ParseFloat(str[:len(str)-1], 64) - length = int(math.Ceil(f / 100 * float64(total))) - } - - if length <= 0 || length > total { - length = defaultThreshold - } - - return length, nil -} - -// parseRolloutSize parses a maximum threshold value that can be either a quantity or a percent, -// returning an error if the threshold can't be parsed to either of those. -func parseRolloutSize(maxThreshold intstr.IntOrString) error { - - switch maxThreshold.Type { - case intstr.Int: - break - case intstr.String: - str := maxThreshold.StrVal - if strings.HasSuffix(str, "%") { - _, err := strconv.ParseFloat(str[:len(str)-1], 64) - if err != nil { - return err - } - } else { - return fmt.Errorf("'%s' is an invalid maximum threshold value: string is not a percentage", str) - } - default: - return fmt.Errorf("invalid maximum threshold type %+v", maxThreshold.Type) - } - - return nil -} - -// ParseTimeout will return the maximum possible duration given "None", an empty string, or an -// invalid duration, otherwise parsing and returning the duration provided. 
-func parseTimeout(timeoutStr string) (time.Duration, error) { - // Define the regex pattern to match the timeout string - pattern := "^(([0-9])+[h|m|s])|None$" - regex := regexp.MustCompile(pattern) - - if timeoutStr == "None" || timeoutStr == "" { - // If the timeout is "None" or empty, return the maximum duration - return maxTimeDuration, nil - } - - // Check if the timeout string matches the pattern - if !regex.MatchString(timeoutStr) { - return maxTimeDuration, fmt.Errorf("invalid timeout format") - } - - return time.ParseDuration(timeoutStr) -} - -func decisionGroupsToGroupKeys(decisionsGroup []MandatoryDecisionGroup) []clusterv1beta1.GroupKey { - var result []clusterv1beta1.GroupKey - for _, d := range decisionsGroup { - gk := clusterv1beta1.GroupKey{} - // GroupName is considered first to select the decisionGroups then GroupIndex. - if d.GroupName != "" { - gk.GroupName = d.GroupName - } else { - gk.GroupIndex = d.GroupIndex - } - result = append(result, gk) - } - return result -} - -func minRecheckAfter(rolloutClusters []ClusterRolloutStatus, minSuccessTime time.Duration) *time.Duration { - var minRecheckAfter *time.Duration - for _, r := range rolloutClusters { - if r.TimeOutTime != nil { - timeOut := r.TimeOutTime.Sub(RolloutClock.Now()) - if minRecheckAfter == nil || *minRecheckAfter > timeOut { - minRecheckAfter = &timeOut - } - } - } - if minSuccessTime != 0 && (minRecheckAfter == nil || minSuccessTime < *minRecheckAfter) { - minRecheckAfter = &minSuccessTime - } - - return minRecheckAfter -} diff --git a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go index 85e494258..e40f74e4d 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1alpha1/zz_generated.deepcopy.go @@ -1,14 +1,13 @@ //go:build !ignore_autogenerated // +build !ignore_autogenerated -// Code generated by controller-gen. DO NOT EDIT. +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - timex "time" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -17,6 +16,7 @@ func (in *AddOnPlacementScore) DeepCopyInto(out *AddOnPlacementScore) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Status.DeepCopyInto(&out.Status) + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScore. @@ -40,6 +40,7 @@ func (in *AddOnPlacementScore) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AddOnPlacementScoreItem) DeepCopyInto(out *AddOnPlacementScoreItem) { *out = *in + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScoreItem. @@ -64,6 +65,7 @@ func (in *AddOnPlacementScoreList) DeepCopyInto(out *AddOnPlacementScoreList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScoreList. 
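The rollout helpers deleted above are re-homed rather than dropped: the graph.go hunk at the top of this patch switches its alias from clusterv1alpha1 to clusterv1sdkalpha1, so the same logic is now consumed from sdk-go. A minimal sketch of wiring a custom ClusterRolloutStatusFunc into NewRolloutHandler, assuming sdk-go keeps the signatures of the deleted file and mirrors its layout under pkg/apis/cluster (this patch only confirms the pkg/apis/work/v1/builder path; demoWorkload and statusFunc are illustrative names):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
	clusterv1sdkalpha1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1alpha1"
	clusterv1sdkbeta1 "open-cluster-management.io/sdk-go/pkg/apis/cluster/v1beta1"
)

// demoWorkload stands in for whatever resource type is being rolled out.
type demoWorkload struct{ applied bool }

// statusFunc satisfies the ClusterRolloutStatusFunc contract: map one
// workload on one cluster to a rollout status.
func statusFunc(cluster string, w demoWorkload) (clusterv1sdkalpha1.ClusterRolloutStatus, error) {
	status := clusterv1sdkalpha1.ToApply
	if w.applied {
		status = clusterv1sdkalpha1.Succeeded
	}
	return clusterv1sdkalpha1.ClusterRolloutStatus{ClusterName: cluster, Status: status}, nil
}

func main() {
	// Tracker seeded with two clusters in the default group (index 0);
	// a nil placement/getter is tolerated, Refresh simply becomes a no-op.
	tracker := clusterv1sdkbeta1.NewPlacementDecisionClustersTracker(
		nil, nil, sets.New("cluster1", "cluster2"))

	handler, err := clusterv1sdkalpha1.NewRolloutHandler[demoWorkload](tracker, statusFunc)
	if err != nil {
		panic(err)
	}

	// Strategy type All: every tracked cluster whose status is not yet
	// Succeeded lands in ClustersToRollout.
	_, result, err := handler.GetRolloutCluster(
		clusterv1alpha1.RolloutStrategy{Type: clusterv1alpha1.All}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(result.ClustersToRollout)) // 2
}

Note that the strategy value itself still comes from the api module: as the retained deepcopy functions in this file show, only helpers.go left the package, not the RolloutStrategy types.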
@@ -103,6 +105,7 @@ func (in *AddOnPlacementScoreStatus) DeepCopyInto(out *AddOnPlacementScoreStatus in, out := &in.ValidUntil, &out.ValidUntil *out = (*in).DeepCopy() } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AddOnPlacementScoreStatus. @@ -121,6 +124,7 @@ func (in *ClusterClaim) DeepCopyInto(out *ClusterClaim) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) out.Spec = in.Spec + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaim. @@ -153,6 +157,7 @@ func (in *ClusterClaimList) DeepCopyInto(out *ClusterClaimList) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimList. @@ -176,6 +181,7 @@ func (in *ClusterClaimList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterClaimSpec) DeepCopyInto(out *ClusterClaimSpec) { *out = *in + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterClaimSpec. @@ -188,33 +194,10 @@ func (in *ClusterClaimSpec) DeepCopy() *ClusterClaimSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRolloutStatus) DeepCopyInto(out *ClusterRolloutStatus) { - *out = *in - out.GroupKey = in.GroupKey - if in.LastTransitionTime != nil { - in, out := &in.LastTransitionTime, &out.LastTransitionTime - *out = (*in).DeepCopy() - } - if in.TimeOutTime != nil { - in, out := &in.TimeOutTime, &out.TimeOutTime - *out = (*in).DeepCopy() - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRolloutStatus. -func (in *ClusterRolloutStatus) DeepCopy() *ClusterRolloutStatus { - if in == nil { - return nil - } - out := new(ClusterRolloutStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MandatoryDecisionGroup) DeepCopyInto(out *MandatoryDecisionGroup) { *out = *in + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MandatoryDecisionGroup. @@ -235,6 +218,7 @@ func (in *MandatoryDecisionGroups) DeepCopyInto(out *MandatoryDecisionGroups) { *out = make([]MandatoryDecisionGroup, len(*in)) copy(*out, *in) } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MandatoryDecisionGroups. @@ -251,6 +235,7 @@ func (in *MandatoryDecisionGroups) DeepCopy() *MandatoryDecisionGroups { func (in *RolloutAll) DeepCopyInto(out *RolloutAll) { *out = *in out.RolloutConfig = in.RolloutConfig + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutAll. @@ -268,6 +253,7 @@ func (in *RolloutConfig) DeepCopyInto(out *RolloutConfig) { *out = *in out.MinSuccessTime = in.MinSuccessTime out.MaxFailures = in.MaxFailures + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutConfig. 
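The MaxConcurrency and MaxFailures knobs handled by the deleted helpers are intstr.IntOrString values: parseRolloutSize accepts a plain count or an "N%" string, and calculateRolloutSize turns a percentage into ceil(N/100 * total) with a fallback default. A self-contained sketch of that arithmetic, reimplemented for illustration rather than imported (rolloutSize is a hypothetical name, not an exported API):

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// rolloutSize mirrors the deleted calculateRolloutSize: an integer is used
// as-is, an "N%" string becomes ceil(N/100 * total), and anything outside
// the (0, total] range falls back to the supplied default.
func rolloutSize(threshold intstr.IntOrString, total, def int) (int, error) {
	length := def
	switch threshold.Type {
	case intstr.Int:
		length = threshold.IntValue()
	case intstr.String:
		s := threshold.StrVal
		if !strings.HasSuffix(s, "%") {
			return def, fmt.Errorf("%q is not a percentage", s)
		}
		f, err := strconv.ParseFloat(strings.TrimSuffix(s, "%"), 64)
		if err != nil {
			return def, err
		}
		length = int(math.Ceil(f / 100 * float64(total)))
	}
	if length <= 0 || length > total {
		length = def
	}
	return length, nil
}

func main() {
	n, _ := rolloutSize(intstr.FromString("25%"), 10, 10)
	fmt.Println(n) // 3: ceil(0.25 * 10)
	n, _ = rolloutSize(intstr.FromInt32(4), 10, 10)
	fmt.Println(n) // 4
}

A result of zero or anything above total falls back to the default, which is how the deleted helpers treat an unset MaxConcurrency as "all clusters" and an unset MaxFailures as zero tolerance.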
@@ -286,6 +272,7 @@ func (in *RolloutProgressive) DeepCopyInto(out *RolloutProgressive) { out.RolloutConfig = in.RolloutConfig in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) out.MaxConcurrency = in.MaxConcurrency + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressive. @@ -303,6 +290,7 @@ func (in *RolloutProgressivePerGroup) DeepCopyInto(out *RolloutProgressivePerGro *out = *in out.RolloutConfig = in.RolloutConfig in.MandatoryDecisionGroups.DeepCopyInto(&out.MandatoryDecisionGroups) + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutProgressivePerGroup. @@ -315,47 +303,6 @@ func (in *RolloutProgressivePerGroup) DeepCopy() *RolloutProgressivePerGroup { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RolloutResult) DeepCopyInto(out *RolloutResult) { - *out = *in - if in.ClustersToRollout != nil { - in, out := &in.ClustersToRollout, &out.ClustersToRollout - *out = make([]ClusterRolloutStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ClustersTimeOut != nil { - in, out := &in.ClustersTimeOut, &out.ClustersTimeOut - *out = make([]ClusterRolloutStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ClustersRemoved != nil { - in, out := &in.ClustersRemoved, &out.ClustersRemoved - *out = make([]ClusterRolloutStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.RecheckAfter != nil { - in, out := &in.RecheckAfter, &out.RecheckAfter - *out = new(timex.Duration) - **out = **in - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutResult. -func (in *RolloutResult) DeepCopy() *RolloutResult { - if in == nil { - return nil - } - out := new(RolloutResult) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) { *out = *in @@ -374,6 +321,7 @@ func (in *RolloutStrategy) DeepCopyInto(out *RolloutStrategy) { *out = new(RolloutProgressivePerGroup) (*in).DeepCopyInto(*out) } + return } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RolloutStrategy. 
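Only helpers.go leaves this package: the hunks above retain the deepcopy functions for the strategy types, so RolloutStrategy, RolloutConfig and MandatoryDecisionGroups still live in api/cluster/v1alpha1. A sketch of a Progressive strategy exercising the fields the re-homed helpers read (the quantities and the "canary" group name are illustrative):

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	clusterv1alpha1 "open-cluster-management.io/api/cluster/v1alpha1"
)

var strategy = clusterv1alpha1.RolloutStrategy{
	Type: clusterv1alpha1.Progressive,
	Progressive: &clusterv1alpha1.RolloutProgressive{
		RolloutConfig: clusterv1alpha1.RolloutConfig{
			MinSuccessTime:   metav1.Duration{Duration: 5 * time.Minute}, // soak time for Succeeded clusters
			ProgressDeadline: "30m",                    // parsed by parseTimeout; "None" waits forever
			MaxFailures:      intstr.FromString("10%"), // count or percentage, see parseRolloutSize
		},
		// Mandatory decision groups roll out first and tolerate no failures.
		MandatoryDecisionGroups: clusterv1alpha1.MandatoryDecisionGroups{
			MandatoryDecisionGroups: []clusterv1alpha1.MandatoryDecisionGroup{
				{GroupName: "canary"}, // GroupName is preferred over GroupIndex
			},
		},
		// The remaining clusters are rolled out two at a time.
		MaxConcurrency: intstr.FromInt32(2),
	},
}

func main() {
	fmt.Println(strategy.Type)
}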
diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go deleted file mode 100644 index 503dbc7b6..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/helpers.go +++ /dev/null @@ -1,274 +0,0 @@ -package v1beta1 - -import ( - "fmt" - "sort" - "strconv" - "sync" - - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/util/sets" -) - -type PlacementDecisionGetter interface { - List(selector labels.Selector, namespace string) (ret []*PlacementDecision, err error) -} - -// +k8s:deepcopy-gen=false -type PlacementDecisionClustersTracker struct { - placement *Placement - placementDecisionGetter PlacementDecisionGetter - existingScheduledClusterGroups ClusterGroupsMap - clusterGroupsIndexToName map[int32]string - clusterGroupsNameToIndex map[string][]int32 - lock sync.RWMutex -} - -// +k8s:deepcopy-gen=false -type GroupKey struct { - GroupName string `json:"groupName,omitempty"` - GroupIndex int32 `json:"groupIndex,omitempty"` -} - -// NewPlacementDecisionClustersTracker initializes a PlacementDecisionClustersTracker -// using existing clusters. Clusters are added to the default cluster group with index 0. -// Set existingScheduledClusters to nil if there are no existing clusters. -func NewPlacementDecisionClustersTracker(placement *Placement, pdl PlacementDecisionGetter, existingScheduledClusters sets.Set[string]) *PlacementDecisionClustersTracker { - pdct := &PlacementDecisionClustersTracker{ - placement: placement, - placementDecisionGetter: pdl, - existingScheduledClusterGroups: ClusterGroupsMap{{GroupIndex: 0}: existingScheduledClusters}, - } - - // Generate group name indices for the tracker. - pdct.generateGroupsNameIndex() - return pdct -} - -// NewPlacementDecisionClustersTrackerWithGroups initializes a PlacementDecisionClustersTracker -// using existing cluster groups. Set existingScheduledClusterGroups to nil if no groups exist. -func NewPlacementDecisionClustersTrackerWithGroups(placement *Placement, pdl PlacementDecisionGetter, existingScheduledClusterGroups ClusterGroupsMap) *PlacementDecisionClustersTracker { - pdct := &PlacementDecisionClustersTracker{ - placement: placement, - placementDecisionGetter: pdl, - existingScheduledClusterGroups: existingScheduledClusterGroups, - } - - // Generate group name indices for the tracker. - pdct.generateGroupsNameIndex() - return pdct -} - -// Refresh refreshes the tracker's decisionClusters. 
-func (pdct *PlacementDecisionClustersTracker) Refresh() error { - pdct.lock.Lock() - defer pdct.lock.Unlock() - - if pdct.placement == nil || pdct.placementDecisionGetter == nil { - return nil - } - - // Get the generated PlacementDecisions - decisionSelector := labels.SelectorFromSet(labels.Set{ - PlacementLabel: pdct.placement.Name, - }) - decisions, err := pdct.placementDecisionGetter.List(decisionSelector, pdct.placement.Namespace) - if err != nil { - return fmt.Errorf("failed to list PlacementDecisions: %w", err) - } - - // Get the decision cluster names and groups - newScheduledClusterGroups := map[GroupKey]sets.Set[string]{} - for _, d := range decisions { - groupKey, err := parseGroupKeyFromDecision(d) - if err != nil { - return err - } - - if _, exist := newScheduledClusterGroups[groupKey]; !exist { - newScheduledClusterGroups[groupKey] = sets.New[string]() - } - - for _, sd := range d.Status.Decisions { - newScheduledClusterGroups[groupKey].Insert(sd.ClusterName) - } - } - - // Update the existing decision cluster groups - pdct.existingScheduledClusterGroups = newScheduledClusterGroups - pdct.generateGroupsNameIndex() - - return nil -} - -// GetClusterChanges updates the tracker's decisionClusters and returns added and deleted cluster names. -func (pdct *PlacementDecisionClustersTracker) GetClusterChanges() (sets.Set[string], sets.Set[string], error) { - // Get existing clusters - existingScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters() - - // Refresh clusters - err := pdct.Refresh() - if err != nil { - return nil, nil, err - } - newScheduledClusters := pdct.existingScheduledClusterGroups.GetClusters() - - // Compare the difference - added := newScheduledClusters.Difference(existingScheduledClusters) - deleted := existingScheduledClusters.Difference(newScheduledClusters) - - return added, deleted, nil -} - -func (pdct *PlacementDecisionClustersTracker) generateGroupsNameIndex() { - pdct.clusterGroupsIndexToName = map[int32]string{} - pdct.clusterGroupsNameToIndex = map[string][]int32{} - - for groupkey := range pdct.existingScheduledClusterGroups { - // index to name - pdct.clusterGroupsIndexToName[groupkey.GroupIndex] = groupkey.GroupName - // name to index - if index, exist := pdct.clusterGroupsNameToIndex[groupkey.GroupName]; exist { - pdct.clusterGroupsNameToIndex[groupkey.GroupName] = append(index, groupkey.GroupIndex) - } else { - pdct.clusterGroupsNameToIndex[groupkey.GroupName] = []int32{groupkey.GroupIndex} - } - } - - // sort index order - for _, index := range pdct.clusterGroupsNameToIndex { - sort.Slice(index, func(i, j int) bool { - return index[i] < index[j] - }) - } -} - -// ExistingClusterGroups returns the tracker's existing decision cluster groups for groups listed in groupKeys. -// Return empty set when groupKeys is empty. -func (pdct *PlacementDecisionClustersTracker) ExistingClusterGroups(groupKeys ...GroupKey) ClusterGroupsMap { - pdct.lock.RLock() - defer pdct.lock.RUnlock() - - resultClusterGroups := make(map[GroupKey]sets.Set[string]) - - includeGroupKeys := pdct.fulfillGroupKeys(groupKeys) - for _, groupKey := range includeGroupKeys { - if clusters, found := pdct.existingScheduledClusterGroups[groupKey]; found { - resultClusterGroups[groupKey] = clusters - } - } - - return resultClusterGroups -} - -// ExistingClusterGroupsBesides returns the tracker's existing decision cluster groups except cluster groups listed in groupKeys. -// Return all the clusters when groupKeys is empty. 
-func (pdct *PlacementDecisionClustersTracker) ExistingClusterGroupsBesides(groupKeys ...GroupKey) ClusterGroupsMap { - pdct.lock.RLock() - defer pdct.lock.RUnlock() - - resultClusterGroups := make(map[GroupKey]sets.Set[string]) - - excludeGroupKeys := pdct.fulfillGroupKeys(groupKeys) - includeGroupKeys := pdct.getGroupKeysBesides(excludeGroupKeys) - for _, groupKey := range includeGroupKeys { - if clusters, found := pdct.existingScheduledClusterGroups[groupKey]; found { - resultClusterGroups[groupKey] = clusters - } - } - - return resultClusterGroups -} - -// Fulfill the expect groupkeys with group name or group index, the returned groupkeys are ordered by input group name then group index. -// For example, the input is []GroupKey{{GroupName: "group1"}, {GroupIndex: 2}}, -// the returned is []GroupKey{{GroupName: "group1", GroupIndex: 0}, {GroupName: "group1", GroupIndex: 1}, {GroupName: "group2", GroupIndex: 2}} -func (pdct *PlacementDecisionClustersTracker) fulfillGroupKeys(groupKeys []GroupKey) []GroupKey { - fulfilledGroupKeys := []GroupKey{} - for _, gk := range groupKeys { - if gk.GroupName != "" { - if indexes, exist := pdct.clusterGroupsNameToIndex[gk.GroupName]; exist { - for _, groupIndex := range indexes { - fulfilledGroupKeys = append(fulfilledGroupKeys, GroupKey{GroupName: gk.GroupName, GroupIndex: groupIndex}) - } - } - } else { - if groupName, exist := pdct.clusterGroupsIndexToName[gk.GroupIndex]; exist { - fulfilledGroupKeys = append(fulfilledGroupKeys, GroupKey{GroupName: groupName, GroupIndex: gk.GroupIndex}) - } - } - } - return fulfilledGroupKeys -} - -func (pdct *PlacementDecisionClustersTracker) getGroupKeysBesides(groupKeyToExclude []GroupKey) []GroupKey { - groupKey := []GroupKey{} - for i := 0; i < len(pdct.clusterGroupsIndexToName); i++ { - gKey := GroupKey{GroupName: pdct.clusterGroupsIndexToName[int32(i)], GroupIndex: int32(i)} - if !containsGroupKey(groupKeyToExclude, gKey) { - groupKey = append(groupKey, gKey) - } - } - - return groupKey -} - -// ClusterGroupsMap is a custom type representing a map of group keys to sets of cluster names. -type ClusterGroupsMap map[GroupKey]sets.Set[string] - -// GetOrderedGroupKeys returns an ordered slice of GroupKeys, sorted by group index. -func (g ClusterGroupsMap) GetOrderedGroupKeys() []GroupKey { - groupKeys := []GroupKey{} - for groupKey := range g { - groupKeys = append(groupKeys, groupKey) - } - - // sort by group index index - sort.Slice(groupKeys, func(i, j int) bool { - return groupKeys[i].GroupIndex < groupKeys[j].GroupIndex - }) - - return groupKeys -} - -// GetClusters returns a set containing all clusters from all group sets. -func (g ClusterGroupsMap) GetClusters() sets.Set[string] { - clusterSet := sets.New[string]() - for _, clusterGroup := range g { - clusterSet = clusterSet.Union(clusterGroup) - } - return clusterSet -} - -// ClusterToGroupKey returns a mapping of cluster names to their respective group keys. -func (g ClusterGroupsMap) ClusterToGroupKey() map[string]GroupKey { - clusterToGroupKey := map[string]GroupKey{} - - for groupKey, clusterGroup := range g { - for c := range clusterGroup { - clusterToGroupKey[c] = groupKey - } - } - - return clusterToGroupKey -} - -// Helper function to check if a groupKey is present in the groupKeys slice. 
-func containsGroupKey(groupKeys []GroupKey, groupKey GroupKey) bool { - for _, gk := range groupKeys { - if gk == groupKey { - return true - } - } - return false -} - -func parseGroupKeyFromDecision(d *PlacementDecision) (GroupKey, error) { - groupName := d.Labels[DecisionGroupNameLabel] - groupIndex := d.Labels[DecisionGroupIndexLabel] - groupIndexNum, err := strconv.Atoi(groupIndex) - if err != nil { - return GroupKey{}, fmt.Errorf("incorrect group index: %w", err) - } - return GroupKey{GroupName: groupName, GroupIndex: int32(groupIndexNum)}, nil -} diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go index 347b89457..cf2c2f1f0 100644 --- a/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/cluster/v1beta1/zz_generated.deepcopy.go @@ -8,7 +8,6 @@ package v1beta1 import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" - sets "k8s.io/apimachinery/pkg/util/sets" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. @@ -66,38 +65,6 @@ func (in *ClusterDecision) DeepCopy() *ClusterDecision { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ClusterGroupsMap) DeepCopyInto(out *ClusterGroupsMap) { - { - in := &in - *out = make(ClusterGroupsMap, len(*in)) - for key, val := range *in { - var outVal map[string]sets.Empty - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(sets.Set[string], len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - (*out)[key] = outVal - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterGroupsMap. -func (in ClusterGroupsMap) DeepCopy() ClusterGroupsMap { - if in == nil { - return nil - } - out := new(ClusterGroupsMap) - in.DeepCopyInto(out) - return *out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ClusterPredicate) DeepCopyInto(out *ClusterPredicate) { *out = *in diff --git a/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go b/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go deleted file mode 100644 index 25847109d..000000000 --- a/vendor/open-cluster-management.io/api/cluster/v1beta2/helpers.go +++ /dev/null @@ -1,110 +0,0 @@ -package v1beta2 - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - v1 "open-cluster-management.io/api/cluster/v1" -) - -type ManagedClustersGetter interface { - List(selector labels.Selector) (ret []*v1.ManagedCluster, err error) -} - -type ManagedClusterSetsGetter interface { - List(selector labels.Selector) (ret []*ManagedClusterSet, err error) -} - -type ManagedClusterSetBindingsGetter interface { - List(namespace string, selector labels.Selector) (ret []*ManagedClusterSetBinding, err error) -} - -// GetClustersFromClusterSet return the ManagedClusterSet's managedClusters -func GetClustersFromClusterSet(clusterSet *ManagedClusterSet, - clustersGetter ManagedClustersGetter) ([]*v1.ManagedCluster, error) { - var clusters []*v1.ManagedCluster - - if clusterSet == nil { - return nil, nil - } - - clusterSelector, err := BuildClusterSelector(clusterSet) - if err != nil { - return nil, err - } - if clusterSelector == nil { - return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) - } - clusters, err = clustersGetter.List(clusterSelector) - if err != nil { - return nil, fmt.Errorf("failed to list ManagedClusters: %w", err) - } - return clusters, nil -} - -// GetClusterSetsOfClusterByCluster return the managedClusterSets of a managedCluster -func GetClusterSetsOfCluster(cluster *v1.ManagedCluster, - clusterSetsGetter ManagedClusterSetsGetter) ([]*ManagedClusterSet, error) { - var returnClusterSets []*ManagedClusterSet - - if cluster == nil { - return nil, nil - } - - allClusterSets, err := clusterSetsGetter.List(labels.Everything()) - if err != nil { - return nil, err - } - for _, clusterSet := range allClusterSets { - clusterSelector, err := BuildClusterSelector(clusterSet) - if err != nil { - return nil, err - } - if clusterSelector == nil { - return nil, fmt.Errorf("failed to build ClusterSelector with clusterSet: %v", clusterSet) - } - if clusterSelector.Matches(labels.Set(cluster.Labels)) { - returnClusterSets = append(returnClusterSets, clusterSet) - } - } - return returnClusterSets, nil -} - -func BuildClusterSelector(clusterSet *ManagedClusterSet) (labels.Selector, error) { - if clusterSet == nil { - return nil, nil - } - selectorType := clusterSet.Spec.ClusterSelector.SelectorType - - switch selectorType { - case "", ExclusiveClusterSetLabel: - return labels.SelectorFromSet(labels.Set{ - ClusterSetLabel: clusterSet.Name, - }), nil - case LabelSelector: - return metav1.LabelSelectorAsSelector(clusterSet.Spec.ClusterSelector.LabelSelector) - default: - return nil, fmt.Errorf("selectorType is not right: %s", clusterSet.Spec.ClusterSelector.SelectorType) - } -} - -// GetBoundManagedClusterSetBindings returns all bindings that are bounded to clustersets in the given namespace. 
-func GetBoundManagedClusterSetBindings(namespace string, - clusterSetBindingsGetter ManagedClusterSetBindingsGetter) ([]*ManagedClusterSetBinding, error) { - // get all clusterset bindings under the namespace - bindings, err := clusterSetBindingsGetter.List(namespace, labels.Everything()) - if err != nil { - return nil, err - } - - boundBindings := []*ManagedClusterSetBinding{} - for _, binding := range bindings { - if meta.IsStatusConditionTrue(binding.Status.Conditions, ClusterSetBindingBoundType) { - boundBindings = append(boundBindings, binding) - } - } - - return boundBindings, nil -} diff --git a/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml index 9ccbdae5d..071da4342 100644 --- a/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/vendor/open-cluster-management.io/api/crdsv1beta1/0001_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -156,15 +156,53 @@ spec: description: RegistrationImagePullSpec represents the desired image configuration of registration agent. quay.io/open-cluster-management.io/registration:latest will be used if unspecified. type: string resourceRequirement: - description: ResourceRequirement specify QoS classes of klusterlet deployment + description: ResourceRequirement specify QoS classes of deployments managed by klusterlet. It applies to all the containers in the deployments. type: object properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and limits when Type is ResourceQosClassResourceRequirement + type: object + properties: + claims: + description: "Claims lists the names of resources, defined in spec.resourceClaims, that are used by this container. \n This is an alpha field and requires enabling the DynamicResourceAllocation feature gate. \n This field is immutable. It can only be set for containers." + type: array + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + type: object + required: + - name + properties: + name: + description: Name must match the name of one entry in pod.spec.resourceClaims of the Pod where this field is used. It makes that resource available inside a container. + type: string + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + description: 'Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true + requests: + description: 'Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. Requests cannot exceed Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + additionalProperties: + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + anyOf: + - type: integer + - type: string + x-kubernetes-int-or-string: true type: type: string default: Default enum: - Default - BestEffort + - ResourceRequirement workConfiguration: description: WorkConfiguration contains the configuration of work type: object diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml index df8d8324d..ad6e6434b 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml +++ b/vendor/open-cluster-management.io/api/operator/v1/0000_00_operator.open-cluster-management.io_klusterlets.crd.yaml @@ -244,14 +244,65 @@ spec: will be used if unspecified. type: string resourceRequirement: - description: ResourceRequirement specify QoS classes of klusterlet - deployment + description: ResourceRequirement specify QoS classes of deployments + managed by klusterlet. It applies to all the containers in the deployments. properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and + limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object type: default: Default enum: - Default - BestEffort + - ResourceRequirement type: string type: object workConfiguration: diff --git a/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml b/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml index b57e95f5f..8cc24234b 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml +++ b/vendor/open-cluster-management.io/api/operator/v1/0000_01_operator.open-cluster-management.io_clustermanagers.crd.yaml @@ -259,6 +259,69 @@ spec: description: RegistrationImagePullSpec represents the desired image of registration controller/webhook installed on hub. type: string + resourceRequirement: + description: ResourceRequirement specify QoS classes of deployments + managed by clustermanager. It applies to all the containers in the + deployments. + properties: + resourceRequirements: + description: ResourceRequirements defines resource requests and + limits when Type is ResourceQosClassResourceRequirement + properties: + claims: + description: "Claims lists the names of resources, defined + in spec.resourceClaims, that are used by this container. + \n This is an alpha field and requires enabling the DynamicResourceAllocation + feature gate. \n This field is immutable. It can only be + set for containers." + items: + description: ResourceClaim references one entry in PodSpec.ResourceClaims. + properties: + name: + description: Name must match the name of one entry in + pod.spec.resourceClaims of the Pod where this field + is used. It makes that resource available inside a + container. + type: string + required: + - name + type: object + type: array + x-kubernetes-list-map-keys: + - name + x-kubernetes-list-type: map + limits: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Limits describes the maximum amount of compute + resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + requests: + additionalProperties: + anyOf: + - type: integer + - type: string + pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$ + x-kubernetes-int-or-string: true + description: 'Requests describes the minimum amount of compute + resources required. If Requests is omitted for a container, + it defaults to Limits if that is explicitly specified, otherwise + to an implementation-defined value. Requests cannot exceed + Limits. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/' + type: object + type: object + type: + default: Default + enum: + - Default + - BestEffort + - ResourceRequirement + type: string + type: object workConfiguration: description: WorkConfiguration contains the configuration of work properties: diff --git a/vendor/open-cluster-management.io/api/operator/v1/funcs_clustermanager.go b/vendor/open-cluster-management.io/api/operator/v1/funcs_clustermanager.go new file mode 100644 index 000000000..8ed923b8c --- /dev/null +++ b/vendor/open-cluster-management.io/api/operator/v1/funcs_clustermanager.go @@ -0,0 +1,5 @@ +package v1 + +func (cm *ClusterManager) GetResourceRequirement() *ResourceRequirement { + return cm.Spec.ResourceRequirement +} diff --git a/vendor/open-cluster-management.io/api/operator/v1/funcs_klusterlet.go b/vendor/open-cluster-management.io/api/operator/v1/funcs_klusterlet.go new file mode 100644 index 000000000..3bd56472f --- /dev/null +++ b/vendor/open-cluster-management.io/api/operator/v1/funcs_klusterlet.go @@ -0,0 +1,5 @@ +package v1 + +func (k *Klusterlet) GetResourceRequirement() *ResourceRequirement { + return k.Spec.ResourceRequirement +} diff --git a/vendor/open-cluster-management.io/api/operator/v1/type_resourcerequirement.go b/vendor/open-cluster-management.io/api/operator/v1/type_resourcerequirement.go new file mode 100644 index 000000000..e2f9ba6b3 --- /dev/null +++ b/vendor/open-cluster-management.io/api/operator/v1/type_resourcerequirement.go @@ -0,0 +1,28 @@ +package v1 + +import corev1 "k8s.io/api/core/v1" + +type ResourceRequirementAcquirer interface { + GetResourceRequirement() *ResourceRequirement +} + +// ResourceRequirement allow user override the default pod QoS classes +type ResourceRequirement struct { + // +kubebuilder:validation:Enum=Default;BestEffort;ResourceRequirement + // +kubebuilder:default:=Default + Type ResourceQosClass `json:"type"` + // ResourceRequirements defines resource requests and limits when Type is ResourceQosClassResourceRequirement + // +optional + ResourceRequirements *corev1.ResourceRequirements `json:"resourceRequirements,omitempty"` +} + +type ResourceQosClass string + +const ( + // Default use resource setting in the template file (with requests but no limits in the resources) + ResourceQosClassDefault ResourceQosClass = "Default" + // If all containers in the pod don't set resource request and limits, the pod is treated as BestEffort. + ResourceQosClassBestEffort ResourceQosClass = "BestEffort" + // Configurable resource requirements with requests and limits + ResourceQosClassResourceRequirement ResourceQosClass = "ResourceRequirement" +) diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go b/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go index ed97ffd4c..33b0804f9 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go +++ b/vendor/open-cluster-management.io/api/operator/v1/types_clustermanager.go @@ -70,6 +70,11 @@ type ClusterManagerSpec struct { // AddOnManagerConfiguration contains the configuration of addon manager // +optional AddOnManagerConfiguration *AddOnManagerConfiguration `json:"addOnManagerConfiguration,omitempty"` + + // ResourceRequirement specify QoS classes of deployments managed by clustermanager. + // It applies to all the containers in the deployments. 
+ // +optional + ResourceRequirement *ResourceRequirement `json:"resourceRequirement,omitempty"` } // NodePlacement describes node scheduling configuration for the pods. diff --git a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go index 2ae4bef9b..bef65984f 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go +++ b/vendor/open-cluster-management.io/api/operator/v1/types_klusterlet.go @@ -1,6 +1,8 @@ package v1 -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) // +genclient // +genclient:nonNamespaced @@ -87,27 +89,12 @@ type KlusterletSpec struct { // +optional HubApiServerHostAlias *HubApiServerHostAlias `json:"hubApiServerHostAlias,omitempty"` - // ResourceRequirement specify QoS classes of klusterlet deployment + // ResourceRequirement specify QoS classes of deployments managed by klusterlet. + // It applies to all the containers in the deployments. // +optional ResourceRequirement *ResourceRequirement `json:"resourceRequirement,omitempty"` } -type ResourceQosClass string - -const ( - // Default use resource setting in the template file - ResourceQosClassDefault ResourceQosClass = "Default" - // If all containers in the pod don't set resource request and limits, the pod is treated as BestEffort. - ResourceQosClassBestEffort ResourceQosClass = "BestEffort" -) - -// ResourceRequirement allow user override the default pod QoS classes -type ResourceRequirement struct { - // +kubebuilder:validation:Enum=Default;BestEffort - // +kubebuilder:default:=Default - Type ResourceQosClass `json:"type"` -} - // ServerURL represents the apiserver url and ca bundle that is accessible externally type ServerURL struct { // URL is the url of apiserver endpoint of the managed cluster. diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go index 0de27326b..aaddf5b36 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.deepcopy.go @@ -134,6 +134,11 @@ func (in *ClusterManagerSpec) DeepCopyInto(out *ClusterManagerSpec) { *out = new(AddOnManagerConfiguration) (*in).DeepCopyInto(*out) } + if in.ResourceRequirement != nil { + in, out := &in.ResourceRequirement, &out.ResourceRequirement + *out = new(ResourceRequirement) + (*in).DeepCopyInto(*out) + } return } @@ -353,7 +358,7 @@ func (in *KlusterletSpec) DeepCopyInto(out *KlusterletSpec) { if in.ResourceRequirement != nil { in, out := &in.ResourceRequirement, &out.ResourceRequirement *out = new(ResourceRequirement) - **out = **in + (*in).DeepCopyInto(*out) } return } @@ -504,6 +509,11 @@ func (in *RelatedResourceMeta) DeepCopy() *RelatedResourceMeta { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ResourceRequirement) DeepCopyInto(out *ResourceRequirement) { *out = *in + if in.ResourceRequirements != nil { + in, out := &in.ResourceRequirements, &out.ResourceRequirements + *out = new(corev1.ResourceRequirements) + (*in).DeepCopyInto(*out) + } return } diff --git a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go index 330df7997..8d8a14f20 100644 --- a/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go +++ b/vendor/open-cluster-management.io/api/operator/v1/zz_generated.swagger_doc_generated.go @@ -60,6 +60,7 @@ var map_ClusterManagerSpec = map[string]string{ "registrationConfiguration": "RegistrationConfiguration contains the configuration of registration", "workConfiguration": "WorkConfiguration contains the configuration of work", "addOnManagerConfiguration": "AddOnManagerConfiguration contains the configuration of addon manager", + "resourceRequirement": "ResourceRequirement specify QoS classes of deployments managed by clustermanager. It applies to all the containers in the deployments.", } func (ClusterManagerSpec) SwaggerDoc() map[string]string { @@ -213,7 +214,7 @@ var map_KlusterletSpec = map[string]string{ "registrationConfiguration": "RegistrationConfiguration contains the configuration of registration", "workConfiguration": "WorkConfiguration contains the configuration of work", "hubApiServerHostAlias": "HubApiServerHostAlias contains the host alias for hub api server. registration-agent and work-agent will use it to communicate with hub api server.", - "resourceRequirement": "ResourceRequirement specify QoS classes of klusterlet deployment", + "resourceRequirement": "ResourceRequirement specify QoS classes of deployments managed by klusterlet. It applies to all the containers in the deployments.", } func (KlusterletSpec) SwaggerDoc() map[string]string { @@ -244,14 +245,6 @@ func (RegistrationConfiguration) SwaggerDoc() map[string]string { return map_RegistrationConfiguration } -var map_ResourceRequirement = map[string]string{ - "": "ResourceRequirement allow user override the default pod QoS classes", -} - -func (ResourceRequirement) SwaggerDoc() map[string]string { - return map_ResourceRequirement -} - var map_ServerURL = map[string]string{ "": "ServerURL represents the apiserver url and ca bundle that is accessible externally", "url": "URL is the url of apiserver endpoint of the managed cluster.", diff --git a/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go b/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder/workbuilder.go similarity index 99% rename from vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go rename to vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder/workbuilder.go index d186d08ea..df16b2e12 100644 --- a/vendor/open-cluster-management.io/api/utils/work/v1/workbuilder/workbuilder.go +++ b/vendor/open-cluster-management.io/sdk-go/pkg/apis/work/v1/builder/workbuilder.go @@ -1,4 +1,4 @@ -package workbuilder +package builder import ( "context"
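Tying the ResourceRequirement changes together: the type now carries an optional corev1.ResourceRequirements payload, a third QoS class, and a ResourceRequirementAcquirer interface implemented by both Klusterlet and ClusterManager. A sketch of a Klusterlet overriding requests and limits (the quantities are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	operatorv1 "open-cluster-management.io/api/operator/v1"
)

func main() {
	klusterlet := &operatorv1.Klusterlet{
		Spec: operatorv1.KlusterletSpec{
			ResourceRequirement: &operatorv1.ResourceRequirement{
				// The new third enum value; Default and BestEffort behave as before.
				Type: operatorv1.ResourceQosClassResourceRequirement,
				ResourceRequirements: &corev1.ResourceRequirements{
					Requests: corev1.ResourceList{
						corev1.ResourceCPU:    resource.MustParse("50m"),
						corev1.ResourceMemory: resource.MustParse("64Mi"),
					},
					Limits: corev1.ResourceList{
						corev1.ResourceCPU:    resource.MustParse("200m"),
						corev1.ResourceMemory: resource.MustParse("256Mi"),
					},
				},
			},
		},
	}

	// The ResourceRequirementAcquirer interface added in this patch lets the
	// operator treat Klusterlet and ClusterManager uniformly.
	var acquirer operatorv1.ResourceRequirementAcquirer = klusterlet
	fmt.Println(acquirer.GetResourceRequirement().Type) // ResourceRequirement
}

On the YAML side the same setting is the spec.resourceRequirement stanza validated by the CRD schemas added earlier in this patch.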