diff --git a/changelogs/unreleased/7715-27149chen b/changelogs/unreleased/7715-27149chen
new file mode 100644
index 0000000000..be2255c512
--- /dev/null
+++ b/changelogs/unreleased/7715-27149chen
@@ -0,0 +1 @@
+Fix condition matching in resource modifier when there are multiple rules
\ No newline at end of file
diff --git a/internal/resourcemodifiers/resource_modifiers.go b/internal/resourcemodifiers/resource_modifiers.go
index c11b99cd8b..e50e80d1cd 100644
--- a/internal/resourcemodifiers/resource_modifiers.go
+++ b/internal/resourcemodifiers/resource_modifiers.go
@@ -86,8 +86,22 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error
 
 func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) []error {
 	var errs []error
+	origin := obj
+	// If there is more than one rule, keep a copy of the original object for condition matching
+	if len(p.ResourceModifierRules) > 1 {
+		origin = obj.DeepCopy()
+	}
 	for _, rule := range p.ResourceModifierRules {
-		err := rule.apply(obj, groupResource, scheme, log)
+		matched, err := rule.match(origin, groupResource, log)
+		if err != nil {
+			errs = append(errs, err)
+			continue
+		} else if !matched {
+			continue
+		}
+
+		log.Infof("Applying resource modifier patch on %s/%s", origin.GetNamespace(), origin.GetName())
+		err = rule.applyPatch(obj, scheme, log)
 		if err != nil {
 			errs = append(errs, err)
 		}
@@ -96,59 +110,54 @@ func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstruc
 	return errs
 }
 
-func (r *ResourceModifierRule) apply(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) error {
+func (r *ResourceModifierRule) match(obj *unstructured.Unstructured, groupResource string, log logrus.FieldLogger) (bool, error) {
 	ns := obj.GetNamespace()
 	if ns != "" {
 		namespaceInclusion := collections.NewIncludesExcludes().Includes(r.Conditions.Namespaces...)
 		if !namespaceInclusion.ShouldInclude(ns) {
-			return nil
+			return false, nil
 		}
 	}
 
 	g, err := glob.Compile(r.Conditions.GroupResource, '.')
 	if err != nil {
 		log.Errorf("Bad glob pattern of groupResource in condition, groupResource: %s, err: %s", r.Conditions.GroupResource, err)
-		return err
+		return false, err
 	}
 
 	if !g.Match(groupResource) {
-		return nil
+		return false, nil
 	}
 
 	if r.Conditions.ResourceNameRegex != "" {
 		match, err := regexp.MatchString(r.Conditions.ResourceNameRegex, obj.GetName())
 		if err != nil {
-			return errors.Errorf("error in matching regex %s", err.Error())
+			return false, errors.Errorf("error in matching regex %s", err.Error())
 		}
 		if !match {
-			return nil
+			return false, nil
 		}
 	}
 
 	if r.Conditions.LabelSelector != nil {
 		selector, err := metav1.LabelSelectorAsSelector(r.Conditions.LabelSelector)
 		if err != nil {
-			return errors.Errorf("error in creating label selector %s", err.Error())
+			return false, errors.Errorf("error in creating label selector %s", err.Error())
 		}
 		if !selector.Matches(labels.Set(obj.GetLabels())) {
-			return nil
+			return false, nil
 		}
 	}
 
 	match, err := matchConditions(obj, r.Conditions.Matches, log)
 	if err != nil {
-		return err
+		return false, err
 	} else if !match {
 		log.Info("Conditions do not match, skip it")
-		return nil
+		return false, nil
 	}
 
-	log.Infof("Applying resource modifier patch on %s/%s", obj.GetNamespace(), obj.GetName())
-	err = r.applyPatch(obj, scheme, log)
-	if err != nil {
-		return err
-	}
-	return nil
+	return true, nil
 }
 
 func matchConditions(u *unstructured.Unstructured, rules []MatchRule, _ logrus.FieldLogger) (bool, error) {
diff --git a/internal/resourcemodifiers/resource_modifiers_test.go b/internal/resourcemodifiers/resource_modifiers_test.go
index 8b4384ef1e..aff80b2daf 100644
--- a/internal/resourcemodifiers/resource_modifiers_test.go
+++ b/internal/resourcemodifiers/resource_modifiers_test.go
@@ -456,6 +456,20 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
 		},
 	}
 
+	pvcGoldSc := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"apiVersion": "v1",
+			"kind":       "PersistentVolumeClaim",
+			"metadata": map[string]interface{}{
+				"name":      "test-pvc",
+				"namespace": "foo",
+			},
+			"spec": map[string]interface{}{
+				"storageClassName": "gold",
+			},
+		},
+	}
+
 	deployNginxOneReplica := &unstructured.Unstructured{
 		Object: map[string]interface{}{
 			"apiVersion": "apps/v1",
@@ -679,6 +693,110 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
 			wantErr: false,
 			wantObj: pvcPremiumSc.DeepCopy(),
 		},
+		{
+			name: "pvc with standard storage class should be patched to premium, even when rules are [standard => premium, premium => gold]",
+			fields: fields{
+				Version: "v1",
+				ResourceModifierRules: []ResourceModifierRule{
+					{
+						Conditions: Conditions{
+							GroupResource:     "persistentvolumeclaims",
+							ResourceNameRegex: ".*",
+							Matches: []MatchRule{
+								{
+									Path:  "/spec/storageClassName",
+									Value: "standard",
+								},
+							},
+						},
+						Patches: []JSONPatch{
+							{
+								Operation: "replace",
+								Path:      "/spec/storageClassName",
+								Value:     "premium",
+							},
+						},
+					},
+					{
+						Conditions: Conditions{
+							GroupResource:     "persistentvolumeclaims",
+							ResourceNameRegex: ".*",
+							Matches: []MatchRule{
+								{
+									Path:  "/spec/storageClassName",
+									Value: "premium",
+								},
+							},
+						},
+						Patches: []JSONPatch{
+							{
+								Operation: "replace",
+								Path:      "/spec/storageClassName",
+								Value:     "gold",
+							},
+						},
+					},
+				},
+			},
+			args: args{
+				obj:           pvcStandardSc.DeepCopy(),
+				groupResource: "persistentvolumeclaims",
+			},
+			wantErr: false,
+			wantObj: pvcPremiumSc.DeepCopy(),
+		},
+		{
name: "pvc with standard storage class should be patched to gold, even when rules are [standard => premium, standard => gold]", + fields: fields{ + Version: "v1", + ResourceModifierRules: []ResourceModifierRule{ + { + Conditions: Conditions{ + GroupResource: "persistentvolumeclaims", + ResourceNameRegex: ".*", + Matches: []MatchRule{ + { + Path: "/spec/storageClassName", + Value: "standard", + }, + }, + }, + Patches: []JSONPatch{ + { + Operation: "replace", + Path: "/spec/storageClassName", + Value: "premium", + }, + }, + }, + { + Conditions: Conditions{ + GroupResource: "persistentvolumeclaims", + ResourceNameRegex: ".*", + Matches: []MatchRule{ + { + Path: "/spec/storageClassName", + Value: "standard", + }, + }, + }, + Patches: []JSONPatch{ + { + Operation: "replace", + Path: "/spec/storageClassName", + Value: "gold", + }, + }, + }, + }, + }, + args: args{ + obj: pvcStandardSc.DeepCopy(), + groupResource: "persistentvolumeclaims", + }, + wantErr: false, + wantObj: pvcGoldSc.DeepCopy(), + }, { name: "nginx deployment: 1 -> 2 replicas", fields: fields{ diff --git a/site/content/docs/v1.13/node-agent-concurrency.md b/site/content/docs/v1.13/node-agent-concurrency.md index 20564eac58..9a7e1b255e 100644 --- a/site/content/docs/v1.13/node-agent-concurrency.md +++ b/site/content/docs/v1.13/node-agent-concurrency.md @@ -8,7 +8,7 @@ Varying from the data size, data complexity, resource availability, the tasks ma Node-agent concurrency configurations allow you to configure the concurrent number of node-agent loads per node. When the resources are sufficient in nodes, you can set a large concurrent number, so as to reduce the backup/restore time; otherwise, the concurrency should be reduced, otherwise, the backup/restore may encounter problems, i.e., time lagging, hang or OOM kill. -To set Node-agent concurrency configurations, a configMap named ```node-agent-configs``` should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. +To set Node-agent concurrency configurations, a configMap named ```node-agent-config``` should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only. Node-agent server checks these configurations at startup time. Therefore, you could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted. ### Global concurrent number @@ -32,7 +32,7 @@ At least one node is expected to have a label with the specified ```RuledConfigs If one node falls into more than one rules, e.g., if node1 also has the label ```beta.kubernetes.io/instance-type=Standard_B4ms```, the smallest number (3) will be used. ### Sample -A sample of the complete ```node-agent-configs``` configMap is as below: +A sample of the complete ```node-agent-config``` configMap is as below: ```json { "loadConcurrency": { @@ -60,7 +60,7 @@ A sample of the complete ```node-agent-configs``` configMap is as below: ``` To create the configMap, save something like the above sample to a json file and then run below command: ``` -kubectl create cm node-agent-configs -n velero --from-file= +kubectl create cm node-agent-config -n velero --from-file= ```