Modify the wrong ConfigMap name in v1.13 node-agent-concurrency document.

Fix condition matching in resource modifier when there are multiple rules

Signed-off-by: lou <[email protected]>
blackpiglet committed May 14, 2024
1 parent 3c37c84 commit b0f96cd
Showing 4 changed files with 148 additions and 20 deletions.
1 change: 1 addition & 0 deletions changelogs/unreleased/7715-27149chen
@@ -0,0 +1 @@
+Fix condition matching in resource modifier when there are multiple rules
43 changes: 26 additions & 17 deletions internal/resourcemodifiers/resource_modifiers.go
@@ -86,8 +86,22 @@ func GetResourceModifiersFromConfig(cm *v1.ConfigMap) (*ResourceModifiers, error
 
 func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) []error {
 	var errs []error
+	origin := obj
+	// If there are more than one rules, we need to keep the original object for condition matching
+	if len(p.ResourceModifierRules) > 1 {
+		origin = obj.DeepCopy()
+	}
 	for _, rule := range p.ResourceModifierRules {
-		err := rule.apply(obj, groupResource, scheme, log)
+		matched, err := rule.match(origin, groupResource, log)
 		if err != nil {
 			errs = append(errs, err)
+			continue
+		} else if !matched {
+			continue
+		}
+
+		log.Infof("Applying resource modifier patch on %s/%s", origin.GetNamespace(), origin.GetName())
+		err = rule.applyPatch(obj, scheme, log)
+		if err != nil {
+			errs = append(errs, err)
 		}
@@ -96,59 +110,54 @@ func (p *ResourceModifiers) ApplyResourceModifierRules(obj *unstructured.Unstruc
 	return errs
 }

-func (r *ResourceModifierRule) apply(obj *unstructured.Unstructured, groupResource string, scheme *runtime.Scheme, log logrus.FieldLogger) error {
+func (r *ResourceModifierRule) match(obj *unstructured.Unstructured, groupResource string, log logrus.FieldLogger) (bool, error) {
 	ns := obj.GetNamespace()
 	if ns != "" {
 		namespaceInclusion := collections.NewIncludesExcludes().Includes(r.Conditions.Namespaces...)
 		if !namespaceInclusion.ShouldInclude(ns) {
-			return nil
+			return false, nil
 		}
 	}
 
 	g, err := glob.Compile(r.Conditions.GroupResource, '.')
 	if err != nil {
 		log.Errorf("Bad glob pattern of groupResource in condition, groupResource: %s, err: %s", r.Conditions.GroupResource, err)
-		return err
+		return false, err
 	}
 
 	if !g.Match(groupResource) {
-		return nil
+		return false, nil
 	}
 
 	if r.Conditions.ResourceNameRegex != "" {
 		match, err := regexp.MatchString(r.Conditions.ResourceNameRegex, obj.GetName())
 		if err != nil {
-			return errors.Errorf("error in matching regex %s", err.Error())
+			return false, errors.Errorf("error in matching regex %s", err.Error())
 		}
 		if !match {
-			return nil
+			return false, nil
 		}
 	}
 
 	if r.Conditions.LabelSelector != nil {
 		selector, err := metav1.LabelSelectorAsSelector(r.Conditions.LabelSelector)
 		if err != nil {
-			return errors.Errorf("error in creating label selector %s", err.Error())
+			return false, errors.Errorf("error in creating label selector %s", err.Error())
 		}
 		if !selector.Matches(labels.Set(obj.GetLabels())) {
-			return nil
+			return false, nil
 		}
 	}
 
 	match, err := matchConditions(obj, r.Conditions.Matches, log)
 	if err != nil {
-		return err
+		return false, err
 	} else if !match {
 		log.Info("Conditions do not match, skip it")
-		return nil
+		return false, nil
 	}
 
-	log.Infof("Applying resource modifier patch on %s/%s", obj.GetNamespace(), obj.GetName())
-	err = r.applyPatch(obj, scheme, log)
-	if err != nil {
-		return err
-	}
-	return nil
+	return true, nil
 }
 
 func matchConditions(u *unstructured.Unstructured, rules []MatchRule, _ logrus.FieldLogger) (bool, error) {
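A note on why the original object is kept: previously, each rule's conditions were checked against the object as already modified by earlier rules, so rules could chain unintentionally (e.g. a `standard => premium` rule followed by a `premium => gold` rule would turn a `standard` PVC into `gold`). After this change, every rule is matched against a deep copy of the unmodified object, and only the patches mutate `obj`. A minimal standalone Go sketch of the pattern, using simplified hypothetical types rather than Velero's actual API:

```go
package main

import "fmt"

// Rule is a simplified, hypothetical stand-in for Velero's
// ResourceModifierRule: it matches on a field value and rewrites it.
type Rule struct {
	MatchValue string
	NewValue   string
}

func main() {
	obj := map[string]string{"storageClassName": "standard"}
	rules := []Rule{
		{MatchValue: "standard", NewValue: "premium"},
		{MatchValue: "premium", NewValue: "gold"},
	}

	// Keep an unmodified copy so every rule's condition is evaluated
	// against the original object; this mirrors the obj.DeepCopy()
	// added in the fix.
	origin := map[string]string{}
	for k, v := range obj {
		origin[k] = v
	}

	for _, r := range rules {
		// Matching against origin instead of obj prevents rule chaining:
		// after the first patch, obj already says "premium", and matching
		// on obj (the old behavior) would let the second rule fire too.
		if origin["storageClassName"] == r.MatchValue {
			obj["storageClassName"] = r.NewValue
		}
	}

	fmt.Println(obj["storageClassName"]) // prints "premium", not "gold"
}
```

The new test cases in resource_modifiers_test.go below exercise exactly this scenario end-to-end.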
118 changes: 118 additions & 0 deletions internal/resourcemodifiers/resource_modifiers_test.go
@@ -456,6 +456,20 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
 		},
 	}
 
+	pvcGoldSc := &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"apiVersion": "v1",
+			"kind":       "PersistentVolumeClaim",
+			"metadata": map[string]interface{}{
+				"name":      "test-pvc",
+				"namespace": "foo",
+			},
+			"spec": map[string]interface{}{
+				"storageClassName": "gold",
+			},
+		},
+	}
+
 	deployNginxOneReplica := &unstructured.Unstructured{
 		Object: map[string]interface{}{
 			"apiVersion": "apps/v1",
@@ -679,6 +693,110 @@ func TestResourceModifiers_ApplyResourceModifierRules(t *testing.T) {
 			wantErr: false,
 			wantObj: pvcPremiumSc.DeepCopy(),
 		},
+		{
+			name: "pvc with standard storage class should be patched to premium, even when rules are [standard => premium, premium => gold]",
+			fields: fields{
+				Version: "v1",
+				ResourceModifierRules: []ResourceModifierRule{
+					{
+						Conditions: Conditions{
+							GroupResource:     "persistentvolumeclaims",
+							ResourceNameRegex: ".*",
+							Matches: []MatchRule{
+								{
+									Path:  "/spec/storageClassName",
+									Value: "standard",
+								},
+							},
+						},
+						Patches: []JSONPatch{
+							{
+								Operation: "replace",
+								Path:      "/spec/storageClassName",
+								Value:     "premium",
+							},
+						},
+					},
+					{
+						Conditions: Conditions{
+							GroupResource:     "persistentvolumeclaims",
+							ResourceNameRegex: ".*",
+							Matches: []MatchRule{
+								{
+									Path:  "/spec/storageClassName",
+									Value: "premium",
+								},
+							},
+						},
+						Patches: []JSONPatch{
+							{
+								Operation: "replace",
+								Path:      "/spec/storageClassName",
+								Value:     "gold",
+							},
+						},
+					},
+				},
+			},
+			args: args{
+				obj:           pvcStandardSc.DeepCopy(),
+				groupResource: "persistentvolumeclaims",
+			},
+			wantErr: false,
+			wantObj: pvcPremiumSc.DeepCopy(),
+		},
+		{
+			name: "pvc with standard storage class should be patched to gold, even when rules are [standard => premium, standard => gold]",
+			fields: fields{
+				Version: "v1",
+				ResourceModifierRules: []ResourceModifierRule{
+					{
+						Conditions: Conditions{
+							GroupResource:     "persistentvolumeclaims",
+							ResourceNameRegex: ".*",
+							Matches: []MatchRule{
+								{
+									Path:  "/spec/storageClassName",
+									Value: "standard",
+								},
+							},
+						},
+						Patches: []JSONPatch{
+							{
+								Operation: "replace",
+								Path:      "/spec/storageClassName",
+								Value:     "premium",
+							},
+						},
+					},
+					{
+						Conditions: Conditions{
+							GroupResource:     "persistentvolumeclaims",
+							ResourceNameRegex: ".*",
+							Matches: []MatchRule{
+								{
+									Path:  "/spec/storageClassName",
+									Value: "standard",
+								},
+							},
+						},
+						Patches: []JSONPatch{
+							{
+								Operation: "replace",
+								Path:      "/spec/storageClassName",
+								Value:     "gold",
+							},
+						},
+					},
+				},
+			},
+			args: args{
+				obj:           pvcStandardSc.DeepCopy(),
+				groupResource: "persistentvolumeclaims",
+			},
+			wantErr: false,
+			wantObj: pvcGoldSc.DeepCopy(),
+		},
 		{
 			name: "nginx deployment: 1 -> 2 replicas",
 			fields: fields{
6 changes: 3 additions & 3 deletions site/content/docs/v1.13/node-agent-concurrency.md
@@ -8,7 +8,7 @@ Varying from the data size, data complexity, resource availability, the tasks ma
 
 Node-agent concurrency configurations allow you to configure the concurrent number of node-agent loads per node. When the resources are sufficient in nodes, you can set a large concurrent number, so as to reduce the backup/restore time; otherwise, the concurrency should be reduced, otherwise, the backup/restore may encounter problems, i.e., time lagging, hang or OOM kill.
 
-To set Node-agent concurrency configurations, a configMap named ```node-agent-configs``` should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only.
+To set Node-agent concurrency configurations, a configMap named ```node-agent-config``` should be created manually. The configMap should be in the same namespace where Velero is installed. If multiple Velero instances are installed in different namespaces, there should be one configMap in each namespace which applies to node-agent in that namespace only.
 Node-agent server checks these configurations at startup time. Therefore, you could edit this configMap any time, but in order to make the changes effective, node-agent server needs to be restarted.
 
 ### Global concurrent number
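For context on the restart requirement in the hunk above: since node-agent only reads this configMap at startup, one way to pick up an edit is to restart its DaemonSet. A sketch, assuming the default DaemonSet name ```node-agent``` and the ```velero``` namespace (adjust both if your install differs):
```
kubectl -n velero rollout restart daemonset/node-agent
```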
@@ -32,7 +32,7 @@ At least one node is expected to have a label with the specified ```RuledConfigs
 If one node falls into more than one rules, e.g., if node1 also has the label ```beta.kubernetes.io/instance-type=Standard_B4ms```, the smallest number (3) will be used.
 
 ### Sample
-A sample of the complete ```node-agent-configs``` configMap is as below:
+A sample of the complete ```node-agent-config``` configMap is as below:
 ```json
 {
     "loadConcurrency": {
@@ -60,7 +60,7 @@ A sample of the complete ```node-agent-configs``` configMap is as below:
 ```
 To create the configMap, save something like the above sample to a json file and then run below command:
 ```
-kubectl create cm node-agent-configs -n velero --from-file=<json file name>
+kubectl create cm node-agent-config -n velero --from-file=<json file name>
 ```
```

