fix: wait to reach failureThreshold before fallback
Signed-off-by: Youssef Rabie <[email protected]>
y-rabie committed Feb 4, 2025
1 parent d76aa17 commit 58e7586
Showing 3 changed files with 6 additions and 87 deletions.
7 changes: 0 additions & 7 deletions pkg/scaling/executor/scale_executor.go
@@ -125,10 +125,3 @@ func (e *scaleExecutor) setActiveCondition(ctx context.Context, logger logr.Logg
 	}
 	return e.setCondition(ctx, logger, object, status, reason, message, active)
 }
-
-func (e *scaleExecutor) setFallbackCondition(ctx context.Context, logger logr.Logger, object interface{}, status metav1.ConditionStatus, reason string, message string) error {
-	fallback := func(conditions kedav1alpha1.Conditions, status metav1.ConditionStatus, reason string, message string) {
-		conditions.SetFallbackCondition(status, reason, message)
-	}
-	return e.setCondition(ctx, logger, object, status, reason, message, fallback)
-}
24 changes: 6 additions & 18 deletions pkg/scaling/executor/scale_scaledobjects.go
@@ -161,12 +161,12 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
 	// isActive == false
 	switch {
 	case isError && scaledObject.Spec.Fallback != nil && scaledObject.Spec.Fallback.Replicas != 0:
-		// there are no active triggers, but a scaler responded with an error
-		// AND
-		// there is a fallback replicas count defined
-
-		// Scale to the fallback replicas count
-		e.doFallbackScaling(ctx, scaledObject, currentScale, logger, currentReplicas)
+		// We need to keep this switch case even if only for logging.
+		// Otherwise, with `minReplicas=0`, we would fall into the third case expression,
+		// which would scale the target to 0. Scaling the target to 0 means the HPA will not
+		// scale it to fallback.replicas after fallback.failureThreshold has passed, because of:
+		// https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#implicit-maintenance-mode-deactivation
+		logger.V(1).Info("ScaleTarget will fallback to Fallback.Replicas after Fallback.FailureThreshold")
 	case isError && scaledObject.Spec.Fallback == nil:
 		// there are no active triggers, but a scaler responded with an error
 		// AND
@@ -230,18 +230,6 @@ func (e *scaleExecutor) RequestScale(ctx context.Context, scaledObject *kedav1al
 	}
 }
 
-func (e *scaleExecutor) doFallbackScaling(ctx context.Context, scaledObject *kedav1alpha1.ScaledObject, currentScale *autoscalingv1.Scale, logger logr.Logger, currentReplicas int32) {
-	_, err := e.updateScaleOnScaleTarget(ctx, scaledObject, currentScale, scaledObject.Spec.Fallback.Replicas)
-	if err == nil {
-		logger.Info("Successfully set ScaleTarget replicas count to ScaledObject fallback.replicas",
-			"Original Replicas Count", currentReplicas,
-			"New Replicas Count", scaledObject.Spec.Fallback.Replicas)
-	}
-	if e := e.setFallbackCondition(ctx, logger, scaledObject, metav1.ConditionTrue, "FallbackExists", "At least one trigger is falling back on this scaled object"); e != nil {
-		logger.Error(e, "Error setting fallback condition")
-	}
-}
-
 // An object will be scaled down to 0 only if it's passed its cooldown period
 // or if LastActiveTime is nil
 func (e *scaleExecutor) scaleToZeroOrIdle(ctx context.Context, logger logr.Logger, scaledObject *kedav1alpha1.ScaledObject, scale *autoscalingv1.Scale) {
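
Note on the change above: the executor no longer writes fallback.replicas to the scale subresource itself. It leaves the current replica count untouched (and only logs), so the HPA can raise the target to fallback.replicas once fallback.failureThreshold consecutive scaler errors have accumulated; scaling to 0 first would disable that path entirely, per the implicit maintenance-mode deactivation rule linked in the new comment. The sketch below is illustrative only (not KEDA's actual code, and the names are made up) — it shows why reporting a synthetic metric of targetAverageValue * fallback.Replicas is enough for the HPA's own formula to land on exactly fallback.Replicas, provided the workload still has a non-zero replica count.

	// Illustrative sketch only — not KEDA's implementation. Once failureThreshold
	// consecutive scaler errors occur, a synthetic metric value can be reported so
	// that the HPA's standard formula
	//   desiredReplicas = ceil(metricValue / targetAverageValue)
	// yields exactly fallback.Replicas. If the target had already been scaled to 0,
	// the HPA would skip it (implicit maintenance-mode deactivation) and this value
	// would never be applied.
	package main

	import (
		"fmt"
		"math"
	)

	// syntheticFallbackMetric returns the metric value to report for a failing
	// scaler: targetAverageValue * fallbackReplicas.
	func syntheticFallbackMetric(targetAverageValue float64, fallbackReplicas int32) float64 {
		return targetAverageValue * float64(fallbackReplicas)
	}

	func main() {
		targetAverageValue := 10.0   // hypothetical target from the metric spec
		fallbackReplicas := int32(5) // fallback.replicas, as in the removed test below

		metric := syntheticFallbackMetric(targetAverageValue, fallbackReplicas)
		desired := int32(math.Ceil(metric / targetAverageValue))
		fmt.Printf("metric=%.0f -> desiredReplicas=%d\n", metric, desired) // metric=50 -> desiredReplicas=5
	}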
62 changes: 0 additions & 62 deletions pkg/scaling/executor/scale_scaledobjects_test.go
@@ -33,68 +33,6 @@ import (
 	"github.com/kedacore/keda/v2/pkg/mock/mock_scale"
 )
 
-func TestScaleToFallbackReplicasWhenNotActiveAndIsError(t *testing.T) {
-	ctrl := gomock.NewController(t)
-	client := mock_client.NewMockClient(ctrl)
-	recorder := record.NewFakeRecorder(1)
-	mockScaleClient := mock_scale.NewMockScalesGetter(ctrl)
-	mockScaleInterface := mock_scale.NewMockScaleInterface(ctrl)
-	statusWriter := mock_client.NewMockStatusWriter(ctrl)
-
-	scaleExecutor := NewScaleExecutor(client, mockScaleClient, nil, recorder)
-
-	scaledObject := v1alpha1.ScaledObject{
-		ObjectMeta: v1.ObjectMeta{
-			Name:      "name",
-			Namespace: "namespace",
-		},
-		Spec: v1alpha1.ScaledObjectSpec{
-			ScaleTargetRef: &v1alpha1.ScaleTarget{
-				Name: "name",
-			},
-			Fallback: &v1alpha1.Fallback{
-				FailureThreshold: 3,
-				Replicas:         5,
-			},
-		},
-		Status: v1alpha1.ScaledObjectStatus{
-			ScaleTargetGVKR: &v1alpha1.GroupVersionKindResource{
-				Group: "apps",
-				Kind:  "Deployment",
-			},
-		},
-	}
-
-	scaledObject.Status.Conditions = *v1alpha1.GetInitializedConditions()
-
-	numberOfReplicas := int32(2)
-
-	client.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any()).SetArg(2, appsv1.Deployment{
-		Spec: appsv1.DeploymentSpec{
-			Replicas: &numberOfReplicas,
-		},
-	})
-
-	scale := &autoscalingv1.Scale{
-		Spec: autoscalingv1.ScaleSpec{
-			Replicas: numberOfReplicas,
-		},
-	}
-
-	mockScaleClient.EXPECT().Scales(gomock.Any()).Return(mockScaleInterface).Times(2)
-	mockScaleInterface.EXPECT().Get(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(scale, nil)
-	mockScaleInterface.EXPECT().Update(gomock.Any(), gomock.Any(), gomock.Eq(scale), gomock.Any())
-
-	client.EXPECT().Status().Times(2).Return(statusWriter)
-	statusWriter.EXPECT().Patch(gomock.Any(), gomock.Any(), gomock.Any()).Times(2)
-
-	scaleExecutor.RequestScale(context.TODO(), &scaledObject, false, true, &ScaleExecutorOptions{})
-
-	assert.Equal(t, int32(5), scale.Spec.Replicas)
-	condition := scaledObject.Status.Conditions.GetFallbackCondition()
-	assert.Equal(t, true, condition.IsTrue())
-}
-
 func TestScaleToMinReplicasWhenNotActive(t *testing.T) {
	ctrl := gomock.NewController(t)
	client := mock_client.NewMockClient(ctrl)
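
The deleted test pinned down the old behavior: RequestScale wrote fallback.replicas (5) to the scale subresource immediately on a scaler error. Under the new behavior the executor leaves the replica count alone. A replacement assertion — hypothetical, not part of this commit — could reuse the mock setup from the removed test above, after dropping the mock expectations that no longer apply (the Update call and the extra Scales/Status calls), and assert that nothing changed:

	// Hypothetical sketch, not part of this commit: with fallback configured and
	// a scaler error, the executor must not touch the scale subresource; the HPA
	// applies fallback.replicas only after failureThreshold is reached.
	scaleExecutor.RequestScale(context.TODO(), &scaledObject, false, true, &ScaleExecutorOptions{})

	assert.Equal(t, numberOfReplicas, scale.Spec.Replicas) // still 2, not fallback's 5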
