From 51cc3e1987bbdcc73359ce1db0a47167c01334d1 Mon Sep 17 00:00:00 2001 From: liubo02 Date: Fri, 3 Jan 2025 17:37:53 +0800 Subject: [PATCH 1/4] feat(group): move tiflashgroup to v3 and extract some common tasks Signed-off-by: liubo02 --- pkg/controllers/common/interfaces.go | 30 + pkg/controllers/common/interfaces_test.go | 50 ++ pkg/controllers/common/resource.go | 38 +- pkg/controllers/common/resource_test.go | 6 +- pkg/controllers/common/revision.go | 127 ++++ pkg/controllers/common/revision_test.go | 172 +++++ pkg/controllers/common/task.go | 157 +---- pkg/controllers/common/task_finalizer.go | 82 +++ pkg/controllers/common/task_finalizer_test.go | 388 ++++++++++++ pkg/controllers/common/task_status.go | 201 ++++++ pkg/controllers/common/task_status_test.go | 508 +++++++++++++++ pkg/controllers/common/task_test.go | 587 ------------------ pkg/controllers/pd/tasks/state.go | 4 +- pkg/controllers/pdgroup/builder.go | 3 +- pkg/controllers/pdgroup/tasks/ctx.go | 4 - pkg/controllers/pdgroup/tasks/state.go | 53 +- pkg/controllers/pdgroup/tasks/status.go | 115 ---- pkg/controllers/pdgroup/tasks/status_test.go | 312 ---------- pkg/controllers/pdgroup/tasks/updater.go | 68 +- pkg/controllers/pdgroup/tasks/updater_test.go | 92 +-- pkg/controllers/tidbgroup/builder.go | 5 +- pkg/controllers/tidbgroup/tasks/ctx.go | 4 - pkg/controllers/tidbgroup/tasks/state.go | 50 +- pkg/controllers/tidbgroup/tasks/status.go | 86 --- .../tidbgroup/tasks/status_test.go | 276 -------- pkg/controllers/tidbgroup/tasks/updater.go | 56 +- .../tidbgroup/tasks/updater_test.go | 74 +-- pkg/controllers/tiflashgroup/builder.go | 56 ++ pkg/controllers/tiflashgroup/controller.go | 20 +- pkg/controllers/tiflashgroup/tasks/ctx.go | 104 +--- .../tiflashgroup/tasks/finalizer.go | 81 --- pkg/controllers/tiflashgroup/tasks/state.go | 135 ++++ pkg/controllers/tiflashgroup/tasks/status.go | 94 --- pkg/controllers/tiflashgroup/tasks/svc.go | 43 +- pkg/controllers/tiflashgroup/tasks/updater.go | 169 ++--- 
pkg/controllers/tikvgroup/builder.go | 4 +- pkg/controllers/tikvgroup/tasks/ctx.go | 4 - pkg/controllers/tikvgroup/tasks/state.go | 50 +- pkg/controllers/tikvgroup/tasks/status.go | 110 ---- .../tikvgroup/tasks/status_test.go | 276 -------- pkg/controllers/tikvgroup/tasks/updater.go | 57 +- .../tikvgroup/tasks/updater_test.go | 73 +-- pkg/runtime/group.go | 12 +- pkg/runtime/instance.go | 3 + pkg/runtime/pd.go | 57 +- pkg/runtime/tidb.go | 57 +- pkg/runtime/tiflash.go | 57 +- pkg/runtime/tikv.go | 57 +- pkg/updater/actor.go | 32 - pkg/updater/builder.go | 2 +- pkg/updater/hook.go | 51 ++ pkg/updater/policy/keep.go | 34 - pkg/updater/policy/topology.go | 10 +- pkg/utils/k8s/revision/controller_revision.go | 61 +- .../k8s/revision/controller_revision_test.go | 10 +- tests/e2e/data/data.go | 7 +- tests/e2e/data/pd.go | 2 +- tests/e2e/pd/pd.go | 12 +- tests/e2e/utils/waiter/pod.go | 14 +- .../controller/history/controller_history.go | 4 +- 60 files changed, 2470 insertions(+), 2836 deletions(-) create mode 100644 pkg/controllers/common/revision.go create mode 100644 pkg/controllers/common/revision_test.go create mode 100644 pkg/controllers/common/task_finalizer.go create mode 100644 pkg/controllers/common/task_finalizer_test.go create mode 100644 pkg/controllers/common/task_status.go create mode 100644 pkg/controllers/common/task_status_test.go delete mode 100644 pkg/controllers/pdgroup/tasks/status.go delete mode 100644 pkg/controllers/pdgroup/tasks/status_test.go delete mode 100644 pkg/controllers/tidbgroup/tasks/status_test.go create mode 100644 pkg/controllers/tiflashgroup/builder.go delete mode 100644 pkg/controllers/tiflashgroup/tasks/finalizer.go create mode 100644 pkg/controllers/tiflashgroup/tasks/state.go delete mode 100644 pkg/controllers/tiflashgroup/tasks/status.go delete mode 100644 pkg/controllers/tikvgroup/tasks/status.go delete mode 100644 pkg/controllers/tikvgroup/tasks/status_test.go create mode 100644 pkg/updater/hook.go delete mode 100644 
pkg/updater/policy/keep.go diff --git a/pkg/controllers/common/interfaces.go b/pkg/controllers/common/interfaces.go index 8055ce4519..822dc43736 100644 --- a/pkg/controllers/common/interfaces.go +++ b/pkg/controllers/common/interfaces.go @@ -63,6 +63,10 @@ type ( } ) +type GroupStateInitializer[G runtime.GroupSet] interface { + GroupInitializer() ResourceInitializer[G] +} + type GroupState[G runtime.Group] interface { Group() G } @@ -79,6 +83,32 @@ type GroupAndInstanceSliceState[ InstanceSliceState[I] } +type RevisionStateInitializer interface { + RevisionInitializer() RevisionInitializer +} + +type RevisionState interface { + Revision() (update, current string, collisionCount int32) +} + +type GroupAndInstanceSliceAndRevisionState[ + G runtime.Group, + I runtime.Instance, +] interface { + GroupState[G] + InstanceSliceState[I] + RevisionState +} + +type ClusterAndGroupAndInstanceSliceState[ + G runtime.Group, + I runtime.Instance, +] interface { + ClusterState + GroupState[G] + InstanceSliceState[I] +} + type ( PDGroupStateInitializer interface { PDGroupInitializer() PDGroupInitializer diff --git a/pkg/controllers/common/interfaces_test.go b/pkg/controllers/common/interfaces_test.go index 44452c4852..165b15e739 100644 --- a/pkg/controllers/common/interfaces_test.go +++ b/pkg/controllers/common/interfaces_test.go @@ -148,3 +148,53 @@ func FakeGroupAndInstanceSliceState[ InstanceSliceState: FakeInstanceSliceState[RI](s), } } + +type fakeRevisionState struct { + updateRevision string + currentRevision string + collisionCount int32 +} + +func (f *fakeRevisionState) Set(update, current string, collisionCount int32) { + f.updateRevision = update + f.currentRevision = current + f.collisionCount = collisionCount +} + +func (f *fakeRevisionState) Revision() (update, current string, collisionCount int32) { + return f.updateRevision, f.currentRevision, f.collisionCount +} + +func FakeRevisionState(update, current string, collisionCount int32) RevisionState { + return 
&fakeRevisionState{ + updateRevision: update, + currentRevision: current, + collisionCount: collisionCount, + } +} + +type fakeGroupAndInstanceSliceAndRevisionState[ + RG runtime.Group, + RI runtime.Instance, +] struct { + GroupState[RG] + InstanceSliceState[RI] + RevisionState +} + +func FakeGroupAndInstanceSliceAndRevisionState[ + RG runtime.Group, + RI runtime.Instance, +]( + update string, + current string, + collisionCount int32, + g RG, + s ...RI, +) GroupAndInstanceSliceAndRevisionState[RG, RI] { + return &fakeGroupAndInstanceSliceAndRevisionState[RG, RI]{ + GroupState: FakeGroupState[RG](g), + InstanceSliceState: FakeInstanceSliceState[RI](s), + RevisionState: FakeRevisionState(update, current, collisionCount), + } +} diff --git a/pkg/controllers/common/resource.go b/pkg/controllers/common/resource.go index 2f0c419e53..05cd198a3e 100644 --- a/pkg/controllers/common/resource.go +++ b/pkg/controllers/common/resource.go @@ -14,6 +14,8 @@ package common +import "github.com/pingcap/tidb-operator/pkg/client" + type Setter[T any] interface { Set(T) } @@ -40,13 +42,43 @@ type ListOptions interface { LabelsOption } -type NameFunc func() string +type CurrentRevisionOption interface { + CurrentRevision() string +} + +type CollisionCountOption interface { + CollisionCount() *int32 +} + +type ParentOption interface { + Parent() client.Object +} + +type ( + Lazy[T any] func() T +) + +func (f Lazy[T]) Namespace() T { + return f() +} + +func (f Lazy[T]) Name() T { + return f() +} + +func (f Lazy[T]) CurrentRevision() T { + return f() +} + +func (f Lazy[T]) CollisionCount() T { + return f() +} -func (f NameFunc) Namespace() string { +func (f Lazy[T]) Labels() T { return f() } -func (f NameFunc) Name() string { +func (f Lazy[T]) Parent() T { return f() } diff --git a/pkg/controllers/common/resource_test.go b/pkg/controllers/common/resource_test.go index d85d527c4b..8a4129b76d 100644 --- a/pkg/controllers/common/resource_test.go +++ 
b/pkg/controllers/common/resource_test.go @@ -42,8 +42,8 @@ func TestResource(t *testing.T) { }, { desc: "use name func", - ns: NameFunc(func() string { return "aaa" }), - name: NameFunc(func() string { return "bbb" }), + ns: Lazy[string](func() string { return "aaa" }), + name: Lazy[string](func() string { return "bbb" }), obj: 42, expectedNs: "aaa", expectedName: "bbb", @@ -96,7 +96,7 @@ func TestResourceSlice(t *testing.T) { }, { desc: "use func", - ns: NameFunc(func() string { return "aaa" }), + ns: Lazy[string](func() string { return "aaa" }), labels: LabelsFunc(func() map[string]string { return map[string]string{"xxx": "yyy"} }), objs: []*int{ptr.To(42)}, expectedNs: "aaa", diff --git a/pkg/controllers/common/revision.go b/pkg/controllers/common/revision.go new file mode 100644 index 0000000000..5cd8aa9302 --- /dev/null +++ b/pkg/controllers/common/revision.go @@ -0,0 +1,127 @@ +package common + +import ( + "context" + + "k8s.io/apimachinery/pkg/labels" + + "github.com/pingcap/tidb-operator/pkg/client" + revisionutil "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" + "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" +) + +type RevisionSetter interface { + Set(update, current string, collisionCount int32) +} + +type RevisionSetterFunc func(update, current string, collisionCount int32) + +func (f RevisionSetterFunc) Set(update, current string, collisionCount int32) { + f(update, current, collisionCount) +} + +type Revision interface { + WithCurrentRevision(CurrentRevisionOption) Revision + WithCollisionCount(CollisionCountOption) Revision + WithParent(ParentOption) Revision + WithLabels(LabelsOption) Revision + Initializer() RevisionInitializer +} + +type RevisionInitializer interface { + LabelsOption + ParentOption + CurrentRevision() string + CollisionCount() *int32 + RevisionSetter +} + +type revision struct { + RevisionSetter + + parent ParentOption + 
currentRevision CurrentRevisionOption + collisionCount CollisionCountOption + labels LabelsOption +} + +func NewRevision(setter RevisionSetter) Revision { + return &revision{ + RevisionSetter: setter, + } +} + +func (r *revision) WithParent(parent ParentOption) Revision { + r.parent = parent + return r +} + +func (r *revision) WithCurrentRevision(rev CurrentRevisionOption) Revision { + r.currentRevision = rev + return r +} + +func (r *revision) WithCollisionCount(collisionCount CollisionCountOption) Revision { + r.collisionCount = collisionCount + return r +} + +func (r *revision) WithLabels(ls LabelsOption) Revision { + r.labels = ls + return r +} + +func (r *revision) Initializer() RevisionInitializer { + return r +} + +func (r *revision) Parent() client.Object { + return r.parent.Parent() +} + +func (r *revision) CurrentRevision() string { + return r.currentRevision.CurrentRevision() +} + +func (r *revision) CollisionCount() *int32 { + return r.collisionCount.CollisionCount() +} + +func (r *revision) Labels() map[string]string { + return r.labels.Labels() +} + +func TaskRevision(state RevisionStateInitializer, c client.Client) task.Task { + w := state.RevisionInitializer() + return task.NameTaskFunc("ContextRevision", func(ctx context.Context) task.Result { + historyCli := history.NewClient(c) + parent := w.Parent() + + lbs := w.Labels() + selector := labels.SelectorFromSet(labels.Set(lbs)) + + revisions, err := historyCli.ListControllerRevisions(parent, selector) + if err != nil { + return task.Fail().With("cannot list controller revisions: %w", err) + } + history.SortControllerRevisions(revisions) + + // Get the current(old) and update(new) ControllerRevisions. 
+ currentRevision, updateRevision, collisionCount, err := revisionutil.GetCurrentAndUpdate( + ctx, + parent, + lbs, + revisions, + historyCli, + w.CurrentRevision(), + w.CollisionCount(), + ) + if err != nil { + return task.Fail().With("cannot get revisions: %w", err) + } + + w.Set(updateRevision.Name, currentRevision.Name, collisionCount) + return task.Complete().With("revision is set") + }) +} diff --git a/pkg/controllers/common/revision_test.go b/pkg/controllers/common/revision_test.go new file mode 100644 index 0000000000..b8889ab8ba --- /dev/null +++ b/pkg/controllers/common/revision_test.go @@ -0,0 +1,172 @@ +package common + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +type fakeRevisionStateInitializer struct { + fakeRevisionState + + currentRevision CurrentRevisionOption + collisionCount CollisionCountOption + parent ParentOption + labels LabelsOption +} + +func (f *fakeRevisionStateInitializer) RevisionInitializer() RevisionInitializer { + return NewRevision(&f.fakeRevisionState). + WithCurrentRevision(f.currentRevision). + WithCollisionCount(f.collisionCount). + WithParent(f.parent). + WithLabels(f.labels). 
+ Initializer() +} + +const ( + fakeOldRevision = "aaa-c9f48df69" + fakeNewRevision = "aaa-c9f48df68" +) + +func TestTaskRevision(t *testing.T) { + cases := []struct { + desc string + state *fakeRevisionStateInitializer + objs []client.Object + + expectedResult task.Status + expectedUpdateRevision string + expectedCurrentRevision string + expectedCollisionCount int32 + }{ + { + desc: "no revisions", + state: &fakeRevisionStateInitializer{ + currentRevision: Lazy[string](func() string { + return "" + }), + collisionCount: Lazy[*int32](func() *int32 { + return nil + }), + parent: Lazy[client.Object](func() client.Object { + return fake.FakeObj("aaa", fake.Label[v1alpha1.PDGroup]("aaa", "bbb")) + }), + labels: Labels{ + "ccc": "ddd", + }, + }, + expectedUpdateRevision: fakeOldRevision, + expectedCurrentRevision: fakeOldRevision, + }, + { + desc: "has a revision", + state: &fakeRevisionStateInitializer{ + currentRevision: Lazy[string](func() string { + return "xxx" + }), + collisionCount: Lazy[*int32](func() *int32 { + return nil + }), + parent: Lazy[client.Object](func() client.Object { + return fake.FakeObj("aaa", fake.Label[v1alpha1.PDGroup]("aaa", "bbb")) + }), + labels: Labels{ + "ccc": "ddd", + }, + }, + objs: []client.Object{ + fake.FakeObj("xxx", fake.Label[appsv1.ControllerRevision]("ccc", "ddd")), + }, + expectedUpdateRevision: fakeOldRevision, + expectedCurrentRevision: "xxx", + }, + { + desc: "has a coflict revision", + state: &fakeRevisionStateInitializer{ + currentRevision: Lazy[string](func() string { + return fakeOldRevision + }), + collisionCount: Lazy[*int32](func() *int32 { + return nil + }), + parent: Lazy[client.Object](func() client.Object { + return fake.FakeObj("aaa", fake.Label[v1alpha1.PDGroup]("aaa", "bbb")) + }), + labels: Labels{ + "ccc": "ddd", + }, + }, + objs: []client.Object{ + fake.FakeObj(fakeOldRevision, fake.Label[appsv1.ControllerRevision]("ccc", "ddd")), + }, + expectedUpdateRevision: fakeNewRevision, + expectedCurrentRevision: 
fakeOldRevision, + expectedCollisionCount: 1, + }, + { + desc: "has two coflict revision", + state: &fakeRevisionStateInitializer{ + currentRevision: Lazy[string](func() string { + return fakeOldRevision + }), + collisionCount: Lazy[*int32](func() *int32 { + return nil + }), + parent: Lazy[client.Object](func() client.Object { + return fake.FakeObj("aaa", fake.Label[v1alpha1.PDGroup]("aaa", "bbb")) + }), + labels: Labels{ + "ccc": "ddd", + }, + }, + objs: []client.Object{ + fake.FakeObj(fakeOldRevision, fake.Label[appsv1.ControllerRevision]("ccc", "ddd")), + fake.FakeObj(fakeNewRevision, fake.Label[appsv1.ControllerRevision]("ccc", "ddd")), + }, + expectedUpdateRevision: "aaa-c9f48df6c", + expectedCurrentRevision: fakeOldRevision, + expectedCollisionCount: 2, + }, + } + + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + fc := client.NewFakeClient(c.objs...) + + res, done := task.RunTask(context.Background(), TaskRevision(c.state, fc)) + assert.Equal(tt, c.expectedResult.String(), res.Status().String(), c.desc) + assert.False(tt, done, c.desc) + update, current, collisionCount := c.state.Revision() + assert.Equal(tt, c.expectedUpdateRevision, update, c.desc) + assert.Equal(tt, c.expectedCurrentRevision, current, c.desc) + assert.Equal(tt, c.expectedCollisionCount, collisionCount, c.desc) + + // record currentRevision and collisionCount + c.state.currentRevision = Lazy[string](func() string { + return current + }) + c.state.collisionCount = Lazy[*int32](func() *int32 { + return &collisionCount + }) + + // rerun Revision task and make sure that status is unchanged + res2, _ := task.RunTask(context.Background(), TaskRevision(c.state, fc)) + assert.Equal(tt, c.expectedResult.String(), res2.Status().String(), c.desc) + update, current, collisionCount = c.state.Revision() + assert.Equal(tt, c.expectedUpdateRevision, update, c.desc) + assert.Equal(tt, c.expectedCurrentRevision, current, c.desc) + assert.Equal(tt, 
c.expectedCollisionCount, collisionCount, c.desc) + }) + } +} diff --git a/pkg/controllers/common/task.go b/pkg/controllers/common/task.go index 71bb26691b..634e68df4a 100644 --- a/pkg/controllers/common/task.go +++ b/pkg/controllers/common/task.go @@ -20,20 +20,14 @@ import ( "fmt" "slices" "strings" - "time" - corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" kuberuntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" - utilerr "k8s.io/apimachinery/pkg/util/errors" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/runtime" - "github.com/pingcap/tidb-operator/pkg/utils/k8s" "github.com/pingcap/tidb-operator/pkg/utils/task/v3" ) @@ -60,13 +54,13 @@ func taskContextResource[T any, PT Object[T]](name string, w ResourceInitializer }) } -func taskContextResourceSlice[T any, L any, PT Object[T], PL ObjectList[L]]( +func taskContextResourceSlice[T any, PT Object[T]]( name string, w ResourceSliceInitializer[T], + l client.ObjectList, c client.Client, ) task.Task { return task.NameTaskFunc("Context"+name, func(ctx context.Context) task.Result { - var l PL = new(L) ns := w.Namespace() labels := w.Labels() @@ -129,19 +123,29 @@ func TaskContextTiDBGroup(state TiDBGroupStateInitializer, c client.Client) task return taskContextResource("TiDBGroup", w, c, false) } +func TaskContextTiFlashGroup(state TiFlashGroupStateInitializer, c client.Client) task.Task { + w := state.TiFlashGroupInitializer() + return taskContextResource("TiFlashGroup", w, c, false) +} + func TaskContextPDSlice(state PDSliceStateInitializer, c client.Client) task.Task { w := state.PDSliceInitializer() - return taskContextResourceSlice[v1alpha1.PD, v1alpha1.PDList]("PDSlice", w, c) + return taskContextResourceSlice("PDSlice", w, &v1alpha1.PDList{}, c) } func TaskContextTiKVSlice(state 
TiKVSliceStateInitializer, c client.Client) task.Task { w := state.TiKVSliceInitializer() - return taskContextResourceSlice[v1alpha1.TiKV, v1alpha1.TiKVList]("TiKVSlice", w, c) + return taskContextResourceSlice("TiKVSlice", w, &v1alpha1.TiKVList{}, c) } func TaskContextTiDBSlice(state TiDBSliceStateInitializer, c client.Client) task.Task { w := state.TiDBSliceInitializer() - return taskContextResourceSlice[v1alpha1.TiDB, v1alpha1.TiDBList]("TiDBSlice", w, c) + return taskContextResourceSlice("TiDBSlice", w, &v1alpha1.TiDBList{}, c) +} + +func TaskContextTiFlashSlice(state TiFlashSliceStateInitializer, c client.Client) task.Task { + w := state.TiFlashSliceInitializer() + return taskContextResourceSlice("TiFlashSlice", w, &v1alpha1.TiFlashList{}, c) } func TaskSuspendPod(state PodState, c client.Client) task.Task { @@ -163,134 +167,3 @@ func TaskSuspendPod(state PodState, c client.Client) task.Task { return task.Wait().With("pod is deleting") }) } - -func TaskGroupFinalizerAdd[ - GT runtime.GroupTuple[OG, RG], - OG client.Object, - RG runtime.Group, -](state GroupState[RG], c client.Client) task.Task { - return task.NameTaskFunc("FinalizerAdd", func(ctx context.Context) task.Result { - var t GT - if err := k8s.EnsureFinalizer(ctx, c, t.To(state.Group())); err != nil { - return task.Fail().With("failed to ensure finalizer has been added: %v", err) - } - return task.Complete().With("finalizer is added") - }) -} - -const defaultDelWaitTime = 10 * time.Second - -func TaskGroupFinalizerDel[ - GT runtime.GroupTuple[OG, RG], - IT runtime.InstanceTuple[OI, RI], - OG client.Object, - RG runtime.Group, - OI client.Object, - RI runtime.Instance, -](state GroupAndInstanceSliceState[RG, RI], c client.Client) task.Task { - var it IT - var gt GT - return task.NameTaskFunc("FinalizerDel", func(ctx context.Context) task.Result { - var errList []error - var names []string - for _, peer := range state.Slice() { - names = append(names, peer.GetName()) - if 
peer.GetDeletionTimestamp().IsZero() { - if err := c.Delete(ctx, it.To(peer)); err != nil { - if errors.IsNotFound(err) { - continue - } - errList = append(errList, fmt.Errorf("try to delete the instance %v failed: %w", peer.GetName(), err)) - continue - } - } - } - - if len(errList) != 0 { - return task.Fail().With("failed to delete all instances: %v", utilerr.NewAggregate(errList)) - } - - if len(names) != 0 { - return task.Retry(defaultDelWaitTime).With("wait for all instances being removed, %v still exists", names) - } - - wait, err := k8s.DeleteGroupSubresource(ctx, c, state.Group(), &corev1.ServiceList{}) - if err != nil { - return task.Fail().With("cannot delete subresources: %w", err) - } - if wait { - return task.Wait().With("wait all subresources deleted") - } - - if err := k8s.RemoveFinalizer(ctx, c, gt.To(state.Group())); err != nil { - return task.Fail().With("failed to ensure finalizer has been removed: %w", err) - } - - return task.Complete().With("finalizer has been removed") - }) -} - -func TaskGroupStatusSuspend[ - GT runtime.GroupTuple[OG, RG], - OG client.Object, - RG runtime.Group, - I runtime.Instance, -](state GroupAndInstanceSliceState[RG, I], c client.Client) task.Task { - return task.NameTaskFunc("StatusSuspend", func(ctx context.Context) task.Result { - status := metav1.ConditionFalse - reason := v1alpha1.ReasonSuspending - message := "group is suspending" - - suspended := true - g := state.Group() - objs := state.Slice() - for _, obj := range objs { - if !meta.IsStatusConditionTrue(obj.Conditions(), v1alpha1.CondSuspended) { - suspended = false - } - } - if suspended { - status = metav1.ConditionTrue - reason = v1alpha1.ReasonSuspended - message = "group is suspended" - } - needUpdate := SetStatusConditionIfChanged(g, &metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: status, - ObservedGeneration: g.GetGeneration(), - Reason: reason, - Message: message, - }) - needUpdate = SetStatusObservedGeneration(g) || needUpdate - - var t 
GT - if needUpdate { - if err := c.Status().Update(ctx, t.To(g)); err != nil { - return task.Fail().With("cannot update status: %v", err) - } - } - - return task.Complete().With("status is updated") - }) -} - -func SetStatusConditionIfChanged[O runtime.Object](obj O, cond *metav1.Condition) bool { - conds := obj.Conditions() - if meta.SetStatusCondition(&conds, *cond) { - obj.SetConditions(conds) - return true - } - return false -} - -func SetStatusObservedGeneration[O runtime.Object](obj O) bool { - actual := obj.GetGeneration() - current := obj.ObservedGeneration() - - if current != actual { - obj.SetObservedGeneration(actual) - return true - } - - return false -} diff --git a/pkg/controllers/common/task_finalizer.go b/pkg/controllers/common/task_finalizer.go new file mode 100644 index 0000000000..9392dd9e55 --- /dev/null +++ b/pkg/controllers/common/task_finalizer.go @@ -0,0 +1,82 @@ +package common + +import ( + "context" + "fmt" + "time" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + utilerr "k8s.io/apimachinery/pkg/util/errors" + + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/utils/k8s" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +func TaskGroupFinalizerAdd[ + GT runtime.GroupTuple[OG, RG], + OG client.Object, + RG runtime.Group, +](state GroupState[RG], c client.Client) task.Task { + return task.NameTaskFunc("FinalizerAdd", func(ctx context.Context) task.Result { + var t GT + if err := k8s.EnsureFinalizer(ctx, c, t.To(state.Group())); err != nil { + return task.Fail().With("failed to ensure finalizer has been added: %v", err) + } + return task.Complete().With("finalizer is added") + }) +} + +const defaultDelWaitTime = 10 * time.Second + +func TaskGroupFinalizerDel[ + GT runtime.GroupTuple[OG, RG], + IT runtime.InstanceTuple[OI, RI], + OG client.Object, + RG runtime.Group, + OI client.Object, + RI runtime.Instance, 
+](state GroupAndInstanceSliceState[RG, RI], c client.Client) task.Task { + var it IT + var gt GT + return task.NameTaskFunc("FinalizerDel", func(ctx context.Context) task.Result { + var errList []error + var names []string + for _, peer := range state.Slice() { + names = append(names, peer.GetName()) + if peer.GetDeletionTimestamp().IsZero() { + if err := c.Delete(ctx, it.To(peer)); err != nil { + if errors.IsNotFound(err) { + continue + } + errList = append(errList, fmt.Errorf("try to delete the instance %v failed: %w", peer.GetName(), err)) + continue + } + } + } + + if len(errList) != 0 { + return task.Fail().With("failed to delete all instances: %v", utilerr.NewAggregate(errList)) + } + + if len(names) != 0 { + return task.Retry(defaultDelWaitTime).With("wait for all instances being removed, %v still exists", names) + } + + wait, err := k8s.DeleteGroupSubresource(ctx, c, state.Group(), &corev1.ServiceList{}) + if err != nil { + return task.Fail().With("cannot delete subresources: %w", err) + } + if wait { + return task.Wait().With("wait all subresources deleted") + } + + if err := k8s.RemoveFinalizer(ctx, c, gt.To(state.Group())); err != nil { + return task.Fail().With("failed to ensure finalizer has been removed: %w", err) + } + + return task.Complete().With("finalizer has been removed") + }) +} diff --git a/pkg/controllers/common/task_finalizer_test.go b/pkg/controllers/common/task_finalizer_test.go new file mode 100644 index 0000000000..0be872361e --- /dev/null +++ b/pkg/controllers/common/task_finalizer_test.go @@ -0,0 +1,388 @@ +package common + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + 
"github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +func TestTaskGroupFinalizerAdd(t *testing.T) { + t.Run("PDGroup", testTaskGroupFinalizerAdd[runtime.PDGroupTuple]) + t.Run("TiKVGroup", testTaskGroupFinalizerAdd[runtime.TiKVGroupTuple]) + t.Run("TiDBGroup", testTaskGroupFinalizerAdd[runtime.TiDBGroupTuple]) + t.Run("TiFlashGroup", testTaskGroupFinalizerAdd[runtime.TiFlashGroupTuple]) +} + +func testTaskGroupFinalizerAdd[ + GT runtime.GroupTuple[OG, RG], + OG client.Object, + RG runtime.GroupT[G], + G runtime.GroupSet, +](t *testing.T) { + cases := []struct { + desc string + state GroupState[RG] + unexpectedErr bool + + expectedStatus task.Status + expectedObj RG + }{ + { + desc: "no finalizer", + state: FakeGroupState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + return obj + }), + ), + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetFinalizers([]string{v1alpha1.Finalizer}) + return obj + }), + }, + { + desc: "no finalizer and cannot call api", + state: FakeGroupState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + return obj + }), + ), + unexpectedErr: true, + expectedStatus: task.SFail, + }, + { + desc: "has another finalizer", + state: FakeGroupState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetFinalizers(append(obj.GetFinalizers(), "xxxx")) + return obj + }), + ), + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetFinalizers(append(obj.GetFinalizers(), "xxxx", v1alpha1.Finalizer)) + return obj + }), + }, + { + desc: "already has the finalizer", + state: FakeGroupState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetFinalizers(append(obj.GetFinalizers(), v1alpha1.Finalizer)) + return obj + }), + ), + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetFinalizers(append(obj.GetFinalizers(), 
v1alpha1.Finalizer)) + return obj + }), + }, + { + desc: "already has the finalizer and cannot call api", + state: FakeGroupState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetFinalizers(append(obj.GetFinalizers(), v1alpha1.Finalizer)) + return obj + }), + ), + unexpectedErr: true, + expectedStatus: task.SComplete, + }, + } + + var gt GT + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + fc := client.NewFakeClient(gt.To(c.state.Group())) + if c.unexpectedErr { + fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) + } + + ctx := context.Background() + res, done := task.RunTask(ctx, TaskGroupFinalizerAdd[GT](c.state, fc)) + assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) + assert.False(tt, done, c.desc) + + // no need to check update result + if c.unexpectedErr { + return + } + + obj := gt.To(new(G)) + require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, obj), c.desc) + assert.Equal(tt, c.expectedObj, gt.From(obj), c.desc) + }) + } +} + +func TestTaskGroupFinalizerDel(t *testing.T) { + t.Run("TiKVGroup", testTaskGroupFinalizerDel[runtime.TiKVGroupTuple, runtime.TiKVTuple]) + t.Run("TiDBGroup", testTaskGroupFinalizerDel[runtime.TiDBGroupTuple, runtime.TiDBTuple]) +} + +func testTaskGroupFinalizerDel[ + GT runtime.GroupTuple[OG, RG], + IT runtime.InstanceTuple[OI, RI], + OG client.Object, + RG runtime.GroupT[G], + OI client.Object, + RI runtime.InstanceT[I], + G runtime.GroupSet, + I runtime.InstanceSet, +](t *testing.T) { + now := metav1.Now() + cases := []struct { + desc string + state GroupAndInstanceSliceState[RG, RI] + subresources []client.Object + unexpectedErr bool + + expectedStatus task.Status + expectedObj RG + }{ + { + desc: "no instances and no sub resources and no finalizer", + state: FakeGroupAndInstanceSliceState[RG, RI]( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + return obj + }), + ), + 
expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + return obj + }), + }, + { + desc: "no instances and no sub resources", + state: FakeGroupAndInstanceSliceState[RG, RI]( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{}) + return obj + }), + }, + { + desc: "no instances and no sub resources but call api failed", + state: FakeGroupAndInstanceSliceState[RG, RI]( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + unexpectedErr: true, + + expectedStatus: task.SFail, + }, + { + desc: "no instances but has sub resources", + state: FakeGroupAndInstanceSliceState[RG, RI]( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + subresources: []client.Object{ + fake.FakeObj("aaa", + fake.Label[corev1.Service](v1alpha1.LabelKeyManagedBy, v1alpha1.LabelValManagedByOperator), + fake.Label[corev1.Service](v1alpha1.LabelKeyCluster, ""), + fake.Label[corev1.Service](v1alpha1.LabelKeyComponent, runtime.Component[G, RG]()), + fake.Label[corev1.Service](v1alpha1.LabelKeyGroup, "aaa"), + ), + }, + + expectedStatus: task.SWait, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + }, + { + desc: "no instances but has sub resources and call api failed", + state: FakeGroupAndInstanceSliceState[RG, RI]( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + 
obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + subresources: []client.Object{ + fake.FakeObj("aaa", + fake.Label[corev1.Service](v1alpha1.LabelKeyManagedBy, v1alpha1.LabelValManagedByOperator), + fake.Label[corev1.Service](v1alpha1.LabelKeyCluster, ""), + fake.Label[corev1.Service](v1alpha1.LabelKeyComponent, runtime.Component[G, RG]()), + fake.Label[corev1.Service](v1alpha1.LabelKeyGroup, "aaa"), + ), + }, + unexpectedErr: true, + + expectedStatus: task.SFail, + }, + { + desc: "has instances with finalizer", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + expectedStatus: task.SRetry, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + }, + { + desc: "has instances with finalizer but call api failed", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + unexpectedErr: true, + + expectedStatus: task.SFail, + }, + { + desc: "has deleting instances with finalizer but call api failed", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetDeletionTimestamp(&now) + obj.SetFinalizers([]string{ + v1alpha1.Finalizer, + }) + return obj + }), + ), + 
unexpectedErr: true, + + expectedStatus: task.SRetry, + }, + } + + var gt GT + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + objs := []client.Object{ + gt.To(c.state.Group()), + } + + objs = append(objs, c.subresources...) + + fc := client.NewFakeClient(objs...) + if c.unexpectedErr { + // cannot remove finalizer + fc.WithError("update", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) + // cannot delete sub resources + fc.WithError("delete", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) + } + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + res, done := task.RunTask(ctx, TaskGroupFinalizerDel[GT, IT](c.state, fc)) + assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) + assert.False(tt, done, c.desc) + + // no need to check update result + if c.unexpectedErr { + return + } + + obj := gt.To(new(G)) + require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, obj), c.desc) + assert.Equal(tt, c.expectedObj, gt.From(obj), c.desc) + }) + } +} diff --git a/pkg/controllers/common/task_status.go b/pkg/controllers/common/task_status.go new file mode 100644 index 0000000000..ebe6af4f00 --- /dev/null +++ b/pkg/controllers/common/task_status.go @@ -0,0 +1,201 @@ +package common + +import ( + "context" + + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +func TaskGroupStatusSuspend[ + GT runtime.GroupTuple[OG, RG], + OG client.Object, + RG runtime.Group, + I runtime.Instance, +](state GroupAndInstanceSliceState[RG, I], c client.Client) task.Task { + return task.NameTaskFunc("StatusSuspend", func(ctx context.Context) task.Result { + status := metav1.ConditionFalse + reason := 
v1alpha1.ReasonSuspending + message := "group is suspending" + + suspended := true + g := state.Group() + objs := state.Slice() + for _, obj := range objs { + if !meta.IsStatusConditionTrue(obj.Conditions(), v1alpha1.CondSuspended) { + suspended = false + } + } + if suspended { + status = metav1.ConditionTrue + reason = v1alpha1.ReasonSuspended + message = "group is suspended" + } + needUpdate := SetStatusCondition(g, &metav1.Condition{ + Type: v1alpha1.CondSuspended, + Status: status, + ObservedGeneration: g.GetGeneration(), + Reason: reason, + Message: message, + }) + needUpdate = SetStatusObservedGeneration(g) || needUpdate + + var t GT + if needUpdate { + if err := c.Status().Update(ctx, t.To(g)); err != nil { + return task.Fail().With("cannot update status: %v", err) + } + } + + return task.Complete().With("status is updated") + }) +} + +func TaskGroupStatus[ + GT runtime.GroupTuple[OG, RG], + OG client.Object, + RG runtime.Group, + I runtime.Instance, +](state GroupAndInstanceSliceAndRevisionState[RG, I], c client.Client) task.Task { + var gt GT + return task.NameTaskFunc("Status", func(ctx context.Context) task.Result { + g := state.Group() + + needUpdate := SetStatusCondition(g, &metav1.Condition{ + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: g.GetGeneration(), + Reason: v1alpha1.ReasonUnsuspended, + Message: "group is not suspended", + }) + + updateRevision, currentRevision, collisionCount := state.Revision() + + replicas, readyReplicas, updateReplicas, currentReplicas := calcReplicas(state.Slice(), updateRevision, currentRevision) + + // all instances are updated + if updateReplicas == replicas { + // update current revision + currentRevision = updateRevision + // update status of pdg version + // TODO(liubo02): version of a group is hard to understand + // We need to change it to a more meaningful field + needUpdate = SetStatusVersion(g) + } + + needUpdate = SetStatusObservedGeneration(g) || needUpdate + 
needUpdate = SetStatusReplicas(g, replicas, readyReplicas, updateReplicas, currentReplicas) || needUpdate + needUpdate = SetStatusRevision(g, updateRevision, currentRevision, collisionCount) || needUpdate + + if needUpdate { + if err := c.Status().Update(ctx, gt.To(g)); err != nil { + return task.Fail().With("cannot update status: %w", err) + } + } + + return task.Complete().With("status is synced") + }) +} + +func calcReplicas[I runtime.Instance](instances []I, updateRevision, currentRevision string) ( + replicas, + readyReplicas, + updateReplicas, + currentReplicas int32, +) { + for _, peer := range instances { + replicas++ + if peer.IsHealthy() { + readyReplicas++ + } + if peer.CurrentRevision() == currentRevision { + currentReplicas++ + } + if peer.CurrentRevision() == updateRevision { + updateReplicas++ + } + } + + return +} + +func SetStatusCondition[O runtime.Object](obj O, cond *metav1.Condition) bool { + conds := obj.Conditions() + if meta.SetStatusCondition(&conds, *cond) { + obj.SetConditions(conds) + return true + } + return false +} + +func SetStatusVersion[O runtime.Group](obj O) bool { + v := obj.StatusVersion() + if SetIfChanged(&v, obj.Version()) { + obj.SetStatusVersion(v) + return true + } + + return false +} + +func SetStatusObservedGeneration[O runtime.Object](obj O) bool { + gen := obj.ObservedGeneration() + if SetIfChanged(&gen, obj.GetGeneration()) { + obj.SetObservedGeneration(gen) + return true + } + + return false +} + +func SetStatusReplicas[O runtime.Group](obj O, newReplicas, newReady, newUpdate, newCurrent int32) bool { + replicas, ready, update, current := obj.StatusReplicas() + changed := SetIfChanged(&replicas, newReplicas) + changed = SetIfChanged(&ready, newReady) || changed + changed = SetIfChanged(&update, newUpdate) || changed + changed = SetIfChanged(¤t, newCurrent) || changed + if changed { + obj.SetStatusReplicas(replicas, ready, update, current) + return changed + } + + return false +} + +func SetStatusRevision[O 
runtime.Group](obj O, newUpdate, newCurrent string, newCollisionCount int32) bool { + update, current, collisionCount := obj.StatusRevision() + changed := SetIfChanged(&update, newUpdate) + changed = SetIfChanged(¤t, newCurrent) || changed + changed = NewAndSetIfChanged(&collisionCount, newCollisionCount) || changed + if changed { + obj.SetStatusRevision(update, current, collisionCount) + return changed + } + + return false +} + +func SetIfChanged[T comparable](dst *T, src T) bool { + if *dst != src { + *dst = src + return true + } + + return false +} + +func NewAndSetIfChanged[T comparable](dst **T, src T) bool { + if *dst == nil { + zero := new(T) + if *zero == src { + return false + } + *dst = zero + } + return SetIfChanged(*dst, src) +} diff --git a/pkg/controllers/common/task_status_test.go b/pkg/controllers/common/task_status_test.go new file mode 100644 index 0000000000..455082b805 --- /dev/null +++ b/pkg/controllers/common/task_status_test.go @@ -0,0 +1,508 @@ +package common + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/utils/ptr" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/utils/fake" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +const ( + oldRevision = "old" + newRevision = "new" +) + +func TestTaskGroupStatusSuspend(t *testing.T) { + t.Run("PDGroup", testTaskGroupStatusSuspend[runtime.PDGroupTuple, runtime.PD]) + t.Run("TiKVGroup", testTaskGroupStatusSuspend[runtime.TiKVGroupTuple, runtime.TiKV]) + t.Run("TiDBGroup", testTaskGroupStatusSuspend[runtime.TiDBGroupTuple, runtime.TiDB]) + t.Run("TiFlashGroup", testTaskGroupStatusSuspend[runtime.TiFlashGroupTuple, runtime.TiFlash]) +} + +func 
testTaskGroupStatusSuspend[ + GT runtime.GroupTuple[OG, RG], + I runtime.InstanceSet, + OG client.Object, + RG runtime.GroupT[G], + G runtime.GroupSet, + RI runtime.InstanceT[I], +](t *testing.T) { + cases := []struct { + desc string + state GroupAndInstanceSliceState[RG, RI] + unexpectedErr bool + + expectedStatus task.Status + expectedObj RG + }{ + { + desc: "no instance", + state: FakeGroupAndInstanceSliceState[RG, RI]( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetObservedGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonSuspended, + Message: "group is suspended", + }, + }) + return obj + }), + }, + { + desc: "all instances are suspended", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionTrue, + }, + }) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetObservedGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonSuspended, + Message: "group is suspended", + }, + }) + return obj + }), + }, + { + desc: "one instance is not suspended", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: 
metav1.ConditionFalse, + }, + }) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetObservedGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonSuspending, + Message: "group is suspending", + }, + }) + return obj + }), + }, + { + desc: "all instances are suspended but cannot call api", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionTrue, + }, + }) + return obj + }), + ), + unexpectedErr: true, + + expectedStatus: task.SFail, + }, + { + desc: "all instances are suspended and group is up to date and cannot call api", + state: FakeGroupAndInstanceSliceState( + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetObservedGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionTrue, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonSuspended, + Message: "group is suspended", + }, + }) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionTrue, + }, + }) + return obj + }), + ), + unexpectedErr: true, + + expectedStatus: task.SComplete, + }, + } + + var gt GT + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() + + fc := client.NewFakeClient(gt.To(c.state.Group())) + if c.unexpectedErr { + fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) + } + + ctx := context.Background() + res, done := task.RunTask(ctx, 
TaskGroupStatusSuspend[GT](c.state, fc)) + assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) + assert.False(tt, done, c.desc) + + // no need to check update result + if c.unexpectedErr { + return + } + + obj := gt.To(new(G)) + require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, obj), c.desc) + rg := gt.From(obj) + conds := rg.Conditions() + for i := range conds { + cond := &conds[i] + cond.LastTransitionTime = metav1.Time{} + } + assert.Equal(tt, c.expectedObj, rg, c.desc) + }) + } +} + +func TestTaskGroupStatus(t *testing.T) { + t.Run("PDGroup", testTaskGroupStatus[runtime.PDGroupTuple, runtime.PD]) + t.Run("TiKVGroup", testTaskGroupStatus[runtime.TiKVGroupTuple, runtime.TiKV]) + t.Run("TiDBGroup", testTaskGroupStatus[runtime.TiDBGroupTuple, runtime.TiDB]) + t.Run("TiFlashGroup", testTaskGroupStatus[runtime.TiFlashGroupTuple, runtime.TiFlash]) +} + +func testTaskGroupStatus[ + GT runtime.GroupTuple[OG, RG], + I runtime.InstanceSet, + OG client.Object, + RG runtime.GroupT[G], + G runtime.GroupSet, + RI runtime.InstanceT[I], +](t *testing.T) { + cases := []struct { + desc string + state GroupAndInstanceSliceAndRevisionState[RG, RI] + unexpectedErr bool + + expectedStatus task.Status + expectedObj RG + }{ + { + desc: "no instances", + state: FakeGroupAndInstanceSliceAndRevisionState[RG, RI]( + newRevision, + oldRevision, + 3, + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonUnsuspended, + Message: "group is not suspended", + }, + }) + obj.SetObservedGeneration(3) + obj.SetStatusReplicas(0, 0, 0, 0) + obj.SetStatusRevision(newRevision, newRevision, ptr.To[int32](3)) + return obj + }), + }, + { + desc: "all 
instances are outdated and healthy", + state: FakeGroupAndInstanceSliceAndRevisionState( + newRevision, + oldRevision, + 0, + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetCurrentRevision(oldRevision) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondHealth, + Status: metav1.ConditionTrue, + }, + }) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonUnsuspended, + Message: "group is not suspended", + }, + }) + obj.SetObservedGeneration(3) + obj.SetStatusReplicas(1, 1, 0, 1) + obj.SetStatusRevision(newRevision, oldRevision, nil) + return obj + }), + }, + { + desc: "all instances are updated and healthy", + state: FakeGroupAndInstanceSliceAndRevisionState( + newRevision, + oldRevision, + 0, + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetCurrentRevision(newRevision) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondHealth, + Status: metav1.ConditionTrue, + }, + }) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonUnsuspended, + Message: "group is not suspended", + }, + }) + obj.SetObservedGeneration(3) + obj.SetStatusReplicas(1, 1, 1, 0) + obj.SetStatusRevision(newRevision, newRevision, nil) + return obj + }), + }, + { + desc: "all instances are updated but not healthy", + state: FakeGroupAndInstanceSliceAndRevisionState( + 
newRevision, + oldRevision, + 0, + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetCurrentRevision(newRevision) + return obj + }), + ), + + expectedStatus: task.SComplete, + expectedObj: fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonUnsuspended, + Message: "group is not suspended", + }, + }) + obj.SetObservedGeneration(3) + obj.SetStatusReplicas(1, 0, 1, 0) + obj.SetStatusRevision(newRevision, newRevision, nil) + return obj + }), + }, + { + desc: "status changed but cannot call api", + state: FakeGroupAndInstanceSliceAndRevisionState( + newRevision, + oldRevision, + 0, + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetCurrentRevision(newRevision) + return obj + }), + ), + unexpectedErr: true, + + expectedStatus: task.SFail, + }, + { + desc: "status is not changed and cannot call api", + state: FakeGroupAndInstanceSliceAndRevisionState( + newRevision, + oldRevision, + 0, + fake.Fake(func(obj RG) RG { + obj.SetName("aaa") + obj.SetGeneration(3) + obj.SetConditions([]metav1.Condition{ + { + Type: v1alpha1.CondSuspended, + Status: metav1.ConditionFalse, + ObservedGeneration: 3, + Reason: v1alpha1.ReasonUnsuspended, + Message: "group is not suspended", + }, + }) + obj.SetObservedGeneration(3) + obj.SetStatusReplicas(1, 0, 1, 0) + obj.SetStatusRevision(newRevision, newRevision, nil) + return obj + }), + fake.Fake(func(obj RI) RI { + obj.SetName("aaa") + obj.SetCurrentRevision(newRevision) + return obj + }), + ), + unexpectedErr: true, + + expectedStatus: task.SComplete, + }, + } + + var gt GT + for i := range cases { + c := &cases[i] + t.Run(c.desc, func(tt *testing.T) { + tt.Parallel() 
+ + fc := client.NewFakeClient(gt.To(c.state.Group())) + if c.unexpectedErr { + fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) + } + + ctx := context.Background() + res, done := task.RunTask(ctx, TaskGroupStatus[GT](c.state, fc)) + assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) + assert.False(tt, done, c.desc) + + // no need to check update result + if c.unexpectedErr { + return + } + + var g RG = new(G) + require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, gt.To(g)), c.desc) + + conds := g.Conditions() + for i := range conds { + cond := &conds[i] + cond.LastTransitionTime = metav1.Time{} + } + assert.Equal(tt, c.expectedObj, g, c.desc) + }) + } +} diff --git a/pkg/controllers/common/task_test.go b/pkg/controllers/common/task_test.go index a3544af1f8..82cd79b621 100644 --- a/pkg/controllers/common/task_test.go +++ b/pkg/controllers/common/task_test.go @@ -20,14 +20,12 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/utils/fake" "github.com/pingcap/tidb-operator/pkg/utils/task/v3" ) @@ -397,588 +395,3 @@ func TestTaskSuspendPod(t *testing.T) { }) } } - -func TestTaskGroupFinalizerAdd(t *testing.T) { - t.Run("PDGroup", testTaskGroupFinalizerAdd[runtime.PDGroupTuple]) - t.Run("TiKVGroup", testTaskGroupFinalizerAdd[runtime.TiKVGroupTuple]) - t.Run("TiDBGroup", testTaskGroupFinalizerAdd[runtime.TiDBGroupTuple]) - t.Run("TiFlashGroup", testTaskGroupFinalizerAdd[runtime.TiFlashGroupTuple]) -} - -func testTaskGroupFinalizerAdd[ - GT runtime.GroupTuple[OG, RG], - OG client.Object, - RG runtime.GroupT[G], - G runtime.GroupSet, -](t *testing.T) { - cases := 
[]struct { - desc string - state GroupState[RG] - unexpectedErr bool - - expectedStatus task.Status - expectedObj RG - }{ - { - desc: "no finalizer", - state: FakeGroupState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - return obj - }), - ), - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetFinalizers([]string{v1alpha1.Finalizer}) - return obj - }), - }, - { - desc: "no finalizer and cannot call api", - state: FakeGroupState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - return obj - }), - ), - unexpectedErr: true, - expectedStatus: task.SFail, - }, - { - desc: "has another finalizer", - state: FakeGroupState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetFinalizers(append(obj.GetFinalizers(), "xxxx")) - return obj - }), - ), - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetFinalizers(append(obj.GetFinalizers(), "xxxx", v1alpha1.Finalizer)) - return obj - }), - }, - { - desc: "already has the finalizer", - state: FakeGroupState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetFinalizers(append(obj.GetFinalizers(), v1alpha1.Finalizer)) - return obj - }), - ), - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetFinalizers(append(obj.GetFinalizers(), v1alpha1.Finalizer)) - return obj - }), - }, - { - desc: "already has the finalizer and cannot call api", - state: FakeGroupState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetFinalizers(append(obj.GetFinalizers(), v1alpha1.Finalizer)) - return obj - }), - ), - unexpectedErr: true, - expectedStatus: task.SComplete, - }, - } - - var gt GT - for i := range cases { - c := &cases[i] - t.Run(c.desc, func(tt *testing.T) { - tt.Parallel() - - fc := client.NewFakeClient(gt.To(c.state.Group())) - if c.unexpectedErr { - fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - } - - 
ctx := context.Background() - res, done := task.RunTask(ctx, TaskGroupFinalizerAdd[GT](c.state, fc)) - assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) - assert.False(tt, done, c.desc) - - // no need to check update result - if c.unexpectedErr { - return - } - - obj := gt.To(new(G)) - require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, obj), c.desc) - assert.Equal(tt, c.expectedObj, gt.From(obj), c.desc) - }) - } -} - -func TestTaskGroupFinalizerDel(t *testing.T) { - t.Run("TiKVGroup", testTaskGroupFinalizerDel[runtime.TiKVGroupTuple, runtime.TiKVTuple]) - t.Run("TiDBGroup", testTaskGroupFinalizerDel[runtime.TiDBGroupTuple, runtime.TiDBTuple]) -} - -func testTaskGroupFinalizerDel[ - GT runtime.GroupTuple[OG, RG], - IT runtime.InstanceTuple[OI, RI], - OG client.Object, - RG runtime.GroupT[G], - OI client.Object, - RI runtime.InstanceT[I], - G runtime.GroupSet, - I runtime.InstanceSet, -](t *testing.T) { - now := metav1.Now() - cases := []struct { - desc string - state GroupAndInstanceSliceState[RG, RI] - subresources []client.Object - unexpectedErr bool - - expectedStatus task.Status - expectedObj RG - }{ - { - desc: "no instances and no sub resources and no finalizer", - state: FakeGroupAndInstanceSliceState[RG, RI]( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - return obj - }), - ), - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - return obj - }), - }, - { - desc: "no instances and no sub resources", - state: FakeGroupAndInstanceSliceState[RG, RI]( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{}) - return obj - }), - }, - { - desc: "no instances and 
no sub resources but call api failed", - state: FakeGroupAndInstanceSliceState[RG, RI]( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "no instances but has sub resources", - state: FakeGroupAndInstanceSliceState[RG, RI]( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - subresources: []client.Object{ - fake.FakeObj("aaa", - fake.Label[corev1.Service](v1alpha1.LabelKeyManagedBy, v1alpha1.LabelValManagedByOperator), - fake.Label[corev1.Service](v1alpha1.LabelKeyCluster, ""), - fake.Label[corev1.Service](v1alpha1.LabelKeyComponent, runtime.Component[G, RG]()), - fake.Label[corev1.Service](v1alpha1.LabelKeyGroup, "aaa"), - ), - }, - - expectedStatus: task.SWait, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - }, - { - desc: "no instances but has sub resources and call api failed", - state: FakeGroupAndInstanceSliceState[RG, RI]( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - subresources: []client.Object{ - fake.FakeObj("aaa", - fake.Label[corev1.Service](v1alpha1.LabelKeyManagedBy, v1alpha1.LabelValManagedByOperator), - fake.Label[corev1.Service](v1alpha1.LabelKeyCluster, ""), - fake.Label[corev1.Service](v1alpha1.LabelKeyComponent, runtime.Component[G, RG]()), - fake.Label[corev1.Service](v1alpha1.LabelKeyGroup, "aaa"), - ), - }, - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "has instances with finalizer", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") 
- obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - expectedStatus: task.SRetry, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - }, - { - desc: "has instances with finalizer but call api failed", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "has deleting instances with finalizer but call api failed", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetDeletionTimestamp(&now) - obj.SetFinalizers([]string{ - v1alpha1.Finalizer, - }) - return obj - }), - ), - unexpectedErr: true, - - expectedStatus: task.SRetry, - }, - } - - var gt GT - for i := range cases { - c := &cases[i] - t.Run(c.desc, func(tt *testing.T) { - tt.Parallel() - - objs := []client.Object{ - gt.To(c.state.Group()), - } - - objs = append(objs, c.subresources...) - - fc := client.NewFakeClient(objs...) 
- if c.unexpectedErr { - // cannot remove finalizer - fc.WithError("update", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - // cannot delete sub resources - fc.WithError("delete", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - } - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - res, done := task.RunTask(ctx, TaskGroupFinalizerDel[GT, IT](c.state, fc)) - assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) - assert.False(tt, done, c.desc) - - // no need to check update result - if c.unexpectedErr { - return - } - - obj := gt.To(new(G)) - require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, obj), c.desc) - assert.Equal(tt, c.expectedObj, gt.From(obj), c.desc) - }) - } -} - -func TestTaskGroupStatusSuspend(t *testing.T) { - t.Run("PDGroup", testTaskGroupStatusSuspend[runtime.PDGroupTuple, runtime.PD]) - t.Run("TiKVGroup", testTaskGroupStatusSuspend[runtime.TiKVGroupTuple, runtime.TiKV]) - t.Run("TiDBGroup", testTaskGroupStatusSuspend[runtime.TiDBGroupTuple, runtime.TiDB]) - t.Run("TiFlashGroup", testTaskGroupStatusSuspend[runtime.TiFlashGroupTuple, runtime.TiFlash]) -} - -func testTaskGroupStatusSuspend[ - GT runtime.GroupTuple[OG, RG], - I runtime.InstanceSet, - OG client.Object, - RG runtime.GroupT[G], - G runtime.GroupSet, - RI runtime.InstanceT[I], -](t *testing.T) { - cases := []struct { - desc string - state GroupAndInstanceSliceState[RG, RI] - unexpectedErr bool - - expectedStatus task.Status - expectedObj RG - }{ - { - desc: "no instance", - state: FakeGroupAndInstanceSliceState[RG, RI]( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - return obj - }), - ), - - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - obj.SetObservedGeneration(3) - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionTrue, - ObservedGeneration: 3, 
- Reason: v1alpha1.ReasonSuspended, - Message: "group is suspended", - }, - }) - return obj - }), - }, - { - desc: "all instances are suspended", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionTrue, - }, - }) - return obj - }), - ), - - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - obj.SetObservedGeneration(3) - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionTrue, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonSuspended, - Message: "group is suspended", - }, - }) - return obj - }), - }, - { - desc: "one instance is not suspended", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - }, - }) - return obj - }), - ), - - expectedStatus: task.SComplete, - expectedObj: fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - obj.SetObservedGeneration(3) - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonSuspending, - Message: "group is suspending", - }, - }) - return obj - }), - }, - { - desc: "all instances are suspended but cannot call api", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionTrue, - }, - }) - 
return obj - }), - ), - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "all instances are suspended and group is up to date and cannot call api", - state: FakeGroupAndInstanceSliceState( - fake.Fake(func(obj RG) RG { - obj.SetName("aaa") - obj.SetGeneration(3) - obj.SetObservedGeneration(3) - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionTrue, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonSuspended, - Message: "group is suspended", - }, - }) - return obj - }), - fake.Fake(func(obj RI) RI { - obj.SetName("aaa") - obj.SetConditions([]metav1.Condition{ - { - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionTrue, - }, - }) - return obj - }), - ), - unexpectedErr: true, - - expectedStatus: task.SComplete, - }, - } - - var gt GT - for i := range cases { - c := &cases[i] - t.Run(c.desc, func(tt *testing.T) { - tt.Parallel() - - fc := client.NewFakeClient(gt.To(c.state.Group())) - if c.unexpectedErr { - fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - } - - ctx := context.Background() - res, done := task.RunTask(ctx, TaskGroupStatusSuspend[GT](c.state, fc)) - assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) - assert.False(tt, done, c.desc) - - // no need to check update result - if c.unexpectedErr { - return - } - - obj := gt.To(new(G)) - require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, obj), c.desc) - rg := gt.From(obj) - conds := rg.Conditions() - for i := range conds { - cond := &conds[i] - cond.LastTransitionTime = metav1.Time{} - } - assert.Equal(tt, c.expectedObj, rg, c.desc) - }) - } -} diff --git a/pkg/controllers/pd/tasks/state.go b/pkg/controllers/pd/tasks/state.go index 3c84ec27b5..0f88f11ff2 100644 --- a/pkg/controllers/pd/tasks/state.go +++ b/pkg/controllers/pd/tasks/state.go @@ -82,7 +82,7 @@ func (s *state) PDInitializer() common.PDInitializer { func (s *state) ClusterInitializer() 
common.ClusterInitializer { return common.NewResource(func(cluster *v1alpha1.Cluster) { s.cluster = cluster }). WithNamespace(common.Namespace(s.key.Namespace)). - WithName(common.NameFunc(func() string { + WithName(common.Lazy[string](func() string { return s.pd.Spec.Cluster.Name })). Initializer() @@ -91,7 +91,7 @@ func (s *state) ClusterInitializer() common.ClusterInitializer { func (s *state) PodInitializer() common.PodInitializer { return common.NewResource(s.SetPod). WithNamespace(common.Namespace(s.key.Namespace)). - WithName(common.NameFunc(func() string { + WithName(common.Lazy[string](func() string { return s.pd.PodName() })). Initializer() diff --git a/pkg/controllers/pdgroup/builder.go b/pkg/controllers/pdgroup/builder.go index c383fd8352..e68e598b7f 100644 --- a/pkg/controllers/pdgroup/builder.go +++ b/pkg/controllers/pdgroup/builder.go @@ -46,10 +46,11 @@ func (r *Reconciler) NewRunner(state *tasks.ReconcileContext, reporter task.Task common.TaskGroupStatusSuspend[runtime.PDGroupTuple](state, r.Client), ), tasks.TaskContextPDClient(state, r.PDClientManager), + common.TaskRevision(state, r.Client), tasks.TaskBoot(state, r.Client), tasks.TaskService(state, r.Client), tasks.TaskUpdater(state, r.Client), - tasks.TaskStatus(state, r.Client), + common.TaskGroupStatus[runtime.PDGroupTuple](state, r.Client), ) return runner diff --git a/pkg/controllers/pdgroup/tasks/ctx.go b/pkg/controllers/pdgroup/tasks/ctx.go index f13c59e09b..b71d61e3f9 100644 --- a/pkg/controllers/pdgroup/tasks/ctx.go +++ b/pkg/controllers/pdgroup/tasks/ctx.go @@ -32,10 +32,6 @@ type ReconcileContext struct { // mark pdgroup is bootstrapped if cache of pd is synced IsBootstrapped bool - - UpdateRevision string - CurrentRevision string - CollisionCount int32 } // TODO: move to pdapi diff --git a/pkg/controllers/pdgroup/tasks/state.go b/pkg/controllers/pdgroup/tasks/state.go index ca99747c45..48972ef4fa 100644 --- a/pkg/controllers/pdgroup/tasks/state.go +++ 
b/pkg/controllers/pdgroup/tasks/state.go @@ -18,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" "github.com/pingcap/tidb-operator/pkg/controllers/common" "github.com/pingcap/tidb-operator/pkg/runtime" ) @@ -28,16 +29,22 @@ type state struct { cluster *v1alpha1.Cluster pdg *v1alpha1.PDGroup pds []*v1alpha1.PD + + updateRevision string + currentRevision string + collisionCount int32 } type State interface { common.PDGroupStateInitializer common.ClusterStateInitializer common.PDSliceStateInitializer + common.RevisionStateInitializer common.PDGroupState common.ClusterState common.PDSliceState + common.RevisionState common.GroupState[*runtime.PDGroup] common.InstanceSliceState[*runtime.PD] @@ -80,7 +87,7 @@ func (s *state) PDGroupInitializer() common.PDGroupInitializer { func (s *state) ClusterInitializer() common.ClusterInitializer { return common.NewResource(func(cluster *v1alpha1.Cluster) { s.cluster = cluster }). WithNamespace(common.Namespace(s.key.Namespace)). - WithName(common.NameFunc(func() string { + WithName(common.Lazy[string](func() string { return s.pdg.Spec.Cluster.Name })). Initializer() @@ -89,13 +96,43 @@ func (s *state) ClusterInitializer() common.ClusterInitializer { func (s *state) PDSliceInitializer() common.PDSliceInitializer { return common.NewResourceSlice(func(pds []*v1alpha1.PD) { s.pds = pds }). WithNamespace(common.Namespace(s.key.Namespace)). - WithLabels(common.LabelsFunc(func() map[string]string { - return map[string]string{ - v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, - v1alpha1.LabelKeyCluster: s.cluster.Name, - v1alpha1.LabelKeyGroup: s.pdg.Name, - } + WithLabels(s.Labels()). 
+ Initializer() +} + +func (s *state) RevisionInitializer() common.RevisionInitializer { + return common.NewRevision(common.RevisionSetterFunc(func(update, current string, collisionCount int32) { + s.updateRevision = update + s.currentRevision = current + s.collisionCount = collisionCount + })). + WithCurrentRevision(common.Lazy[string](func() string { + return s.pdg.Status.CurrentRevision + })). + WithCollisionCount(common.Lazy[*int32](func() *int32 { + return s.pdg.Status.CollisionCount })). + WithParent(common.Lazy[client.Object](func() client.Object { + pdg := s.pdg.DeepCopy() + // always ignore bootstrapped field in spec + pdg.Spec.Bootstrapped = false + return pdg + })). + WithLabels(s.Labels()). Initializer() } + +func (s *state) Revision() (update, current string, collisionCount int32) { + return s.updateRevision, s.currentRevision, s.collisionCount +} + +func (s *state) Labels() common.LabelsOption { + return common.Lazy[map[string]string](func() map[string]string { + return map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, + v1alpha1.LabelKeyCluster: s.cluster.Name, + v1alpha1.LabelKeyGroup: s.pdg.Name, + } + }) +} diff --git a/pkg/controllers/pdgroup/tasks/status.go b/pkg/controllers/pdgroup/tasks/status.go deleted file mode 100644 index f4ad731ef4..0000000000 --- a/pkg/controllers/pdgroup/tasks/status.go +++ /dev/null @@ -1,115 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package tasks - -import ( - "context" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/task/v3" -) - -func TaskStatus(state *ReconcileContext, c client.Client) task.Task { - return task.NameTaskFunc("Status", func(ctx context.Context) task.Result { - pdg := state.PDGroup() - - needUpdate := meta.SetStatusCondition(&pdg.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: pdg.Generation, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - - replicas, readyReplicas, updateReplicas, currentReplicas := calcReplicas(state.PDSlice(), state.CurrentRevision, state.UpdateRevision) - - // all instances are updated - if updateReplicas == replicas { - // update current revision - state.CurrentRevision = state.UpdateRevision - // update status of pdg version - // TODO(liubo02): version of a group is hard to understand - // We need to change it to a more meaningful field - needUpdate = SetIfChanged(&pdg.Status.Version, pdg.Spec.Version) || needUpdate - } - - needUpdate = SetIfChanged(&pdg.Status.ObservedGeneration, pdg.Generation) || needUpdate - needUpdate = SetIfChanged(&pdg.Status.Replicas, replicas) || needUpdate - needUpdate = SetIfChanged(&pdg.Status.ReadyReplicas, readyReplicas) || needUpdate - needUpdate = SetIfChanged(&pdg.Status.UpdatedReplicas, updateReplicas) || needUpdate - needUpdate = SetIfChanged(&pdg.Status.CurrentReplicas, currentReplicas) || needUpdate - needUpdate = SetIfChanged(&pdg.Status.UpdateRevision, state.UpdateRevision) || needUpdate - needUpdate = SetIfChanged(&pdg.Status.CurrentRevision, state.CurrentRevision) || needUpdate - needUpdate = 
NewAndSetIfChanged(&pdg.Status.CollisionCount, state.CollisionCount) || needUpdate - - if needUpdate { - if err := c.Status().Update(ctx, pdg); err != nil { - return task.Fail().With("cannot update status: %w", err) - } - } - - if !state.IsBootstrapped { - return task.Wait().With("pd group may not be available, wait") - } - - return task.Complete().With("status is synced") - }) -} - -func calcReplicas(pds []*v1alpha1.PD, currentRevision, updateRevision string) ( - replicas, - readyReplicas, - updateReplicas, - currentReplicas int32, -) { - for _, peer := range pds { - replicas++ - if peer.IsHealthy() { - readyReplicas++ - } - if peer.CurrentRevision() == currentRevision { - currentReplicas++ - } - if peer.CurrentRevision() == updateRevision { - updateReplicas++ - } - } - - return -} - -func NewAndSetIfChanged[T comparable](dst **T, src T) bool { - if *dst == nil { - zero := new(T) - if *zero == src { - return false - } - *dst = zero - } - return SetIfChanged(*dst, src) -} - -func SetIfChanged[T comparable](dst *T, src T) bool { - if *dst != src { - *dst = src - return true - } - - return false -} diff --git a/pkg/controllers/pdgroup/tasks/status_test.go b/pkg/controllers/pdgroup/tasks/status_test.go deleted file mode 100644 index 674a73d4dc..0000000000 --- a/pkg/controllers/pdgroup/tasks/status_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tasks - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/fake" - "github.com/pingcap/tidb-operator/pkg/utils/task/v3" -) - -func TestTaskStatus(t *testing.T) { - cases := []struct { - desc string - state *ReconcileContext - unexpectedErr bool - - expectedStatus task.Status - expectedObj *v1alpha1.PDGroup - }{ - { - desc: "no pds", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3)), - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - CollisionCount: 3, - IsBootstrapped: true, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3), func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 0 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = ptr.To[int32](3) - return obj - }), - }, - { - desc: "no pds and not bootstrapped", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3)), - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SWait, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3), func(obj 
*v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 0 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all pds are outdated and healthy", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3)), - pds: []*v1alpha1.PD{ - fake.FakeObj("aaa", func(obj *v1alpha1.PD) *v1alpha1.PD { - obj.Status.CurrentRevision = oldRevision - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.PDCondHealth, - Status: metav1.ConditionTrue, - }) - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - IsBootstrapped: true, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3), func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 1 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 1 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = oldRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all pds are updated and healthy", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", 
fake.SetGeneration[v1alpha1.PDGroup](3)), - pds: []*v1alpha1.PD{ - fake.FakeObj("aaa", func(obj *v1alpha1.PD) *v1alpha1.PD { - obj.Status.CurrentRevision = newRevision - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.PDCondHealth, - Status: metav1.ConditionTrue, - }) - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - IsBootstrapped: true, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3), func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 1 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all pds are updated but not healthy", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3)), - pds: []*v1alpha1.PD{ - fake.FakeObj("aaa", func(obj *v1alpha1.PD) *v1alpha1.PD { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - IsBootstrapped: true, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3), func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - 
obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "status changed but cannot call api", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3)), - pds: []*v1alpha1.PD{ - fake.FakeObj("aaa", func(obj *v1alpha1.PD) *v1alpha1.PD { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - IsBootstrapped: true, - }, - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "status is not changed and cannot call api", - state: &ReconcileContext{ - State: &state{ - pdg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.PDGroup](3), func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - pds: []*v1alpha1.PD{ - fake.FakeObj("aaa", func(obj *v1alpha1.PD) *v1alpha1.PD { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - IsBootstrapped: true, - }, - unexpectedErr: true, - - expectedStatus: task.SComplete, - }, - } - - for i := range cases { - c := &cases[i] - t.Run(c.desc, func(tt *testing.T) { - tt.Parallel() - - fc := client.NewFakeClient(c.state.PDGroup()) - if c.unexpectedErr { - fc.WithError("*", 
"*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - } - - ctx := context.Background() - res, done := task.RunTask(ctx, TaskStatus(c.state, fc)) - assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) - assert.False(tt, done, c.desc) - - // no need to check update result - if c.unexpectedErr { - return - } - - pdg := &v1alpha1.PDGroup{} - require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, pdg), c.desc) - for i := range pdg.Status.Conditions { - cond := &pdg.Status.Conditions[i] - cond.LastTransitionTime = metav1.Time{} - } - assert.Equal(tt, c.expectedObj, pdg, c.desc) - }) - } -} diff --git a/pkg/controllers/pdgroup/tasks/updater.go b/pkg/controllers/pdgroup/tasks/updater.go index 83229ca1b7..fe260f1ebc 100644 --- a/pkg/controllers/pdgroup/tasks/updater.go +++ b/pkg/controllers/pdgroup/tasks/updater.go @@ -20,11 +20,8 @@ import ( "strconv" "time" - appsv1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "github.com/go-logr/logr" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/action" @@ -32,11 +29,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/updater" "github.com/pingcap/tidb-operator/pkg/updater/policy" - "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" maputil "github.com/pingcap/tidb-operator/pkg/utils/map" "github.com/pingcap/tidb-operator/pkg/utils/random" "github.com/pingcap/tidb-operator/pkg/utils/task/v3" - "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" ) const ( @@ -47,45 +42,8 @@ const ( func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { return task.NameTaskFunc("Updater", func(ctx context.Context) task.Result { logger := logr.FromContextOrDiscard(ctx) - historyCli := history.NewClient(c) pdg := state.PDGroup() - selector := 
labels.SelectorFromSet(labels.Set{ - // TODO(liubo02): add label of managed by operator ? - v1alpha1.LabelKeyCluster: pdg.Spec.Cluster.Name, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, - v1alpha1.LabelKeyGroup: pdg.Name, - }) - - revisions, err := historyCli.ListControllerRevisions(pdg, selector) - if err != nil { - return task.Fail().With("cannot list controller revisions: %w", err) - } - history.SortControllerRevisions(revisions) - - // Get the current(old) and update(new) ControllerRevisions - currentRevision, updateRevision, collisionCount, err := func() (*appsv1.ControllerRevision, *appsv1.ControllerRevision, int32, error) { - // always ignore bootstrapped field in spec - bootstrapped := pdg.Spec.Bootstrapped - pdg.Spec.Bootstrapped = false - defer func() { - pdg.Spec.Bootstrapped = bootstrapped - }() - return revision.GetCurrentAndUpdate(pdg, revisions, historyCli, pdg) - }() - if err != nil { - return task.Fail().With("cannot get revisions: %w", err) - } - state.CurrentRevision = currentRevision.Name - state.UpdateRevision = updateRevision.Name - state.CollisionCount = collisionCount - - // TODO(liubo02): add a controller to do it - if err = revision.TruncateHistory(historyCli, state.PDSlice(), revisions, - currentRevision, updateRevision, state.Cluster().Spec.RevisionHistoryLimit); err != nil { - logger.Error(err, "failed to truncate history") - } - checker := action.NewUpgradeChecker(c, state.Cluster(), logger) if needVersionUpgrade(pdg) && !checker.CanUpgrade(ctx, pdg) { @@ -93,11 +51,6 @@ func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { return task.Retry(defaultUpdateWaitTime).With("wait until preconditions of upgrading is met") } - desired := 1 - if pdg.Spec.Replicas != nil { - desired = int(*pdg.Spec.Replicas) - } - var topos []v1alpha1.ScheduleTopology for _, p := range pdg.Spec.SchedulePolicies { switch p.Type { @@ -108,28 +61,23 @@ func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { } } - 
topoPolicy, err := policy.NewTopologyPolicy[*runtime.PD](topos) + pds := state.Slice() + topoPolicy, err := policy.NewTopologyPolicy(topos, pds...) if err != nil { return task.Fail().With("invalid topo policy, it should be validated: %w", err) } - for _, pd := range state.PDSlice() { - topoPolicy.Add(runtime.FromPD(pd)) - } + updateRevision, _, _ := state.Revision() wait, err := updater.New[runtime.PDTuple](). - WithInstances(runtime.FromPDSlice(state.PDSlice())...). - WithDesired(desired). + WithInstances(pds...). + WithDesired(int(state.Group().Replicas())). WithClient(c). WithMaxSurge(0). WithMaxUnavailable(1). - WithRevision(state.UpdateRevision). - WithNewFactory(PDNewer(pdg, state.UpdateRevision)). + WithRevision(updateRevision). + WithNewFactory(PDNewer(pdg, updateRevision)). WithAddHooks(topoPolicy). - WithUpdateHooks( - policy.KeepName[*runtime.PD](), - policy.KeepTopology[*runtime.PD](), - ). WithDelHooks(topoPolicy). WithScaleInPreferPolicy( NotLeaderPolicy(), diff --git a/pkg/controllers/pdgroup/tasks/updater_test.go b/pkg/controllers/pdgroup/tasks/updater_test.go index 4cd9ba7017..172604667e 100644 --- a/pkg/controllers/pdgroup/tasks/updater_test.go +++ b/pkg/controllers/pdgroup/tasks/updater_test.go @@ -33,8 +33,8 @@ import ( ) const ( - oldRevision = "aaa-c9f48df69" - newRevision = "aaa-6cd5c46fb8" + oldRevision = "old" + newRevision = "new" ) func TestTaskUpdater(t *testing.T) { @@ -43,10 +43,8 @@ func TestTaskUpdater(t *testing.T) { state *ReconcileContext unexpectedErr bool - expectedStatus task.Status - expectedUpdateRevision string - expectedCurrentRevision string - expectedPDNum int + expectedStatus task.Status + expectedPDNum int }{ { desc: "no pds with 1 replicas", @@ -57,27 +55,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - expectedCurrentRevision: oldRevision, - expectedPDNum: 1, - }, - { - desc: "ignore bootstrapped field", - state: &ReconcileContext{ - State: 
&state{ - pdg: fake.FakeObj("aaa", func(obj *v1alpha1.PDGroup) *v1alpha1.PDGroup { - obj.Spec.Bootstrapped = true - return obj - }), - cluster: fake.FakeObj[v1alpha1.Cluster]("cluster"), - }, - }, - - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - expectedCurrentRevision: oldRevision, - expectedPDNum: 1, + expectedStatus: task.SComplete, + expectedPDNum: 1, }, { desc: "version upgrade check", @@ -94,9 +73,7 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SRetry, - expectedUpdateRevision: "aaa-6d4b685647", - expectedCurrentRevision: "aaa-6d4b685647", + expectedStatus: task.SRetry, }, { desc: "1 updated pd with 1 replicas", @@ -105,15 +82,14 @@ func TestTaskUpdater(t *testing.T) { pdg: fake.FakeObj[v1alpha1.PDGroup]("aaa"), cluster: fake.FakeObj[v1alpha1.Cluster]("cluster"), pds: []*v1alpha1.PD{ - fakeAvailablePD("aaa-xxx", fake.FakeObj[v1alpha1.PDGroup]("aaa"), oldRevision), + fakeAvailablePD("aaa-xxx", fake.FakeObj[v1alpha1.PDGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - expectedCurrentRevision: oldRevision, - expectedPDNum: 1, + expectedStatus: task.SComplete, + expectedPDNum: 1, }, { desc: "no pds with 2 replicas", @@ -127,10 +103,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedPDNum: 2, + expectedStatus: task.SComplete, + expectedPDNum: 2, }, { desc: "no pds with 2 replicas and call api failed", @@ -145,9 +119,7 @@ func TestTaskUpdater(t *testing.T) { }, unexpectedErr: true, - expectedStatus: task.SFail, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, + expectedStatus: task.SFail, }, { desc: "1 outdated pd with 2 replicas", @@ -161,13 +133,12 @@ func TestTaskUpdater(t *testing.T) { pds: []*v1alpha1.PD{ fakeAvailablePD("aaa-xxx", fake.FakeObj[v1alpha1.PDGroup]("aaa"), 
oldRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SWait, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedPDNum: 2, + expectedStatus: task.SWait, + expectedPDNum: 2, }, { desc: "1 outdated pd with 2 replicas but cannot call api, will fail", @@ -181,13 +152,12 @@ func TestTaskUpdater(t *testing.T) { pds: []*v1alpha1.PD{ fakeAvailablePD("aaa-xxx", fake.FakeObj[v1alpha1.PDGroup]("aaa"), oldRevision), }, + updateRevision: newRevision, }, }, unexpectedErr: true, - expectedStatus: task.SFail, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, + expectedStatus: task.SFail, }, { desc: "2 updated pd with 2 replicas", @@ -202,13 +172,12 @@ func TestTaskUpdater(t *testing.T) { fakeAvailablePD("aaa-xxx", fake.FakeObj[v1alpha1.PDGroup]("aaa"), newRevision), fakeAvailablePD("aaa-yyy", fake.FakeObj[v1alpha1.PDGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedPDNum: 2, + expectedStatus: task.SComplete, + expectedPDNum: 2, }, { desc: "2 updated pd with 2 replicas and cannot call api, can complete", @@ -223,14 +192,13 @@ func TestTaskUpdater(t *testing.T) { fakeAvailablePD("aaa-xxx", fake.FakeObj[v1alpha1.PDGroup]("aaa"), newRevision), fakeAvailablePD("aaa-yyy", fake.FakeObj[v1alpha1.PDGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, unexpectedErr: true, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedPDNum: 2, + expectedStatus: task.SComplete, + expectedPDNum: 2, }, { // NOTE: it not really check whether the policy is worked @@ -268,10 +236,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: "aaa-5cd4fb5cb4", - expectedCurrentRevision: "aaa-5cd4fb5cb4", - expectedPDNum: 3, + expectedStatus: 
task.SComplete, + expectedPDNum: 3, }, } @@ -292,11 +258,9 @@ func TestTaskUpdater(t *testing.T) { } res, done := task.RunTask(ctx, TaskUpdater(c.state, fc)) - assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) + assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) assert.False(tt, done, c.desc) - assert.Equal(tt, c.expectedUpdateRevision, c.state.UpdateRevision, c.desc) - assert.Equal(tt, c.expectedCurrentRevision, c.state.CurrentRevision, c.desc) if !c.unexpectedErr { pds := v1alpha1.PDList{} require.NoError(tt, fc.List(ctx, &pds), c.desc) diff --git a/pkg/controllers/tidbgroup/builder.go b/pkg/controllers/tidbgroup/builder.go index 1081524d30..cdbf7bab02 100644 --- a/pkg/controllers/tidbgroup/builder.go +++ b/pkg/controllers/tidbgroup/builder.go @@ -45,10 +45,11 @@ func (r *Reconciler) NewRunner(state *tasks.ReconcileContext, reporter task.Task common.CondClusterIsSuspending(state), common.TaskGroupStatusSuspend[runtime.TiDBGroupTuple](state, r.Client), ), + + common.TaskRevision(state, r.Client), tasks.TaskService(state, r.Client), tasks.TaskUpdater(state, r.Client), - tasks.TaskStatusAvailable(state, r.Client), - tasks.TaskStatus(state, r.Client), + common.TaskGroupStatus[runtime.TiDBGroupTuple](state, r.Client), ) return runner diff --git a/pkg/controllers/tidbgroup/tasks/ctx.go b/pkg/controllers/tidbgroup/tasks/ctx.go index d3eea8d278..eb9c98a8d9 100644 --- a/pkg/controllers/tidbgroup/tasks/ctx.go +++ b/pkg/controllers/tidbgroup/tasks/ctx.go @@ -16,8 +16,4 @@ package tasks type ReconcileContext struct { State - - UpdateRevision string - CurrentRevision string - CollisionCount int32 } diff --git a/pkg/controllers/tidbgroup/tasks/state.go b/pkg/controllers/tidbgroup/tasks/state.go index 2d9bb9dc2f..b9e5e7e072 100644 --- a/pkg/controllers/tidbgroup/tasks/state.go +++ b/pkg/controllers/tidbgroup/tasks/state.go @@ -18,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + 
"github.com/pingcap/tidb-operator/pkg/client" "github.com/pingcap/tidb-operator/pkg/controllers/common" "github.com/pingcap/tidb-operator/pkg/runtime" ) @@ -28,16 +29,22 @@ type state struct { cluster *v1alpha1.Cluster dbg *v1alpha1.TiDBGroup dbs []*v1alpha1.TiDB + + updateRevision string + currentRevision string + collisionCount int32 } type State interface { common.TiDBGroupStateInitializer common.ClusterStateInitializer common.TiDBSliceStateInitializer + common.RevisionStateInitializer common.TiDBGroupState common.ClusterState common.TiDBSliceState + common.RevisionState common.GroupState[*runtime.TiDBGroup] common.InstanceSliceState[*runtime.TiDB] @@ -80,7 +87,7 @@ func (s *state) TiDBGroupInitializer() common.TiDBGroupInitializer { func (s *state) ClusterInitializer() common.ClusterInitializer { return common.NewResource(func(cluster *v1alpha1.Cluster) { s.cluster = cluster }). WithNamespace(common.Namespace(s.key.Namespace)). - WithName(common.NameFunc(func() string { + WithName(common.Lazy[string](func() string { return s.dbg.Spec.Cluster.Name })). Initializer() @@ -89,13 +96,40 @@ func (s *state) ClusterInitializer() common.ClusterInitializer { func (s *state) TiDBSliceInitializer() common.TiDBSliceInitializer { return common.NewResourceSlice(func(dbs []*v1alpha1.TiDB) { s.dbs = dbs }). WithNamespace(common.Namespace(s.key.Namespace)). - WithLabels(common.LabelsFunc(func() map[string]string { - return map[string]string{ - v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, - v1alpha1.LabelKeyCluster: s.cluster.Name, - v1alpha1.LabelKeyGroup: s.dbg.Name, - } + WithLabels(s.Labels()). + Initializer() +} + +func (s *state) RevisionInitializer() common.RevisionInitializer { + return common.NewRevision(common.RevisionSetterFunc(func(update, current string, collisionCount int32) { + s.updateRevision = update + s.currentRevision = current + s.collisionCount = collisionCount + })). 
+ WithCurrentRevision(common.Lazy[string](func() string { + return s.dbg.Status.CurrentRevision + })). + WithCollisionCount(common.Lazy[*int32](func() *int32 { + return s.dbg.Status.CollisionCount })). + WithParent(common.Lazy[client.Object](func() client.Object { + return s.dbg + })). + WithLabels(s.Labels()). Initializer() } + +func (s *state) Revision() (update, current string, collisionCount int32) { + return s.updateRevision, s.currentRevision, s.collisionCount +} + +func (s *state) Labels() common.LabelsOption { + return common.Lazy[map[string]string](func() map[string]string { + return map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, + v1alpha1.LabelKeyCluster: s.cluster.Name, + v1alpha1.LabelKeyGroup: s.dbg.Name, + } + }) +} diff --git a/pkg/controllers/tidbgroup/tasks/status.go b/pkg/controllers/tidbgroup/tasks/status.go index 078e615bb8..a8072c37c2 100644 --- a/pkg/controllers/tidbgroup/tasks/status.go +++ b/pkg/controllers/tidbgroup/tasks/status.go @@ -58,89 +58,3 @@ func TaskStatusAvailable(state State, c client.Client) task.Task { return task.Complete().With("status is synced") }) } - -// TODO(liubo02): extract to common task -func TaskStatus(state *ReconcileContext, c client.Client) task.Task { - return task.NameTaskFunc("Status", func(ctx context.Context) task.Result { - dbg := state.TiDBGroup() - - needUpdate := meta.SetStatusCondition(&dbg.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: dbg.Generation, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - - replicas, readyReplicas, updateReplicas, currentReplicas := calcReplicas(state.TiDBSlice(), state.CurrentRevision, state.UpdateRevision) - - // all instances are updated - if updateReplicas == replicas { - // update current revision - state.CurrentRevision = state.UpdateRevision - // update status 
of pdg version - // TODO(liubo02): version of a group is hard to understand - // We need to change it to a more meaningful field - needUpdate = SetIfChanged(&dbg.Status.Version, dbg.Spec.Version) || needUpdate - } - - needUpdate = SetIfChanged(&dbg.Status.ObservedGeneration, dbg.Generation) || needUpdate - needUpdate = SetIfChanged(&dbg.Status.Replicas, replicas) || needUpdate - needUpdate = SetIfChanged(&dbg.Status.ReadyReplicas, readyReplicas) || needUpdate - needUpdate = SetIfChanged(&dbg.Status.UpdatedReplicas, updateReplicas) || needUpdate - needUpdate = SetIfChanged(&dbg.Status.CurrentReplicas, currentReplicas) || needUpdate - needUpdate = SetIfChanged(&dbg.Status.UpdateRevision, state.UpdateRevision) || needUpdate - needUpdate = SetIfChanged(&dbg.Status.CurrentRevision, state.CurrentRevision) || needUpdate - needUpdate = NewAndSetIfChanged(&dbg.Status.CollisionCount, state.CollisionCount) || needUpdate - - if needUpdate { - if err := c.Status().Update(ctx, dbg); err != nil { - return task.Fail().With("cannot update status: %w", err) - } - } - - return task.Complete().With("status is synced") - }) -} - -func calcReplicas(dbs []*v1alpha1.TiDB, currentRevision, updateRevision string) ( - replicas, - readyReplicas, - updateReplicas, - currentReplicas int32, -) { - for _, peer := range dbs { - replicas++ - if peer.IsHealthy() { - readyReplicas++ - } - if peer.CurrentRevision() == currentRevision { - currentReplicas++ - } - if peer.CurrentRevision() == updateRevision { - updateReplicas++ - } - } - - return -} - -func NewAndSetIfChanged[T comparable](dst **T, src T) bool { - if *dst == nil { - zero := new(T) - if *zero == src { - return false - } - *dst = zero - } - return SetIfChanged(*dst, src) -} - -func SetIfChanged[T comparable](dst *T, src T) bool { - if *dst != src { - *dst = src - return true - } - - return false -} diff --git a/pkg/controllers/tidbgroup/tasks/status_test.go b/pkg/controllers/tidbgroup/tasks/status_test.go deleted file mode 100644 index 
f152fc65f0..0000000000 --- a/pkg/controllers/tidbgroup/tasks/status_test.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tasks - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/fake" - "github.com/pingcap/tidb-operator/pkg/utils/task/v3" -) - -func TestTaskStatus(t *testing.T) { - cases := []struct { - desc string - state *ReconcileContext - unexpectedErr bool - - expectedStatus task.Status - expectedObj *v1alpha1.TiDBGroup - }{ - { - desc: "no tidbs", - state: &ReconcileContext{ - State: &state{ - dbg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3)), - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - CollisionCount: 3, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3), func(obj *v1alpha1.TiDBGroup) *v1alpha1.TiDBGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: 
"group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 0 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = ptr.To[int32](3) - return obj - }), - }, - { - desc: "all tidbs are outdated and healthy", - state: &ReconcileContext{ - State: &state{ - dbg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3)), - dbs: []*v1alpha1.TiDB{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiDB) *v1alpha1.TiDB { - obj.Status.CurrentRevision = oldRevision - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.TiDBCondHealth, - Status: metav1.ConditionTrue, - }) - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3), func(obj *v1alpha1.TiDBGroup) *v1alpha1.TiDBGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 1 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 1 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = oldRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all tidbs are updated and healthy", - state: &ReconcileContext{ - State: &state{ - dbg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3)), - dbs: []*v1alpha1.TiDB{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiDB) *v1alpha1.TiDB { - obj.Status.CurrentRevision = newRevision - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: 
v1alpha1.TiDBCondHealth, - Status: metav1.ConditionTrue, - }) - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3), func(obj *v1alpha1.TiDBGroup) *v1alpha1.TiDBGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 1 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all tidbs are updated but not healthy", - state: &ReconcileContext{ - State: &state{ - dbg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3)), - dbs: []*v1alpha1.TiDB{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiDB) *v1alpha1.TiDB { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3), func(obj *v1alpha1.TiDBGroup) *v1alpha1.TiDBGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: 
"status changed but cannot call api", - state: &ReconcileContext{ - State: &state{ - dbg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3)), - dbs: []*v1alpha1.TiDB{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiDB) *v1alpha1.TiDB { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "status is not changed and cannot call api", - state: &ReconcileContext{ - State: &state{ - dbg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiDBGroup](3), func(obj *v1alpha1.TiDBGroup) *v1alpha1.TiDBGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - dbs: []*v1alpha1.TiDB{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiDB) *v1alpha1.TiDB { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - unexpectedErr: true, - - expectedStatus: task.SComplete, - }, - } - - for i := range cases { - c := &cases[i] - t.Run(c.desc, func(tt *testing.T) { - tt.Parallel() - - fc := client.NewFakeClient(c.state.TiDBGroup()) - if c.unexpectedErr { - fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - } - - ctx := context.Background() - res, done := task.RunTask(ctx, TaskStatus(c.state, fc)) - assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) - assert.False(tt, done, c.desc) - - // no need to check update result - if 
c.unexpectedErr { - return - } - - dbg := &v1alpha1.TiDBGroup{} - require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, dbg), c.desc) - for i := range dbg.Status.Conditions { - cond := &dbg.Status.Conditions[i] - cond.LastTransitionTime = metav1.Time{} - } - assert.Equal(tt, c.expectedObj, dbg, c.desc) - }) - } -} diff --git a/pkg/controllers/tidbgroup/tasks/updater.go b/pkg/controllers/tidbgroup/tasks/updater.go index 1eca48c845..e630390681 100644 --- a/pkg/controllers/tidbgroup/tasks/updater.go +++ b/pkg/controllers/tidbgroup/tasks/updater.go @@ -21,7 +21,6 @@ import ( "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/action" @@ -29,11 +28,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/updater" "github.com/pingcap/tidb-operator/pkg/updater/policy" - "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" maputil "github.com/pingcap/tidb-operator/pkg/utils/map" "github.com/pingcap/tidb-operator/pkg/utils/random" "github.com/pingcap/tidb-operator/pkg/utils/task/v3" - "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" ) const ( @@ -44,37 +41,8 @@ const ( func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { return task.NameTaskFunc("Updater", func(ctx context.Context) task.Result { logger := logr.FromContextOrDiscard(ctx) - historyCli := history.NewClient(c) dbg := state.TiDBGroup() - selector := labels.SelectorFromSet(labels.Set{ - // TODO(liubo02): add label of managed by operator ? 
- v1alpha1.LabelKeyCluster: dbg.Spec.Cluster.Name, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiDB, - v1alpha1.LabelKeyGroup: dbg.Name, - }) - - revisions, err := historyCli.ListControllerRevisions(dbg, selector) - if err != nil { - return task.Fail().With("cannot list controller revisions: %w", err) - } - history.SortControllerRevisions(revisions) - - // Get the current(old) and update(new) ControllerRevisions. - currentRevision, updateRevision, collisionCount, err := revision.GetCurrentAndUpdate(dbg, revisions, historyCli, dbg) - if err != nil { - return task.Fail().With("cannot get revisions: %w", err) - } - state.CurrentRevision = currentRevision.Name - state.UpdateRevision = updateRevision.Name - state.CollisionCount = collisionCount - - // TODO(liubo02): add a controller to do it - if err = revision.TruncateHistory(historyCli, state.TiDBSlice(), revisions, - currentRevision, updateRevision, state.Cluster().Spec.RevisionHistoryLimit); err != nil { - logger.Error(err, "failed to truncate history") - } - checker := action.NewUpgradeChecker(c, state.Cluster(), logger) if needVersionUpgrade(dbg) && !checker.CanUpgrade(ctx, dbg) { @@ -82,11 +50,6 @@ func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { return task.Retry(defaultUpdateWaitTime).With("wait until preconditions of upgrading is met") } - desired := 1 - if dbg.Spec.Replicas != nil { - desired = int(*dbg.Spec.Replicas) - } - var topos []v1alpha1.ScheduleTopology for _, p := range dbg.Spec.SchedulePolicies { switch p.Type { @@ -97,28 +60,23 @@ func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { } } - topoPolicy, err := policy.NewTopologyPolicy[*runtime.TiDB](topos) + dbs := state.Slice() + topoPolicy, err := policy.NewTopologyPolicy(topos, dbs...) 
if err != nil { return task.Fail().With("invalid topo policy, it should be validated: %w", err) } - for _, tidb := range state.TiDBSlice() { - topoPolicy.Add(runtime.FromTiDB(tidb)) - } + updateRevision, _, _ := state.Revision() wait, err := updater.New[runtime.TiDBTuple](). - WithInstances(runtime.FromTiDBSlice(state.TiDBSlice())...). - WithDesired(desired). + WithInstances(dbs...). + WithDesired(int(state.Group().Replicas())). WithClient(c). WithMaxSurge(0). WithMaxUnavailable(1). - WithRevision(state.UpdateRevision). - WithNewFactory(TiDBNewer(dbg, state.UpdateRevision)). + WithRevision(updateRevision). + WithNewFactory(TiDBNewer(dbg, updateRevision)). WithAddHooks(topoPolicy). - WithUpdateHooks( - policy.KeepName[*runtime.TiDB](), - policy.KeepTopology[*runtime.TiDB](), - ). WithDelHooks(topoPolicy). WithScaleInPreferPolicy( topoPolicy, diff --git a/pkg/controllers/tidbgroup/tasks/updater_test.go b/pkg/controllers/tidbgroup/tasks/updater_test.go index 8bffde1f7a..20c8540049 100644 --- a/pkg/controllers/tidbgroup/tasks/updater_test.go +++ b/pkg/controllers/tidbgroup/tasks/updater_test.go @@ -33,9 +33,8 @@ import ( ) const ( - // TODO(liubo02): fake history client to avoid real revision calc - oldRevision = "aaa-7fff59c8" - newRevision = "aaa-69cf8bf4d9" + oldRevision = "old" + newRevision = "new" ) func TestTaskUpdater(t *testing.T) { @@ -44,10 +43,8 @@ func TestTaskUpdater(t *testing.T) { state *ReconcileContext unexpectedErr bool - expectedStatus task.Status - expectedUpdateRevision string - expectedCurrentRevision string - expectedTiDBNum int + expectedStatus task.Status + expectedTiDBNum int }{ { desc: "no dbs with 1 replicas", @@ -58,10 +55,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - expectedCurrentRevision: oldRevision, - expectedTiDBNum: 1, + expectedStatus: task.SComplete, + expectedTiDBNum: 1, }, { desc: "version upgrade check", @@ -78,9 +73,7 @@ func TestTaskUpdater(t 
*testing.T) { }, }, - expectedStatus: task.SRetry, - expectedUpdateRevision: "aaa-7847fff478", - expectedCurrentRevision: "aaa-7847fff478", + expectedStatus: task.SRetry, }, { desc: "1 updated tidb with 1 replicas", @@ -89,15 +82,14 @@ func TestTaskUpdater(t *testing.T) { dbg: fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), cluster: fake.FakeObj[v1alpha1.Cluster]("cluster"), dbs: []*v1alpha1.TiDB{ - fakeAvailableTiDB("aaa-xxx", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), oldRevision), + fakeAvailableTiDB("aaa-xxx", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - expectedCurrentRevision: oldRevision, - expectedTiDBNum: 1, + expectedStatus: task.SComplete, + expectedTiDBNum: 1, }, { desc: "no dbs with 2 replicas", @@ -111,10 +103,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiDBNum: 2, + expectedStatus: task.SComplete, + expectedTiDBNum: 2, }, { desc: "no dbs with 2 replicas and call api failed", @@ -129,9 +119,7 @@ func TestTaskUpdater(t *testing.T) { }, unexpectedErr: true, - expectedStatus: task.SFail, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, + expectedStatus: task.SFail, }, { desc: "1 outdated tidb with 2 replicas", @@ -145,13 +133,12 @@ func TestTaskUpdater(t *testing.T) { dbs: []*v1alpha1.TiDB{ fakeAvailableTiDB("aaa-xxx", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), oldRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SWait, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiDBNum: 2, + expectedStatus: task.SWait, + expectedTiDBNum: 2, }, { desc: "1 outdated tidb with 2 replicas but cannot call api, will fail", @@ -165,13 +152,12 @@ func TestTaskUpdater(t *testing.T) { dbs: []*v1alpha1.TiDB{ fakeAvailableTiDB("aaa-xxx", 
fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), oldRevision), }, + updateRevision: newRevision, }, }, unexpectedErr: true, - expectedStatus: task.SFail, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, + expectedStatus: task.SFail, }, { desc: "2 updated tidb with 2 replicas", @@ -186,13 +172,12 @@ func TestTaskUpdater(t *testing.T) { fakeAvailableTiDB("aaa-xxx", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), newRevision), fakeAvailableTiDB("aaa-yyy", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiDBNum: 2, + expectedStatus: task.SComplete, + expectedTiDBNum: 2, }, { desc: "2 updated tidb with 2 replicas and cannot call api, can complete", @@ -207,14 +192,13 @@ func TestTaskUpdater(t *testing.T) { fakeAvailableTiDB("aaa-xxx", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), newRevision), fakeAvailableTiDB("aaa-yyy", fake.FakeObj[v1alpha1.TiDBGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, unexpectedErr: true, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiDBNum: 2, + expectedStatus: task.SComplete, + expectedTiDBNum: 2, }, { // NOTE: it not really check whether the policy is worked @@ -252,10 +236,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: "aaa-655cc6bb8f", - expectedCurrentRevision: "aaa-655cc6bb8f", - expectedTiDBNum: 3, + expectedStatus: task.SComplete, + expectedTiDBNum: 3, }, } @@ -279,8 +261,6 @@ func TestTaskUpdater(t *testing.T) { assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) assert.False(tt, done, c.desc) - assert.Equal(tt, c.expectedUpdateRevision, c.state.UpdateRevision, c.desc) - assert.Equal(tt, c.expectedCurrentRevision, c.state.CurrentRevision, c.desc) if !c.unexpectedErr { dbs := 
v1alpha1.TiDBList{} require.NoError(tt, fc.List(ctx, &dbs), c.desc) diff --git a/pkg/controllers/tiflashgroup/builder.go b/pkg/controllers/tiflashgroup/builder.go new file mode 100644 index 0000000000..1b27bbc68c --- /dev/null +++ b/pkg/controllers/tiflashgroup/builder.go @@ -0,0 +1,56 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tiflashgroup + +import ( + "github.com/pingcap/tidb-operator/pkg/controllers/common" + "github.com/pingcap/tidb-operator/pkg/controllers/tiflashgroup/tasks" + "github.com/pingcap/tidb-operator/pkg/runtime" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" +) + +func (r *Reconciler) NewRunner(state *tasks.ReconcileContext, reporter task.TaskReporter) task.TaskRunner { + runner := task.NewTaskRunner(reporter, + // get tiflashgroup + common.TaskContextTiFlashGroup(state, r.Client), + // if it's gone just return + task.IfBreak(common.CondGroupHasBeenDeleted(state)), + + // get cluster + common.TaskContextCluster(state, r.Client), + // if it's paused just return + task.IfBreak(common.CondClusterIsPaused(state)), + + // get all tiflashes + common.TaskContextTiFlashSlice(state, r.Client), + + task.IfBreak(common.CondGroupIsDeleting(state), + common.TaskGroupFinalizerDel[runtime.TiFlashGroupTuple, runtime.TiFlashTuple](state, r.Client), + ), + common.TaskGroupFinalizerAdd[runtime.TiFlashGroupTuple](state, r.Client), + + task.IfBreak( + common.CondClusterIsSuspending(state), 
+ common.TaskGroupStatusSuspend[runtime.TiFlashGroupTuple](state, r.Client), + ), + + common.TaskRevision(state, r.Client), + tasks.TaskService(state, r.Client), + tasks.TaskUpdater(state, r.Client), + common.TaskGroupStatus[runtime.TiFlashGroupTuple](state, r.Client), + ) + + return runner +} diff --git a/pkg/controllers/tiflashgroup/controller.go b/pkg/controllers/tiflashgroup/controller.go index b8e8d4bf07..b59d02df22 100644 --- a/pkg/controllers/tiflashgroup/controller.go +++ b/pkg/controllers/tiflashgroup/controller.go @@ -35,7 +35,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/client" "github.com/pingcap/tidb-operator/pkg/controllers/tiflashgroup/tasks" "github.com/pingcap/tidb-operator/pkg/utils/k8s" - "github.com/pingcap/tidb-operator/pkg/utils/task" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" ) type Reconciler struct { @@ -60,7 +60,8 @@ func Setup(mgr manager.Manager, c client.Client) error { func (r *Reconciler) ClusterEventHandler() handler.TypedEventHandler[client.Object, reconcile.Request] { return handler.TypedFuncs[client.Object, reconcile.Request]{ UpdateFunc: func(ctx context.Context, event event.TypedUpdateEvent[client.Object], - queue workqueue.TypedRateLimitingInterface[reconcile.Request]) { + queue workqueue.TypedRateLimitingInterface[reconcile.Request], + ) { cluster := event.ObjectNew.(*v1alpha1.Cluster) var list v1alpha1.TiFlashGroupList @@ -98,19 +99,10 @@ func (r *Reconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Resu }() rtx := &tasks.ReconcileContext{ - // some fields will be set in the context task - Context: ctx, - Key: req.NamespacedName, + State: tasks.NewState(req.NamespacedName), } - runner := task.NewTaskRunner[tasks.ReconcileContext](reporter) - runner.AddTasks( - tasks.NewTaskContext(logger, r.Client), - tasks.NewTaskFinalizer(logger, r.Client), - tasks.NewTaskService(logger, r.Client), - tasks.NewTaskUpdater(logger, r.Client), - tasks.NewTaskStatus(logger, r.Client), - ) + runner 
:= r.NewRunner(rtx, reporter) - return runner.Run(rtx) + return runner.Run(ctx) } diff --git a/pkg/controllers/tiflashgroup/tasks/ctx.go b/pkg/controllers/tiflashgroup/tasks/ctx.go index 8442d347f5..eb9c98a8d9 100644 --- a/pkg/controllers/tiflashgroup/tasks/ctx.go +++ b/pkg/controllers/tiflashgroup/tasks/ctx.go @@ -14,108 +14,6 @@ package tasks -import ( - "cmp" - "context" - "slices" - - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/types" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/action" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/task" -) - type ReconcileContext struct { - context.Context - - Key types.NamespacedName - - Suspended bool - - Cluster *v1alpha1.Cluster - - TiFlashGroup *v1alpha1.TiFlashGroup - Peers []*v1alpha1.TiFlash - UpgradeChecker action.UpgradeChecker - - // Status fields - v1alpha1.CommonStatus -} - -func (ctx *ReconcileContext) Self() *ReconcileContext { - return ctx -} - -type TaskContext struct { - Logger logr.Logger - Client client.Client -} - -func NewTaskContext(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { - return &TaskContext{ - Logger: logger, - Client: c, - } -} - -func (*TaskContext) Name() string { - return "Context" -} - -func (t *TaskContext) Sync(ctx task.Context[ReconcileContext]) task.Result { - rtx := ctx.Self() - - var flashg v1alpha1.TiFlashGroup - if err := t.Client.Get(ctx, rtx.Key, &flashg); err != nil { - if !errors.IsNotFound(err) { - return task.Fail().With("can't get tiflash group: %w", err) - } - - return task.Complete().Break().With("tiflash group has been deleted") - } - rtx.TiFlashGroup = &flashg - - var cluster v1alpha1.Cluster - if err := t.Client.Get(ctx, client.ObjectKey{ - Name: flashg.Spec.Cluster.Name, - Namespace: flashg.Namespace, - }, &cluster); err != nil { - return 
task.Fail().With("cannot find cluster %s: %w", flashg.Spec.Cluster.Name, err) - } - rtx.Cluster = &cluster - - if cluster.ShouldPauseReconcile() { - return task.Complete().Break().With("cluster reconciliation is paused") - } - - var flashList v1alpha1.TiFlashList - if err := t.Client.List(ctx, &flashList, client.InNamespace(flashg.Namespace), client.MatchingLabels{ - v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, - v1alpha1.LabelKeyCluster: cluster.Name, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, - v1alpha1.LabelKeyGroup: flashg.Name, - }); err != nil { - return task.Fail().With("cannot list tiflash peers: %w", err) - } - - rtx.Peers = make([]*v1alpha1.TiFlash, len(flashList.Items)) - rtx.Suspended = len(flashList.Items) > 0 - for i := range flashList.Items { - rtx.Peers[i] = &flashList.Items[i] - if !meta.IsStatusConditionTrue(flashList.Items[i].Status.Conditions, v1alpha1.TiKVCondSuspended) { - // TiFlash Group is not suspended if any of its members is not suspended - rtx.Suspended = false - } - } - slices.SortFunc(rtx.Peers, func(a, b *v1alpha1.TiFlash) int { - return cmp.Compare(a.Name, b.Name) - }) - - rtx.UpgradeChecker = action.NewUpgradeChecker(t.Client, rtx.Cluster, t.Logger) - return task.Complete().With("new context completed") + State } diff --git a/pkg/controllers/tiflashgroup/tasks/finalizer.go b/pkg/controllers/tiflashgroup/tasks/finalizer.go deleted file mode 100644 index 8e52d4b30b..0000000000 --- a/pkg/controllers/tiflashgroup/tasks/finalizer.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tasks - -import ( - "fmt" - - "github.com/go-logr/logr" - utilerr "k8s.io/apimachinery/pkg/util/errors" - - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/k8s" - "github.com/pingcap/tidb-operator/pkg/utils/task" -) - -type TaskFinalizer struct { - Client client.Client - Logger logr.Logger -} - -func NewTaskFinalizer(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { - return &TaskFinalizer{ - Client: c, - Logger: logger, - } -} - -func (*TaskFinalizer) Name() string { - return "Finalizer" -} - -func (t *TaskFinalizer) Sync(ctx task.Context[ReconcileContext]) task.Result { - rtx := ctx.Self() - - if !rtx.TiFlashGroup.GetDeletionTimestamp().IsZero() { - errList := []error{} - names := []string{} - for _, peer := range rtx.Peers { - names = append(names, peer.Name) - if peer.GetDeletionTimestamp().IsZero() { - if err := t.Client.Delete(ctx, peer); err != nil { - errList = append(errList, fmt.Errorf("try to delete the tiflash instance %v failed: %w", peer.Name, err)) - } - } - } - - if len(errList) != 0 { - return task.Fail().With("failed to delete all tiflash instances: %v", utilerr.NewAggregate(errList)) - } - - if len(rtx.Peers) != 0 { - return task.Fail().With("wait for all tiflash instances being removed, %v still exists", names) - } - - if err := k8s.EnsureGroupSubResourceDeleted(ctx, t.Client, - rtx.TiFlashGroup.Namespace, rtx.TiFlashGroup.Name); err != nil { - return task.Fail().With("cannot delete subresources: %w", err) - } - if err := k8s.RemoveFinalizer(ctx, 
t.Client, rtx.TiFlashGroup); err != nil { - return task.Fail().With("failed to ensure finalizer has been removed: %w", err) - } - } else { - if err := k8s.EnsureFinalizer(ctx, t.Client, rtx.TiFlashGroup); err != nil { - return task.Fail().With("failed to ensure finalizer has been added: %w", err) - } - } - - return task.Complete().With("finalizer is synced") -} diff --git a/pkg/controllers/tiflashgroup/tasks/state.go b/pkg/controllers/tiflashgroup/tasks/state.go new file mode 100644 index 0000000000..89993cb8d1 --- /dev/null +++ b/pkg/controllers/tiflashgroup/tasks/state.go @@ -0,0 +1,135 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tasks + +import ( + "k8s.io/apimachinery/pkg/types" + + "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/controllers/common" + "github.com/pingcap/tidb-operator/pkg/runtime" +) + +type state struct { + key types.NamespacedName + + cluster *v1alpha1.Cluster + fg *v1alpha1.TiFlashGroup + fs []*v1alpha1.TiFlash + + updateRevision string + currentRevision string + collisionCount int32 +} + +type State interface { + common.TiFlashGroupStateInitializer + common.ClusterStateInitializer + common.TiFlashSliceStateInitializer + common.RevisionStateInitializer + + common.TiFlashGroupState + common.ClusterState + common.TiFlashSliceState + common.RevisionState + + common.GroupState[*runtime.TiFlashGroup] + common.InstanceSliceState[*runtime.TiFlash] +} + +func NewState(key types.NamespacedName) State { + s := &state{ + key: key, + } + return s +} + +func (s *state) TiFlashGroup() *v1alpha1.TiFlashGroup { + return s.fg +} + +func (s *state) Group() *runtime.TiFlashGroup { + return runtime.FromTiFlashGroup(s.fg) +} + +func (s *state) Cluster() *v1alpha1.Cluster { + return s.cluster +} + +func (s *state) TiFlashSlice() []*v1alpha1.TiFlash { + return s.fs +} + +func (s *state) Slice() []*runtime.TiFlash { + return runtime.FromTiFlashSlice(s.fs) +} + +func (s *state) TiFlashGroupInitializer() common.TiFlashGroupInitializer { + return common.NewResource(func(fg *v1alpha1.TiFlashGroup) { s.fg = fg }). + WithNamespace(common.Namespace(s.key.Namespace)). + WithName(common.Name(s.key.Name)). + Initializer() +} + +func (s *state) ClusterInitializer() common.ClusterInitializer { + return common.NewResource(func(cluster *v1alpha1.Cluster) { s.cluster = cluster }). + WithNamespace(common.Namespace(s.key.Namespace)). + WithName(common.Lazy[string](func() string { + return s.fg.Spec.Cluster.Name + })). 
+ Initializer() +} + +func (s *state) TiFlashSliceInitializer() common.TiFlashSliceInitializer { + return common.NewResourceSlice(func(fs []*v1alpha1.TiFlash) { s.fs = fs }). + WithNamespace(common.Namespace(s.key.Namespace)). + WithLabels(s.Labels()). + Initializer() +} + +func (s *state) RevisionInitializer() common.RevisionInitializer { + return common.NewRevision(common.RevisionSetterFunc(func(update, current string, collisionCount int32) { + s.updateRevision = update + s.currentRevision = current + s.collisionCount = collisionCount + })). + WithCurrentRevision(common.Lazy[string](func() string { + return s.fg.Status.CurrentRevision + })). + WithCollisionCount(common.Lazy[*int32](func() *int32 { + return s.fg.Status.CollisionCount + })). + WithParent(common.Lazy[client.Object](func() client.Object { + return s.fg + })). + WithLabels(s.Labels()). + Initializer() +} + +func (s *state) Revision() (update, current string, collisionCount int32) { + return s.updateRevision, s.currentRevision, s.collisionCount +} + +func (s *state) Labels() common.LabelsOption { + return common.Lazy[map[string]string](func() map[string]string { + return map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, + v1alpha1.LabelKeyCluster: s.cluster.Name, + v1alpha1.LabelKeyGroup: s.fg.Name, + } + }) +} diff --git a/pkg/controllers/tiflashgroup/tasks/status.go b/pkg/controllers/tiflashgroup/tasks/status.go deleted file mode 100644 index fab63a43fd..0000000000 --- a/pkg/controllers/tiflashgroup/tasks/status.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package tasks - -import ( - "github.com/go-logr/logr" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/task" -) - -type TaskStatus struct { - Client client.Client - Logger logr.Logger -} - -func NewTaskStatus(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { - return &TaskStatus{ - Client: c, - Logger: logger, - } -} - -func (*TaskStatus) Name() string { - return "Status" -} - -func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result { - rtx := ctx.Self() - - suspendStatus := metav1.ConditionFalse - suspendMessage := "tiflash group is not suspended" - if rtx.Suspended { - suspendStatus = metav1.ConditionTrue - suspendMessage = "tiflash group is suspended" - } else if rtx.Cluster.ShouldSuspendCompute() { - suspendMessage = "tiflash group is suspending" - } - conditionChanged := meta.SetStatusCondition(&rtx.TiFlashGroup.Status.Conditions, metav1.Condition{ - Type: v1alpha1.TiFlashGroupCondSuspended, - Status: suspendStatus, - ObservedGeneration: rtx.TiFlashGroup.Generation, - Reason: v1alpha1.TiFlashGroupSuspendReason, - Message: suspendMessage, - }) - - // Update the current revision if all instances are synced. 
- if int(rtx.TiFlashGroup.GetDesiredReplicas()) == len(rtx.Peers) && v1alpha1.AllInstancesSynced(rtx.Peers, rtx.UpdateRevision) { - if rtx.CurrentRevision != rtx.UpdateRevision || rtx.TiFlashGroup.Status.Version != rtx.TiFlashGroup.Spec.Version { - rtx.CurrentRevision = rtx.UpdateRevision - rtx.TiFlashGroup.Status.Version = rtx.TiFlashGroup.Spec.Version - conditionChanged = true - } - } - var readyReplicas int32 - for _, peer := range rtx.Peers { - if peer.IsHealthy() { - readyReplicas++ - } - } - - if conditionChanged || rtx.TiFlashGroup.Status.ReadyReplicas != readyReplicas || - rtx.TiFlashGroup.Status.Replicas != int32(len(rtx.Peers)) || //nolint:gosec // expected type conversion - !v1alpha1.IsReconciled(rtx.TiFlashGroup) || - v1alpha1.StatusChanged(rtx.TiFlashGroup, rtx.CommonStatus) { - rtx.TiFlashGroup.Status.ReadyReplicas = readyReplicas - rtx.TiFlashGroup.Status.Replicas = int32(len(rtx.Peers)) //nolint:gosec // expected type conversion - rtx.TiFlashGroup.Status.ObservedGeneration = rtx.TiFlashGroup.Generation - rtx.TiFlashGroup.Status.CurrentRevision = rtx.CurrentRevision - rtx.TiFlashGroup.Status.UpdateRevision = rtx.UpdateRevision - rtx.TiFlashGroup.Status.CollisionCount = rtx.CollisionCount - - if err := t.Client.Status().Update(ctx, rtx.TiFlashGroup); err != nil { - return task.Fail().With("cannot update status: %w", err) - } - } - - return task.Complete().With("status is synced") -} diff --git a/pkg/controllers/tiflashgroup/tasks/svc.go b/pkg/controllers/tiflashgroup/tasks/svc.go index e8ff42dfcb..293eb66b2b 100644 --- a/pkg/controllers/tiflashgroup/tasks/svc.go +++ b/pkg/controllers/tiflashgroup/tasks/svc.go @@ -15,49 +15,30 @@ package tasks import ( + "context" "fmt" - "github.com/go-logr/logr" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" - 
"github.com/pingcap/tidb-operator/pkg/utils/task" + "github.com/pingcap/tidb-operator/pkg/controllers/common" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" ) -type TaskService struct { - Logger logr.Logger - Client client.Client -} - -func NewTaskService(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { - return &TaskService{ - Logger: logger, - Client: c, - } -} - -func (*TaskService) Name() string { - return "Service" -} +func TaskService(state common.TiFlashGroupState, c client.Client) task.Task { + return task.NameTaskFunc("Service", func(ctx context.Context) task.Result { + fg := state.TiFlashGroup() -func (t *TaskService) Sync(ctx task.Context[ReconcileContext]) task.Result { - rtx := ctx.Self() - - if rtx.Cluster.ShouldSuspendCompute() { - return task.Complete().With("skip service for suspension") - } - - flashg := rtx.TiFlashGroup - - svc := newHeadlessService(flashg) - if err := t.Client.Apply(ctx, svc); err != nil { - return task.Fail().With(fmt.Sprintf("can't create headless service of tiflash: %v", err)) - } + headless := newHeadlessService(fg) + if err := c.Apply(ctx, headless); err != nil { + return task.Fail().With(fmt.Sprintf("can't create headless service: %v", err)) + } - return task.Complete().With("headless service of tiflash has been applied") + return task.Complete().With("services have been applied") + }) } func newHeadlessService(fg *v1alpha1.TiFlashGroup) *corev1.Service { diff --git a/pkg/controllers/tiflashgroup/tasks/updater.go b/pkg/controllers/tiflashgroup/tasks/updater.go index 3d72c6abb2..cedd08f42f 100644 --- a/pkg/controllers/tiflashgroup/tasks/updater.go +++ b/pkg/controllers/tiflashgroup/tasks/updater.go @@ -15,135 +15,82 @@ package tasks import ( + "context" "fmt" + "time" "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/action" 
"github.com/pingcap/tidb-operator/pkg/client" "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/updater" "github.com/pingcap/tidb-operator/pkg/updater/policy" - "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" maputil "github.com/pingcap/tidb-operator/pkg/utils/map" "github.com/pingcap/tidb-operator/pkg/utils/random" - "github.com/pingcap/tidb-operator/pkg/utils/task" - "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" + "github.com/pingcap/tidb-operator/pkg/utils/task/v3" ) -// TaskUpdater is a task for updating TiFlashGroup when its spec is changed. -type TaskUpdater struct { - Logger logr.Logger - Client client.Client - CRCli history.Interface -} +const ( + defaultUpdateWaitTime = time.Second * 30 +) -func NewTaskUpdater(logger logr.Logger, c client.Client) task.Task[ReconcileContext] { - return &TaskUpdater{ - Logger: logger, - Client: c, - CRCli: history.NewClient(c), - } -} +// TaskUpdater is a task to scale or update TiFlash when spec of TiFlashGroup is changed. +func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { + return task.NameTaskFunc("Updater", func(ctx context.Context) task.Result { + logger := logr.FromContextOrDiscard(ctx) + fg := state.TiFlashGroup() -func (*TaskUpdater) Name() string { - return "Updater" -} + checker := action.NewUpgradeChecker(c, state.Cluster(), logger) -func (t *TaskUpdater) Sync(ctx task.Context[ReconcileContext]) task.Result { - rtx := ctx.Self() - - // TODO: move to task v2 - if !rtx.TiFlashGroup.GetDeletionTimestamp().IsZero() { - return task.Complete().With("tiflash group has been deleted") - } - - // List all controller revisions for the TiFlashGroup. 
- selector, _ := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{ - MatchLabels: map[string]string{ - v1alpha1.LabelKeyCluster: rtx.Cluster.Name, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiFlash, - v1alpha1.LabelKeyGroup: rtx.TiFlashGroup.Name, - }, - }) - revisions, err := t.CRCli.ListControllerRevisions(rtx.TiFlashGroup, selector) - if err != nil { - return task.Fail().With("cannot list controller revisions: %w", err) - } - history.SortControllerRevisions(revisions) - - // Get the current(old) and update(new) ControllerRevisions. - currentRevision, updateRevision, collisionCount, err := revision.GetCurrentAndUpdate( - rtx.TiFlashGroup, revisions, t.CRCli, rtx.TiFlashGroup) - if err != nil { - return task.Fail().With("cannot get revisions: %w", err) - } - rtx.CurrentRevision = currentRevision.Name - rtx.UpdateRevision = updateRevision.Name - rtx.CollisionCount = &collisionCount - - if err = revision.TruncateHistory(t.CRCli, rtx.Peers, revisions, - currentRevision, updateRevision, rtx.Cluster.Spec.RevisionHistoryLimit); err != nil { - t.Logger.Error(err, "failed to truncate history") - } - - if needVersionUpgrade(rtx.TiFlashGroup) && !rtx.UpgradeChecker.CanUpgrade(ctx, rtx.TiFlashGroup) { - return task.Fail().Continue().With( - "preconditions of upgrading the tiflash group %s/%s are not met", - rtx.TiFlashGroup.Namespace, rtx.TiFlashGroup.Name) - } - - desired := 1 - if rtx.TiFlashGroup.Spec.Replicas != nil { - desired = int(*rtx.TiFlashGroup.Spec.Replicas) - } - - var topos []v1alpha1.ScheduleTopology - for _, p := range rtx.TiFlashGroup.Spec.SchedulePolicies { - switch p.Type { - case v1alpha1.SchedulePolicyTypeEvenlySpread: - topos = p.EvenlySpread.Topologies - default: - // do nothing + if needVersionUpgrade(fg) && !checker.CanUpgrade(ctx, fg) { + // TODO(liubo02): change to Wait + return task.Retry(defaultUpdateWaitTime).With("wait until preconditions of upgrading is met") + } + + var topos []v1alpha1.ScheduleTopology + for _, p := range 
fg.Spec.SchedulePolicies { + switch p.Type { + case v1alpha1.SchedulePolicyTypeEvenlySpread: + topos = p.EvenlySpread.Topologies + default: + // do nothing + } + } + + fs := state.Slice() + topoPolicy, err := policy.NewTopologyPolicy(topos, fs...) + if err != nil { + return task.Fail().With("invalid topo policy, it should be validated: %w", err) } - } - - topoPolicy, err := policy.NewTopologyPolicy[*runtime.TiFlash](topos) - if err != nil { - return task.Fail().With("invalid topo policy, it should be validated: %w", err) - } - - for _, tiflash := range rtx.Peers { - topoPolicy.Add(runtime.FromTiFlash(tiflash)) - } - - wait, err := updater.New[runtime.TiFlashTuple](). - WithInstances(runtime.FromTiFlashSlice(rtx.Peers)...). - WithDesired(desired). - WithClient(t.Client). - WithMaxSurge(0). - WithMaxUnavailable(1). - WithRevision(rtx.UpdateRevision). - WithNewFactory(TiFlashNewer(rtx.TiFlashGroup, rtx.UpdateRevision)). - WithAddHooks(topoPolicy). - WithUpdateHooks( - policy.KeepName[*runtime.TiFlash](), - policy.KeepTopology[*runtime.TiFlash](), - ). - WithDelHooks(topoPolicy). - WithScaleInPreferPolicy( - topoPolicy, - ). - Build(). - Do(ctx) - if err != nil { - return task.Fail().With("cannot update instances: %w", err) - } - if wait { - return task.Complete().With("wait for all instances ready") - } - return task.Complete().With("all instances are synced") + + updateRevision, _, _ := state.Revision() + + wait, err := updater.New[runtime.TiFlashTuple](). + WithInstances(fs...). + WithDesired(int(state.Group().Replicas())). + WithClient(c). + WithMaxSurge(0). + WithMaxUnavailable(1). + WithRevision(updateRevision). + WithNewFactory(TiFlashNewer(fg, updateRevision)). + WithAddHooks(topoPolicy). + WithDelHooks(topoPolicy). + WithScaleInPreferPolicy( + topoPolicy, + ). + Build(). 
+ Do(ctx) + if err != nil { + return task.Fail().With("cannot update instances: %w", err) + } + if wait { + return task.Wait().With("wait for all instances ready") + } + return task.Complete().With("all instances are synced") + }) } func needVersionUpgrade(flashg *v1alpha1.TiFlashGroup) bool { diff --git a/pkg/controllers/tikvgroup/builder.go b/pkg/controllers/tikvgroup/builder.go index 3ea6394cb7..015f78e506 100644 --- a/pkg/controllers/tikvgroup/builder.go +++ b/pkg/controllers/tikvgroup/builder.go @@ -45,9 +45,11 @@ func (r *Reconciler) NewRunner(state *tasks.ReconcileContext, reporter task.Task common.CondClusterIsSuspending(state), common.TaskGroupStatusSuspend[runtime.TiKVGroupTuple](state, r.Client), ), + + common.TaskRevision(state, r.Client), tasks.TaskService(state, r.Client), tasks.TaskUpdater(state, r.Client), - tasks.TaskStatus(state, r.Client), + common.TaskGroupStatus[runtime.TiKVGroupTuple](state, r.Client), ) return runner diff --git a/pkg/controllers/tikvgroup/tasks/ctx.go b/pkg/controllers/tikvgroup/tasks/ctx.go index d3eea8d278..eb9c98a8d9 100644 --- a/pkg/controllers/tikvgroup/tasks/ctx.go +++ b/pkg/controllers/tikvgroup/tasks/ctx.go @@ -16,8 +16,4 @@ package tasks type ReconcileContext struct { State - - UpdateRevision string - CurrentRevision string - CollisionCount int32 } diff --git a/pkg/controllers/tikvgroup/tasks/state.go b/pkg/controllers/tikvgroup/tasks/state.go index 612d201ea1..1a17b8ed2f 100644 --- a/pkg/controllers/tikvgroup/tasks/state.go +++ b/pkg/controllers/tikvgroup/tasks/state.go @@ -18,6 +18,7 @@ import ( "k8s.io/apimachinery/pkg/types" "github.com/pingcap/tidb-operator/apis/core/v1alpha1" + "github.com/pingcap/tidb-operator/pkg/client" "github.com/pingcap/tidb-operator/pkg/controllers/common" "github.com/pingcap/tidb-operator/pkg/runtime" ) @@ -28,16 +29,22 @@ type state struct { cluster *v1alpha1.Cluster kvg *v1alpha1.TiKVGroup kvs []*v1alpha1.TiKV + + updateRevision string + currentRevision string + 
collisionCount int32 } type State interface { common.TiKVGroupStateInitializer common.ClusterStateInitializer common.TiKVSliceStateInitializer + common.RevisionStateInitializer common.TiKVGroupState common.ClusterState common.TiKVSliceState + common.RevisionState common.GroupState[*runtime.TiKVGroup] common.InstanceSliceState[*runtime.TiKV] @@ -80,7 +87,7 @@ func (s *state) TiKVGroupInitializer() common.TiKVGroupInitializer { func (s *state) ClusterInitializer() common.ClusterInitializer { return common.NewResource(func(cluster *v1alpha1.Cluster) { s.cluster = cluster }). WithNamespace(common.Namespace(s.key.Namespace)). - WithName(common.NameFunc(func() string { + WithName(common.Lazy[string](func() string { return s.kvg.Spec.Cluster.Name })). Initializer() @@ -89,13 +96,40 @@ func (s *state) ClusterInitializer() common.ClusterInitializer { func (s *state) TiKVSliceInitializer() common.TiKVSliceInitializer { return common.NewResourceSlice(func(kvs []*v1alpha1.TiKV) { s.kvs = kvs }). WithNamespace(common.Namespace(s.key.Namespace)). - WithLabels(common.LabelsFunc(func() map[string]string { - return map[string]string{ - v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, - v1alpha1.LabelKeyCluster: s.cluster.Name, - v1alpha1.LabelKeyGroup: s.kvg.Name, - } + WithLabels(s.Labels()). + Initializer() +} + +func (s *state) RevisionInitializer() common.RevisionInitializer { + return common.NewRevision(common.RevisionSetterFunc(func(update, current string, collisionCount int32) { + s.updateRevision = update + s.currentRevision = current + s.collisionCount = collisionCount + })). + WithCurrentRevision(common.Lazy[string](func() string { + return s.kvg.Status.CurrentRevision + })). + WithCollisionCount(common.Lazy[*int32](func() *int32 { + return s.kvg.Status.CollisionCount })). + WithParent(common.Lazy[client.Object](func() client.Object { + return s.kvg + })). + WithLabels(s.Labels()). 
Initializer() } + +func (s *state) Revision() (update, current string, collisionCount int32) { + return s.updateRevision, s.currentRevision, s.collisionCount +} + +func (s *state) Labels() common.LabelsOption { + return common.Lazy[map[string]string](func() map[string]string { + return map[string]string{ + v1alpha1.LabelKeyManagedBy: v1alpha1.LabelValManagedByOperator, + v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentTiKV, + v1alpha1.LabelKeyCluster: s.cluster.Name, + v1alpha1.LabelKeyGroup: s.kvg.Name, + } + }) +} diff --git a/pkg/controllers/tikvgroup/tasks/status.go b/pkg/controllers/tikvgroup/tasks/status.go deleted file mode 100644 index 0fa1c68a4b..0000000000 --- a/pkg/controllers/tikvgroup/tasks/status.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tasks - -import ( - "context" - - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/task/v3" -) - -func TaskStatus(state *ReconcileContext, c client.Client) task.Task { - return task.NameTaskFunc("Status", func(ctx context.Context) task.Result { - kvg := state.TiKVGroup() - needUpdate := meta.SetStatusCondition(&kvg.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: kvg.Generation, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - - replicas, readyReplicas, updateReplicas, currentReplicas := calcReplicas(state.TiKVSlice(), state.CurrentRevision, state.UpdateRevision) - - // all instances are updated - if updateReplicas == replicas { - // update current revision - state.CurrentRevision = state.UpdateRevision - // update status of kvg version - // TODO(liubo02): version of a group is hard to understand - // We need to change it to a more meaningful field - needUpdate = SetIfChanged(&kvg.Status.Version, kvg.Spec.Version) || needUpdate - } - - needUpdate = SetIfChanged(&kvg.Status.ObservedGeneration, kvg.Generation) || needUpdate - needUpdate = SetIfChanged(&kvg.Status.Replicas, replicas) || needUpdate - needUpdate = SetIfChanged(&kvg.Status.ReadyReplicas, readyReplicas) || needUpdate - needUpdate = SetIfChanged(&kvg.Status.UpdatedReplicas, updateReplicas) || needUpdate - needUpdate = SetIfChanged(&kvg.Status.CurrentReplicas, currentReplicas) || needUpdate - needUpdate = SetIfChanged(&kvg.Status.UpdateRevision, state.UpdateRevision) || needUpdate - needUpdate = SetIfChanged(&kvg.Status.CurrentRevision, state.CurrentRevision) || needUpdate - needUpdate = NewAndSetIfChanged(&kvg.Status.CollisionCount, state.CollisionCount) || needUpdate - - if needUpdate { - if err 
:= c.Status().Update(ctx, kvg); err != nil { - return task.Fail().With("cannot update status: %w", err) - } - } - - return task.Complete().With("status is synced") - }) -} - -func calcReplicas(kvs []*v1alpha1.TiKV, currentRevision, updateRevision string) ( - replicas, - readyReplicas, - updateReplicas, - currentReplicas int32, -) { - for _, peer := range kvs { - replicas++ - if peer.IsHealthy() { - readyReplicas++ - } - if peer.CurrentRevision() == currentRevision { - currentReplicas++ - } - if peer.CurrentRevision() == updateRevision { - updateReplicas++ - } - } - - return -} - -func NewAndSetIfChanged[T comparable](dst **T, src T) bool { - if *dst == nil { - zero := new(T) - if *zero == src { - return false - } - *dst = zero - } - return SetIfChanged(*dst, src) -} - -func SetIfChanged[T comparable](dst *T, src T) bool { - if *dst != src { - *dst = src - return true - } - - return false -} diff --git a/pkg/controllers/tikvgroup/tasks/status_test.go b/pkg/controllers/tikvgroup/tasks/status_test.go deleted file mode 100644 index 2fd774e465..0000000000 --- a/pkg/controllers/tikvgroup/tasks/status_test.go +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package tasks - -import ( - "context" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/utils/ptr" - - "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" - "github.com/pingcap/tidb-operator/pkg/utils/fake" - "github.com/pingcap/tidb-operator/pkg/utils/task/v3" -) - -func TestTaskStatus(t *testing.T) { - cases := []struct { - desc string - state *ReconcileContext - unexpectedErr bool - - expectedStatus task.Status - expectedObj *v1alpha1.TiKVGroup - }{ - { - desc: "no tikvs", - state: &ReconcileContext{ - State: &state{ - kvg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3)), - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - CollisionCount: 3, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3), func(obj *v1alpha1.TiKVGroup) *v1alpha1.TiKVGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 0 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = ptr.To[int32](3) - return obj - }), - }, - { - desc: "all tikvs are outdated and healthy", - state: &ReconcileContext{ - State: &state{ - kvg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3)), - kvs: []*v1alpha1.TiKV{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiKV) *v1alpha1.TiKV { - obj.Status.CurrentRevision = oldRevision - obj.Status.Conditions = append(obj.Status.Conditions, 
metav1.Condition{ - Type: v1alpha1.TiKVCondHealth, - Status: metav1.ConditionTrue, - }) - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3), func(obj *v1alpha1.TiKVGroup) *v1alpha1.TiKVGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 1 - obj.Status.UpdatedReplicas = 0 - obj.Status.CurrentReplicas = 1 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = oldRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all tikvs are updated and healthy", - state: &ReconcileContext{ - State: &state{ - kvg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3)), - kvs: []*v1alpha1.TiKV{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiKV) *v1alpha1.TiKV { - obj.Status.CurrentRevision = newRevision - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.TiKVCondHealth, - Status: metav1.ConditionTrue, - }) - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3), func(obj *v1alpha1.TiKVGroup) *v1alpha1.TiKVGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 1 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas 
= 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "all tikvs are updated but not healthy", - state: &ReconcileContext{ - State: &state{ - kvg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3)), - kvs: []*v1alpha1.TiKV{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiKV) *v1alpha1.TiKV { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - - expectedStatus: task.SComplete, - expectedObj: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3), func(obj *v1alpha1.TiKVGroup) *v1alpha1.TiKVGroup { - obj.Status.Conditions = append(obj.Status.Conditions, metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - }, - { - desc: "status changed but cannot call api", - state: &ReconcileContext{ - State: &state{ - kvg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3)), - kvs: []*v1alpha1.TiKV{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiKV) *v1alpha1.TiKV { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - unexpectedErr: true, - - expectedStatus: task.SFail, - }, - { - desc: "status is not changed and cannot call api", - state: &ReconcileContext{ - State: &state{ - kvg: fake.FakeObj("aaa", fake.SetGeneration[v1alpha1.TiKVGroup](3), func(obj *v1alpha1.TiKVGroup) *v1alpha1.TiKVGroup { - obj.Status.Conditions = append(obj.Status.Conditions, 
metav1.Condition{ - Type: v1alpha1.CondSuspended, - Status: metav1.ConditionFalse, - ObservedGeneration: 3, - Reason: v1alpha1.ReasonUnsuspended, - Message: "group is not suspended", - }) - obj.Status.ObservedGeneration = 3 - obj.Status.Replicas = 1 - obj.Status.ReadyReplicas = 0 - obj.Status.UpdatedReplicas = 1 - obj.Status.CurrentReplicas = 0 - obj.Status.UpdateRevision = newRevision - obj.Status.CurrentRevision = newRevision - obj.Status.CollisionCount = nil - return obj - }), - kvs: []*v1alpha1.TiKV{ - fake.FakeObj("aaa", func(obj *v1alpha1.TiKV) *v1alpha1.TiKV { - obj.Status.CurrentRevision = newRevision - return obj - }), - }, - }, - UpdateRevision: newRevision, - CurrentRevision: oldRevision, - }, - unexpectedErr: true, - - expectedStatus: task.SComplete, - }, - } - - for i := range cases { - c := &cases[i] - t.Run(c.desc, func(tt *testing.T) { - tt.Parallel() - - fc := client.NewFakeClient(c.state.TiKVGroup()) - if c.unexpectedErr { - fc.WithError("*", "*", errors.NewInternalError(fmt.Errorf("fake internal err"))) - } - - ctx := context.Background() - res, done := task.RunTask(ctx, TaskStatus(c.state, fc)) - assert.Equal(tt, c.expectedStatus, res.Status(), c.desc) - assert.False(tt, done, c.desc) - - // no need to check update result - if c.unexpectedErr { - return - } - - kvg := &v1alpha1.TiKVGroup{} - require.NoError(tt, fc.Get(ctx, client.ObjectKey{Name: "aaa"}, kvg), c.desc) - for i := range kvg.Status.Conditions { - cond := &kvg.Status.Conditions[i] - cond.LastTransitionTime = metav1.Time{} - } - assert.Equal(tt, c.expectedObj, kvg, c.desc) - }) - } -} diff --git a/pkg/controllers/tikvgroup/tasks/updater.go b/pkg/controllers/tikvgroup/tasks/updater.go index b36e2a706b..0d465670ad 100644 --- a/pkg/controllers/tikvgroup/tasks/updater.go +++ b/pkg/controllers/tikvgroup/tasks/updater.go @@ -21,7 +21,6 @@ import ( "github.com/go-logr/logr" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" 
"github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/action" @@ -29,11 +28,9 @@ import ( "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/updater" "github.com/pingcap/tidb-operator/pkg/updater/policy" - "github.com/pingcap/tidb-operator/pkg/utils/k8s/revision" maputil "github.com/pingcap/tidb-operator/pkg/utils/map" "github.com/pingcap/tidb-operator/pkg/utils/random" "github.com/pingcap/tidb-operator/pkg/utils/task/v3" - "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" ) const ( @@ -44,37 +41,8 @@ const ( func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { return task.NameTaskFunc("Updater", func(ctx context.Context) task.Result { logger := logr.FromContextOrDiscard(ctx) - historyCli := history.NewClient(c) kvg := state.TiKVGroup() - selector := labels.SelectorFromSet(labels.Set{ - // TODO(liubo02): add label of managed by operator ? - v1alpha1.LabelKeyCluster: kvg.Spec.Cluster.Name, - v1alpha1.LabelKeyComponent: v1alpha1.LabelValComponentPD, - v1alpha1.LabelKeyGroup: kvg.Name, - }) - - revisions, err := historyCli.ListControllerRevisions(kvg, selector) - if err != nil { - return task.Fail().With("cannot list controller revisions: %w", err) - } - history.SortControllerRevisions(revisions) - - // Get the current(old) and update(new) ControllerRevisions. 
- currentRevision, updateRevision, collisionCount, err := revision.GetCurrentAndUpdate(kvg, revisions, historyCli, kvg) - if err != nil { - return task.Fail().With("cannot get revisions: %w", err) - } - state.CurrentRevision = currentRevision.Name - state.UpdateRevision = updateRevision.Name - state.CollisionCount = collisionCount - - // TODO(liubo02): add a controller to do it - if err = revision.TruncateHistory(historyCli, state.TiKVSlice(), revisions, - currentRevision, updateRevision, state.Cluster().Spec.RevisionHistoryLimit); err != nil { - logger.Error(err, "failed to truncate history") - } - checker := action.NewUpgradeChecker(c, state.Cluster(), logger) if needVersionUpgrade(kvg) && !checker.CanUpgrade(ctx, kvg) { @@ -82,11 +50,6 @@ func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { return task.Retry(defaultUpdateWaitTime).With("wait until preconditions of upgrading is met") } - desired := 1 - if kvg.Spec.Replicas != nil { - desired = int(*kvg.Spec.Replicas) - } - var topos []v1alpha1.ScheduleTopology for _, p := range kvg.Spec.SchedulePolicies { switch p.Type { @@ -97,28 +60,24 @@ func TaskUpdater(state *ReconcileContext, c client.Client) task.Task { } } - topoPolicy, err := policy.NewTopologyPolicy[*runtime.TiKV](topos) + kvs := state.Slice() + + topoPolicy, err := policy.NewTopologyPolicy(topos, kvs...) if err != nil { return task.Fail().With("invalid topo policy, it should be validated: %w", err) } - for _, tikv := range state.TiKVSlice() { - topoPolicy.Add(runtime.FromTiKV(tikv)) - } + updateRevision, _, _ := state.Revision() wait, err := updater.New[runtime.TiKVTuple](). - WithInstances(runtime.FromTiKVSlice(state.TiKVSlice())...). - WithDesired(desired). + WithInstances(kvs...). + WithDesired(int(state.Group().Replicas())). WithClient(c). WithMaxSurge(0). WithMaxUnavailable(1). - WithRevision(state.UpdateRevision). - WithNewFactory(TiKVNewer(kvg, state.UpdateRevision)). + WithRevision(updateRevision). 
+ WithNewFactory(TiKVNewer(kvg, updateRevision)). WithAddHooks(topoPolicy). - WithUpdateHooks( - policy.KeepName[*runtime.TiKV](), - policy.KeepTopology[*runtime.TiKV](), - ). WithDelHooks(topoPolicy). WithScaleInPreferPolicy( topoPolicy, diff --git a/pkg/controllers/tikvgroup/tasks/updater_test.go b/pkg/controllers/tikvgroup/tasks/updater_test.go index 6993a705c0..c2b8a23198 100644 --- a/pkg/controllers/tikvgroup/tasks/updater_test.go +++ b/pkg/controllers/tikvgroup/tasks/updater_test.go @@ -33,8 +33,8 @@ import ( ) const ( - oldRevision = "aaa-c9f48df69" - newRevision = "aaa-6cd5c46fb8" + oldRevision = "old" + newRevision = "new" ) func TestTaskUpdater(t *testing.T) { @@ -43,10 +43,8 @@ func TestTaskUpdater(t *testing.T) { state *ReconcileContext unexpectedErr bool - expectedStatus task.Status - expectedUpdateRevision string - expectedCurrentRevision string - expectedTiKVNum int + expectedStatus task.Status + expectedTiKVNum int }{ { desc: "no kvs with 1 replicas", @@ -57,10 +55,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - expectedCurrentRevision: oldRevision, - expectedTiKVNum: 1, + expectedStatus: task.SComplete, + expectedTiKVNum: 1, }, { desc: "version upgrade check", @@ -77,9 +73,7 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SRetry, - expectedUpdateRevision: "aaa-6d4b685647", - expectedCurrentRevision: "aaa-6d4b685647", + expectedStatus: task.SRetry, }, { desc: "1 updated tikv with 1 replicas", @@ -88,15 +82,14 @@ func TestTaskUpdater(t *testing.T) { kvg: fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), cluster: fake.FakeObj[v1alpha1.Cluster]("cluster"), kvs: []*v1alpha1.TiKV{ - fakeAvailableTiKV("aaa-xxx", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), oldRevision), + fakeAvailableTiKV("aaa-xxx", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: oldRevision, - 
expectedCurrentRevision: oldRevision, - expectedTiKVNum: 1, + expectedStatus: task.SComplete, + expectedTiKVNum: 1, }, { desc: "no kvs with 2 replicas", @@ -110,10 +103,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiKVNum: 2, + expectedStatus: task.SComplete, + expectedTiKVNum: 2, }, { desc: "no kvs with 2 replicas and call api failed", @@ -128,9 +119,7 @@ func TestTaskUpdater(t *testing.T) { }, unexpectedErr: true, - expectedStatus: task.SFail, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, + expectedStatus: task.SFail, }, { desc: "1 outdated tikv with 2 replicas", @@ -144,13 +133,12 @@ func TestTaskUpdater(t *testing.T) { kvs: []*v1alpha1.TiKV{ fakeAvailableTiKV("aaa-xxx", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), oldRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SWait, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiKVNum: 2, + expectedStatus: task.SWait, + expectedTiKVNum: 2, }, { desc: "1 outdated tikv with 2 replicas but cannot call api, will fail", @@ -164,13 +152,12 @@ func TestTaskUpdater(t *testing.T) { kvs: []*v1alpha1.TiKV{ fakeAvailableTiKV("aaa-xxx", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), oldRevision), }, + updateRevision: newRevision, }, }, unexpectedErr: true, - expectedStatus: task.SFail, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, + expectedStatus: task.SFail, }, { desc: "2 updated tikv with 2 replicas", @@ -185,13 +172,12 @@ func TestTaskUpdater(t *testing.T) { fakeAvailableTiKV("aaa-xxx", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), newRevision), fakeAvailableTiKV("aaa-yyy", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - 
expectedTiKVNum: 2, + expectedStatus: task.SComplete, + expectedTiKVNum: 2, }, { desc: "2 updated tikv with 2 replicas and cannot call api, can complete", @@ -206,14 +192,13 @@ func TestTaskUpdater(t *testing.T) { fakeAvailableTiKV("aaa-xxx", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), newRevision), fakeAvailableTiKV("aaa-yyy", fake.FakeObj[v1alpha1.TiKVGroup]("aaa"), newRevision), }, + updateRevision: newRevision, }, }, unexpectedErr: true, - expectedStatus: task.SComplete, - expectedUpdateRevision: newRevision, - expectedCurrentRevision: newRevision, - expectedTiKVNum: 2, + expectedStatus: task.SComplete, + expectedTiKVNum: 2, }, { // NOTE: it not really check whether the policy is worked @@ -251,10 +236,8 @@ func TestTaskUpdater(t *testing.T) { }, }, - expectedStatus: task.SComplete, - expectedUpdateRevision: "aaa-5cd4fb5cb4", - expectedCurrentRevision: "aaa-5cd4fb5cb4", - expectedTiKVNum: 3, + expectedStatus: task.SComplete, + expectedTiKVNum: 3, }, } @@ -278,8 +261,6 @@ func TestTaskUpdater(t *testing.T) { assert.Equal(tt, c.expectedStatus.String(), res.Status().String(), c.desc) assert.False(tt, done, c.desc) - assert.Equal(tt, c.expectedUpdateRevision, c.state.UpdateRevision, c.desc) - assert.Equal(tt, c.expectedCurrentRevision, c.state.CurrentRevision, c.desc) if !c.unexpectedErr { kvs := v1alpha1.TiKVList{} require.NoError(tt, fc.List(ctx, &kvs), c.desc) diff --git a/pkg/runtime/group.go b/pkg/runtime/group.go index 7ad86ac8a2..7a1664dacb 100644 --- a/pkg/runtime/group.go +++ b/pkg/runtime/group.go @@ -19,8 +19,16 @@ import "github.com/pingcap/tidb-operator/pkg/client" type Group interface { Object - SetReplicas(replicas *int32) - Replicas() *int32 + SetReplicas(replicas int32) + Replicas() int32 + Version() string + + SetStatusVersion(version string) + StatusVersion() string + SetStatusReplicas(replicas, ready, update, current int32) + StatusReplicas() (replicas, ready, update, current int32) + SetStatusRevision(update, current string, collisionCount 
*int32) + StatusRevision() (update, current string, collisionCount *int32) } type GroupT[T GroupSet] interface { diff --git a/pkg/runtime/instance.go b/pkg/runtime/instance.go index 5995a6afa1..7b0549ea6d 100644 --- a/pkg/runtime/instance.go +++ b/pkg/runtime/instance.go @@ -31,6 +31,9 @@ type Instance interface { // NOTE: It does not mean the instance is updated to the newest revision // TODO: may be change a more meaningful name? IsUpToDate() bool + + CurrentRevision() string + SetCurrentRevision(rev string) } type InstanceT[T InstanceSet] interface { diff --git a/pkg/runtime/pd.go b/pkg/runtime/pd.go index 99a42cbb55..a008eb21ff 100644 --- a/pkg/runtime/pd.go +++ b/pkg/runtime/pd.go @@ -126,6 +126,14 @@ func (pd *PD) GetUpdateRevision() string { return pd.Labels[v1alpha1.LabelKeyInstanceRevisionHash] } +func (pd *PD) CurrentRevision() string { + return pd.Status.CurrentRevision +} + +func (pd *PD) SetCurrentRevision(rev string) { + pd.Status.CurrentRevision = rev +} + func (pd *PD) IsHealthy() bool { return meta.IsStatusConditionTrue(pd.Status.Conditions, v1alpha1.PDCondHealth) } @@ -168,12 +176,19 @@ func (pdg *PDGroup) To() *v1alpha1.PDGroup { return ToPDGroup(pdg) } -func (pdg *PDGroup) SetReplicas(replicas *int32) { - pdg.Spec.Replicas = replicas +func (pdg *PDGroup) SetReplicas(replicas int32) { + pdg.Spec.Replicas = &replicas +} + +func (pdg *PDGroup) Replicas() int32 { + if pdg.Spec.Replicas == nil { + return 1 + } + return *pdg.Spec.Replicas } -func (pdg *PDGroup) Replicas() *int32 { - return pdg.Spec.Replicas +func (pdg *PDGroup) Version() string { + return pdg.Spec.Version } func (pdg *PDGroup) Cluster() string { @@ -199,3 +214,37 @@ func (pdg *PDGroup) ObservedGeneration() int64 { func (pdg *PDGroup) SetObservedGeneration(g int64) { pdg.Status.ObservedGeneration = g } + +func (pdg *PDGroup) SetStatusVersion(version string) { + pdg.Status.Version = version +} + +func (pdg *PDGroup) StatusVersion() string { + return pdg.Status.Version +} + +func (pdg 
*PDGroup) SetStatusReplicas(replicas, ready, update, current int32) { + pdg.Status.Replicas = replicas + pdg.Status.ReadyReplicas = ready + pdg.Status.UpdatedReplicas = update + pdg.Status.CurrentReplicas = current +} + +func (pdg *PDGroup) StatusReplicas() (replicas, ready, update, current int32) { + return pdg.Status.Replicas, + pdg.Status.ReadyReplicas, + pdg.Status.UpdatedReplicas, + pdg.Status.CurrentReplicas +} + +func (pdg *PDGroup) SetStatusRevision(update, current string, collisionCount *int32) { + pdg.Status.UpdateRevision = update + pdg.Status.CurrentRevision = current + pdg.Status.CollisionCount = collisionCount +} + +func (pdg *PDGroup) StatusRevision() (update, current string, collisionCount *int32) { + return pdg.Status.UpdateRevision, + pdg.Status.CurrentRevision, + pdg.Status.CollisionCount +} diff --git a/pkg/runtime/tidb.go b/pkg/runtime/tidb.go index b36f458cca..4d5853656d 100644 --- a/pkg/runtime/tidb.go +++ b/pkg/runtime/tidb.go @@ -126,6 +126,14 @@ func (db *TiDB) GetUpdateRevision() string { return db.Labels[v1alpha1.LabelKeyInstanceRevisionHash] } +func (db *TiDB) CurrentRevision() string { + return db.Status.CurrentRevision +} + +func (db *TiDB) SetCurrentRevision(rev string) { + db.Status.CurrentRevision = rev +} + func (db *TiDB) IsHealthy() bool { return meta.IsStatusConditionTrue(db.Status.Conditions, v1alpha1.TiKVCondHealth) } @@ -168,12 +176,19 @@ func (dbg *TiDBGroup) To() *v1alpha1.TiDBGroup { return ToTiDBGroup(dbg) } -func (dbg *TiDBGroup) SetReplicas(replicas *int32) { - dbg.Spec.Replicas = replicas +func (dbg *TiDBGroup) SetReplicas(replicas int32) { + dbg.Spec.Replicas = &replicas +} + +func (dbg *TiDBGroup) Replicas() int32 { + if dbg.Spec.Replicas == nil { + return 1 + } + return *dbg.Spec.Replicas } -func (dbg *TiDBGroup) Replicas() *int32 { - return dbg.Spec.Replicas +func (dbg *TiDBGroup) Version() string { + return dbg.Spec.Version } func (dbg *TiDBGroup) Cluster() string { @@ -199,3 +214,37 @@ func (dbg *TiDBGroup) 
ObservedGeneration() int64 { func (dbg *TiDBGroup) SetObservedGeneration(g int64) { dbg.Status.ObservedGeneration = g } + +func (dbg *TiDBGroup) SetStatusVersion(version string) { + dbg.Status.Version = version +} + +func (dbg *TiDBGroup) StatusVersion() string { + return dbg.Status.Version +} + +func (dbg *TiDBGroup) SetStatusReplicas(replicas, ready, update, current int32) { + dbg.Status.Replicas = replicas + dbg.Status.ReadyReplicas = ready + dbg.Status.UpdatedReplicas = update + dbg.Status.CurrentReplicas = current +} + +func (dbg *TiDBGroup) StatusReplicas() (replicas, ready, update, current int32) { + return dbg.Status.Replicas, + dbg.Status.ReadyReplicas, + dbg.Status.UpdatedReplicas, + dbg.Status.CurrentReplicas +} + +func (dbg *TiDBGroup) SetStatusRevision(update, current string, collisionCount *int32) { + dbg.Status.UpdateRevision = update + dbg.Status.CurrentRevision = current + dbg.Status.CollisionCount = collisionCount +} + +func (dbg *TiDBGroup) StatusRevision() (update, current string, collisionCount *int32) { + return dbg.Status.UpdateRevision, + dbg.Status.CurrentRevision, + dbg.Status.CollisionCount +} diff --git a/pkg/runtime/tiflash.go b/pkg/runtime/tiflash.go index 14db7b76dc..7ba0968d5b 100644 --- a/pkg/runtime/tiflash.go +++ b/pkg/runtime/tiflash.go @@ -126,6 +126,14 @@ func (f *TiFlash) GetUpdateRevision() string { return f.Labels[v1alpha1.LabelKeyInstanceRevisionHash] } +func (f *TiFlash) CurrentRevision() string { + return f.Status.CurrentRevision +} + +func (f *TiFlash) SetCurrentRevision(rev string) { + f.Status.CurrentRevision = rev +} + func (f *TiFlash) IsHealthy() bool { return meta.IsStatusConditionTrue(f.Status.Conditions, v1alpha1.TiFlashCondHealth) } @@ -168,12 +176,19 @@ func (fg *TiFlashGroup) To() *v1alpha1.TiFlashGroup { return ToTiFlashGroup(fg) } -func (fg *TiFlashGroup) SetReplicas(replicas *int32) { - fg.Spec.Replicas = replicas +func (fg *TiFlashGroup) SetReplicas(replicas int32) { + fg.Spec.Replicas = &replicas +} + 
+func (fg *TiFlashGroup) Replicas() int32 { + if fg.Spec.Replicas == nil { + return 1 + } + return *fg.Spec.Replicas } -func (fg *TiFlashGroup) Replicas() *int32 { - return fg.Spec.Replicas +func (fg *TiFlashGroup) Version() string { + return fg.Spec.Version } func (fg *TiFlashGroup) Cluster() string { @@ -199,3 +214,37 @@ func (fg *TiFlashGroup) ObservedGeneration() int64 { func (fg *TiFlashGroup) SetObservedGeneration(g int64) { fg.Status.ObservedGeneration = g } + +func (fg *TiFlashGroup) SetStatusVersion(version string) { + fg.Status.Version = version +} + +func (fg *TiFlashGroup) StatusVersion() string { + return fg.Status.Version +} + +func (fg *TiFlashGroup) SetStatusReplicas(replicas, ready, update, current int32) { + fg.Status.Replicas = replicas + fg.Status.ReadyReplicas = ready + fg.Status.UpdatedReplicas = update + fg.Status.CurrentReplicas = current +} + +func (fg *TiFlashGroup) StatusReplicas() (replicas, ready, update, current int32) { + return fg.Status.Replicas, + fg.Status.ReadyReplicas, + fg.Status.UpdatedReplicas, + fg.Status.CurrentReplicas +} + +func (fg *TiFlashGroup) SetStatusRevision(update, current string, collisionCount *int32) { + fg.Status.UpdateRevision = update + fg.Status.CurrentRevision = current + fg.Status.CollisionCount = collisionCount +} + +func (fg *TiFlashGroup) StatusRevision() (update, current string, collisionCount *int32) { + return fg.Status.UpdateRevision, + fg.Status.CurrentRevision, + fg.Status.CollisionCount +} diff --git a/pkg/runtime/tikv.go b/pkg/runtime/tikv.go index c2821f4eb6..71b98a7f8b 100644 --- a/pkg/runtime/tikv.go +++ b/pkg/runtime/tikv.go @@ -126,6 +126,14 @@ func (kv *TiKV) GetUpdateRevision() string { return kv.Labels[v1alpha1.LabelKeyInstanceRevisionHash] } +func (kv *TiKV) CurrentRevision() string { + return kv.Status.CurrentRevision +} + +func (kv *TiKV) SetCurrentRevision(rev string) { + kv.Status.CurrentRevision = rev +} + func (kv *TiKV) IsHealthy() bool { return 
meta.IsStatusConditionTrue(kv.Status.Conditions, v1alpha1.TiKVCondHealth) } @@ -168,12 +176,19 @@ func (kvg *TiKVGroup) To() *v1alpha1.TiKVGroup { return ToTiKVGroup(kvg) } -func (kvg *TiKVGroup) SetReplicas(replicas *int32) { - kvg.Spec.Replicas = replicas +func (kvg *TiKVGroup) SetReplicas(replicas int32) { + kvg.Spec.Replicas = &replicas +} + +func (kvg *TiKVGroup) Replicas() int32 { + if kvg.Spec.Replicas == nil { + return 1 + } + return *kvg.Spec.Replicas } -func (kvg *TiKVGroup) Replicas() *int32 { - return kvg.Spec.Replicas +func (kvg *TiKVGroup) Version() string { + return kvg.Spec.Version } func (kvg *TiKVGroup) Cluster() string { @@ -199,3 +214,37 @@ func (kvg *TiKVGroup) ObservedGeneration() int64 { func (kvg *TiKVGroup) SetObservedGeneration(g int64) { kvg.Status.ObservedGeneration = g } + +func (kvg *TiKVGroup) SetStatusVersion(version string) { + kvg.Status.Version = version +} + +func (kvg *TiKVGroup) StatusVersion() string { + return kvg.Status.Version +} + +func (kvg *TiKVGroup) SetStatusReplicas(replicas, ready, update, current int32) { + kvg.Status.Replicas = replicas + kvg.Status.ReadyReplicas = ready + kvg.Status.UpdatedReplicas = update + kvg.Status.CurrentReplicas = current +} + +func (kvg *TiKVGroup) StatusReplicas() (replicas, ready, update, current int32) { + return kvg.Status.Replicas, + kvg.Status.ReadyReplicas, + kvg.Status.UpdatedReplicas, + kvg.Status.CurrentReplicas +} + +func (kvg *TiKVGroup) SetStatusRevision(update, current string, collisionCount *int32) { + kvg.Status.UpdateRevision = update + kvg.Status.CurrentRevision = current + kvg.Status.CollisionCount = collisionCount +} + +func (kvg *TiKVGroup) StatusRevision() (update, current string, collisionCount *int32) { + return kvg.Status.UpdateRevision, + kvg.Status.CurrentRevision, + kvg.Status.CollisionCount +} diff --git a/pkg/updater/actor.go b/pkg/updater/actor.go index ec60a141f0..c04a128dac 100644 --- a/pkg/updater/actor.go +++ b/pkg/updater/actor.go @@ -32,38 +32,6 @@ func 
(f NewFunc[R]) New() R { return f() } -// e.g. for some write once fields(name, topology, etc.) -type UpdateHook[R runtime.Instance] interface { - Update(update, outdated R) R -} - -// e.g. for topology scheduling -type AddHook[R runtime.Instance] interface { - Add(update R) R -} - -type DelHook[R runtime.Instance] interface { - Delete(name string) -} - -type UpdateHookFunc[R runtime.Instance] func(update, outdated R) R - -func (f UpdateHookFunc[R]) Update(update, outdated R) R { - return f(update, outdated) -} - -type AddHookFunc[PT runtime.Instance] func(update PT) PT - -func (f AddHookFunc[PT]) Add(update PT) PT { - return f(update) -} - -type DelHookFunc[PT runtime.Instance] func(name string) - -func (f DelHookFunc[PT]) Delete(name string) { - f(name) -} - type actor[T runtime.Tuple[O, R], O client.Object, R runtime.Instance] struct { c client.Client diff --git a/pkg/updater/builder.go b/pkg/updater/builder.go index 13c508300e..f5aa74c0d7 100644 --- a/pkg/updater/builder.go +++ b/pkg/updater/builder.go @@ -67,7 +67,7 @@ func (b *builder[T, O, R]) Build() Executor { outdated: NewState(outdated), addHooks: b.addHooks, - updateHooks: b.updateHooks, + updateHooks: append(b.updateHooks, KeepName[R](), KeepTopology[R]()), delHooks: b.delHooks, scaleInSelector: NewSelector(b.scaleInPreferPolicies...), diff --git a/pkg/updater/hook.go b/pkg/updater/hook.go new file mode 100644 index 0000000000..d51def8070 --- /dev/null +++ b/pkg/updater/hook.go @@ -0,0 +1,51 @@ +package updater + +import "github.com/pingcap/tidb-operator/pkg/runtime" + +// e.g. for some write once fields(name, topology, etc.) +type UpdateHook[R runtime.Instance] interface { + Update(update, outdated R) R +} + +// e.g. 
for topology scheduling +type AddHook[R runtime.Instance] interface { + Add(update R) R +} + +type DelHook[R runtime.Instance] interface { + Delete(name string) +} + +type UpdateHookFunc[R runtime.Instance] func(update, outdated R) R + +func (f UpdateHookFunc[R]) Update(update, outdated R) R { + return f(update, outdated) +} + +type AddHookFunc[PT runtime.Instance] func(update PT) PT + +func (f AddHookFunc[PT]) Add(update PT) PT { + return f(update) +} + +type DelHookFunc[PT runtime.Instance] func(name string) + +func (f DelHookFunc[PT]) Delete(name string) { + f(name) +} + +// Do not change name when update obj +func KeepName[R runtime.Instance]() UpdateHook[R] { + return UpdateHookFunc[R](func(update, outdated R) R { + update.SetName(outdated.GetName()) + return update + }) +} + +// Do not change topology when update obj +func KeepTopology[R runtime.Instance]() UpdateHook[R] { + return UpdateHookFunc[R](func(update, outdated R) R { + update.SetTopology(outdated.GetTopology()) + return update + }) +} diff --git a/pkg/updater/policy/keep.go b/pkg/updater/policy/keep.go deleted file mode 100644 index 32a1d6ec95..0000000000 --- a/pkg/updater/policy/keep.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2024 PingCAP, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package policy - -import ( - "github.com/pingcap/tidb-operator/pkg/runtime" - "github.com/pingcap/tidb-operator/pkg/updater" -) - -func KeepName[R runtime.Instance]() updater.UpdateHook[R] { - return updater.UpdateHookFunc[R](func(update, outdated R) R { - update.SetName(outdated.GetName()) - return update - }) -} - -func KeepTopology[R runtime.Instance]() updater.UpdateHook[R] { - return updater.UpdateHookFunc[R](func(update, outdated R) R { - update.SetTopology(outdated.GetTopology()) - return update - }) -} diff --git a/pkg/updater/policy/topology.go b/pkg/updater/policy/topology.go index ac862d189d..bf0d2522b6 100644 --- a/pkg/updater/policy/topology.go +++ b/pkg/updater/policy/topology.go @@ -31,14 +31,18 @@ type TopologyPolicy[R runtime.Instance] interface { updater.PreferPolicy[R] } -func NewTopologyPolicy[R runtime.Instance](ts []v1alpha1.ScheduleTopology) (TopologyPolicy[R], error) { +func NewTopologyPolicy[R runtime.Instance](ts []v1alpha1.ScheduleTopology, rs ...R) (TopologyPolicy[R], error) { s, err := topology.New(ts) if err != nil { return nil, err } - return &topologyPolicy[R]{ + p := &topologyPolicy[R]{ scheduler: s, - }, nil + } + for _, r := range rs { + p.Add(r) + } + return p, nil } func (p *topologyPolicy[R]) Add(update R) R { diff --git a/pkg/utils/k8s/revision/controller_revision.go b/pkg/utils/k8s/revision/controller_revision.go index faf92174cc..e3805f6ede 100644 --- a/pkg/utils/k8s/revision/controller_revision.go +++ b/pkg/utils/k8s/revision/controller_revision.go @@ -16,17 +16,19 @@ package revision import ( "bytes" + "context" "encoding/json" "fmt" appsv1 "k8s.io/api/apps/v1" - "k8s.io/apimachinery/pkg/runtime" + kuberuntime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" serializerjson "k8s.io/apimachinery/pkg/runtime/serializer/json" + "sigs.k8s.io/controller-runtime/pkg/client/apiutil" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" - 
"github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/scheme" "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/statefulset" @@ -36,7 +38,7 @@ const ( defaultRevisionHistoryLimit = 10 ) -var encoderMap = map[schema.GroupVersion]runtime.Encoder{} +var encoderMap = map[schema.GroupVersion]kuberuntime.Encoder{} // GetCurrentAndUpdate returns the current and update ControllerRevisions. It also // returns a collision count that records the number of name collisions set saw when creating @@ -44,19 +46,30 @@ var encoderMap = map[schema.GroupVersion]runtime.Encoder{} // building the ControllerRevision names for name collision avoidance. This method may create // a new revision, or modify the Revision of an existing revision if an update to ComponentGroup is detected. // This method expects that revisions is sorted when supplied. 
-func GetCurrentAndUpdate(group client.Object, revisions []*appsv1.ControllerRevision, - cli history.Interface, accessor v1alpha1.ComponentAccessor) ( +func GetCurrentAndUpdate( + _ context.Context, + group client.Object, + labels map[string]string, + revisions []*appsv1.ControllerRevision, + cli history.Interface, + currentRevisionName string, + currentCollisionCount *int32, +) ( currentRevision *appsv1.ControllerRevision, updateRevision *appsv1.ControllerRevision, collisionCount int32, err error, ) { - if accessor.CollisionCount() != nil { - collisionCount = *accessor.CollisionCount() + if currentCollisionCount != nil { + collisionCount = *currentCollisionCount } + gvk, err := apiutil.GVKForObject(group, scheme.Scheme) + if err != nil { + return nil, nil, collisionCount, fmt.Errorf("cannot get gvk of group: %w", err) + } // create a new revision from the current object - updateRevision, err = newRevision(group, accessor.GVK(), statefulset.NextRevision(revisions), &collisionCount) + updateRevision, err = newRevision(group, labels, gvk, statefulset.NextRevision(revisions), &collisionCount) if err != nil { return nil, nil, collisionCount, fmt.Errorf("failed to new a revision: %w", err) } @@ -86,7 +99,7 @@ func GetCurrentAndUpdate(group client.Object, revisions []*appsv1.ControllerRevi // attempt to find the revision that corresponds to the current revision for i := range revisions { - if revisions[i].Name == accessor.CurrentRevision() { + if revisions[i].Name == currentRevisionName { currentRevision = revisions[i] break } @@ -101,18 +114,17 @@ func GetCurrentAndUpdate(group client.Object, revisions []*appsv1.ControllerRevi } // newRevision creates a new ControllerRevision containing a patch that reapplies the target state of CR. 
-func newRevision(obj client.Object, gvk schema.GroupVersionKind, - revision int64, collisionCount *int32) (*appsv1.ControllerRevision, error) { +func newRevision(obj client.Object, labels map[string]string, gvk schema.GroupVersionKind, + revision int64, collisionCount *int32, +) (*appsv1.ControllerRevision, error) { patch, err := getPatch(obj, gvk) if err != nil { return nil, fmt.Errorf("failed to get patch: %w", err) } - cr, err := history.NewControllerRevision(obj, obj.GetLabels(), runtime.RawExtension{Raw: patch}, revision, collisionCount) + cr, err := history.NewControllerRevision(obj, labels, kuberuntime.RawExtension{Raw: patch}, revision, collisionCount) if err != nil { return nil, fmt.Errorf("failed to create a revision: %w", err) } - // Add this label so that tidb operator will watch this controller revision. - cr.Labels[v1alpha1.LabelKeyManagedBy] = v1alpha1.LabelValManagedByOperator if err = controllerutil.SetControllerReference(obj, cr, scheme.Scheme); err != nil { return nil, fmt.Errorf("failed to set controller reference: %w", err) } @@ -160,27 +172,28 @@ func getPatch(obj client.Object, gvk schema.GroupVersionKind) ([]byte, error) { // Non-live revisions are deleted, starting with the revision with the lowest Revision, // until only RevisionHistoryLimit revisions remain. // This method expects that revisions is sorted when supplied. 
-func TruncateHistory[T v1alpha1.Instance]( +func TruncateHistory[T runtime.Instance]( cli history.Interface, instances []T, revisions []*appsv1.ControllerRevision, - current *appsv1.ControllerRevision, - update *appsv1.ControllerRevision, - limit *int32) error { + updateRevision string, + currentRevision string, + limit *int32, +) error { // mark all live revisions live := make(map[string]bool, len(revisions)) - if current != nil { - live[current.Name] = true + if updateRevision != "" { + live[updateRevision] = true } - if update != nil { - live[update.Name] = true + if currentRevision != "" { + live[currentRevision] = true } for _, ins := range instances { if ins.CurrentRevision() != "" { live[ins.CurrentRevision()] = true } - if ins.UpdateRevision() != "" { - live[ins.UpdateRevision()] = true + if ins.GetUpdateRevision() != "" { + live[ins.GetUpdateRevision()] = true } } diff --git a/pkg/utils/k8s/revision/controller_revision_test.go b/pkg/utils/k8s/revision/controller_revision_test.go index 17632ddcb2..648d2ed8a6 100644 --- a/pkg/utils/k8s/revision/controller_revision_test.go +++ b/pkg/utils/k8s/revision/controller_revision_test.go @@ -15,6 +15,7 @@ package revision import ( + "context" "testing" "github.com/stretchr/testify/assert" @@ -26,6 +27,7 @@ import ( "github.com/pingcap/tidb-operator/apis/core/v1alpha1" "github.com/pingcap/tidb-operator/pkg/client" + "github.com/pingcap/tidb-operator/pkg/runtime" "github.com/pingcap/tidb-operator/pkg/utils/fake" "github.com/pingcap/tidb-operator/third_party/kubernetes/pkg/controller/history" ) @@ -174,7 +176,7 @@ func TestTruncateHistory(t *testing.T) { cli := &FakeHistoryClient{ Revisions: tt.revisions, } - err := TruncateHistory(cli, tt.instances, tt.revisions, tt.current, tt.update, tt.limit) + err := TruncateHistory(cli, runtime.FromTiDBSlice(tt.instances), tt.revisions, tt.current.Name, tt.update.Name, tt.limit) require.NoError(t, err) remainingRevisions, err := cli.ListControllerRevisions(nil, 
labels.Everything()) @@ -205,12 +207,12 @@ func TestGetCurrentAndUpdate(t *testing.T) { Replicas: ptr.To[int32](1), }, } - rev1, err := newRevision(pdg, pdg.GVK(), 1, ptr.To[int32](0)) + rev1, err := newRevision(pdg, nil, pdg.GVK(), 1, ptr.To[int32](0)) require.NoError(t, err) pdg2 := pdg.DeepCopy() pdg2.Spec.Replicas = ptr.To[int32](2) - rev2, err := newRevision(pdg2, pdg2.GVK(), 2, ptr.To[int32](0)) + rev2, err := newRevision(pdg2, nil, pdg2.GVK(), 2, ptr.To[int32](0)) require.NoError(t, err) tests := []struct { @@ -259,7 +261,7 @@ func TestGetCurrentAndUpdate(t *testing.T) { return revision, nil }, } - curRev, updRev, _, err := GetCurrentAndUpdate(tt.group, tt.revisions, cli, tt.accessor) + curRev, updRev, _, err := GetCurrentAndUpdate(context.TODO(), tt.group, nil, tt.revisions, cli, tt.accessor.CurrentRevision(), tt.accessor.CollisionCount()) if tt.expectedErr { require.Error(t, err) } else { diff --git a/tests/e2e/data/data.go b/tests/e2e/data/data.go index 464860a4ea..54f049415a 100644 --- a/tests/e2e/data/data.go +++ b/tests/e2e/data/data.go @@ -16,13 +16,12 @@ package data import ( "github.com/pingcap/tidb-operator/apis/core/v1alpha1" - "github.com/pingcap/tidb-operator/pkg/client" "github.com/pingcap/tidb-operator/pkg/runtime" ) type ( - ClusterPatch func(obj *v1alpha1.Cluster) - GroupPatch[T client.Object, G runtime.Group] func(obj G) + ClusterPatch func(obj *v1alpha1.Cluster) + GroupPatch[G runtime.Group] func(obj G) ) const ( @@ -35,7 +34,7 @@ const ( defaultVersion = "v8.1.0" ) -func WithReplicas[T client.Object, G runtime.Group](replicas *int32) GroupPatch[T, G] { +func WithReplicas[G runtime.Group](replicas int32) GroupPatch[G] { return func(obj G) { obj.SetReplicas(replicas) } diff --git a/tests/e2e/data/pd.go b/tests/e2e/data/pd.go index bf5624facf..af335e250a 100644 --- a/tests/e2e/data/pd.go +++ b/tests/e2e/data/pd.go @@ -22,7 +22,7 @@ import ( "github.com/pingcap/tidb-operator/pkg/runtime" ) -func NewPDGroup(ns string, patches 
...GroupPatch[*v1alpha1.PDGroup, *runtime.PDGroup]) *v1alpha1.PDGroup { +func NewPDGroup(ns string, patches ...GroupPatch[*runtime.PDGroup]) *v1alpha1.PDGroup { pdg := &runtime.PDGroup{ ObjectMeta: metav1.ObjectMeta{ Namespace: ns, diff --git a/tests/e2e/pd/pd.go b/tests/e2e/pd/pd.go index 9a145f420c..244e7026dc 100644 --- a/tests/e2e/pd/pd.go +++ b/tests/e2e/pd/pd.go @@ -38,7 +38,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() { ginkgo.It("support create PD with 1 replica", func(ctx context.Context) { pdg := data.NewPDGroup( f.Namespace.Name, - data.WithReplicas[*v1alpha1.PDGroup, *runtime.PDGroup](ptr.To[int32](1)), + data.WithReplicas[*runtime.PDGroup](1), ) ginkgo.By("Create PDGroup") @@ -49,7 +49,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() { ginkgo.It("support create PD with 3 replica", func(ctx context.Context) { pdg := data.NewPDGroup( f.Namespace.Name, - data.WithReplicas[*v1alpha1.PDGroup, *runtime.PDGroup](ptr.To[int32](3)), + data.WithReplicas[*runtime.PDGroup](3), ) ginkgo.By("Create PDGroup") @@ -62,7 +62,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() { ginkgo.It("support scale PD form 3 to 5", func(ctx context.Context) { pdg := data.NewPDGroup( f.Namespace.Name, - data.WithReplicas[*v1alpha1.PDGroup, *runtime.PDGroup](ptr.To[int32](3)), + data.WithReplicas[*runtime.PDGroup](3), ) ginkgo.By("Create PDGroup") @@ -81,7 +81,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() { pdg := data.NewPDGroup( f.Namespace.Name, //nolint:mnd // easy for test - data.WithReplicas[*v1alpha1.PDGroup, *runtime.PDGroup](ptr.To[int32](5)), + data.WithReplicas[*runtime.PDGroup](5), ) ginkgo.By("Create PDGroup") @@ -101,7 +101,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() { ginkgo.It("support rolling update PD by change config file", func(ctx context.Context) { pdg := data.NewPDGroup( f.Namespace.Name, - data.WithReplicas[*v1alpha1.PDGroup, *runtime.PDGroup](ptr.To[int32](3)), + data.WithReplicas[*runtime.PDGroup](3), ) ginkgo.By("Create PDGroup") @@ 
-133,7 +133,7 @@ var _ = ginkgo.Describe("PD", label.PD, func() { ginkgo.It("support suspend and resume PD", func(ctx context.Context) { pdg := data.NewPDGroup( f.Namespace.Name, - data.WithReplicas[*v1alpha1.PDGroup, *runtime.PDGroup](ptr.To[int32](3)), + data.WithReplicas[*runtime.PDGroup](3), ) ginkgo.By("Create PDGroup") diff --git a/tests/e2e/utils/waiter/pod.go b/tests/e2e/utils/waiter/pod.go index ffa5a09a4f..7a6a85d7dd 100644 --- a/tests/e2e/utils/waiter/pod.go +++ b/tests/e2e/utils/waiter/pod.go @@ -94,10 +94,10 @@ func WaitPodsRollingUpdateOnce[G runtime.Group]( } } - if len(infos) != 2*int(*g.Replicas()) { - return fmt.Errorf("expect %v pods info, now only %v, detail:\n%v", 2**g.Replicas(), len(infos), detail.String()) + if len(infos) != 2*int(g.Replicas()) { + return fmt.Errorf("expect %v pods info, now only %v, detail:\n%v", 2*g.Replicas(), len(infos), detail.String()) } - for i := range *g.Replicas() { + for i := range g.Replicas() { if infos[2*i].name != infos[2*i+1].name { return fmt.Errorf("pod may be restarted at same time, detail:\n%v", detail.String()) } @@ -158,8 +158,8 @@ func sortPodInfos(infos []podInfo) { func WaitForPodsReady[G runtime.Group](ctx context.Context, c client.Client, g G, timeout time.Duration) error { list := corev1.PodList{} return WaitForList(ctx, c, &list, func() error { - if len(list.Items) != int(*g.Replicas()) { - return fmt.Errorf("%s/%s pod replicas %d not equal to %d", g.GetNamespace(), g.GetName(), len(list.Items), *g.Replicas()) + if len(list.Items) != int(g.Replicas()) { + return fmt.Errorf("%s/%s pod replicas %d not equal to %d", g.GetNamespace(), g.GetName(), len(list.Items), g.Replicas()) } for i := range list.Items { pod := &list.Items[i] @@ -201,8 +201,8 @@ func WaitForPodsRecreated[G runtime.Group]( ) error { list := corev1.PodList{} return WaitForList(ctx, c, &list, func() error { - if len(list.Items) != int(*g.Replicas()) { - return fmt.Errorf("%s/%s replicas %d not equal to %d", g.GetNamespace(), 
g.GetName(), len(list.Items), *g.Replicas()) + if len(list.Items) != int(g.Replicas()) { + return fmt.Errorf("%s/%s replicas %d not equal to %d", g.GetNamespace(), g.GetName(), len(list.Items), g.Replicas()) } for i := range list.Items { pod := &list.Items[i] diff --git a/third_party/kubernetes/pkg/controller/history/controller_history.go b/third_party/kubernetes/pkg/controller/history/controller_history.go index de74a95870..17dccac628 100644 --- a/third_party/kubernetes/pkg/controller/history/controller_history.go +++ b/third_party/kubernetes/pkg/controller/history/controller_history.go @@ -62,7 +62,8 @@ func NewControllerRevision(parent metav1.Object, templateLabels map[string]string, data runtime.RawExtension, revision int64, - collisionCount *int32) (*appsv1.ControllerRevision, error) { + collisionCount *int32, +) (*appsv1.ControllerRevision, error) { labelMap := make(map[string]string) for k, v := range templateLabels { labelMap[k] = v @@ -163,6 +164,7 @@ func (br byRevision) Swap(i, j int) { br[i], br[j] = br[j], br[i] } +// TODO(liubo02): add ctx into interface // Interface provides an interface allowing for management of a Controller's history as realized by recorded // ControllerRevisions. An instance of Interface can be retrieved from NewHistory. Implementations must treat all // pointer parameters as "in" parameter, and they must not be mutated. 
From 59d48ff347bb75fd9d5f231d3939a1ed2e1e8ee7 Mon Sep 17 00:00:00 2001 From: liubo02 Date: Fri, 3 Jan 2025 17:45:20 +0800 Subject: [PATCH 2/4] fix license Signed-off-by: liubo02 --- pkg/controllers/common/revision.go | 14 ++++++++++++++ pkg/controllers/common/revision_test.go | 14 ++++++++++++++ pkg/controllers/common/task_finalizer.go | 14 ++++++++++++++ pkg/controllers/common/task_finalizer_test.go | 14 ++++++++++++++ pkg/controllers/common/task_status.go | 14 ++++++++++++++ pkg/controllers/common/task_status_test.go | 14 ++++++++++++++ pkg/updater/hook.go | 14 ++++++++++++++ 7 files changed, 98 insertions(+) diff --git a/pkg/controllers/common/revision.go b/pkg/controllers/common/revision.go index 5cd8aa9302..6a6830b811 100644 --- a/pkg/controllers/common/revision.go +++ b/pkg/controllers/common/revision.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package common import ( diff --git a/pkg/controllers/common/revision_test.go b/pkg/controllers/common/revision_test.go index b8889ab8ba..7d11e1b182 100644 --- a/pkg/controllers/common/revision_test.go +++ b/pkg/controllers/common/revision_test.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package common import ( diff --git a/pkg/controllers/common/task_finalizer.go b/pkg/controllers/common/task_finalizer.go index 9392dd9e55..2ea8bb5441 100644 --- a/pkg/controllers/common/task_finalizer.go +++ b/pkg/controllers/common/task_finalizer.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package common import ( diff --git a/pkg/controllers/common/task_finalizer_test.go b/pkg/controllers/common/task_finalizer_test.go index 0be872361e..d4ead8f355 100644 --- a/pkg/controllers/common/task_finalizer_test.go +++ b/pkg/controllers/common/task_finalizer_test.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package common import ( diff --git a/pkg/controllers/common/task_status.go b/pkg/controllers/common/task_status.go index ebe6af4f00..473434352d 100644 --- a/pkg/controllers/common/task_status.go +++ b/pkg/controllers/common/task_status.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package common import ( diff --git a/pkg/controllers/common/task_status_test.go b/pkg/controllers/common/task_status_test.go index 455082b805..25f925deb2 100644 --- a/pkg/controllers/common/task_status_test.go +++ b/pkg/controllers/common/task_status_test.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + package common import ( diff --git a/pkg/updater/hook.go b/pkg/updater/hook.go index d51def8070..44a588c7cf 100644 --- a/pkg/updater/hook.go +++ b/pkg/updater/hook.go @@ -1,3 +1,17 @@ +// Copyright 2024 PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package updater import "github.com/pingcap/tidb-operator/pkg/runtime" From bf145c9441f9ae7381f30e31742506dad8a9859c Mon Sep 17 00:00:00 2001 From: liubo02 Date: Mon, 6 Jan 2025 18:57:18 +0800 Subject: [PATCH 3/4] fix e2e Signed-off-by: liubo02 --- pkg/controllers/tidbgroup/builder.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controllers/tidbgroup/builder.go b/pkg/controllers/tidbgroup/builder.go index cdbf7bab02..78332dd69e 100644 --- a/pkg/controllers/tidbgroup/builder.go +++ b/pkg/controllers/tidbgroup/builder.go @@ -49,6 +49,7 @@ func (r *Reconciler) NewRunner(state *tasks.ReconcileContext, reporter task.Task common.TaskRevision(state, r.Client), tasks.TaskService(state, r.Client), tasks.TaskUpdater(state, r.Client), + tasks.TaskStatusAvailable(state, r.Client), common.TaskGroupStatus[runtime.TiDBGroupTuple](state, r.Client), ) From 4e59401286f9091abf577f98637db27b7481a94c Mon Sep 17 00:00:00 2001 From: liubo02 Date: Tue, 7 Jan 2025 09:12:03 +0800 Subject: [PATCH 4/4] fix e2e Signed-off-by: liubo02 --- pkg/controllers/tiflash/tasks/status.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/controllers/tiflash/tasks/status.go b/pkg/controllers/tiflash/tasks/status.go index 4af2829316..38e44768f2 100644 --- a/pkg/controllers/tiflash/tasks/status.go +++ b/pkg/controllers/tiflash/tasks/status.go @@ -134,6 +134,7 @@ func (t *TaskStatus) Sync(ctx task.Context[ReconcileContext]) task.Result { } if rtx.Pod == nil || rtx.PodIsTerminating { + rtx.Healthy = false } else if statefulset.IsPodRunningAndReady(rtx.Pod) && rtx.StoreState == v1alpha1.StoreStateServing { rtx.Healthy = true if rtx.TiFlash.Status.CurrentRevision != rtx.Pod.Labels[v1alpha1.LabelKeyInstanceRevisionHash] {