Old lock upgrade support #4

Open
wants to merge 2 commits into base: spectro-0.4.0
4 changes: 2 additions & 2 deletions Makefile
@@ -5,7 +5,7 @@ TARGET := kube-vip
.DEFAULT_GOAL: $(TARGET)

# These will be provided to the target
-VERSION := v0.4.0
+VERSION := v0.4.0-20220722
BUILD := `git rev-parse HEAD`

# Operating System Default (LINUX)
@@ -14,7 +14,7 @@ TARGETOS=linux
# Use linker flags to provide version/build settings to the target
LDFLAGS=-ldflags "-s -w -X=main.Version=$(VERSION) -X=main.Build=$(BUILD) -extldflags -static"
DOCKERTAG ?= $(VERSION)
-REPOSITORY = plndr
+REPOSITORY = gcr.io/spectro-dev-public/release/kube-vip

.PHONY: all build clean install uninstall fmt simplify check run e2e-tests

122 changes: 93 additions & 29 deletions pkg/cluster/clusterLeaderElection.go
@@ -31,7 +31,10 @@ import (
	watchtools "k8s.io/client-go/tools/watch"
)

-const plunderLock = "plndr-cp-lock"
+const (
+	plunderLock = "plndr-cp-lock"
+	plunderOldLock = "plunder-lock"
+)

// Manager defines the manager of the load-balancing services
type Manager struct {
@@ -175,6 +178,93 @@ func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer *
}
}

+	loseElectionCallBack := func() {
+		// we can do cleanup here
+		log.Info("This node is becoming a follower within the cluster")
+
+		// Stop the dns context
+		cancelDNS()
+		// Stop the Arp context if it is running
+		cancelArp()
+
+		// Stop the BGP server
+		if bgpServer != nil {
+			err = bgpServer.Close()
+			if err != nil {
+				log.Warnf("%v", err)
+			}
+		}
+
+		err = cluster.Network.DeleteIP()
+		if err != nil {
+			log.Warnf("%v", err)
+		}
+
+		log.Fatal("lost leadership, restarting kube-vip")
+	}
+
+	newLeaderCallBack := func(identity string) {
+		// we're notified when new leader elected
+		log.Infof("Node [%s] is assuming leadership of the cluster", identity)
+	}
+
+	pods, err := sm.KubernetesClient.CoreV1().Pods("kube-system").List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+
+	oldKubevipPresent := false
+	for _, pod := range pods.Items {
+		if pod.Spec.Containers[0].Image == "gcr.io/spectro-images-public/release/kube-vip/kube-vip:0.2.2" {
+			oldKubevipPresent = true
+			log.Info("kube-vip 0.2.2 found")
+			log.Infof("Beginning cluster membership, namespace [%s], lock name [%s], id [%s]", c.Namespace, plunderOldLock, id)
+			break
+		}
+	}
+
+	goodToGo := make(chan string, 1)
+
+	if oldKubevipPresent {
+		oldPlunderLock := &resourcelock.LeaseLock{
+			LeaseMeta: metav1.ObjectMeta{
+				Name: plunderOldLock,
+				Namespace: c.Namespace,
+			},
+			Client: sm.KubernetesClient.CoordinationV1(),
+			LockConfig: resourcelock.ResourceLockConfig{
+				Identity: id,
+			},
+		}
+
+		go func() {
+			leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
+				Lock: oldPlunderLock,
+				ReleaseOnCancel: true,
+				LeaseDuration: time.Duration(c.LeaseDuration) * time.Second,
+				RenewDeadline: time.Duration(c.RenewDeadline) * time.Second,
+				RetryPeriod: time.Duration(c.RetryPeriod) * time.Second,
+				Callbacks: leaderelection.LeaderCallbacks{
+					OnStartedLeading: func(ctx context.Context) {
+						goodToGo <- "yes"
+					},
+					OnStoppedLeading: func() {
+						loseElectionCallBack()
+					},
+					OnNewLeader: newLeaderCallBack,
+				},
+			})
+		}()
+	} else {
+		err := sm.KubernetesClient.CoordinationV1().Leases(c.Namespace).Delete(ctx, plunderOldLock, metav1.DeleteOptions{})
+		if err != nil {
+			log.Fatalf("could not delete 0.2.2 lease %s", plunderOldLock)
+		}
+		goodToGo <- "yes"
+	}
+
+	<-goodToGo

	// start the leader election code loop
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock: lock,
@@ -197,34 +287,8 @@ func (cluster *Cluster) StartCluster(c *kubevip.Config, sm *Manager, bgpServer *
				}

			},
-			OnStoppedLeading: func() {
-				// we can do cleanup here
-				log.Info("This node is becoming a follower within the cluster")
-
-				// Stop the dns context
-				cancelDNS()
-				// Stop the Arp context if it is running
-				cancelArp()
-
-				// Stop the BGP server
-				if bgpServer != nil {
-					err = bgpServer.Close()
-					if err != nil {
-						log.Warnf("%v", err)
-					}
-				}
-
-				err = cluster.Network.DeleteIP()
-				if err != nil {
-					log.Warnf("%v", err)
-				}
-
-				log.Fatal("lost leadership, restarting kube-vip")
-			},
-			OnNewLeader: func(identity string) {
-				// we're notified when new leader elected
-				log.Infof("Node [%s] is assuming leadership of the cluster", identity)
-			},
+			OnStoppedLeading: loseElectionCallBack,
+			OnNewLeader: newLeaderCallBack,
		},
	})

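Taken together, the additions above gate the normal election on the legacy lease: when a kube-vip 0.2.2 pod is detected, this instance first competes for the old "plunder-lock" lease and only joins the current "plndr-cp-lock" election after winning it; when no old pod exists, the stale lease is deleted and the election starts immediately. Below is a minimal, self-contained sketch of that two-stage gating pattern using the client-go leaderelection package; the namespace, identity, and timing values are illustrative assumptions, not kube-vip's actual configuration.

// Sketch of the two-stage election shown in the diff above. Lease names come
// from the diff; namespace, identity, and durations are assumed for illustration.
package main

import (
	"context"
	"log"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// newLock builds a Lease-based lock, mirroring the resourcelock.LeaseLock in the diff.
func newLock(client kubernetes.Interface, name, namespace, id string) *resourcelock.LeaseLock {
	return &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: name, Namespace: namespace},
		Client:     client.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)
	ctx := context.Background()
	id := "node-a" // assumption: would normally be the node or pod name

	goodToGo := make(chan struct{}, 1)

	// Stage 1: win the legacy lock so an old 0.2.2 instance cannot keep holding the VIP.
	go leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            newLock(client, "plunder-lock", "kube-system", id),
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { goodToGo <- struct{}{} },
			OnStoppedLeading: func() { log.Fatal("lost legacy lock, restarting") },
		},
	})

	// Block until the legacy lock is held, like the <-goodToGo in the diff.
	<-goodToGo

	// Stage 2: the election the current version actually uses.
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            newLock(client, "plndr-cp-lock", "kube-system", id),
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) { log.Println("leading; the VIP service would start here") },
			OnStoppedLeading: func() { log.Fatal("lost leadership, restarting") },
		},
	})
}

Note the buffered channel: in the diff's else branch the send and the receive happen on the same goroutine, so the buffer of one is what prevents a deadlock when no old pod is found.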
7 changes: 7 additions & 0 deletions pkg/vip/dns.go
@@ -35,6 +35,13 @@ func (d *ipUpdater) Run(ctx context.Context) {
				ip, err := lookupHost(d.vip.DNSName())
				if err != nil {
					log.Warnf("cannot lookup %s: %v", d.vip.DNSName(), err)
+					if d.vip.IsDDNS() {
+						// if ddns and can't resolve address
+						// panic and restart the pod
+						// as renew and rebind are not working
+						// after lease expires
+						panic(err.Error())
+					}
					// fallback to renewing the existing IP
					ip = d.vip.IP()
}