Commit

fixes
nestorsokil committed Jun 7, 2020
1 parent 253a45f commit 520a558
Showing 7 changed files with 126 additions and 58 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -27,7 +27,7 @@ run: generate fmt vet manifests

# Install CRDs into a cluster
install: manifests
kustomize build config/crd | kubectl apply -f -
kustomize build config/crd | kubectl --validate=false apply -f -

# Uninstall CRDs from a cluster
uninstall: manifests
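The --validate=false flag turns off kubectl's client-side schema validation when applying the generated CRD manifests. The commit does not say why it was needed; a plausible reason (an assumption, not stated here) is that client-side validation chokes on the very large OpenAPI schema produced for a CRD that embeds a full PodTemplateSpec.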
4 changes: 2 additions & 2 deletions api/v1alpha1/bluegreendeployment_types.go
@@ -43,8 +43,8 @@ const (
)

const (
ColorBlue = "Blue"
ColorGreen = "Green"
ColorBlue = "blue"
ColorGreen = "green"
)

// BlueGreenDeploymentStatus defines the observed state of BlueGreenDeployment
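The diff does not state why the color constants were lowercased. A plausible reason, inferred from the rest of the commit, is that the color is baked into the per-color ReplicaSet name (coloredName in createRs), and Kubernetes object names must be valid lowercase RFC 1123 labels, so a name containing "Blue" would be rejected where "blue" passes. The helper below only sketches the check such a value has to satisfy; validColor is an illustrative name, not code from this repository:

package v1alpha1

import "k8s.io/apimachinery/pkg/util/validation"

// validColor reports whether a color value is safe to embed in object
// names: it must be a valid lowercase RFC 1123 label (no uppercase).
func validColor(color string) bool {
	return len(validation.IsDNS1123Label(color)) == 0
}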
13 changes: 12 additions & 1 deletion api/v1alpha1/zz_generated.deepcopy.go

Some generated files are not rendered by default.

21 changes: 18 additions & 3 deletions config/samples/cluster_v1alpha1_bluegreendeployment.yaml
@@ -1,7 +1,22 @@
apiVersion: cluster.kbg/v1alpha1
kind: BlueGreenDeployment
metadata:
name: bluegreendeployment-sample
name: myserver
spec:
# Add fields here
foo: bar
replicas: 2
backupScaleDownPercent: 50
service:
type: NodePort
ports:
- name: http
port: 80
targetPort: 80
protocol: TCP
template:
spec:
containers:
- name: nginx
image: nginx:1.19.0
imagePullPolicy: Always
ports:
- containerPort: 80
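The expanded sample now exercises the fields the controller reads below: replicas for the active color, backupScaleDownPercent for how far the inactive color is scaled down, the Service that fronts the active ReplicaSet, and the pod template to roll out.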
5 changes: 5 additions & 0 deletions controllers/bluegreendeployment_controller.go
@@ -20,6 +20,7 @@ import (
"context"
"github.com/go-logr/logr"
"github.com/pkg/errors"
kuberrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
ctrl "sigs.k8s.io/controller-runtime"
@@ -48,6 +49,10 @@ func (r *BlueGreenDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result

deploy, err := r.obtainDeployment(ctx, req.NamespacedName)
if err != nil {
if kuberrors.IsNotFound(err) {
log.Info("BlueGreenDeployment was deleted") // probably...
return ctrl.Result{}, nil
}
log.Error(err, "unable to obtain BlueGreenDeployment")
return ctrl.Result{}, err
}
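The added IsNotFound branch ends the reconcile cleanly when the custom resource has already been deleted, instead of logging an error and requeueing forever. controller-runtime ships client.IgnoreNotFound for exactly this pattern; a minimal sketch of the equivalent (resultForGetErr is an illustrative helper name, not from this repository):

package controllers

import (
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// resultForGetErr maps the error from a Get in Reconcile to a result:
// IgnoreNotFound turns NotFound into nil, so a deleted object stops the
// reconcile without a requeue, while any other error is returned and retried.
func resultForGetErr(err error) (ctrl.Result, error) {
	return ctrl.Result{}, client.IgnoreNotFound(err)
}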
131 changes: 84 additions & 47 deletions controllers/deployrunner.go
@@ -8,7 +8,6 @@ import (
"github.com/pkg/errors"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
kuberrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -36,22 +35,32 @@ func (r *DeployRunner) Run(ctx context.Context) error {
return errors.Wrap(err, "unable to obtain ReplicaSets")
}
switch {
case equalIgnoreHash(&activeRs.Spec.Template, &r.deploy.Spec.Template):
case podEquals(&activeRs.Spec.Template, &r.deploy.Spec.Template):
r.Logger.Info("No changes were detected, skipping")
return nil
case equalIgnoreHash(&inactiveRs.Spec.Template, &r.deploy.Spec.Template):
r.Logger.Info("New config matches inactive ReplicaSet, swapping")
if inactiveRs.Spec.Replicas != r.deploy.Spec.Replicas {
inactiveRs.Spec.Replicas = r.deploy.Spec.Replicas
if err := r.Client.Update(ctx, inactiveRs); err != nil {
return errors.Wrap(err, "unable to scale")
}
}
svc.Spec.Selector[LabelColor] = r.deploy.Status.ActiveColor
if err = r.Client.Update(ctx, svc); err != nil {
return errors.Wrap(err, "unable to swap")
}
return nil
// TODO: fix this case; it gets triggered after a regular B/G run and swaps back
//case podEquals(&inactiveRs.Spec.Template, &r.deploy.Spec.Template):
// // todo swap & scale can be a method
// r.Logger.Info("New config matches inactive ReplicaSet, swapping")
// if inactiveRs.Spec.Replicas != r.deploy.Spec.Replicas {
// inactiveRs.Spec.Replicas = r.deploy.Spec.Replicas
// if err := r.Client.Update(ctx, inactiveRs); err != nil {
// return errors.Wrap(err, "unable to scale")
// }
// }
// desiredInactive := r.inactiveReplicas(ctx)
// if activeRs.Spec.Replicas != desiredInactive {
// activeRs.Spec.Replicas = desiredInactive
// if err := r.Client.Update(ctx, inactiveRs); err != nil {
// return errors.Wrap(err, "unable to scale")
// }
// }
//
// svc.Spec.Selector[LabelColor] = r.deploy.Status.ActiveColor
// if err = r.Client.Update(ctx, svc); err != nil {
// return errors.Wrap(err, "unable to swap")
// }
// return nil
default:
r.Logger.Info("New configuration detected, running B/G deployment")
newRs, err := r.upgradeReplicaSet(ctx, inactiveRs)
@@ -61,14 +70,19 @@

// TODO initiate smoke tests here

svc.Spec.Selector[LabelColor] = r.deploy.Status.ActiveColor
svc.Spec.Selector[LabelColor] = newRs.Labels[LabelColor]
if err = r.Client.Update(ctx, svc); err != nil {
return errors.Wrap(err, "unable to scale inactive")
}

inactiveRs = activeRs
activeRs = newRs

r.deploy.Status.ActiveColor = newRs.Labels[LabelColor]
if err := r.Client.Update(ctx, r.deploy); err != nil {
return errors.Wrap(err, "unable to update deploy status")
}

if err := r.scaleInactive(ctx, inactiveRs); err != nil {
// TODO: should this be non-critical?
return errors.Wrap(err, "unable to scale inactive")
@@ -81,25 +95,25 @@ func (r *DeployRunner) obtainService(ctx context.Context) (*v1.Service, error) {
var deploy = r.deploy
var svc v1.Service
if err := r.Get(ctx, client.ObjectKey{Namespace: deploy.Namespace, Name: deploy.Name}, &svc); err != nil {
if kuberrors.IsNotFound(err) {
svcSpec := deploy.Spec.Service.DeepCopy()
svcSpec.Selector = map[string]string{"color": clusterv1alpha1.ColorBlue}
svc = v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: deploy.Name,
Namespace: deploy.Namespace,
},
Spec: *svcSpec,
}
if err := r.Client.Create(ctx, &svc); err != nil {
return nil, err
}
if !kuberrors.IsNotFound(err) {
return nil, err
}
svcSpec := deploy.Spec.Service.DeepCopy()
svcSpec.Selector = map[string]string{LabelColor: clusterv1alpha1.ColorBlue}
svc = v1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: deploy.Name,
Namespace: deploy.Namespace,
},
Spec: *svcSpec,
}
if err := r.Client.Create(ctx, &svc); err != nil {
return nil, err
}
return nil, err
}
return &svc, nil
}
@@ -139,9 +153,13 @@ func (r *DeployRunner) createRs(ctx context.Context, color string) (*appsv1.Repl
LabelColor: color,
}
podTemplate := r.deploy.Spec.Template
if podTemplate.ObjectMeta.Labels == nil {
podTemplate.ObjectMeta.Labels = make(map[string]string)
}
for k, v := range labels {
podTemplate.ObjectMeta.Labels[k] = v
}
replicas := r.desiredReplicas(ctx, color)
rs := appsv1.ReplicaSet{
TypeMeta: metav1.TypeMeta{
Kind: "ReplicaSet",
@@ -150,12 +168,13 @@
ObjectMeta: metav1.ObjectMeta{
Name: coloredName,
Namespace: r.deploy.Namespace,
Labels: labels,
},
Spec: appsv1.ReplicaSetSpec{
Selector: &metav1.LabelSelector{
MatchLabels: labels,
},
Replicas: r.deploy.Spec.Replicas,
Replicas: replicas,
Template: podTemplate,
},
}
@@ -166,27 +185,45 @@
}

func (r *DeployRunner) upgradeReplicaSet(ctx context.Context, rs *appsv1.ReplicaSet) (*appsv1.ReplicaSet, error) {
color := rs.Labels[LabelColor]
if err := r.Client.Delete(ctx, rs); err != nil {
return nil, err
}
return r.createRs(ctx, rs.Labels[LabelColor])
return r.createRs(ctx, color)
}

func (r *DeployRunner) scaleInactive(ctx context.Context, rs *appsv1.ReplicaSet) error {
rs.Spec.Replicas = r.inactiveReplicas(ctx)
return r.Client.Update(ctx, rs)
}

// TODO: reconsider the signatures of these two helpers
func (r *DeployRunner) desiredReplicas(ctx context.Context, color string) *int32 {
if color == r.deploy.Status.ActiveColor {
return r.deploy.Spec.Replicas
}
return r.inactiveReplicas(ctx)
}
func (r *DeployRunner) inactiveReplicas(ctx context.Context) *int32 {
scaleDownPercent := *r.deploy.Spec.BackupScaleDownPercent
activeReplicas := *r.deploy.Spec.Replicas
inactiveReplicas := activeReplicas * (scaleDownPercent / 100.0)
rs.Spec.Replicas = &inactiveReplicas

return r.Client.Update(ctx, rs)
factor := float32(scaleDownPercent) / float32(100.0)
inactiveReplicas := int32(float32(activeReplicas) * factor)
return &inactiveReplicas
}

// from github.com/kubernetes/kubernetes/staging/src/k8s.io/kubectl/pkg/util/deployment/deployment.go
func equalIgnoreHash(template1, template2 *v1.PodTemplateSpec) bool {
t1Copy := template1.DeepCopy()
t2Copy := template2.DeepCopy()
// Remove hash labels from template.Labels before comparing
delete(t1Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
delete(t2Copy.Labels, appsv1.DefaultDeploymentUniqueLabelKey)
return apiequality.Semantic.DeepEqual(t1Copy, t2Copy)
func podEquals(template1, template2 *v1.PodTemplateSpec) bool {
return template1.Spec.Containers[0].Image == template2.Spec.Containers[0].Image
// TODO: these are hacks; revisit later

//for i := range template1.Spec.Containers {
// template1.Spec.Containers[i].TerminationMessagePath = "/dev/termination-log"
// template1.Spec.Containers[i].TerminationMessagePolicy = "File"
//}
//for i := range template2.Spec.Containers {
// template2.Spec.Containers[i].TerminationMessagePath = "/dev/termination-log"
// template2.Spec.Containers[i].TerminationMessagePolicy = "File"
//}
//return apiequality.Semantic.DeepEqual(template1.Spec.Containers, template2.Spec.Containers)
}
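Two changes in this file deserve a closer look. podEquals replaces the copied equalIgnoreHash helper and, for now, only compares the image of the first container, an explicitly flagged shortcut. More importantly, the inactiveReplicas rewrite fixes an arithmetic bug: with int32 operands, scaleDownPercent / 100.0 truncates to 0 for any percentage below 100, so the backup ReplicaSet was always scaled to zero; the new code goes through float32 and truncates the product instead. A small self-contained check of that arithmetic (illustrative only, not part of the commit):

package main

import "fmt"

// scaled mirrors the new inactiveReplicas math: take percent of the active
// replica count via float32, then truncate back to int32.
func scaled(active, percent int32) int32 {
	factor := float32(percent) / 100.0
	return int32(float32(active) * factor)
}

func main() {
	fmt.Println(scaled(2, 50))         // 1
	fmt.Println(scaled(3, 50))         // 1 (truncates, does not round)
	fmt.Println(2 * (int32(50) / 100)) // 0 -- the old integer form
}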
8 changes: 4 additions & 4 deletions main.go
@@ -74,10 +74,10 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "BlueGreenDeployment")
os.Exit(1)
}
if err = (&clusterv1alpha1.BlueGreenDeployment{}).SetupWebhookWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create webhook", "webhook", "BlueGreenDeployment")
os.Exit(1)
}
//if err = (&clusterv1alpha1.BlueGreenDeployment{}).SetupWebhookWithManager(mgr); err != nil {
// setupLog.Error(err, "unable to create webhook", "webhook", "BlueGreenDeployment")
// os.Exit(1)
//}
// +kubebuilder:scaffold:builder

setupLog.Info("starting manager")
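Commenting out SetupWebhookWithManager disables the admission webhook for BlueGreenDeployment. The commit does not explain the change; a plausible reason is that the controller-runtime webhook server needs TLS certificates to start, which gets in the way of running the manager locally with make run.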
