diff --git a/Makefile b/Makefile index 97ee754..b084643 100644 --- a/Makefile +++ b/Makefile @@ -17,6 +17,7 @@ VERSIONS_FILE?=$(CALICO_UPGRADE_DIR)../_data/versions.yml # overriden (by the environment). CALICOCTL_VER?=master CALICOCTL_V2_VER?=v1.6.x-series +K8S_VERSION?=v1.8.1 # Construct the calico/ctl names we'll use to download calicoctl and extract the # binaries. @@ -24,6 +25,7 @@ $(info $(shell printf "%-21s = %-10s\n" "CALICOCTL_VER" $(CALICOCTL_VER))) $(info $(shell printf "%-21s = %-10s\n" "CALICOCTL_V2_VER" $(CALICOCTL_V2_VER))) CTL_CONTAINER_NAME?=calico/ctl:$(CALICOCTL_VER) CTL_CONTAINER_V2_NAME?=calico/ctl:$(CALICOCTL_V2_VER) +KUBECTL_URL=https://dl.k8s.io/$(K8S_VERSION)/kubernetes-client-linux-amd64.tar.gz ############################################################################### # calico-upgrade build @@ -55,6 +57,9 @@ LDFLAGS=-ldflags "-X $(PACKAGE_NAME)/pkg/commands.VERSION=$(CALICO_UPGRADE_VERSI LIBCALICOGO_PATH?=none +# curl should fail on 404 +CURL=curl -sSf + calico/upgrade: $(CALICO_UPGRADE_CONTAINER_CREATED) ## Create the calico/upgrade image .PHONY: clean-calico-upgrade @@ -80,10 +85,17 @@ vendor: glide.yaml glide install -strip-vendor' # build calico_upgrade image -$(CALICO_UPGRADE_CONTAINER_CREATED): pkg/Dockerfile.calico_upgrade dist/calico-upgrade +$(CALICO_UPGRADE_CONTAINER_CREATED): pkg/Dockerfile.calico_upgrade dist/calico-upgrade dist/kubectl docker build -t $(CALICO_UPGRADE_CONTAINER_NAME) -f pkg/Dockerfile.calico_upgrade . touch $@ +# Download kubectl instead of copying it from hyperkube because it is 4x smaller +# this way +dist/kubectl: + $(CURL) -L $(KUBECTL_URL) -o - | tar -zxvf - -C dist --strip-components=3 + chmod +x $(@D)/* + + ## Build calico-upgrade binary: $(CALICO_UPGRADE_FILES) vendor # Don't try to "install" the intermediate build files (.a .o) when not on linux diff --git a/glide.lock b/glide.lock index ce83f36..1a42f87 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 7aa78273efd54872379cc5493073c3468e28a13d8ba2b59f45b8e8cd2e88d34d -updated: 2018-04-18T22:24:00.13212705Z +hash: ce4ce095bb3ae9112a0efe3c1675cd6bc7831b7351c529dc3e83489d5aab1603 +updated: 2018-05-01T12:19:44.165587666-05:00 imports: - name: cloud.google.com/go version: 3b1ae45394a234c385be014e9a488f2bb6eef821 @@ -14,7 +14,7 @@ imports: - autorest/azure - autorest/date - name: github.com/beorn7/perks - version: 3a771d992973f24aa725d07868b467d1ddfceafb + version: 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 subpackages: - quantile - name: github.com/coreos/etcd @@ -33,7 +33,7 @@ imports: - pkg/types - version - name: github.com/coreos/go-semver - version: e214231b295a8ea9479f11b70b35d5acf3556d9b + version: 8ab6407b697782a06568d4b7f1db25550ec2e4c6 subpackages: - semver - name: github.com/davecgh/go-spew @@ -137,7 +137,7 @@ imports: - name: github.com/projectcalico/go-yaml-wrapper version: 598e54215bee41a19677faa4f0c32acd2a87eb56 - name: github.com/projectcalico/libcalico-go - version: e3351395c934cee118999b9d29508e9280fa75ec + version: 9047dcb0ecb57fbfc9924c5aac09db4e019a7588 subpackages: - lib/apiconfig - lib/apis/v1 @@ -183,11 +183,11 @@ imports: subpackages: - prometheus - name: github.com/prometheus/client_model - version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c + version: 6f3806018612930941127f2a7c6c453ba2c527d2 subpackages: - go - name: github.com/prometheus/common - version: d0f7cd64bda49e08b22ae8a730aa57aa0db125d6 + version: 49fee292b27bfff7f354ee0f64e1bc4850462edf subpackages: - expfmt - internal/bitbucket.org/ww/goautoneg @@ -252,7 
+252,7 @@ imports: - unicode/norm - width - name: google.golang.org/appengine - version: 0a24098c0ec68416ec050f567f75df563d6b231e + version: 4f7eeb5305a4ba1966344836ba4af9996b7b4e05 subpackages: - internal - internal/app_identity diff --git a/glide.yaml b/glide.yaml index ee52455..b7769e6 100644 --- a/glide.yaml +++ b/glide.yaml @@ -3,7 +3,7 @@ import: - package: github.com/sirupsen/logrus version: v1.0.4 - package: github.com/projectcalico/libcalico-go - version: e3351395c934cee118999b9d29508e9280fa75ec + version: 9047dcb0ecb57fbfc9924c5aac09db4e019a7588 - package: github.com/coreos/etcd version: v3.3.0 - package: github.com/spf13/pflag diff --git a/integration-snippets/README.md b/integration-snippets/README.md new file mode 100644 index 0000000..5af3932 --- /dev/null +++ b/integration-snippets/README.md @@ -0,0 +1,81 @@ +# Purpose + +The scripts and snippets here are intended to support automating the upgrade +of a cluster through the addition of two initContainers and a Job. The scripts +are added to the calico/upgrade container image so they can be run as +containers. + +# Configuration + +Each of the scripts needs some configuration; here are the common options +shared by the scripts. + +## V3 datastore configuration +The following can be used to configure access to the V3 datastore. + + CALICO_ETCD_ENDPOINTS + CALICO_ETCD_USERNAME + CALICO_ETCD_PASSWORD + CALICO_ETCD_KEY_FILE + CALICO_ETCD_CERT_FILE + CALICO_ETCD_CA_CERT_FILE + +## V1 datastore configuration +The following can be used to configure access to the V1 datastore. + + CALICO_APIV1_ETCD_SCHEME + CALICO_APIV1_ETCD_AUTHORITY + CALICO_APIV1_ETCD_ENDPOINTS + CALICO_APIV1_ETCD_USERNAME + CALICO_APIV1_ETCD_PASSWORD + CALICO_APIV1_ETCD_KEY_FILE + CALICO_APIV1_ETCD_CERT_FILE + CALICO_APIV1_ETCD_CA_CERT_FILE + +## calico-upgrade configuration + +It may be useful to provide some additional options or args to calico-upgrade. +The following are provided for that purpose: +* `UPGRADE_OPTIONS` can be used to pass the log level to the commands. +* `UPGRADE_ARGS` could be used to pass apiconfig if config files are + available instead of environment variables. + +# Node Daemonset initContainer + +This container will be responsible for checking if the datastore needs to be +migrated and, if needed, will start the migration. If the datastore migration is +in progress this container will block startup until it is complete. +The [script](node-init-container.sh) which does the above can be added to the +calico-node Daemonset with something like +[this snippet](node-init-container.yaml). + +# calico/kube-controller initContainer + +This container will be responsible for checking if the datastore needs to be +migrated and blocking startup until the migration is finished. +The [script](controller-init.sh) which does the above can be added to the +calico-kube-controllers Deployment with something like +[this snippet](controller-init.yaml). + +# Calico upgrade completion Job + +This container will be responsible for checking if the calico-node Daemonset +has rolled out before completing the upgrade. It also checks that the Daemonset +has the correct calico/node image, to verify that the proper Daemonset +rolled out. +The [script](completion-job.sh) which does the above can be deployed with +something like [this snippet](completion-job.yaml). + +## Daemonset monitor configuration + +The monitoring of the Daemonset relies on some values that may change in some +deployments, so the following are exposed to allow their configuration. 
+* `DS_NAME`: This should be the name of the 'new' Calico Daemonset. + (Default: "calico-node") +* `DS_SELECTOR`: This is the selector used to select the rollout that is being + monitored. (Default: "k8s-app=$DS_NAME") +* `DS_IMAGE_SELECTOR`: This is the jsonpath used to select the name of the + image for the calico-node container. (Default: "{.items[?(@.metadata.name=='$DS_NAME')].spec.template.spec.containers[?(@.name=='calico-node')].image}") +* `EXPECTED_NODE_IMAGE`: This is the image that is expected in the Daemonset; + this is to ensure the correct Daemonset is rolled out. + (Default: "quay.io/calico/node:v3.1.1") diff --git a/integration-snippets/completion-job.sh b/integration-snippets/completion-job.sh new file mode 100755 index 0000000..70438c9 --- /dev/null +++ b/integration-snippets/completion-job.sh @@ -0,0 +1,74 @@ +#!/bin/sh + +# Monitors the status of the rollout of the calico-node Daemonset. The script +# will poll until the rollout is complete and verify it is the correct rollout +# by checking that the calico/node image is the expected version. +# +# Requirements: +# - calico-upgrade is available in the PATH +# - kubectl is available in the PATH + + +DS_NAME=${DS_NAME:-"calico-node"} +DS_SELECTOR=${DS_SELECTOR:-"k8s-app=$DS_NAME"} +DS_IMAGE_SELECTOR=${DS_IMAGE_SELECTOR:-"{.items[?(@.metadata.name=='$DS_NAME')].spec.template.spec.containers[?(@.name=='calico-node')].image}"} +EXPECTED_NODE_IMAGE=${EXPECTED_NODE_IMAGE:-"quay.io/calico/node:v3.1.1"} + +echo "Ensure that the Daemonset $DS_NAME is rolled out, and the calico-node" +echo "container is running $EXPECTED_NODE_IMAGE before completing the upgrade" + +IsDsImageCorrect() +{ + image=$(kubectl get daemonset --selector=$DS_SELECTOR -n kube-system \ + -o jsonpath="$DS_IMAGE_SELECTOR") + if [ $? -ne 0 ]; then + return 1 + fi + echo "$image" | grep "$EXPECTED_NODE_IMAGE" +} + +IsDsRollOutFinished() +{ + rollout_status=$(kubectl rollout status daemonset/$DS_NAME) + if [ $? -ne 0 ]; then + return 1 + fi + echo "$rollout_status" | grep "successfully rolled out" +} + +echo "=== Current $DS_NAME Daemonset ===" +kubectl get daemonset --selector=$DS_SELECTOR -n kube-system + +# Wait for calico-node daemonset to have a v3 calico-node image +while ! IsDsImageCorrect; do + echo "Waiting for the image $EXPECTED_NODE_IMAGE to be in the Daemonset $DS_NAME" + sleep 5 +done + +echo "=== Current $DS_NAME Daemonset ===" +kubectl get daemonset --selector=$DS_SELECTOR -n kube-system + +# Wait for daemonset to finish rollout +while ! IsDsRollOutFinished; do + echo "Waiting for Daemonset $DS_NAME to finish rolling out" + sleep 5 +done + +# Verify the daemonset still has a v3 calico-node image, in case a rollback was done +if ! IsDsImageCorrect; then + echo "=== Current $DS_NAME Daemonset ===" + kubectl get daemonset --selector=$DS_SELECTOR -n kube-system \ + -o jsonpath="$DS_IMAGE_SELECTOR" + echo "" + echo "After waiting for $DS_NAME to finish rolling out it does not have the expected" + echo "calico/node image version. If a rollback was done on the calico/node daemonset" + echo "this Job should be deleted." + exit 1 +fi + +# Complete upgrade +calico-upgrade $UPGRADE_OPTIONS complete --no-prompts $UPGRADE_ARGS +if [ $? 
-ne 0 ]; then + echo "Completing the upgrade failed," + exit 1 +fi diff --git a/integration-snippets/completion-job.yaml b/integration-snippets/completion-job.yaml new file mode 100644 index 0000000..63620a9 --- /dev/null +++ b/integration-snippets/completion-job.yaml @@ -0,0 +1,87 @@ +apiVersion: batch/v1 +kind: Job +metadata: + name: complete-upgrade + namespace: kube-system +spec: + template: + spec: + hostNetwork: true + serviceAccountName: calico-upgrade-job + restartPolicy: Never + containers: + - name: migrate-completion + image: $UPGRADE_CONTAINER + command: ['/bin/sh', '-c', '/completion-job.sh'] + env: + - name: EXPECTED_NODE_IMAGE + valueFrom: + configMapKeyRef: + name: calico-config + key: node_image + # The location of the Calico etcd cluster. + - name: CALICO_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + - name: CALICO_ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: CALICO_ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: CALICO_ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # The location of the Calico etcd cluster. + - name: CALICO_APIV1_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + - name: CALICO_APIV1_ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: CALICO_APIV1_ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: CALICO_APIV1_ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + - name: CALICO_APIV1_DATASTORE_TYPE + value: "etcdv2" + volumeMounts: + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0400 + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-upgrade-job + namespace: kube-system diff --git a/integration-snippets/completion-rbac.yaml b/integration-snippets/completion-rbac.yaml new file mode 100644 index 0000000..0f7f105 --- /dev/null +++ b/integration-snippets/completion-rbac.yaml @@ -0,0 +1,29 @@ +apiVersion: rbac.authorization.k8s.io/v1beta1 +kind: ClusterRoleBinding +metadata: + name: calico-upgrade-job +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: calico-upgrade-job +subjects: +- kind: ServiceAccount + name: calico-upgrade-job + namespace: kube-system + +--- + +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1beta1 +metadata: + name: calico-upgrade-job +rules: + - apiGroups: + - extensions + resources: + - daemonsets + - daemonsets/status + verbs: + - get + - list + diff --git a/integration-snippets/controller-init.sh b/integration-snippets/controller-init.sh new file mode 100755 index 0000000..6d7e588 --- /dev/null +++ b/integration-snippets/controller-init.sh @@ -0,0 +1,24 @@ +#!/bin/sh + +# This script checks if upgrade is needed and returns 0 if no upgrade +# is needed. +# +# Requirements: +# - calico-upgrade is available in the PATH + + +calico-upgrade $UPGRADE_OPTIONS needed $UPGRADE_ARGS +needed=$? 
+while [ $needed -eq 0 ]; do + sleep 5 + calico-upgrade $UPGRADE_OPTIONS needed $UPGRADE_ARGS + needed=$? +done + +if [ $needed -eq 1 ]; then + echo "No data migration is needed. Continuing on." + exit 0 +fi + +# Will hit this if there is a problem accessing the datastore +exit 1 diff --git a/integration-snippets/controller-init.yaml b/integration-snippets/controller-init.yaml new file mode 100644 index 0000000..4bc6b58 --- /dev/null +++ b/integration-snippets/controller-init.yaml @@ -0,0 +1,56 @@ + initContainers: + - name: migrate + image: $UPGRADE_CONTAINER + command: ['/bin/sh', '-c', '/controller-init.sh'] + env: + # The location of the Calico etcd cluster. + - name: CALICO_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + - name: CALICO_ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: CALICO_ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: CALICO_ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # The location of the Calico etcd cluster. + - name: CALICO_APIV1_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + - name: CALICO_APIV1_ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: CALICO_APIV1_ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: CALICO_APIV1_ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + - name: CALICO_APIV1_DATASTORE_TYPE + value: "etcdv2" + volumeMounts: + - mountPath: /calico-secrets + name: etcd-certs diff --git a/integration-snippets/node-init-container.sh b/integration-snippets/node-init-container.sh new file mode 100755 index 0000000..667f39b --- /dev/null +++ b/integration-snippets/node-init-container.sh @@ -0,0 +1,63 @@ +#!/bin/sh + +# Checks if upgrade is needed and starts/runs the upgrade +# if one is not already in progress. +# +# Requirements: +# - calico-upgrade is available in the PATH + +calico-upgrade $UPGRADE_OPTIONS needed $UPGRADE_ARGS +status=$? +if [ $status -eq 1 ]; then + echo "No data migration is needed. Continuing on." + exit 0 +elif [ $status -gt 1 ]; then + echo "Error checking if migration is needed" + exit 1 +fi + +# Before starting the migration, first check if one is already in progress +calico-upgrade $UPGRADE_OPTIONS inprogress $UPGRADE_ARGS +inprogress=$? +while [ $inprogress -eq 0 ]; do + sleep 5 + calico-upgrade $UPGRADE_OPTIONS inprogress $UPGRADE_ARGS + inprogress=$? + if [ $inprogress -gt 1 ]; then + echo "Error checking if migration is in progress" + exit 1 + fi +done + +# Check one more time before exiting +calico-upgrade $UPGRADE_OPTIONS needed $UPGRADE_ARGS +status=$? +if [ $status -eq 1 ]; then + echo "No data migration is needed. Continuing on." + exit 0 +elif [ $status -gt 1 ]; then + echo "Error checking if migration is needed" + exit 1 +fi + +REPORT_DIR=${REPORT_DIR:-migration-output} +# Capture the output so we can dump other information first and print this +# output last, where it is easiest to see. +calico-upgrade $UPGRADE_OPTIONS start --no-prompts --output-dir=$REPORT_DIR $UPGRADE_ARGS > migration.out 2>&1 +status=$? 
+ +# Dump the migration files so they are in the log +find $REPORT_DIR -type f -print0 | xargs -0 -I {} sh -c 'echo "=== Contents of {} ==="; cat "{}"' + +echo "=== Migration output ===" +# Dump the migration output so it does not get lost in the migration files output +cat migration.out + +if [ $status -ne 0 ]; then + echo "The status returned by the calico-upgrade command was $status" + echo "indicating there was an error. Review the above output and the" + echo "conversion results output for problems." + echo "To recover from errors consult your integrations documentation" + echo "for recovery help." +fi +exit $status diff --git a/integration-snippets/node-init-container.yaml b/integration-snippets/node-init-container.yaml new file mode 100644 index 0000000..d8592a3 --- /dev/null +++ b/integration-snippets/node-init-container.yaml @@ -0,0 +1,56 @@ + initContainers: + - name: migrate + image: $UPGRADE_CONTAINER + command: ['/bin/sh', '-c', '/node-init-container.sh'] + env: + # The location of the Calico etcd cluster. + - name: CALICO_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + - name: CALICO_ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: CALICO_ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: CALICO_ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # The location of the Calico etcd cluster. + - name: CALICO_APIV1_ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + - name: CALICO_APIV1_ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: CALICO_APIV1_ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: CALICO_APIV1_ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + - name: CALICO_APIV1_DATASTORE_TYPE + value: "etcdv2" + volumeMounts: + - mountPath: /calico-secrets + name: etcd-certs diff --git a/integration-snippets/template.sh b/integration-snippets/template.sh new file mode 100755 index 0000000..097764f --- /dev/null +++ b/integration-snippets/template.sh @@ -0,0 +1,390 @@ +#!/bin/bash -e + +cd "$(dirname "${BASH_SOURCE[0]}")" + +UPGRADE_CONTAINER=${UPGRADE_CONTAINER:-"calico/upgrade:latest"} + +dump_and_update() +{ + cat $1 | sed -e "s|\$UPGRADE_CONTAINER|$UPGRADE_CONTAINER|" +} + +cat << EOF +# Calico Version v3.1.1 +# https://docs.projectcalico.org/v3.1/releases#v3.1.1 +# This manifest includes the following component versions: +# calico/node:v3.1.1 +# calico/cni:v3.1.1 +# calico/kube-controllers:v3.1.1 + +# This ConfigMap is used to configure a self-hosted Calico installation. +kind: ConfigMap +apiVersion: v1 +metadata: + name: calico-config + namespace: kube-system +data: + # Configure this with the location of your etcd cluster. + etcd_endpoints: "http://172.18.18.101:2379" + + node_image: "calico/node:v3.1.1" + + # Configure the Calico backend to use. + calico_backend: "bird" + + # The CNI network configuration to install on each node. 
+ cni_network_config: |- + { + "name": "k8s-pod-network", + "cniVersion": "0.3.0", + "plugins": [ + { + "type": "calico", + "etcd_endpoints": "__ETCD_ENDPOINTS__", + "etcd_key_file": "__ETCD_KEY_FILE__", + "etcd_cert_file": "__ETCD_CERT_FILE__", + "etcd_ca_cert_file": "__ETCD_CA_CERT_FILE__", + "log_level": "info", + "mtu": 1500, + "ipam": { + "type": "calico-ipam" + }, + "policy": { + "type": "k8s" + }, + "kubernetes": { + "kubeconfig": "__KUBECONFIG_FILEPATH__" + } + }, + { + "type": "portmap", + "snat": true, + "capabilities": {"portMappings": true} + } + ] + } + + # If you're using TLS enabled etcd uncomment the following. + # You must also populate the Secret below with these files. + etcd_ca: "" # "/calico-secrets/etcd-ca" + etcd_cert: "" # "/calico-secrets/etcd-cert" + etcd_key: "" # "/calico-secrets/etcd-key" + +--- + +# The following contains k8s Secrets for use with a TLS enabled etcd cluster. +# For information on populating Secrets, see http://kubernetes.io/docs/user-guide/secrets/ +apiVersion: v1 +kind: Secret +type: Opaque +metadata: + name: calico-etcd-secrets + namespace: kube-system +data: + # Populate the following files with etcd TLS configuration if desired, but leave blank if + # not using TLS for etcd. + # This self-hosted install expects three files with the following names. The values + # should be base64 encoded strings of the entire contents of each file. + # etcd-key: null + # etcd-cert: null + # etcd-ca: null + +--- + +# This manifest installs the calico/node container, as well +# as the Calico CNI plugins and network config on +# each master and worker node in a Kubernetes cluster. +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: + name: calico-node + namespace: kube-system + labels: + k8s-app: calico-node +spec: + selector: + matchLabels: + k8s-app: calico-node + updateStrategy: + type: RollingUpdate + rollingUpdate: + maxUnavailable: 1 + template: + metadata: + labels: + k8s-app: calico-node + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' + spec: + hostNetwork: true + tolerations: + # Make sure calico/node gets scheduled on all nodes. + - effect: NoSchedule + operator: Exists + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - effect: NoExecute + operator: Exists + serviceAccountName: calico-node + # Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force + # deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods. + terminationGracePeriodSeconds: 0 +EOF +dump_and_update node-init-container.yaml +cat << EOF + containers: + # Runs calico/node container on each Kubernetes node. This + # container programs network policy and routes on each + # host. + - name: calico-node + image: quay.io/calico/node:v3.1.1 + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Choose the backend to use. + - name: CALICO_NETWORKING_BACKEND + valueFrom: + configMapKeyRef: + name: calico-config + key: calico_backend + # Cluster type to identify the deployment type + - name: CLUSTER_TYPE + value: "k8s,bgp" + # Disable file logging so 'kubectl logs' works. + - name: CALICO_DISABLE_FILE_LOGGING + value: "true" + # Set noderef for node controller. + - name: CALICO_K8S_NODE_REF + valueFrom: + fieldRef: + fieldPath: spec.nodeName + # Set Felix endpoint to host default action to ACCEPT. 
+ - name: FELIX_DEFAULTENDPOINTTOHOSTACTION + value: "ACCEPT" + # The default IPv4 pool to create on startup if none exists. Pod IPs will be + # chosen from this range. Changing this value after installation will have + # no effect. This should fall within '--cluster-cidr'. + - name: CALICO_IPV4POOL_CIDR + value: "192.168.0.0/16" + - name: CALICO_IPV4POOL_IPIP + value: "Always" + # Disable IPv6 on Kubernetes. + - name: FELIX_IPV6SUPPORT + value: "false" + # Set Felix logging to "info" + - name: FELIX_LOGSEVERITYSCREEN + value: "info" + # Set MTU for tunnel device used if ipip is enabled + - name: FELIX_IPINIPMTU + value: "1440" + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Auto-detect the BGP IP address. + - name: IP + value: "autodetect" + - name: FELIX_HEALTHENABLED + value: "true" + securityContext: + privileged: true + resources: + requests: + cpu: 250m + livenessProbe: + httpGet: + path: /liveness + port: 9099 + periodSeconds: 10 + initialDelaySeconds: 10 + failureThreshold: 6 + readinessProbe: + httpGet: + path: /readiness + port: 9099 + periodSeconds: 10 + volumeMounts: + - mountPath: /lib/modules + name: lib-modules + readOnly: true + - mountPath: /var/run/calico + name: var-run-calico + readOnly: false + - mountPath: /var/lib/calico + name: var-lib-calico + readOnly: false + - mountPath: /calico-secrets + name: etcd-certs + # This container installs the Calico CNI binaries + # and CNI network config file on each node. + - name: install-cni + image: quay.io/calico/cni:v3.1.1 + command: ["/install-cni.sh"] + env: + # Name of the CNI config file to create. + - name: CNI_CONF_NAME + value: "10-calico.conflist" + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # The CNI network config to install on each node. + - name: CNI_NETWORK_CONFIG + valueFrom: + configMapKeyRef: + name: calico-config + key: cni_network_config + volumeMounts: + - mountPath: /host/opt/cni/bin + name: cni-bin-dir + - mountPath: /host/etc/cni/net.d + name: cni-net-dir + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Used by calico/node. + - name: lib-modules + hostPath: + path: /lib/modules + - name: var-run-calico + hostPath: + path: /var/run/calico + - name: var-lib-calico + hostPath: + path: /var/lib/calico + # Used to install CNI. + - name: cni-bin-dir + hostPath: + path: /opt/cni/bin + - name: cni-net-dir + hostPath: + path: /etc/cni/net.d + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0400 + +--- + +# This manifest deploys the Calico Kubernetes controllers. +# See https://github.com/projectcalico/kube-controllers +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + annotations: + scheduler.alpha.kubernetes.io/critical-pod: '' +spec: + # The controllers can only have a single active instance. 
+ replicas: 1 + strategy: + type: Recreate + template: + metadata: + name: calico-kube-controllers + namespace: kube-system + labels: + k8s-app: calico-kube-controllers + spec: + # The controllers must run in the host network namespace so that + # it isn't governed by policy that would prevent it from working. + hostNetwork: true + tolerations: + # Mark the pod as a critical add-on for rescheduling. + - key: CriticalAddonsOnly + operator: Exists + - key: node-role.kubernetes.io/master + effect: NoSchedule + serviceAccountName: calico-kube-controllers +EOF +dump_and_update controller-init.yaml +cat << EOF + containers: + - name: calico-kube-controllers + image: quay.io/calico/kube-controllers:v3.1.1 + env: + # The location of the Calico etcd cluster. + - name: ETCD_ENDPOINTS + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_endpoints + # Location of the CA certificate for etcd. + - name: ETCD_CA_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_ca + # Location of the client key for etcd. + - name: ETCD_KEY_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_key + # Location of the client certificate for etcd. + - name: ETCD_CERT_FILE + valueFrom: + configMapKeyRef: + name: calico-config + key: etcd_cert + # Choose which controllers to run. + - name: ENABLED_CONTROLLERS + value: policy,profile,workloadendpoint,node + volumeMounts: + # Mount in the etcd TLS secrets. + - mountPath: /calico-secrets + name: etcd-certs + volumes: + # Mount in the etcd TLS secrets with mode 400. + # See https://kubernetes.io/docs/concepts/configuration/secret/ + - name: etcd-certs + secret: + secretName: calico-etcd-secrets + defaultMode: 0400 + +--- +EOF +dump_and_update completion-job.yaml +cat << EOF +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-kube-controllers + namespace: kube-system + +--- + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: calico-node + namespace: kube-system +EOF diff --git a/pkg/Dockerfile.calico_upgrade b/pkg/Dockerfile.calico_upgrade index 24fa9a8..a39cb94 100644 --- a/pkg/Dockerfile.calico_upgrade +++ b/pkg/Dockerfile.calico_upgrade @@ -1,6 +1,13 @@ FROM alpine:3.4 ADD dist/calico-upgrade ./calico-upgrade +ADD dist/kubectl ./kubectl +ADD integration-snippets/node-init-container.sh ./ +RUN chmod +x ./node-init-container.sh +ADD integration-snippets/controller-init.sh ./ +RUN chmod +x ./controller-init.sh +ADD integration-snippets/completion-job.sh ./ +RUN chmod +x ./completion-job.sh ENV CALICO_UPGRADE_CONTAINER=TRUE ENV PATH=$PATH:/ diff --git a/pkg/calicoupgrade.go b/pkg/calicoupgrade.go index 9f0df02..b110158 100644 --- a/pkg/calicoupgrade.go +++ b/pkg/calicoupgrade.go @@ -29,23 +29,25 @@ func main() { doc := `Usage: calico-upgrade [options] [...] - dry-run Perform a dry-run of the data migration. This validates that the - v1 formatted data will be successfully converted and that the v3 - datastore is in the correct state for the data migration. This - command outputs a full report of any migrated names, migration - errors, or migrated name conflicts. See Description section - below for details. - start Start the upgrade process. This does the following: - - performs a dry-run to verify the data will be migrated - successfully - - pauses Calico networking: this prevents new endpoints from - being created while allowing existing endpoints to remain - networked - - migrates the data from v1 to v3 format - complete This resumes Calico networking for the v3.x nodes. 
- abort This aborts the upgrade process by resuming Calico networking - for the v2.x nodes. - version Display the version of calico-upgrade. + dry-run Perform a dry-run of the data migration. This validates that the + v1 formatted data will be successfully converted and that the v3 + datastore is in the correct state for the data migration. This + command outputs a full report of any migrated names, migration + errors, or migrated name conflicts. See Description section + below for details. + start Start the upgrade process. This does the following: + - performs a dry-run to verify the data will be migrated + successfully + - pauses Calico networking: this prevents new endpoints from + being created while allowing existing endpoints to remain + networked + - migrates the data from v1 to v3 format + complete This resumes Calico networking for the v3.x nodes. + abort This aborts the upgrade process by resuming Calico networking + for the v2.x nodes. + needed Check whether the datastore data needs to be upgraded. + inprogress Check whether a datastore migration is currently in progress. + version Display the version of calico-upgrade. Options: -h --help Show this screen. @@ -88,6 +90,10 @@ Description: commands.Complete(args) case "abort": commands.Abort(args) + case "needed": + commands.Needed(args) + case "inprogress": + commands.InProgress(args) case "version": commands.Version(args) default: diff --git a/pkg/commands/inprogress.go b/pkg/commands/inprogress.go new file mode 100644 index 0000000..e43c0cf --- /dev/null +++ b/pkg/commands/inprogress.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package commands + +import ( + "fmt" + "os" + "strings" + + "github.com/docopt/docopt-go" + + "github.com/projectcalico/calico-upgrade/pkg/constants" + "github.com/projectcalico/libcalico-go/lib/upgrade/migrator" + "github.com/projectcalico/libcalico-go/lib/upgrade/migrator/clients" +) + +func InProgress(args []string) { + doc := constants.DatastoreIntro + `Usage: + calico-upgrade inprogress + [--apiconfigv3=] + [--apiconfigv1=] + +Example: + calico-upgrade inprogress --apiconfigv3=/path/to/v3/config --apiconfigv1=/path/to/v1/config + +Options: + -h --help Show this screen. + --apiconfigv3= Path to the file containing connection + configuration in YAML or JSON format for + the Calico v3 API. + [default: ` + constants.DefaultConfigPathV3 + `] + --apiconfigv1= Path to the file containing connection + configuration in YAML or JSON format for + the Calico v1 API. + [default: ` + constants.DefaultConfigPathV1 + `] + +Return code: + 0 The datastore is in the process of being migrated to the Calico V3 API. + 1 Datastore is not currently being migrated. + >1 Error checking if datastore migration is in progress. + +Description: + The inprogress command reports if there is a datastore migration currently + in progress. 
This is not the same as the upgrade having been started but not yet + completed or aborted; this command reports whether the V1 data is currently being + converted and written to V3 format. + +` + constants.ReportHelp + parsedArgs, err := docopt.Parse(doc, args, true, "", false, false) + if err != nil { + fmt.Printf("Invalid option:\n calico-upgrade %s\nUse flag '--help' to read about a specific subcommand.\n", strings.Join(args, " ")) + os.Exit(2) + } + if len(parsedArgs) == 0 { + return + } + cfv3 := parsedArgs["--apiconfigv3"].(string) + cfv1 := parsedArgs["--apiconfigv1"].(string) + ch := &cliHelper{} + + // Obtain the v1 and v3 clients. + clientv3, clientv1, err := clients.LoadClients(cfv3, cfv1) + if err != nil { + ch.Separator() + ch.Msg("Failed to check if a migration is in progress.") + ch.Bullet(fmt.Sprintf("Error accessing the Calico API: %v", err)) + ch.NewLine() + os.Exit(2) + } + + m := migrator.New(clientv3, clientv1, ch) + + ch.Separator() + // Check the migration in-progress status. + yes, err := m.IsMigrationInProgress() + if err == nil { + if yes { + // Migration is in progress + ch.Msg("Migration is in progress.") + ch.NewLine() + os.Exit(0) + } else { + // Migration is not in progress. + ch.Msg("Migration is NOT in progress.") + ch.NewLine() + os.Exit(1) + } + } else { + // There was an error checking the migration status + ch.Msg("There was an error checking if datastore migration is in progress") + ch.Bullet(fmt.Sprint("Error: ", err)) + ch.NewLine() + os.Exit(2) + } + + return +} diff --git a/pkg/commands/needed.go b/pkg/commands/needed.go new file mode 100644 index 0000000..8a2ef0b --- /dev/null +++ b/pkg/commands/needed.go @@ -0,0 +1,109 @@ +// Copyright (c) 2016 Tigera, Inc. All rights reserved. + +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package commands + +import ( + "fmt" + "os" + "strings" + + "github.com/docopt/docopt-go" + + "github.com/projectcalico/calico-upgrade/pkg/constants" + "github.com/projectcalico/libcalico-go/lib/upgrade/migrator" + "github.com/projectcalico/libcalico-go/lib/upgrade/migrator/clients" +) + +func Needed(args []string) { + doc := constants.DatastoreIntro + `Usage: + calico-upgrade needed + [--apiconfigv3=] + [--apiconfigv1=] + [--no-prompts] + +Example: + calico-upgrade needed --apiconfigv3=/path/to/v3/config --apiconfigv1=/path/to/v1/config + +Options: + -h --help Show this screen. + --apiconfigv3= Path to the file containing connection + configuration in YAML or JSON format for + the Calico v3 API. + [default: ` + constants.DefaultConfigPathV3 + `] + --apiconfigv1= Path to the file containing connection + configuration in YAML or JSON format for + the Calico v1 API. + [default: ` + constants.DefaultConfigPathV1 + `] + +Return code: + 0 The datastore needs to be migrated to the Calico V3 API. + 1 The datastore does not need to be migrated. + >1 There was a problem checking if migration is needed. 
+ +Description: + Reports whether the version information in the datastore(s) indicates that the + data in the datastore needs to be upgraded, or whether no upgrade is needed (because + this is a new install or the data is already in the V3 format). + +` + constants.ReportHelp + parsedArgs, err := docopt.Parse(doc, args, true, "", false, false) + if err != nil { + fmt.Printf("Invalid option:\n calico-upgrade %s\nUse flag '--help' to read about a specific subcommand.\n", strings.Join(args, " ")) + os.Exit(2) + } + if len(parsedArgs) == 0 { + return + } + cfv3 := parsedArgs["--apiconfigv3"].(string) + cfv1 := parsedArgs["--apiconfigv1"].(string) + ch := &cliHelper{} + + // Obtain the v1 and v3 clients. + clientv3, clientv1, err := clients.LoadClients(cfv3, cfv1) + if err != nil { + ch.Separator() + ch.Msg("Failed to check if upgrade is needed.") + ch.Bullet(fmt.Sprintf("Error accessing the Calico API: %v", err)) + ch.NewLine() + os.Exit(2) + } + + m := migrator.New(clientv3, clientv1, ch) + + ch.Separator() + // Check migration needed status. + yes, err := m.ShouldMigrate() + if err == nil { + if yes { + // Migration is needed + ch.Msg("Migration of the datastore to the V3 API is needed.") + ch.NewLine() + os.Exit(0) + } else { + // Migration is not needed. + ch.Msg("Migration of the datastore is not needed.") + ch.NewLine() + os.Exit(1) + } + } else { + // There was an error checking the migration status + ch.Msg("There was an error checking if datastore migration is needed") + ch.Bullet(fmt.Sprint("Error: ", err)) + ch.NewLine() + os.Exit(2) + } + + return +}
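For reference, here is a minimal sketch (not part of the patch) of how a wrapper script could consume the exit-code contract of the new needed and inprogress subcommands: both exit 0 for "yes", 1 for "no", and >1 on error. It assumes calico-upgrade is on the PATH and that UPGRADE_OPTIONS and UPGRADE_ARGS follow the same convention as the integration snippets above.

    #!/bin/sh
    # Check whether the v1-to-v3 data migration is needed.
    calico-upgrade $UPGRADE_OPTIONS needed $UPGRADE_ARGS
    case $? in
      0) echo "Datastore migration to v3 is needed." ;;
      1) echo "No datastore migration is needed."; exit 0 ;;
      *) echo "Error checking whether migration is needed."; exit 2 ;;
    esac

    # Wait for a migration started elsewhere (e.g. by the node initContainer) to finish;
    # the loop continues while 'inprogress' exits 0 (migration still running).
    while calico-upgrade $UPGRADE_OPTIONS inprogress $UPGRADE_ARGS; do
      sleep 5
    done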