Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

PMM-13734 helm tests #767

Merged
merged 12 commits into from
Feb 12, 2025
113 changes: 113 additions & 0 deletions .github/workflows/helm-tests.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,113 @@
# Workflow name shown in the Actions UI.
name: Helm tests

on:
  # Nightly run at 00:00 UTC.
  schedule:
    - cron: '0 0 * * *'

  # Manual trigger; inputs have defaults so the workflow can run as-is.
  workflow_dispatch:
    inputs:
      server_image:
        description: "server image: repo/name:tag"
        default: "perconalab/pmm-server:3-dev-latest"
        required: true
        type: string
      client_image:
        # NOTE(review): client_image is declared but not referenced by any
        # step in this workflow — confirm it is consumed by the pmm-qa
        # scripts, otherwise it can be dropped.
        description: "client image: repo/name:tag"
        default: "perconalab/pmm-client:3-dev-latest"
        required: true
        type: string
      pmm_qa_branch:
        description: "Branch for pmm-qa to checkout"
        default: "v3"
        required: false
        type: string
      sha:
        description: "commit sha to report status"
        required: false
        type: string

  # Reusable-workflow entry point. workflow_dispatch defaults do not apply
  # here, so the job-level env in this file supplies the fallbacks.
  workflow_call:
    inputs:
      server_image:
        required: true
        type: string
      client_image:
        required: true
        type: string
      pmm_qa_branch:
        required: false
        type: string
      sha:
        required: false
        type: string

jobs:
  helm-tests:
    runs-on: ubuntu-latest
    env:
      WORK_DIR: pmm-qa/k8s
      GH_API_TOKEN: ${{ secrets.GH_API_TOKEN }}
      # Fallbacks cover workflow_call invocations, where the
      # workflow_dispatch input defaults are not applied.
      PMM_QA_BRANCH: ${{ inputs.pmm_qa_branch || 'v3' }}
      SERVER_IMAGE: ${{ inputs.server_image || 'perconalab/pmm-server:3-dev-latest' }}

    steps:
      # The tests live in the percona/pmm-qa repo, not in this one.
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: ${{ env.PMM_QA_BRANCH }}
          repository: percona/pmm-qa
          path: ./pmm-qa

      - name: Set up bats globally
        run: |
          git clone https://github.com/bats-core/bats-core.git /opt/bats
          sudo /opt/bats/install.sh /usr/local

      - name: Set up bats libraries
        working-directory: ${{ env.WORK_DIR }}
        run: |
          ./setup_bats_libs.sh
          echo "BATS_LIB_PATH=$(pwd)/lib" >> $GITHUB_ENV

      - name: Start minikube
        run: |
          minikube start
          minikube addons disable storage-provisioner

          ### Install CSI drivers for snapshots
          kubectl delete storageclass standard
          minikube addons enable csi-hostpath-driver
          minikube addons enable volumesnapshots
          kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}'
          kubectl wait --for=condition=Ready node --timeout=90s minikube

      - name: Run helm tests
        working-directory: ${{ env.WORK_DIR }}
        run: |
          echo $(git submodule status)

          # Split "repo/name:tag" into the two variables helm-test.bats reads.
          export IMAGE_REPO=$(echo $SERVER_IMAGE | cut -d ':' -f 1)
          export IMAGE_TAG=$(echo $SERVER_IMAGE | cut -d ':' -f 2)
          bats --tap helm-test.bats

      # Report a commit status only when a sha was passed in (e.g. when
      # called from another workflow for a specific commit).
      - name: Create status check
        if: ${{ always() && inputs.sha }}
        continue-on-error: true
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          REPO: ${{ github.repository }}
          RUN_ID: ${{ github.run_id }}
          SHA: ${{ inputs.sha }}
          STATUS: ${{ job.status }}
        run: |
          # The commit-status API accepts only error/failure/pending/success,
          # so map a cancelled job to "error".
          if [ "${STATUS}" = "cancelled" ]; then
            STATUS="error"
          fi

          gh api \
            --method POST \
            -H "Accept: application/vnd.github.v3+json" \
            /repos/$REPO/statuses/$SHA \
            -f state="$STATUS" \
            -f target_url="https://github.com/$REPO/actions/runs/$RUN_ID" \
            -f description="Helm Tests status: $STATUS" \
            -f context='actions/workflows/helm-tests'
251 changes: 251 additions & 0 deletions k8s/helm-test.bats
Original file line number Diff line number Diff line change
@@ -0,0 +1,251 @@
## pmm k8s helm tests
### needs: helm, kubectl, k8s cluster with snapshotclass, default kubeconfig
## add comment #bats test_tags=bats:focus above the test to focus it

# minikube delete && \
# minikube start && \
# minikube addons disable storage-provisioner && \
# kubectl delete storageclass standard && \
# minikube addons enable csi-hostpath-driver && \
# minikube addons enable volumesnapshots && \
# kubectl patch storageclass csi-hostpath-sc -p '{"metadata": {"annotations":{"storageclass.kubernetes.io/is-default-class":"true"}}}' && \
# kubectl wait --for=condition=Ready node --timeout=90s minikube && \
# bats helm-test.bats

# Best-effort removal of anything a previous test (or run) left behind.
# Every step tolerates "nothing to delete" via || true, so this is safe
# to call from both setup and teardown.
cleanup () {
echo "--------cleanup---------"
# Uninstall every helm release in the current namespace.
# NOTE(review): with zero releases, GNU xargs still runs `helm uninstall`
# with no arguments; the resulting error is swallowed by || true.
helm list --short | xargs helm uninstall || true
# Force-delete any pmm-labelled resources helm may have left behind.
kubectl delete pod,service,statefulset,configmap,secret,serviceaccount,volumesnapshot --selector=app.kubernetes.io/name=pmm --force || true
# delete_pvc comes from the sourced helper scripts (see setup).
delete_pvc || true
# Drop the values.yaml written by the file-based tests.
rm values.yaml || true
echo "------------------------"
}

# bats runs setup before every @test: load helper functions, resolve the
# image coordinates (env overrides with dev-latest defaults), and start
# from a clean cluster state.
setup() {
echo "Running setup"
PROJECT_ROOT=$(dirname "$BATS_TEST_FILENAME")
echo "Project root: $PROJECT_ROOT"
# Presumably these define wait_for_pmm, start/stop_port_forward,
# get_pmm_version, delete_pvc, etc. used below — they live in pmm-qa/k8s.
source "$PROJECT_ROOT/k8s_helper.sh"
source "$PROJECT_ROOT/pmm_helper.sh"
# Image under test; CI exports these from SERVER_IMAGE (see workflow).
IMAGE_REPO=${IMAGE_REPO:-"perconalab/pmm-server"}
IMAGE_TAG=${IMAGE_TAG:-"3-dev-latest"}
# Last released V3 coordinates, used by the upgrade tests.
RELEASE_REPO="percona/pmm-server"
RELEASE_TAG="3"

cleanup
}

# bats runs teardown after every @test (pass or fail): dump diagnostics
# for the pmm pods, then clean the cluster for the next test.
teardown() {
echo "Running teardown"
echo "-------debug info-------"
kubectl get pods
kubectl describe pod --selector=app.kubernetes.io/name=pmm
kubectl get events --sort-by=lastTimestamp
kubectl logs --all-containers --timestamps --selector=app.kubernetes.io/name=pmm
echo "------------------------"

cleanup
}

# Strip leading and trailing whitespace from the joined arguments,
# printing the result without a trailing newline. Interior whitespace
# is preserved.
trim() {
    local s="$*"
    # Drop the longest all-whitespace prefix.
    s="${s#"${s%%[![:space:]]*}"}"
    # Drop the longest all-whitespace suffix.
    s="${s%"${s##*[![:space:]]}"}"
    echo -n "$s"
}

# Rewrite the value of a top-level-or-nested key in ./values.yaml in place,
# quoting the new value. Handles the sed -i flag difference between GNU
# (Linux) and BSD (macOS) sed.
#
# $1 - key name as it appears in values.yaml (e.g. "tag", "repository")
# $2 - replacement value; written as "value"
# Returns 1 on an unsupported OS.
update_values_yaml() {
    local property=$1
    local value=$2
    # Anchor the match to a whole key at the start of a (possibly indented)
    # line, preserving the indent via the capture group. The previous
    # unanchored pattern "$property: .*" also rewrote comment lines or any
    # line merely containing the substring (e.g. "# default tag: ...").
    local expr="s|^\\( *\\)${property}: .*|\\1${property}: \"${value}\"|"

    if [[ "$OSTYPE" == "linux-gnu"* ]]; then
        # GNU sed: -i takes no argument.
        sed -i "$expr" values.yaml
    elif [[ "$OSTYPE" == "darwin"* ]]; then
        # BSD sed: -i requires an (empty) backup suffix.
        sed -i '' "$expr" values.yaml
    else
        echo "Unsupported OS: $OSTYPE"
        return 1
    fi
}


@test "add helm repo" {
# Register the Percona chart repository; later tests install percona/pmm from it.
helm repo add percona https://percona.github.io/percona-helm-charts/
}

@test "generate values.yaml" {
# Smoke-check that the chart's default values can be rendered to a file.
helm show values percona/pmm > values.yaml
}

@test "install/uninstall default chart and check connectivity" {
# Baseline: install the chart with only the image overridden, confirm the
# server answers a version request over a port-forward, then uninstall.
stop_port_forward
helm install pmm \
--set image.repository=$IMAGE_REPO \
--set image.tag=$IMAGE_TAG \
--wait \
percona/pmm

wait_for_pmm
start_port_forward

pmm_version=$(get_pmm_version)
echo "pmm_version is ${pmm_version}"

stop_port_forward
helm uninstall --wait --timeout 60s pmm
# maybe pmm uninstall has ability to kill pvcs
# add validation that there is no load balancer, stateful set and containers/pods left
delete_pvc
}

@test "install/uninstall with parameter set in cli" {
# Install with an extra env var and a NodePort service to verify that
# --set / --set-string overrides reach the running server.
stop_port_forward
local instance_name="pmm1"
helm install $instance_name \
--set image.repository=$IMAGE_REPO \
--set image.tag=$IMAGE_TAG \
--set-string pmmEnv.PMM_ENABLE_ACCESS_CONTROL="1" \
--set service.type="NodePort" \
--wait \
percona/pmm
wait_for_pmm

start_port_forward

# The override must be visible as an env var inside the server container.
result=$(get_env_variable $instance_name "PMM_ENABLE_ACCESS_CONTROL")
trimmed_result=$(trim "$result")
assert_equal "$trimmed_result" "1"

pmm_version=$(get_pmm_version)
echo "pmm_version is ${pmm_version}"

stop_port_forward

# Fix: uninstall by $instance_name instead of the hard-coded "pmm1" so a
# release rename needs only one edit.
helm uninstall --wait --timeout 60s $instance_name
delete_pvc
}

@test "install/uninstall chart with file" {
# Same install path as the CLI test but driven by a values.yaml file
# instead of --set flags.
stop_port_forward
helm show values percona/pmm > values.yaml

update_values_yaml "tag" "$IMAGE_TAG"
update_values_yaml "repository" "$IMAGE_REPO"

helm install pmm2 -f values.yaml --wait percona/pmm
wait_for_pmm
start_port_forward

pmm_version=$(get_pmm_version)
echo "pmm_version is ${pmm_version}"

# NOTE(review): unlike the sibling tests, the port-forward is not stopped
# before uninstalling — confirm whether that is intentional.
helm uninstall --wait --timeout 60s pmm2
delete_pvc
}

@test "install last released V3 version, upgrade to V3 and uninstall" {
# Install the last released V3 image, then helm-upgrade in place to the
# image under test and verify the pod actually runs the new image.
stop_port_forward
helm show values percona/pmm > values.yaml

update_values_yaml "tag" "$RELEASE_TAG"
update_values_yaml "repository" "$RELEASE_REPO"

helm install pmm3 -f values.yaml --wait percona/pmm
wait_for_pmm
start_port_forward

pmm_version=$(get_pmm_version)
echo "pmm_version is ${pmm_version}"

# Point values.yaml at the image under test for the upgrade step.
update_values_yaml "tag" "$IMAGE_TAG"
update_values_yaml "repository" "$IMAGE_REPO"

# NOTE(review): runAsGroup/fsGroup are nulled for the upgrade —
# presumably to avoid a securityContext conflict; confirm against the chart.
helm upgrade pmm3 -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm
sleep 7 # give a chance to update manifest
wait_for_pmm

pmm_version=$(get_pmm_version)

# Verify the running container image matches the upgrade target.
local new_ver=$(kubectl get pod --selector=app.kubernetes.io/name=pmm -o jsonpath="{.items[*].spec.containers[*].image}")

if [ "$new_ver" != "$IMAGE_REPO:$IMAGE_TAG" ]; then
echo "Unexpected version: $new_ver , should be '$IMAGE_REPO:$IMAGE_TAG'"
cat values.yaml
false
fi

stop_port_forward
helm uninstall --wait --timeout 60s pmm3
delete_pvc
}

@test "install last released V2 version, upgrade to V3 and uninstall" {
# Install PMM v2 from the 1.3.0 chart, check the API with basic auth,
# then upgrade to the V3 image under test and verify the new image runs.
stop_port_forward
helm show values --version 1.3.0 percona/pmm > values.yaml

update_values_yaml "tag" "2.44.0"
update_values_yaml "repository" "percona/pmm-server"

helm install pmm4 --version 1.3.0 -f values.yaml --wait percona/pmm

wait_for_pmm
# v2 serves HTTPS on 443 through the port-forward.
start_port_forward 443

admin_pass=$(get_pmm_pswd)
pmm_address=$(get_pmm_addr)

# encode pass, as it can have special characters
encoded_u_p=$(echo -n admin:${admin_pass} | base64)

echo "curl -k -H 'Authorization: Basic ...' https://"${pmm_address}"/v1/version"
# echo admin pass in case there are some issues with it
echo "pass:${admin_pass}"

# `run` captures status/output into $status/$output for the asserts below.
run bash -c "curl -sk -H 'Authorization: Basic ${encoded_u_p}' https://${pmm_address}/v1/version | jq .version"
assert_success

# Check that the pmm_version string is not empty
if [[ -z "${output}" ]]; then
fail "pmm_version is empty"
fi

pmm_version=${output}
echo "pmm_version is ${pmm_version}"

stop_port_forward
start_port_forward

# Switch values.yaml to the latest chart defaults with the V3 test image.
helm show values percona/pmm > values.yaml

update_values_yaml "tag" "$IMAGE_TAG"
update_values_yaml "repository" "$IMAGE_REPO"

# Stop services and fix /srv ownership before the upgrade — presumably the
# v2→v3 data-migration prerequisite; confirm against the migration docs.
kubectl exec pmm4-0 -- supervisorctl stop all
kubectl exec pmm4-0 -- chown -R pmm:pmm /srv

helm upgrade pmm4 -f values.yaml --set podSecurityContext.runAsGroup=null --set podSecurityContext.fsGroup=null percona/pmm
sleep 7 # give a chance to update manifest
wait_for_pmm

pmm_version=$(get_pmm_version)

# Verify the running container image matches the upgrade target.
local new_ver=$(kubectl get pod --selector=app.kubernetes.io/name=pmm -o jsonpath="{.items[*].spec.containers[*].image}")

if [ "$new_ver" != "$IMAGE_REPO:$IMAGE_TAG" ]; then
echo "Unexpected version: $new_ver , should be '$IMAGE_REPO:$IMAGE_TAG'"
cat values.yaml
false
fi

stop_port_forward
helm uninstall --wait --timeout 60s pmm4
delete_pvc
}

Loading