This repository has been archived by the owner on Sep 12, 2023. It is now read-only.

add ability to use image-based installs #121

Open
wants to merge 11 commits into master
18 changes: 11 additions & 7 deletions Jenkinsfile
@@ -34,8 +34,8 @@ node {
}
}
executeStage(stageCheckout, 'Checkout from Git')

def stageLoadConfig = {
checkThatConfigFilesExist()
environmentVariables()
}
@@ -61,9 +61,13 @@ node {
}
executeStage(stagePubAndPromote, 'publish and promote CV')

/*
* if you do kickstart installs (the default), then execute buildtestvms.sh
* if you do image-based installs, then execute buildtestvms-from-image.sh instead
*/
def stagePrepVms = {
if (params.REBUILD_VMS == true) {
executeScript("${SCRIPTS_DIR}/buildtestvms.sh")
executeScript("${SCRIPTS_DIR}/buildtestvms-from-image.sh")
} else {
executeScript("${SCRIPTS_DIR}/starttestvms.sh")
}
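// a possible variant (not part of this change): select the script from a
// hypothetical boolean build parameter so one Jenkinsfile serves both
// install types:
//   def vmScript = params.IMAGE_BASED_INSTALL ? 'buildtestvms-from-image.sh' : 'buildtestvms.sh'
//   executeScript("${SCRIPTS_DIR}/${vmScript}")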
@@ -75,7 +79,7 @@ node {
step([$class: "TapPublisher", testResults: "test_results/*.tap", ])
}
executeStage(stageRunTests, 'run tests')

def stagePowerOff = {
if (params.POWER_OFF_VMS_AFTER_BUILD == true) {
executeScript("${SCRIPTS_DIR}/powerofftestvms.sh")
@@ -102,7 +106,7 @@ def executeStage(Closure closure, String stageName) {
currentBuild.result = 'FAILURE'
}
}
}
}

/**
@@ -113,7 +117,7 @@ def executeStage(Closure closure, String stageName) {
def checkThatConfigFilesExist() {
filesMissing = false
errorMessage = "The following config files are missing:"
[GENERAL_CONFIG_FILE, specificConfigFile].each { fileName ->
if (fileExists("${fileName}") == false) {
filesMissing = true
errorMessage = errorMessage + " ${fileName}"
@@ -125,7 +129,7 @@ def checkThatConfigFilesExist() {
}

def environmentVariables() {
[GENERAL_CONFIG_FILE, specificConfigFile].each { fileName ->
load "${fileName}"
}
}
118 changes: 118 additions & 0 deletions buildtestvms-from-image.sh
@@ -0,0 +1,118 @@
#!/bin/bash

# Instruct Foreman to rebuild the test VMs
#
# e.g. ${WORKSPACE}/scripts/buildtestvms-from-image.sh 'test'
#
# this will tell Foreman to delete and recreate all machines in host collection TESTVM_HOSTCOLLECTION

# Load common parameter variables
. $(dirname "${0}")/common.sh
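# common.sh is expected to provide the err/inform/tell helpers, the
# get_test_vm_list function and the WORKSPACE_ERR exit code used below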

if [[ -z ${PUSH_USER} ]] || [[ -z ${SATELLITE} ]] || [[ -z ${RSA_ID} ]] \
|| [[ -z ${ORG} ]] || [[ -z ${TESTVM_HOSTCOLLECTION} ]]
then
err "Environment variable PUSH_USER, SATELLITE, RSA_ID, ORG " \
"or TESTVM_HOSTCOLLECTION not set or not found."
exit ${WORKSPACE_ERR}
fi

get_test_vm_list # populate TEST_VM_LIST
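# a rough sketch of what get_test_vm_list is assumed to do (the real
# implementation lives in common.sh; the hammer flags shown are illustrative):
#   TEST_VM_LIST=( $(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
#     "hammer --csv host list --organization \"${ORG}\" \
#     --host-collection \"${TESTVM_HOSTCOLLECTION}\"" | tail -n +2 | cut -d, -f1) )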

# Error out if no test VMs are available
if [[ ${#TEST_VM_LIST[@]} -eq 0 ]]; then
err "No test VMs configured in Satellite"
exit 1
fi

# for each host: dump Name, Org, Location and Host Group to a temp file, then parse it

for I in "${TEST_VM_LIST[@]}"
do
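# "hammer host info" prints aligned "Key:  value" lines; splitting on runs of
# two or more spaces below isolates the key/value pairs for the per-field awks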
SUT_TMP_INFOFILE=$(mktemp)
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host info --id $I" | awk -F '[[:space:]][[:space:]]+' '$1~/^(Name|Organi[sz]ation|Host Group|Location)/ {print $1,$2}' > ${SUT_TMP_INFOFILE}
SUT_NAME=$(awk -F ': ' '$1~/^Name/ {print $2}' ${SUT_TMP_INFOFILE})
SUT_ORG=$(awk -F ': ' '$1~/^Organi[sz]ation/ {print $2}' ${SUT_TMP_INFOFILE})
SUT_HG_TITLE=$(awk -F ': ' '$1~/^Host Group/ {print $2}' ${SUT_TMP_INFOFILE})
SUT_LOC=$(awk -F ': ' '$1~/^Location/ {print $2}' ${SUT_TMP_INFOFILE})
rm -f ${SUT_TMP_INFOFILE}

inform "Deleting VM ID $I"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host delete --id $I"

inform "Recreating VM ID $I"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host create \
--name \"${SUT_NAME}\" \
--organization \"${SUT_ORG}\" \
--location \"${SUT_LOC}\" \
--hostgroup-title \"${SUT_HG_TITLE}\" \
--provision-method image \
--enabled true \
--managed true \
--compute-attributes=\"start=1\""
done


# we need to wait until all the test machines have been rebuilt by Foreman
# this check was previously only in pushtests.sh, but when using pipelines
# it's more sensible to wait here while the machines are in build mode
# the ping and ssh checks must remain in pushtests.sh,
# as a puppet-only build will not call this script

declare -A vmcopy # declare an associative array to copy our VM array into
for I in "${TEST_VM_LIST[@]}"; do vmcopy[$I]=$I; done

# the check below, taken from the kickstart-based installs, also works for
# image-based installs; the Build: status still changes
#[root@satellite ~]# hammer host info --name kvm-test2.sattest.pcfe.net | grep -e "Managed" -e "Enabled" -e "Build"
#Managed: yes
# Build Status: Pending installation
# Build: yes
# Enabled: yes
#[root@satellite ~]# hammer host info --name kvm-test2.sattest.pcfe.net | grep -e "Managed" -e "Enabled" -e "Build"
#Managed: yes
# Build Status: Installed
# Build: no
# Enabled: yes
#
# But potentially also check for
# Build Status: Pending installation
# changing to
# Build Status: Installed
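#
# a minimal sketch of that alternative check (not used below; assumes the
# "Build Status:" label in hammer's output is stable):
#   build_status=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
#     "hammer host info --name $I" | awk -F ':[[:space:]]+' '/Build Status/ {print $2}')
#   [[ "${build_status}" == "Installed" ]] && tell "host $I has finished building"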

WAIT=0
while [[ ${#vmcopy[@]} -gt 0 ]]
do
inform "Waiting 1 minute"
sleep 60
((WAIT+=60))
for I in "${vmcopy[@]}"
do
inform "Checking if host $I is in build mode."
status=$(ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host info --name $I | \
grep -e \"Managed.*yes\" -e \"Enabled.*yes\" -e \"Build.*no\" \
| wc -l")
# if all three lines matched, the SUT has left build mode
if [[ ${status} -eq 3 ]]
then
tell "host $I no longer in build mode."
unset vmcopy[$I]
# reboot the box here so that the new kernel is active
# this is only necessary on image-based installs
tell "rebooting host $I since it applied errata as part of cloud-init and we want the latest kernel and glibc active"
ssh -q -l ${PUSH_USER} -i ${RSA_ID} ${SATELLITE} \
"hammer host reboot --name $I"
else
tell "host $I is still in build mode."
fi
done
if [[ ${WAIT} -gt 6000 ]]
then
err "At least one host still in build mode after 6000 seconds. Exiting."
exit 1
fi
done

7 changes: 6 additions & 1 deletion pushtests.sh
@@ -75,7 +75,7 @@ do
# if the repolist does not contain what you expect, switch off auto-attach on the used activation key
inform "Listing repos on test server $I"
ssh -o StrictHostKeyChecking=no -i ${RSA_ID} root@$I "subscription-manager repos"


# force upload of package list to Satellite
# without this, an image-based install shows up as "katello-agent not installed" in Satellite 6.5.3
# see https://access.redhat.com/solutions/1385683
ssh -o StrictHostKeyChecking=no -i ${RSA_ID} root@$I "katello-package-upload --force"
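# a defensive variant (hypothetical, not part of this change): only run the
# upload on images that actually ship katello-host-tools
#   ssh -o StrictHostKeyChecking=no -i ${RSA_ID} root@$I \
#     "command -v katello-package-upload >/dev/null && katello-package-upload --force"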

# copy puppet-done-test.sh to SUT
scp -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i ${RSA_ID} \
${WORKSPACE}/scripts/puppet-done-test.sh root@$I:
2 changes: 1 addition & 1 deletion rhel-7-script-env-vars-puppet-only.groovy
@@ -1,5 +1,5 @@
env.REPO_ID=""
env.PUPPET_REPO_ID="2369"
env.PUPPET_REPO_ID="9"
env.TESTVM_HOSTCOLLECTION="Test Servers Jenkins pipeline"
env.PUPPET_REPO="/var/www/html/pub/soe-puppet-only"
env.CV="cv-puppet-only"
6 changes: 3 additions & 3 deletions rhel-7-script-env-vars-rpm.groovy
@@ -1,7 +1,7 @@
//this is for RHEL7 only as we build packages and shove them in a RHEL7 only yum repo
env.REPO_ID="70"
env.PUPPET_REPO_ID="71"
env.TESTVM_HOSTCOLLECTION="Test Servers Jenkins pipeline"
env.REPO_ID="10"
env.PUPPET_REPO_ID="8"
env.TESTVM_HOSTCOLLECTION="Test Servers installed from image"
env.YUM_REPO="/var/www/html/pub/soe-repo/rhel7"
env.PUPPET_REPO="/var/www/html/pub/soe-puppet"
env.CV="cv-RHEL-Server-Jenkins-pipeline"