Add initial integration test for dag tektoncd#168 🤓
This tests DAG functionality by defining a pipeline with both fan-in and
fan-out. The idea is that each Task echoes the current time, so after
the pipeline completes, we can look at which Task echoed which time to
make sure they ran in order. The tasks are also declared in the
Pipeline in the wrong order on purpose, to make sure that the order of
declaration doesn't affect how they are run (the opposite of the current
functionality).
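
A minimal sketch of that ordering check (not the code in this commit; it
assumes each Task echoed the output of `date +%s` and that the test has
already fetched each Task's output):

package dag_test

import (
	"strconv"
	"strings"
	"testing"
)

// assertRanBefore is a hypothetical helper: it parses the Unix-seconds
// timestamp each Task echoed and fails the test if the Task expected to run
// first actually echoed a later time.
func assertRanBefore(t *testing.T, earlierOut, laterOut string) {
	earlier, err := strconv.ParseInt(strings.TrimSpace(earlierOut), 10, 64)
	if err != nil {
		t.Fatalf("couldn't parse time from first Task's output %q: %v", earlierOut, err)
	}
	later, err := strconv.ParseInt(strings.TrimSpace(laterOut), 10, 64)
	if err != nil {
		t.Fatalf("couldn't parse time from second Task's output %q: %v", laterOut, err)
	}
	// Equal timestamps are allowed since the resolution is one second.
	if earlier > later {
		t.Errorf("expected first Task (t=%d) to run before second Task (t=%d)", earlier, later)
	}
}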

Three caveats to this integration test:
- It was created before tektoncd#320, so the resource binding is not correct
- It was created before tektoncd#387, so it relies on the log PVC, which will
no longer exist (a possible workaround: mount a PVC explicitly in the test
and write directly to it instead of echoing; see the sketch after this list)
- It doesn't exercise `runAfter` functionality
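
A minimal sketch of that PVC workaround, assuming the test creates its own
claim named `dag-test-pvc` (the claim name, mount path, and file name are all
made up here, not part of this commit):

import corev1 "k8s.io/api/core/v1"

// Hypothetical step for the workaround in the second caveat: instead of
// echoing to the (soon to be removed) log PVC, mount a claim created by the
// test and write each Task's timestamp straight to a file on it.
var writeTimeStep = corev1.Container{
	Name:    "write-time",
	Image:   "busybox",
	Command: []string{"sh", "-c", "date +%s > /pvc/task-a.time"},
	VolumeMounts: []corev1.VolumeMount{{
		Name:      "dag-test-pvc", // claim/volume name is an assumption
		MountPath: "/pvc",
	}},
}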
bobcatfish committed Jan 28, 2019
1 parent 559397d commit 6197df7
Showing 3 changed files with 448 additions and 11 deletions.
test/crd.go (20 changes: 10 additions & 10 deletions)
@@ -50,15 +50,15 @@ const (
 	buildOutput = "Build successful"
 )
 
-func getHelloWorldValidationPod(namespace, volumeClaimName string) *corev1.Pod {
+func getLogFetcherPod(namespace, volumeClaimName, podName string) *corev1.Pod {
 	return &corev1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: namespace,
-			Name:      hwValidationPodName,
+			Name:      podName,
 		},
 		Spec: corev1.PodSpec{
 			Containers: []corev1.Container{{
-				Name:  hwValidationPodName,
+				Name:  podName,
 				Image: "busybox",
 				Command: []string{
 					"cat",
@@ -118,29 +118,29 @@ func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun {
 	}
 }
 
-func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace, testStr string) (string, error) {
+func getBuildOutputFromVolume(logger *logging.BaseLogger, c *clients, namespace, volumeClaimName, podName string) (string, error) {
 	// Create Validation Pod
 	pods := c.KubeClient.Kube.CoreV1().Pods(namespace)
 
 	// Volume created for Task should have the same name as the Task
-	if _, err := pods.Create(getHelloWorldValidationPod(namespace, hwTaskRunName)); err != nil {
-		return "", fmt.Errorf("failed to create Validation pod to mount volume `%s`: %s", hwTaskRunName, err)
+	if _, err := pods.Create(getLogFetcherPod(namespace, volumeClaimName, podName)); err != nil {
+		return "", fmt.Errorf("failed to create Validation pod to mount volume `%s`: %s", volumeClaimName, err)
 	}
 
-	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", hwTaskRunName)
-	if err := WaitForPodState(c, hwValidationPodName, namespace, func(p *corev1.Pod) (bool, error) {
+	logger.Infof("Waiting for pod with test volume %s to come up so we can read logs from it", volumeClaimName)
+	if err := WaitForPodState(c, podName, namespace, func(p *corev1.Pod) (bool, error) {
 		// the "Running" status is used as "Succeeded" caused issues as the pod succeeds and restarts quickly
 		// there might be a race condition here and possibly a better way of handling this, perhaps using a Job or different state validation
 		if p.Status.Phase == corev1.PodRunning {
 			return true, nil
 		}
 		return false, nil
 	}, "ValidationPodCompleted"); err != nil {
-		return "", fmt.Errorf("error waiting for Pod %s to finish: %s", hwValidationPodName, err)
+		return "", fmt.Errorf("error waiting for Pod %s to finish: %s", podName, err)
 	}
 
 	// Get validation pod logs and verify that the build executed a container w/ desired output
-	req := pods.GetLogs(hwValidationPodName, &corev1.PodLogOptions{})
+	req := pods.GetLogs(podName, &corev1.PodLogOptions{})
 	readCloser, err := req.Stream()
 	if err != nil {
 		return "", fmt.Errorf("failed to open stream to read: %v", err)