From 4a405ace39165607125112c4c0d61b8e267a9656 Mon Sep 17 00:00:00 2001 From: Vincent Demeester Date: Mon, 23 Mar 2020 17:14:17 +0100 Subject: [PATCH] =?UTF-8?q?Introduce=20v1beta1=20e2e=20go=20tests=20?= =?UTF-8?q?=F0=9F=93=A5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit … and move v1alpha1 e2e go tests in their own package. Signed-off-by: Vincent Demeester --- pkg/reconciler/pipelinerun/cancel_test.go | 2 +- .../pipelinerun/pipelinerun_test.go | 2 +- pkg/reconciler/reconciler_test.go | 2 +- pkg/reconciler/taskrun/cancel_test.go | 2 +- pkg/reconciler/taskrun/taskrun_test.go | 2 +- pkg/reconciler/timeout_handler_test.go | 2 +- test/artifact_bucket_test.go | 201 +++++-- test/cancel_test.go | 42 +- test/clients.go | 17 +- test/cluster_resource_test.go | 88 ++- test/controller.go | 42 +- test/dag_test.go | 164 +++-- test/duplicate_test.go | 21 +- test/e2e-tests.sh | 2 +- test/embed_test.go | 55 +- test/entrypoint_test.go | 30 +- test/git_checkout_test.go | 65 +- test/helm_task_test.go | 210 +++++-- test/kaniko_task_test.go | 83 ++- test/pipelinerun_test.go | 372 ++++++++---- test/retry_test.go | 15 +- test/sidecar_test.go | 41 +- test/start_time_test.go | 11 +- test/status_test.go | 48 +- test/taskrun_test.go | 75 ++- test/timeout_test.go | 137 +++-- test/v1alpha1/adoc.go | 44 ++ test/v1alpha1/artifact_bucket_test.go | 266 +++++++++ test/v1alpha1/build_logs.go | 70 +++ test/v1alpha1/cancel_test.go | 155 +++++ test/v1alpha1/clients.go | 96 +++ test/v1alpha1/cluster_resource_test.go | 159 +++++ test/v1alpha1/controller.go | 178 ++++++ test/v1alpha1/dag_test.go | 186 ++++++ test/v1alpha1/duplicate_test.go | 77 +++ test/v1alpha1/embed_test.go | 90 +++ test/v1alpha1/entrypoint_test.go | 64 ++ test/v1alpha1/git_checkout_test.go | 284 +++++++++ test/v1alpha1/helm_task_test.go | 359 +++++++++++ test/v1alpha1/init_test.go | 213 +++++++ test/v1alpha1/kaniko_task_test.go | 209 +++++++ test/v1alpha1/ko_test.go | 53 ++ 
test/v1alpha1/pipelinerun_test.go | 558 ++++++++++++++++++ test/v1alpha1/registry_test.go | 93 +++ test/v1alpha1/retry_test.go | 141 +++++ test/v1alpha1/secret.go | 63 ++ test/v1alpha1/sidecar_test.go | 164 +++++ test/v1alpha1/start_time_test.go | 93 +++ test/v1alpha1/status_test.go | 74 +++ test/v1alpha1/taskrun_test.go | 167 ++++++ test/v1alpha1/timeout_test.go | 273 +++++++++ test/v1alpha1/wait.go | 266 +++++++++ test/v1alpha1/wait_example_test.go | 69 +++ test/{ => v1alpha1}/wait_test.go | 0 test/v1alpha1/workingdir_test.go | 132 +++++ test/v1alpha1/workspace_test.go | 168 ++++++ test/workingdir_test.go | 48 +- test/workspace_test.go | 146 +++-- 58 files changed, 6087 insertions(+), 602 deletions(-) create mode 100644 test/v1alpha1/adoc.go create mode 100644 test/v1alpha1/artifact_bucket_test.go create mode 100644 test/v1alpha1/build_logs.go create mode 100644 test/v1alpha1/cancel_test.go create mode 100644 test/v1alpha1/clients.go create mode 100644 test/v1alpha1/cluster_resource_test.go create mode 100644 test/v1alpha1/controller.go create mode 100644 test/v1alpha1/dag_test.go create mode 100644 test/v1alpha1/duplicate_test.go create mode 100644 test/v1alpha1/embed_test.go create mode 100644 test/v1alpha1/entrypoint_test.go create mode 100644 test/v1alpha1/git_checkout_test.go create mode 100644 test/v1alpha1/helm_task_test.go create mode 100644 test/v1alpha1/init_test.go create mode 100644 test/v1alpha1/kaniko_task_test.go create mode 100644 test/v1alpha1/ko_test.go create mode 100644 test/v1alpha1/pipelinerun_test.go create mode 100644 test/v1alpha1/registry_test.go create mode 100644 test/v1alpha1/retry_test.go create mode 100644 test/v1alpha1/secret.go create mode 100644 test/v1alpha1/sidecar_test.go create mode 100644 test/v1alpha1/start_time_test.go create mode 100644 test/v1alpha1/status_test.go create mode 100644 test/v1alpha1/taskrun_test.go create mode 100644 test/v1alpha1/timeout_test.go create mode 100644 test/v1alpha1/wait.go create mode 100644 
test/v1alpha1/wait_example_test.go rename test/{ => v1alpha1}/wait_test.go (100%) create mode 100644 test/v1alpha1/workingdir_test.go create mode 100644 test/v1alpha1/workspace_test.go diff --git a/pkg/reconciler/pipelinerun/cancel_test.go b/pkg/reconciler/pipelinerun/cancel_test.go index a0f5e0addf9..6e94e4e81e4 100644 --- a/pkg/reconciler/pipelinerun/cancel_test.go +++ b/pkg/reconciler/pipelinerun/cancel_test.go @@ -23,8 +23,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" + test "github.com/tektoncd/pipeline/test/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "knative.dev/pkg/apis" ) diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 1728c0efc7c..1108c09cf29 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -32,9 +32,9 @@ import ( taskrunresources "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/system" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" "github.com/tektoncd/pipeline/test/names" + test "github.com/tektoncd/pipeline/test/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ktesting "k8s.io/client-go/testing" diff --git a/pkg/reconciler/reconciler_test.go b/pkg/reconciler/reconciler_test.go index f44b3ff97b3..36f5908c880 100644 --- a/pkg/reconciler/reconciler_test.go +++ b/pkg/reconciler/reconciler_test.go @@ -26,8 +26,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources" 
ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" + test "github.com/tektoncd/pipeline/test/v1alpha1" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" corev1 "k8s.io/api/core/v1" diff --git a/pkg/reconciler/taskrun/cancel_test.go b/pkg/reconciler/taskrun/cancel_test.go index 6dcdad01d2b..7480d0ddf64 100644 --- a/pkg/reconciler/taskrun/cancel_test.go +++ b/pkg/reconciler/taskrun/cancel_test.go @@ -23,8 +23,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" + test "github.com/tektoncd/pipeline/test/v1alpha1" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" corev1 "k8s.io/api/core/v1" diff --git a/pkg/reconciler/taskrun/taskrun_test.go b/pkg/reconciler/taskrun/taskrun_test.go index 1bb34e7d610..8b5f38095c9 100644 --- a/pkg/reconciler/taskrun/taskrun_test.go +++ b/pkg/reconciler/taskrun/taskrun_test.go @@ -33,9 +33,9 @@ import ( "github.com/tektoncd/pipeline/pkg/reconciler/taskrun/resources/cloudevent" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" "github.com/tektoncd/pipeline/pkg/system" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" "github.com/tektoncd/pipeline/test/names" + test "github.com/tektoncd/pipeline/test/v1alpha1" corev1 "k8s.io/api/core/v1" k8sapierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" diff --git a/pkg/reconciler/timeout_handler_test.go b/pkg/reconciler/timeout_handler_test.go index 76dfaed429a..77f2cc7855b 100644 --- a/pkg/reconciler/timeout_handler_test.go +++ b/pkg/reconciler/timeout_handler_test.go @@ -25,8 +25,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/config" 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" ttesting "github.com/tektoncd/pipeline/pkg/reconciler/testing" - "github.com/tektoncd/pipeline/test" tb "github.com/tektoncd/pipeline/test/builder" + test "github.com/tektoncd/pipeline/test/v1alpha1" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" corev1 "k8s.io/api/core/v1" diff --git a/test/artifact_bucket_test.go b/test/artifact_bucket_test.go index 0fc1a3338b1..3ffaad85d3b 100644 --- a/test/artifact_bucket_test.go +++ b/test/artifact_bucket_test.go @@ -26,6 +26,8 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" @@ -66,28 +68,44 @@ func TestStorageBucketPipelineRun(t *testing.T) { defer deleteBucketSecret(c, t, namespace) t.Logf("Creating GCS bucket %s", bucketName) - createbuckettask := tb.Task("createbuckettask", namespace, tb.TaskSpec( - tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: bucketSecretName, - }, - })), - tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)), - tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), - tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), - ), - ), - ) + createbuckettask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "createbuckettask", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "step1", + Image: 
"google/cloud-sdk:alpine", + Command: []string{"/bin/bash"}, + Args: []string{"-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "bucket-secret-volume", + MountPath: fmt.Sprintf("/var/secret/%s", bucketSecretName), + }}, + Env: []corev1.EnvVar{{ + Name: "CREDENTIALS", Value: fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey), + }}, + }}}, + Volumes: []corev1.Volume{{ + Name: "bucket-secret-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + }, + }}, + }, + } t.Logf("Creating Task %s", "createbuckettask") if _, err := c.TaskClient.Create(createbuckettask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "createbuckettask", err) } - createbuckettaskrun := tb.TaskRun("createbuckettaskrun", namespace, - tb.TaskRunSpec(tb.TaskRunTaskRef("createbuckettask"))) + createbuckettaskrun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: "createbuckettaskrun", Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "createbuckettask"}, + }, + } t.Logf("Creating TaskRun %s", "createbuckettaskrun") if _, err := c.TaskRunClient.Create(createbuckettaskrun); err != nil { @@ -129,48 +147,97 @@ func TestStorageBucketPipelineRun(t *testing.T) { } t.Logf("Creating Task %s", addFileTaskName) - addFileTask := tb.Task(addFileTaskName, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), - tb.TaskOutputs(tb.OutputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), - tb.Step("ubuntu", tb.StepName("addfile"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "'#!/bin/bash\necho hello' > /workspace/helloworldgit/newfile"), - ), - tb.Step("ubuntu", tb.StepName("make-executable"), tb.StepCommand("chmod"), - tb.StepArgs("+x", 
"/workspace/helloworldgit/newfile")), - )) + addFileTask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: addFileTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{ + Name: "addfile", Image: "ubuntu", + }, + Script: "echo '#!/bin/bash\necho hello' > /workspace/helloworldgit/newfile", + }, { + Container: corev1.Container{ + Name: "make-executable", Image: "ubuntu", + }, + Script: "chmod +x /workspace/helloworldgit/newfile", + }}, + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: helloworldResourceName, Type: resourcev1alpha1.PipelineResourceTypeGit, + }}}, + Outputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: helloworldResourceName, Type: resourcev1alpha1.PipelineResourceTypeGit, + }}}, + }, + }, + } if _, err := c.TaskClient.Create(addFileTask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) } t.Logf("Creating Task %s", runFileTaskName) - readFileTask := tb.Task(runFileTaskName, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), - tb.Step("ubuntu", tb.StepName("runfile"), tb.StepCommand("/workspace/helloworld/newfile")), - )) + readFileTask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: runFileTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "runfile", Image: "ubuntu", + Command: []string{"/workspace/hellowrld/newfile"}, + }}}, + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: helloworldResourceName, Type: resourcev1alpha1.PipelineResourceTypeGit, + }}}, + }, + }, + } if _, err := c.TaskClient.Create(readFileTask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", runFileTaskName, err) } t.Logf("Creating Pipeline %s", 
bucketTestPipelineName) - bucketTestPipeline := tb.Pipeline(bucketTestPipelineName, namespace, tb.PipelineSpec( - tb.PipelineDeclaredResource("source-repo", "git"), - tb.PipelineTask("addfile", addFileTaskName, - tb.PipelineTaskInputResource("helloworldgit", "source-repo"), - tb.PipelineTaskOutputResource("helloworldgit", "source-repo"), - ), - tb.PipelineTask("runfile", runFileTaskName, - tb.PipelineTaskInputResource("helloworldgit", "source-repo", tb.From("addfile")), - ), - )) + bucketTestPipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: bucketTestPipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: "source-repo", Type: resourcev1alpha1.PipelineResourceTypeGit, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "addfile", + TaskRef: &v1beta1.TaskRef{Name: addFileTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "helloworldgit", Resource: "source-repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "helloworldgit", Resource: "source-repo", + }}, + }, + }, { + Name: "runfile", + TaskRef: &v1beta1.TaskRef{Name: runFileTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "helloworldgit", Resource: "source-repo", From: []string{"addfile"}, + }}, + }, + }}, + }, + } if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) } t.Logf("Creating PipelineRun %s", bucketTestPipelineRunName) - bucketTestPipelineRun := tb.PipelineRun(bucketTestPipelineRunName, namespace, tb.PipelineRunSpec( - bucketTestPipelineName, - tb.PipelineRunResourceBinding("source-repo", tb.PipelineResourceBindingRef(helloworldResourceName)), - )) + bucketTestPipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: bucketTestPipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + 
PipelineRef: &v1beta1.PipelineRef{Name: bucketTestPipelineName}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "source-repo", + ResourceRef: &v1beta1.PipelineResourceRef{Name: helloworldResourceName}, + }}, + }, + } if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) } @@ -232,28 +299,44 @@ func resetConfigMap(t *testing.T, c *clients, namespace, configName string, valu } func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { - deletelbuckettask := tb.Task("deletelbuckettask", namespace, tb.TaskSpec( - tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: bucketSecretName, - }, - })), - tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)), - tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), - tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), - ), - ), - ) + deletelbuckettask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "deletelbuckettask", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "step1", + Image: "google/cloud-sdk:alpine", + Command: []string{"/bin/bash"}, + Args: []string{"-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "bucket-secret-volume", + MountPath: fmt.Sprintf("/var/secret/%s", bucketSecretName), + }}, + Env: []corev1.EnvVar{{ + Name: "CREDENTIALS", Value: fmt.Sprintf("/var/secret/%s/%s", 
bucketSecretName, bucketSecretKey), + }}, + }}}, + Volumes: []corev1.Volume{{ + Name: "bucket-secret-volume", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + }, + }}, + }, + } t.Logf("Creating Task %s", "deletelbuckettask") if _, err := c.TaskClient.Create(deletelbuckettask); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "deletelbuckettask", err) } - deletelbuckettaskrun := tb.TaskRun("deletelbuckettaskrun", namespace, - tb.TaskRunSpec(tb.TaskRunTaskRef("deletelbuckettask"))) + deletelbuckettaskrun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: "deletelbuckettaskrun", Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "deletelbuckettask"}, + }, + } t.Logf("Creating TaskRun %s", "deletelbuckettaskrun") if _, err := c.TaskRunClient.Create(deletelbuckettaskrun); err != nil { diff --git a/test/cancel_test.go b/test/cancel_test.go index 0d029885d2e..ab937329c6b 100644 --- a/test/cancel_test.go +++ b/test/cancel_test.go @@ -22,8 +22,8 @@ import ( "sync" "testing" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -52,9 +52,12 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { for _, tdd := range tds { t.Run(tdd.name, func(t *testing.T) { tdd := tdd - var pipelineTask = tb.PipelineTask("foo", "banana") + pipelineTask := v1beta1.PipelineTask{ + Name: "foo", + TaskRef: &v1beta1.TaskRef{Name: "banana"}, + } if tdd.retries { - pipelineTask = tb.PipelineTask("foo", "banana", tb.Retries(1)) + pipelineTask.Retries = 1 } c, namespace := setup(t) @@ -64,22 +67,37 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { defer tearDown(t, c, namespace) t.Logf("Creating Task in namespace %s", namespace) - task := 
tb.Task("banana", namespace, tb.TaskSpec( - tb.Step("ubuntu", tb.StepCommand("/bin/bash"), tb.StepArgs("-c", "sleep 5000")), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "banana", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "sleep 5000"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `banana`: %s", err) } t.Logf("Creating Pipeline in namespace %s", namespace) - pipeline := tb.Pipeline("tomatoes", namespace, - tb.PipelineSpec(pipelineTask), - ) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "tomatoes", Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{pipelineTask}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) } - pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name)) + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "pear", Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipeline.Name}, + }, + } t.Logf("Creating PipelineRun in namespace %s", namespace) if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { @@ -117,7 +135,7 @@ func TestTaskRunPipelineRunCancel(t *testing.T) { t.Fatalf("Failed to get PipelineRun `%s`: %s", "pear", err) } - pr.Spec.Status = v1alpha1.PipelineRunSpecStatusCancelled + pr.Spec.Status = v1beta1.PipelineRunSpecStatusCancelled if _, err := c.PipelineRunClient.Update(pr); err != nil { t.Fatalf("Failed to cancel PipelineRun `%s`: %s", "pear", err) } diff --git a/test/clients.go b/test/clients.go index 3d4b9efafea..56822302a80 100644 --- a/test/clients.go +++ b/test/clients.go @@ -43,6 +43,7 @@ import ( "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" 
"github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" resourceversioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned" resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1" knativetest "knative.dev/pkg/test" @@ -52,10 +53,10 @@ import ( type clients struct { KubeClient *knativetest.KubeClient - PipelineClient v1alpha1.PipelineInterface - TaskClient v1alpha1.TaskInterface - TaskRunClient v1alpha1.TaskRunInterface - PipelineRunClient v1alpha1.PipelineRunInterface + PipelineClient v1beta1.PipelineInterface + TaskClient v1beta1.TaskInterface + TaskRunClient v1beta1.TaskRunInterface + PipelineRunClient v1beta1.PipelineRunInterface PipelineResourceClient resourcev1alpha1.PipelineResourceInterface ConditionClient v1alpha1.ConditionInterface } @@ -86,10 +87,10 @@ func newClients(t *testing.T, configPath, clusterName, namespace string) *client if err != nil { t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) } - c.PipelineClient = cs.TektonV1alpha1().Pipelines(namespace) - c.TaskClient = cs.TektonV1alpha1().Tasks(namespace) - c.TaskRunClient = cs.TektonV1alpha1().TaskRuns(namespace) - c.PipelineRunClient = cs.TektonV1alpha1().PipelineRuns(namespace) + c.PipelineClient = cs.TektonV1beta1().Pipelines(namespace) + c.TaskClient = cs.TektonV1beta1().Tasks(namespace) + c.TaskRunClient = cs.TektonV1beta1().TaskRuns(namespace) + c.PipelineRunClient = cs.TektonV1beta1().PipelineRuns(namespace) c.PipelineResourceClient = rcs.TektonV1alpha1().PipelineResources(namespace) c.ConditionClient = cs.TektonV1alpha1().Conditions(namespace) return c diff --git a/test/cluster_resource_test.go b/test/cluster_resource_test.go index a35e2306b51..130d081688e 100644 --- a/test/cluster_resource_test.go +++ b/test/cluster_resource_test.go @@ -22,6 
+22,8 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resources "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -97,35 +99,69 @@ func getClusterResourceTaskSecret(namespace, name string) *corev1.Secret { } } -func getClusterResourceTask(namespace, name, configName string) *v1alpha1.Task { - return tb.Task(name, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("target-cluster", v1alpha1.PipelineResourceTypeCluster)), - tb.TaskVolume("config-vol", tb.VolumeSource(corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: configName, - }, +func getClusterResourceTask(namespace, name, configName string) *v1beta1.Task { + return &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "target-cluster", + Type: resources.PipelineResourceTypeCluster, + }}}, }, - })), - tb.Step("ubuntu", tb.StepName("check-file-existence"), - tb.StepCommand("cat"), tb.StepArgs("/workspace/helloworld-cluster/kubeconfig"), - ), - tb.Step("ubuntu", tb.StepName("check-config-data"), - tb.StepCommand("cat"), tb.StepArgs("/config/test.data"), - tb.StepVolumeMount("config-vol", "/config"), - ), - tb.Step("ubuntu", tb.StepName("check-contents"), - tb.StepCommand("bash"), tb.StepArgs("-c", "cmp -b /workspace/helloworld-cluster/kubeconfig /config/test.data"), - tb.StepVolumeMount("config-vol", "/config"), - ), - )) + Volumes: []corev1.Volume{{ + Name: "config-vol", + VolumeSource: corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: 
configName, + }, + }, + }, + }}, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "check-file-existence", + Image: "ubuntu", + Command: []string{"cat"}, + Args: []string{"/workspace/helloworld-cluster/kubeconfig"}, + }}, {Container: corev1.Container{ + Name: "check-config-data", + Image: "ubuntu", + Command: []string{"cat"}, + Args: []string{"/config/test.data"}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "config-vol", + MountPath: "/config", + }}, + }}, {Container: corev1.Container{ + Name: "check-contents", + Image: "ubuntu", + Command: []string{"bash"}, + Args: []string{"-c", "cmp -b /workspace/helloworld-cluster/kubeconfig /config/test.data"}, + VolumeMounts: []corev1.VolumeMount{{ + Name: "config-vol", + MountPath: "/config", + }}, + }}}, + }, + } } -func getClusterResourceTaskRun(namespace, name, taskName, resName string) *v1alpha1.TaskRun { - return tb.TaskRun(name, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(taskName), - tb.TaskRunInputs(tb.TaskRunInputsResource("target-cluster", tb.TaskResourceBindingRef(resName))), - )) +func getClusterResourceTaskRun(namespace, name, taskName, resName string) *v1beta1.TaskRun { + return &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: name, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: taskName}, + Resources: &v1beta1.TaskRunResources{ + Inputs: []v1beta1.TaskResourceBinding{{ + PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "target-cluster", + ResourceRef: &v1beta1.PipelineResourceRef{Name: resName}, + }, + }}, + }, + }, + } } func getClusterConfigMap(namespace, name string) *corev1.ConfigMap { diff --git a/test/controller.go b/test/controller.go index e02c848b049..a229b5704b3 100644 --- a/test/controller.go +++ b/test/controller.go @@ -22,15 +22,17 @@ import ( // Link in the fakes so they get injected into injection.Fake "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + informersv1beta1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1beta1" fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" - fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake" fakeconditioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake" - fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake" - fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake" - faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake" - faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake" + fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/clustertask/fake" + fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipeline/fake" + fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/pipelinerun/fake" + faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/task/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1beta1/taskrun/fake" fakeresourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake" resourceinformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1" 
fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" @@ -46,11 +48,11 @@ import ( // Data represents the desired state of the system (i.e. existing resources) to seed controllers // with. type Data struct { - PipelineRuns []*v1alpha1.PipelineRun - Pipelines []*v1alpha1.Pipeline - TaskRuns []*v1alpha1.TaskRun - Tasks []*v1alpha1.Task - ClusterTasks []*v1alpha1.ClusterTask + PipelineRuns []*v1beta1.PipelineRun + Pipelines []*v1beta1.Pipeline + TaskRuns []*v1beta1.TaskRun + Tasks []*v1beta1.Task + ClusterTasks []*v1beta1.ClusterTask PipelineResources []*v1alpha1.PipelineResource Conditions []*v1alpha1.Condition Pods []*corev1.Pod @@ -66,11 +68,11 @@ type Clients struct { // Informers holds references to informers which are useful for reconciler tests. type Informers struct { - PipelineRun informersv1alpha1.PipelineRunInformer - Pipeline informersv1alpha1.PipelineInformer - TaskRun informersv1alpha1.TaskRunInformer - Task informersv1alpha1.TaskInformer - ClusterTask informersv1alpha1.ClusterTaskInformer + PipelineRun informersv1beta1.PipelineRunInformer + Pipeline informersv1beta1.PipelineInformer + TaskRun informersv1beta1.TaskRunInformer + Task informersv1beta1.TaskInformer + ClusterTask informersv1beta1.ClusterTaskInformer PipelineResource resourceinformersv1alpha1.PipelineResourceInformer Condition informersv1alpha1.ConditionInformer Pod coreinformers.PodInformer @@ -107,7 +109,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.PipelineRun.Informer().GetIndexer().Add(pr); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + if _, err := c.Pipeline.TektonV1beta1().PipelineRuns(pr.Namespace).Create(pr); err != nil { t.Fatal(err) } } @@ -115,7 +117,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Pipeline.Informer().GetIndexer().Add(p); err != nil { t.Fatal(err) 
} - if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(p); err != nil { + if _, err := c.Pipeline.TektonV1beta1().Pipelines(p.Namespace).Create(p); err != nil { t.Fatal(err) } } @@ -123,7 +125,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.TaskRun.Informer().GetIndexer().Add(tr); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(tr); err != nil { + if _, err := c.Pipeline.TektonV1beta1().TaskRuns(tr.Namespace).Create(tr); err != nil { t.Fatal(err) } } @@ -131,7 +133,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.Task.Informer().GetIndexer().Add(ta); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ta); err != nil { + if _, err := c.Pipeline.TektonV1beta1().Tasks(ta.Namespace).Create(ta); err != nil { t.Fatal(err) } } @@ -139,7 +141,7 @@ func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers if err := i.ClusterTask.Informer().GetIndexer().Add(ct); err != nil { t.Fatal(err) } - if _, err := c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ct); err != nil { + if _, err := c.Pipeline.TektonV1beta1().ClusterTasks().Create(ct); err != nil { t.Fatal(err) } } diff --git a/test/dag_test.go b/test/dag_test.go index 0e23e2d34af..2b1a11ea8c6 100644 --- a/test/dag_test.go +++ b/test/dag_test.go @@ -25,8 +25,11 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resources "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1beta1" tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -49,15 +52,31 @@ func TestDAGPipelineRun(t *testing.T) { defer tearDown(t, c, namespace) // Create the Task that echoes text - echoTask := tb.Task("echo-task", namespace, tb.TaskSpec( - tb.TaskInputs( - tb.InputsResource("repo", v1alpha1.PipelineResourceTypeGit), - tb.InputsParamSpec("text", v1alpha1.ParamTypeString, tb.ParamSpecDescription("The text that should be echoed")), - ), - tb.TaskOutputs(tb.OutputsResource("repo", v1alpha1.PipelineResourceTypeGit)), - tb.Step("busybox", tb.StepCommand("echo"), tb.StepArgs("$(inputs.params.text)")), - tb.Step("busybox", tb.StepCommand("ln"), tb.StepArgs("-s", "$(inputs.resources.repo.path)", "$(outputs.resources.repo.path)")), - )) + repoTaskResource := v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "repo", Type: resources.PipelineResourceTypeGit, + }} + echoTask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "echo-task", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{repoTaskResource}, + Outputs: []v1beta1.TaskResource{repoTaskResource}, + }, + Params: []v1beta1.ParamSpec{{ + Name: "text", Type: v1beta1.ParamTypeString, + Description: "The text that should be echoed", + }}, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"echo"}, + Args: []string{"$(params.text)"}, + }}, {Container: corev1.Container{ + Image: "busybox", + Command: []string{"ln"}, + Args: []string{"-s", "$(resources.inputs.repo.path)", "$(resources.outputs.repo.path)"}, + }}}, + }, + } if _, err := c.TaskClient.Create(echoTask); err != nil { t.Fatalf("Failed to create echo Task: %s", err) } @@ -73,41 +92,102 @@ func TestDAGPipelineRun(t *testing.T) { // Intentionally declaring Tasks in a mixed up order to ensure the order // of execution isn't at all dependent on the order they are declared in - pipeline := 
tb.Pipeline("dag-pipeline", namespace, tb.PipelineSpec( - tb.PipelineDeclaredResource("repo", "git"), - tb.PipelineTask("pipeline-task-3", "echo-task", - tb.PipelineTaskInputResource("repo", "repo", tb.From("pipeline-task-2-parallel-1", "pipeline-task-2-parallel-2")), - tb.PipelineTaskOutputResource("repo", "repo"), - tb.PipelineTaskParam("text", "wow"), - ), - tb.PipelineTask("pipeline-task-2-parallel-2", "echo-task", - tb.PipelineTaskInputResource("repo", "repo", tb.From("pipeline-task-1")), tb.PipelineTaskOutputResource("repo", "repo"), - tb.PipelineTaskOutputResource("repo", "repo"), - tb.PipelineTaskParam("text", "such parallel"), - ), - tb.PipelineTask("pipeline-task-4", "echo-task", - tb.RunAfter("pipeline-task-3"), - tb.PipelineTaskInputResource("repo", "repo"), - tb.PipelineTaskOutputResource("repo", "repo"), - tb.PipelineTaskParam("text", "very cloud native"), - ), - tb.PipelineTask("pipeline-task-2-parallel-1", "echo-task", - tb.PipelineTaskInputResource("repo", "repo", tb.From("pipeline-task-1")), - tb.PipelineTaskOutputResource("repo", "repo"), - tb.PipelineTaskParam("text", "much graph"), - ), - tb.PipelineTask("pipeline-task-1", "echo-task", - tb.PipelineTaskInputResource("repo", "repo"), - tb.PipelineTaskOutputResource("repo", "repo"), - tb.PipelineTaskParam("text", "how to ci/cd?"), - ), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "dag-pipeline", Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: "repo", Type: resources.PipelineResourceTypeGit, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "pipeline-task-3", + TaskRef: &v1beta1.TaskRef{Name: "echo-task"}, + Params: []v1beta1.Param{{ + Name: "text", Value: v1beta1.NewArrayOrString("wow"), + }}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "repo", Resource: "repo", + From: []string{"pipeline-task-2-parallel-1", "pipeline-task-2-parallel-2"}, + }}, + 
Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "repo", Resource: "repo", + }}, + }, + }, { + Name: "pipeline-task-2-parallel-2", + TaskRef: &v1beta1.TaskRef{Name: "echo-task"}, + Params: []v1beta1.Param{{ + Name: "text", Value: v1beta1.NewArrayOrString("such parallel"), + }}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "repo", Resource: "repo", + From: []string{"pipeline-task-1"}, + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "repo", Resource: "repo", + }}, + }, + }, { + Name: "pipeline-task-4", + TaskRef: &v1beta1.TaskRef{Name: "echo-task"}, + Params: []v1beta1.Param{{ + Name: "text", Value: v1beta1.NewArrayOrString("very cloud native"), + }}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "repo", Resource: "repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "repo", Resource: "repo", + }}, + }, + RunAfter: []string{"pipeline-task-3"}, + }, { + Name: "pipeline-task-2-parallel-1", + TaskRef: &v1beta1.TaskRef{Name: "echo-task"}, + Params: []v1beta1.Param{{ + Name: "text", Value: v1beta1.NewArrayOrString("much graph"), + }}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "repo", Resource: "repo", + From: []string{"pipeline-task-1"}, + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "repo", Resource: "repo", + }}, + }, + }, { + Name: "pipeline-task-1", + TaskRef: &v1beta1.TaskRef{Name: "echo-task"}, + Params: []v1beta1.Param{{ + Name: "text", Value: v1beta1.NewArrayOrString("how to ci/cd?"), + }}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "repo", Resource: "repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "repo", Resource: "repo", + }}, + }, + }}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create dag-pipeline: %s", err) } 
- pipelineRun := tb.PipelineRun("dag-pipeline-run", namespace, tb.PipelineRunSpec("dag-pipeline", - tb.PipelineRunResourceBinding("repo", tb.PipelineResourceBindingRef("repo")), - )) + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "dag-pipeline-run", Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: "dag-pipeline"}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "repo", + ResourceRef: &v1beta1.PipelineResourceRef{Name: "repo"}, + }}, + }, + } if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { t.Fatalf("Failed to create dag-pipeline-run PipelineRun: %s", err) } diff --git a/test/duplicate_test.go b/test/duplicate_test.go index 083dbf5cba1..fd19ba9d183 100644 --- a/test/duplicate_test.go +++ b/test/duplicate_test.go @@ -24,7 +24,8 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -43,12 +44,18 @@ func TestDuplicatePodTaskRun(t *testing.T) { taskrunName := fmt.Sprintf("duplicate-pod-taskrun-%d", i) t.Logf("Creating taskrun %q.", taskrunName) - taskrun := tb.TaskRun(taskrunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskSpec(tb.Step("busybox", - tb.StepCommand("/bin/echo"), - tb.StepArgs("simple"), - )), - )) + taskrun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: taskrunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskSpec: &v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"/bin/echo"}, + Args: []string{"simple"}, + }}}, + }, + }, + } if _, err := c.TaskRunClient.Create(taskrun); err != nil { t.Fatalf("Error creating taskrun: %v", err) } diff --git a/test/e2e-tests.sh b/test/e2e-tests.sh index e9ef0e877f6..c13f01308ae 100755 
--- a/test/e2e-tests.sh +++ b/test/e2e-tests.sh @@ -31,7 +31,7 @@ failed=0 # Run the integration tests header "Running Go e2e tests" -go_test_e2e -timeout=20m ./test || failed=1 +go_test_e2e -timeout=20m ./test/... || failed=1 # Run these _after_ the integration tests b/c they don't quite work all the way # and they cause a lot of noise in the logs, making it harder to debug integration diff --git a/test/embed_test.go b/test/embed_test.go index acd3af16926..310ad4c1332 100644 --- a/test/embed_test.go +++ b/test/embed_test.go @@ -23,7 +23,10 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resources "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -61,30 +64,44 @@ func TestTaskRun_EmbeddedResource(t *testing.T) { // completion of the TaskRun means the TaskRun did what it was intended. 
} -func getEmbeddedTask(namespace string, args []string) *v1alpha1.Task { - return tb.Task(embedTaskName, namespace, - tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("docs", v1alpha1.PipelineResourceTypeGit)), - tb.Step("ubuntu", - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "cat /workspace/docs/LICENSE"), - ), - tb.Step("busybox", tb.StepCommand(args...)), - )) +func getEmbeddedTask(namespace string, args []string) *v1beta1.Task { + return &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: embedTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "docs", Type: resources.PipelineResourceTypeGit, + }}}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "cat /workspace/docs/LICENSE"}, + }}, {Container: corev1.Container{ + Image: "busybox", + Command: args, + }}}, + }, + } } -func getEmbeddedTaskRun(namespace string) *v1alpha1.TaskRun { - testSpec := &v1alpha1.PipelineResourceSpec{ +func getEmbeddedTaskRun(namespace string) *v1beta1.TaskRun { + testSpec := &resources.PipelineResourceSpec{ Type: v1alpha1.PipelineResourceTypeGit, Params: []v1alpha1.ResourceParam{{ Name: "URL", Value: "https://github.com/knative/docs", }}, } - return tb.TaskRun(embedTaskRunName, namespace, - tb.TaskRunSpec( - tb.TaskRunInputs( - tb.TaskRunInputsResource("docs", tb.TaskResourceBindingResourceSpec(testSpec)), - ), - tb.TaskRunTaskRef(embedTaskName))) + return &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: embedTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + Resources: &v1beta1.TaskRunResources{ + Inputs: []v1beta1.TaskResourceBinding{{PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "docs", ResourceSpec: testSpec, + }}}, + }, + TaskRef: &v1beta1.TaskRef{Name: embedTaskName}, + }, + } } diff --git 
a/test/entrypoint_test.go b/test/entrypoint_test.go index 9f21ebe8314..97ede439ef4 100644 --- a/test/entrypoint_test.go +++ b/test/entrypoint_test.go @@ -21,7 +21,9 @@ package test import ( "testing" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -42,16 +44,28 @@ func TestEntrypointRunningStepsInOrder(t *testing.T) { defer tearDown(t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) - task := tb.Task(epTaskName, namespace, tb.TaskSpec( - tb.Step("ubuntu", tb.StepArgs("-c", "sleep 3 && touch foo")), - tb.Step("ubuntu", tb.StepArgs("-c", "ls", "foo")), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: epTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + Args: []string{"-c", "sleep 3 && touch foo"}, + }}, {Container: corev1.Container{ + Image: "ubuntu", + Args: []string{"-c", "ls", "foo"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - taskRun := tb.TaskRun(epTaskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(epTaskName), tb.TaskRunServiceAccountName("default"), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: epTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: epTaskName}, + ServiceAccountName: "default", + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } diff --git a/test/git_checkout_test.go b/test/git_checkout_test.go index c90444f716b..dc0779aec5b 100644 --- a/test/git_checkout_test.go +++ b/test/git_checkout_test.go @@ -23,6 +23,8 @@ import ( "testing" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + 
"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resources "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -260,25 +262,54 @@ func getGitPipelineResource(namespace, revision, sslverify, httpproxy, httpsprox )) } -func getGitCheckTask(namespace string) *v1alpha1.Task { - return tb.Task(gitTestTaskName, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)), - tb.Step("alpine/git", tb.StepArgs("--git-dir=/workspace/gitsource/.git", "show")), - )) +func getGitCheckTask(namespace string) *v1beta1.Task { + return &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: gitTestTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "gitsource", Type: resources.PipelineResourceTypeGit, + }}}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "alpine/git", + Args: []string{"--git-dir=/workspace/gitsource/.git", "show"}, + }}}, + }, + } } -func getGitCheckPipeline(namespace string) *v1alpha1.Pipeline { - return tb.Pipeline(gitTestPipelineName, namespace, tb.PipelineSpec( - tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineTask("git-check", gitTestTaskName, - tb.PipelineTaskInputResource("gitsource", "git-repo"), - ), - )) +func getGitCheckPipeline(namespace string) *v1beta1.Pipeline { + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: gitTestPipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: "git-repo", Type: resources.PipelineResourceTypeGit, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "git-check", + TaskRef: &v1beta1.TaskRef{Name: gitTestTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: 
[]v1beta1.PipelineTaskInputResource{{ + Name: "gitsource", + Resource: "git-repo", + }}, + }, + }}, + }, + } + } -func getGitCheckPipelineRun(namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(gitTestPipelineRunName, namespace, tb.PipelineRunSpec( - gitTestPipelineName, - tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef(gitSourceResourceName)), - )) +func getGitCheckPipelineRun(namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: gitTestPipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: gitTestPipelineName}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "git-repo", + ResourceRef: &v1beta1.PipelineResourceRef{Name: gitSourceResourceName}, + }}, + }, + } } diff --git a/test/helm_task_test.go b/test/helm_task_test.go index 5991c3a4ac5..89a12709cbd 100644 --- a/test/helm_task_test.go +++ b/test/helm_task_test.go @@ -25,6 +25,8 @@ import ( "time" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resources "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/names" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" @@ -153,63 +155,123 @@ func getHelmImageResource(namespace, dockerRepo string) *v1alpha1.PipelineResour )) } -func getCreateImageTask(namespace string) *v1alpha1.Task { - return tb.Task(createImageTaskName, namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)), - tb.TaskOutputs(tb.OutputsResource("builtimage", v1alpha1.PipelineResourceTypeImage)), - tb.Step("gcr.io/kaniko-project/executor:v0.17.1", tb.StepName("kaniko"), tb.StepArgs( - "--dockerfile=/workspace/gitsource/test/gohelloworld/Dockerfile", - "--context=/workspace/gitsource/", - "--destination=$(outputs.resources.builtimage.url)", - )), - 
)) +func getCreateImageTask(namespace string) *v1beta1.Task { + return &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: createImageTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "gitsource", Type: resources.PipelineResourceTypeGit, + }}}, + Outputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "builtimage", Type: resources.PipelineResourceTypeImage, + }}}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "kaniko", + Image: "gcr.io/kaniko-project/executor:v0.17.1", + Args: []string{ + "--dockerfile=/workspace/gitsource/test/gohelloworld/Dockerfile", + "--context=/workspace/gitsource/", + "--destination=$(outputs.resources.builtimage.url)", + }, + }}}, + }, + } } -func getHelmDeployTask(namespace string) *v1alpha1.Task { - return tb.Task(helmDeployTaskName, namespace, tb.TaskSpec( - tb.TaskInputs( - tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit), - tb.InputsResource("image", v1alpha1.PipelineResourceTypeImage), - tb.InputsParamSpec("pathToHelmCharts", v1alpha1.ParamTypeString, tb.ParamSpecDescription("Path to the helm charts")), - tb.InputsParamSpec("chartname", v1alpha1.ParamTypeString, tb.ParamSpecDefault("")), - ), - tb.Step("alpine/helm:2.14.0", tb.StepArgs("init", "--wait")), - tb.Step("alpine/helm:2.14.0", tb.StepArgs( - "install", - "--debug", - "--name=$(inputs.params.chartname)", - "$(inputs.params.pathToHelmCharts)", - "--set", - "image.repository=$(inputs.resources.image.url)", - )), - )) +func getHelmDeployTask(namespace string) *v1beta1.Task { + empty := v1beta1.NewArrayOrString("") + return &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: helmDeployTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: 
"gitsource", Type: resources.PipelineResourceTypeGit, + }}, {ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "image", Type: resources.PipelineResourceTypeImage, + }}}, + }, + Params: []v1beta1.ParamSpec{{ + Name: "pathToHelmCharts", Type: v1beta1.ParamTypeString, Description: "Path to the helm charts", + }, { + Name: "chartname", Type: v1beta1.ParamTypeString, Default: &empty, + }}, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "alpine/helm:2.14.0", + Args: []string{"init", "--wait"}, + }}, {Container: corev1.Container{ + Image: "alpine/helm:2.14.0", + Args: []string{"install", + "--debug", + "--name=$(inputs.params.chartname)", + "$(inputs.params.pathToHelmCharts)", + "--set", + "image.repository=$(inputs.resources.image.url)", + }, + }}}, + }, + } } -func getHelmDeployPipeline(namespace string) *v1alpha1.Pipeline { - return tb.Pipeline(helmDeployPipelineName, namespace, tb.PipelineSpec( - tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineDeclaredResource("the-image", "image"), - tb.PipelineParamSpec("chartname", v1alpha1.ParamTypeString), - tb.PipelineTask("push-image", createImageTaskName, - tb.PipelineTaskInputResource("gitsource", "git-repo"), - tb.PipelineTaskOutputResource("builtimage", "the-image"), - ), - tb.PipelineTask("helm-deploy", helmDeployTaskName, - tb.PipelineTaskInputResource("gitsource", "git-repo"), - tb.PipelineTaskInputResource("image", "the-image", tb.From("push-image")), - tb.PipelineTaskParam("pathToHelmCharts", "/workspace/gitsource/test/gohelloworld/gohelloworld-chart"), - tb.PipelineTaskParam("chartname", "$(params.chartname)"), - ), - )) +func getHelmDeployPipeline(namespace string) *v1beta1.Pipeline { + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: helmDeployPipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "chartname", Type: v1beta1.ParamTypeString, + }}, + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: 
"git-repo", Type: "git", + }, { + Name: "the-image", Type: "image", + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "push-image", + TaskRef: &v1beta1.TaskRef{Name: createImageTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "gitsource", Resource: "git-repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{{ + Name: "builtimage", Resource: "the-image", + }}, + }, + }, { + Name: "helm-deploy", + TaskRef: &v1beta1.TaskRef{Name: helmDeployTaskName}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "gitsource", Resource: "git-repo", + }, { + Name: "image", Resource: "the-image", From: []string{"push-image"}, + }}, + }, + Params: []v1beta1.Param{{ + Name: "pathToHelmCharts", Value: v1beta1.NewArrayOrString("/workspace/gitsource/test/gohelloworld/gohelloworld-chart"), + }, { + Name: "chartname", Value: v1beta1.NewArrayOrString("$(params.chartname)"), + }}, + }}, + }, + } } -func getHelmDeployPipelineRun(namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(helmDeployPipelineRunName, namespace, tb.PipelineRunSpec( - helmDeployPipelineName, - tb.PipelineRunParam("chartname", "gohelloworld"), - tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef(sourceResourceName)), - tb.PipelineRunResourceBinding("the-image", tb.PipelineResourceBindingRef(sourceImageName)), - )) +func getHelmDeployPipelineRun(namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: helmDeployPipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: helmDeployPipelineName}, + Params: []v1beta1.Param{{ + Name: "chartname", Value: v1beta1.NewArrayOrString("gohelloworld"), + }}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "git-repo", ResourceRef: &v1beta1.PipelineResourceRef{Name: sourceResourceName}, + }, { + Name: "the-image", 
ResourceRef: &v1beta1.PipelineResourceRef{Name: sourceImageName}, + }}, + }, + } } func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { @@ -304,16 +366,25 @@ func helmCleanup(c *clients, t *testing.T, namespace string) { func removeAllHelmReleases(c *clients, t *testing.T, namespace string) { helmRemoveAllTaskName := "helm-remove-all-task" - helmRemoveAllTask := tb.Task(helmRemoveAllTaskName, namespace, tb.TaskSpec( - tb.Step("alpine/helm:2.14.0", tb.StepName("helm-remove-all"), tb.StepCommand("/bin/sh"), - tb.StepArgs("-c", "helm ls --short --all | xargs -n1 helm del --purge"), - ), - )) + helmRemoveAllTask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: helmRemoveAllTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "helm-remove-all", + Image: "alpine/helm:2.14.0", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "helm ls --short --all | xargs -n1 helm del --purge"}, + }}}, + }, + } helmRemoveAllTaskRunName := "helm-remove-all-taskrun" - helmRemoveAllTaskRun := tb.TaskRun(helmRemoveAllTaskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(helmRemoveAllTaskName), - )) + helmRemoveAllTaskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: helmRemoveAllTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: helmRemoveAllTaskName}, + }, + } t.Logf("Creating Task %s", helmRemoveAllTaskName) if _, err := c.TaskClient.Create(helmRemoveAllTask); err != nil { @@ -333,14 +404,23 @@ func removeAllHelmReleases(c *clients, t *testing.T, namespace string) { func removeHelmFromCluster(c *clients, t *testing.T, namespace string) { helmResetTaskName := "helm-reset-task" - helmResetTask := tb.Task(helmResetTaskName, namespace, tb.TaskSpec( - tb.Step("alpine/helm:2.14.0", tb.StepArgs("reset", "--force")), - )) + helmResetTask := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: helmResetTaskName, Namespace: 
namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "alpine/helm:2.14.0", + Args: []string{"reset", "--force"}, + }}}, + }, + } helmResetTaskRunName := "helm-reset-taskrun" - helmResetTaskRun := tb.TaskRun(helmResetTaskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(helmResetTaskName), - )) + helmResetTaskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: helmResetTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: helmResetTaskName}, + }, + } t.Logf("Creating Task %s", helmResetTaskName) if _, err := c.TaskClient.Create(helmResetTask); err != nil { diff --git a/test/kaniko_task_test.go b/test/kaniko_task_test.go index cbe9a608ff4..45dc2f3b453 100644 --- a/test/kaniko_task_test.go +++ b/test/kaniko_task_test.go @@ -26,6 +26,8 @@ import ( "github.com/google/go-cmp/cmp" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resources "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -135,40 +137,59 @@ func getImageResource(namespace, repo string) *v1alpha1.PipelineResource { )) } -func getTask(repo, namespace string) *v1alpha1.Task { +func getTask(repo, namespace string) *v1beta1.Task { root := int64(0) - taskSpecOps := []tb.TaskSpecOp{ - tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)), - tb.TaskOutputs(tb.OutputsResource("builtImage", v1alpha1.PipelineResourceTypeImage)), - } - stepOps := []tb.StepOp{ - tb.StepName("kaniko"), - tb.StepArgs( - "--dockerfile=/workspace/gitsource/integration/dockerfiles/Dockerfile_test_label", - fmt.Sprintf("--destination=%s", repo), - "--context=/workspace/gitsource", - "--oci-layout-path=/workspace/output/builtImage", - "--insecure", - "--insecure-pull", - 
"--insecure-registry=registry."+namespace+":5000/", - ), - tb.StepSecurityContext(&corev1.SecurityContext{RunAsUser: &root}), - } - step := tb.Step("gcr.io/kaniko-project/executor:v0.17.1", stepOps...) - taskSpecOps = append(taskSpecOps, step) - sidecar := tb.Sidecar("registry", "registry") - taskSpecOps = append(taskSpecOps, sidecar) - - return tb.Task(kanikoTaskName, namespace, tb.TaskSpec(taskSpecOps...)) + return &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: kanikoTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "gitsource", Type: resources.PipelineResourceTypeGit, + }}}, + Outputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "builtImage", Type: resources.PipelineResourceTypeImage, + }}}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "kaniko", + Image: "gcr.io/kaniko-project/executor:v0.17.1", + Args: []string{ + "--dockerfile=/workspace/gitsource/integration/dockerfiles/Dockerfile_test_label", + fmt.Sprintf("--destination=%s", repo), + "--context=/workspace/gitsource", + "--oci-layout-path=/workspace/output/builtImage", + "--insecure", + "--insecure-pull", + "--insecure-registry=registry." 
+ namespace + ":5000/", + }, + SecurityContext: &corev1.SecurityContext{ + RunAsUser: &root, + }, + }}}, + Sidecars: []v1beta1.Sidecar{{Container: corev1.Container{ + Name: "registry", + Image: "registry", + }}}, + }, + } } -func getTaskRun(namespace string) *v1alpha1.TaskRun { - return tb.TaskRun(kanikoTaskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(kanikoTaskName), - tb.TaskRunTimeout(2*time.Minute), - tb.TaskRunInputs(tb.TaskRunInputsResource("gitsource", tb.TaskResourceBindingRef(kanikoGitResourceName))), - tb.TaskRunOutputs(tb.TaskRunOutputsResource("builtImage", tb.TaskResourceBindingRef(kanikoImageResourceName))), - )) +func getTaskRun(namespace string) *v1beta1.TaskRun { + return &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: kanikoTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: kanikoTaskName}, + Timeout: &metav1.Duration{Duration: 2 * time.Minute}, + Resources: &v1beta1.TaskRunResources{ + Inputs: []v1beta1.TaskResourceBinding{{PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "gitsource", ResourceRef: &v1beta1.PipelineResourceRef{Name: kanikoGitResourceName}, + }}}, + Outputs: []v1beta1.TaskResourceBinding{{PipelineResourceBinding: v1beta1.PipelineResourceBinding{ + Name: "builtImage", ResourceRef: &v1beta1.PipelineResourceRef{Name: kanikoImageResourceName}, + }}}, + }, + }, + } } // getRemoteDigest starts a pod to query the registry from the namespace itself, using skopeo (and jq). 
diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index 575c67996ae..6bc6113d491 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -27,6 +27,8 @@ import ( "github.com/tektoncd/pipeline/pkg/apis/pipeline" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + resource "github.com/tektoncd/pipeline/pkg/apis/resource/v1alpha1" "github.com/tektoncd/pipeline/pkg/artifacts" tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" @@ -55,7 +57,7 @@ func TestPipelineRun(t *testing.T) { testSetup func(t *testing.T, c *clients, namespace string, index int) expectedTaskRuns []string expectedNumberOfEvents int - pipelineRunFunc func(int, string) *v1alpha1.PipelineRun + pipelineRunFunc func(int, string) *v1beta1.PipelineRun } tds := []tests{{ @@ -94,15 +96,24 @@ func TestPipelineRun(t *testing.T) { t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) } - task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsParamSpec("path", v1alpha1.ParamTypeString), - tb.InputsParamSpec("dest", v1alpha1.ParamTypeString)), - // Reference build: https://github.com/knative/build/tree/master/test/docker-basic - tb.Step("quay.io/rhpipeline/skopeo:alpine", tb.StepName("config-docker"), - tb.StepCommand("skopeo"), - tb.StepArgs("copy", "$(inputs.params.path)", "$(inputs.params.dest)"), - ), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: getName(taskName, index), Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "path", Type: v1beta1.ParamTypeString, + }, { + Name: "dest", Type: v1beta1.ParamTypeString, + }}, + Steps: []v1beta1.Step{{ + Container: corev1.Container{ + Name: "config-docker", + Image: "quay.io/rhpipeline/skopeo:alpine", + Command: []string{"skopeo"}, + Args: []string{"copy", "$(params.path)", "$(params.dest)"}, + }}, + }, + }, + } if _, 
err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } @@ -124,12 +135,16 @@ func TestPipelineRun(t *testing.T) { t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) } - task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( - tb.Step("ubuntu", - tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo hello, world"), - ), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: getName(taskName, index), Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo hello, world"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) } @@ -228,87 +243,168 @@ func TestPipelineRun(t *testing.T) { } } -func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline { - return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( - tb.PipelineParamSpec("path", v1alpha1.ParamTypeString), - tb.PipelineParamSpec("dest", v1alpha1.ParamTypeString), - tb.PipelineTask(task1Name, getName(taskName, suffix), - tb.PipelineTaskParam("path", "$(params.path)"), - tb.PipelineTaskParam("dest", "$(params.dest)")), - )) +func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1beta1.Pipeline { + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Params: []v1beta1.ParamSpec{{ + Name: "path", Type: v1beta1.ParamTypeString, + }, { + Name: "dest", Type: v1beta1.ParamTypeString, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: task1Name, + TaskRef: &v1beta1.TaskRef{Name: getName(taskName, suffix)}, + Params: []v1beta1.Param{{ + Name: "path", Value: v1beta1.NewArrayOrString("$(params.path)"), + }, { + Name: "dest", Value: 
v1beta1.NewArrayOrString("$(params.dest)"), + }}, + }}, + }, + } } -func getFanInFanOutTasks(namespace string) []*v1alpha1.Task { - inWorkspaceResource := tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit) - outWorkspaceResource := tb.OutputsResource("workspace", v1alpha1.PipelineResourceTypeGit) - return []*v1alpha1.Task{ - tb.Task("create-file", namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, - tb.ResourceTargetPath("brandnewspace"), - )), - tb.TaskOutputs(outWorkspaceResource), - tb.Step("ubuntu", tb.StepName("write-data-task-0-step-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo stuff > $(outputs.resources.workspace.path)/stuff"), - ), - tb.Step("ubuntu", tb.StepName("write-data-task-0-step-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo other > $(outputs.resources.workspace.path)/other"), - ), - )), - tb.Task("check-create-files-exists", namespace, tb.TaskSpec( - tb.TaskInputs(inWorkspaceResource), - tb.TaskOutputs(outWorkspaceResource), - tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ stuff == $(cat $(inputs.resources.workspace.path)/stuff) ]]"), - ), - tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo something > $(outputs.resources.workspace.path)/something"), - ), - )), - tb.Task("check-create-files-exists-2", namespace, tb.TaskSpec( - tb.TaskInputs(inWorkspaceResource), - tb.TaskOutputs(outWorkspaceResource), - tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"), - ), - tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "echo else > $(outputs.resources.workspace.path)/else"), - ), - )), - tb.Task("read-files", namespace, tb.TaskSpec( - tb.TaskInputs(tb.InputsResource("workspace", 
v1alpha1.PipelineResourceTypeGit, - tb.ResourceTargetPath("readingspace"), - )), - tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"), - ), - tb.Step("ubuntu", tb.StepName("read-from-task-1"), tb.StepCommand("/bin/bash"), - tb.StepArgs("-c", "[[ else == $(cat $(inputs.resources.workspace.path)/else) ]]"), - ), - )), - } +func getFanInFanOutTasks(namespace string) []*v1beta1.Task { + workspaceResource := v1beta1.TaskResource{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "workspace", + Type: resource.PipelineResourceTypeGit, + }} + return []*v1beta1.Task{{ + ObjectMeta: metav1.ObjectMeta{Name: "create-file", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "workspace", + Type: resource.PipelineResourceTypeGit, + TargetPath: "brandnewspace", + }}}, + Outputs: []v1beta1.TaskResource{workspaceResource}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "write-data-task-0-step-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo stuff > $(resources.outputs.workspace.path)/stuff"}, + }}, {Container: corev1.Container{ + Name: "write-data-task-0-step-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo other > $(resources.outputs.workspace.path)/other"}, + }}}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "check-create-files-exists", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{workspaceResource}, + Outputs: []v1beta1.TaskResource{workspaceResource}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "read-from-task-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ stuff == $(cat 
$(inputs.resources.workspace.path)/stuff) ]]"}, + }}, {Container: corev1.Container{ + Name: "write-data-task-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo something > $(outputs.resources.workspace.path)/something"}, + }}}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "check-create-files-exists-2", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{workspaceResource}, + Outputs: []v1beta1.TaskResource{workspaceResource}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "read-from-task-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"}, + }}, {Container: corev1.Container{ + Name: "write-data-task-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "echo else > $(outputs.resources.workspace.path)/else"}, + }}}, + }, + }, { + ObjectMeta: metav1.ObjectMeta{Name: "read-files", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Resources: &v1beta1.TaskResources{ + Inputs: []v1beta1.TaskResource{{ResourceDeclaration: v1beta1.ResourceDeclaration{ + Name: "workspace", + Type: resource.PipelineResourceTypeGit, + TargetPath: "readingspace", + }}}, + }, + Steps: []v1beta1.Step{{Container: corev1.Container{ + Name: "read-from-task-0", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"}, + }}, {Container: corev1.Container{ + Name: "read-from-task-1", + Image: "ubuntu", + Command: []string{"/bin/bash"}, + Args: []string{"-c", "[[ else == $(cat $(inputs.resources.workspace.path)/else) ]]"}, + }}}, + }, + }} } -func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline { - outGitResource := tb.PipelineTaskOutputResource("workspace", "git-repo") - - return tb.Pipeline(getName(pipelineName, suffix), namespace, 
tb.PipelineSpec( - tb.PipelineDeclaredResource("git-repo", "git"), - tb.PipelineTask("create-file-kritis", "create-file", - tb.PipelineTaskInputResource("workspace", "git-repo"), - outGitResource, - ), - tb.PipelineTask("create-fan-out-1", "check-create-files-exists", - tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), - outGitResource, - ), - tb.PipelineTask("create-fan-out-2", "check-create-files-exists-2", - tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), - outGitResource, - ), - tb.PipelineTask("check-fan-in", "read-files", - tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-fan-out-2", "create-fan-out-1")), - ), - )) +func getFanInFanOutPipeline(suffix int, namespace string) *v1beta1.Pipeline { + outGitResource := v1beta1.PipelineTaskOutputResource{ + Name: "workspace", + Resource: "git-repo", + } + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Resources: []v1beta1.PipelineDeclaredResource{{ + Name: "git-repo", Type: resource.PipelineResourceTypeGit, + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "create-file-kritis", + TaskRef: &v1beta1.TaskRef{Name: "create-file"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{outGitResource}, + }, + }, { + Name: "create-fan-out-1", + TaskRef: &v1beta1.TaskRef{Name: "check-create-files-exists"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", From: []string{"create-file-kritis"}, + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{outGitResource}, + }, + }, { + Name: "create-fan-out-2", + TaskRef: &v1beta1.TaskRef{Name: "check-create-files-exists-2"}, + Resources: &v1beta1.PipelineTaskResources{ + 
Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", From: []string{"create-file-kritis"}, + }}, + Outputs: []v1beta1.PipelineTaskOutputResource{outGitResource}, + }, + }, { + Name: "check-fan-in", + TaskRef: &v1beta1.TaskRef{Name: "read-files"}, + Resources: &v1beta1.PipelineTaskResources{ + Inputs: []v1beta1.PipelineTaskInputResource{{ + Name: "workspace", Resource: "git-repo", From: []string{"create-fan-out-2", "create-fan-out-1"}, + }}, + }, + }}, + }, + } } func getFanInFanOutGitResources(namespace string) []*v1alpha1.PipelineResource { @@ -332,11 +428,17 @@ func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceA }}, } } -func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, - tb.PipelineRunSpec(getName(pipelineName, suffix), - tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git")), - )) +func getFanInFanOutPipelineRun(suffix int, namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineRunName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: getName(pipelineName, suffix)}, + Resources: []v1beta1.PipelineResourceBinding{{ + Name: "git-repo", + ResourceRef: &v1beta1.PipelineResourceRef{Name: "kritis-resource-git"}, + }}, + }, + } } func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { @@ -368,15 +470,26 @@ func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { } } -func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, - tb.PipelineRunLabel("hello-world-key", "hello-world-value"), - tb.PipelineRunSpec(getName(pipelineName, suffix), - tb.PipelineRunParam("path", 
"docker://gcr.io/build-crd-testing/secret-sauce"), - tb.PipelineRunParam("dest", "dir:///tmp/"), - tb.PipelineRunServiceAccountName(fmt.Sprintf("%s%d", saName, suffix)), - ), - ) +func getHelloWorldPipelineRun(suffix int, namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: getName(pipelineRunName, suffix), Namespace: namespace, + Labels: map[string]string{ + "hello-world-key": "hello-world-value", + }, + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: getName(pipelineName, suffix)}, + Params: []v1beta1.Param{{ + Name: "path", + Value: v1beta1.NewArrayOrString("docker://gcr.io/build-crd-testing/secret-sauce"), + }, { + Name: "dest", + Value: v1beta1.NewArrayOrString("dir:///tmp/"), + }}, + ServiceAccountName: fmt.Sprintf("%s%d", saName, suffix), + }, + } } func getName(namespace string, suffix int) string { @@ -418,7 +531,7 @@ func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, // checkLabelPropagation checks that labels are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. -func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { +func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { // Our controllers add 4 labels automatically. If custom labels are set on // the Pipeline, PipelineRun, or Task then the map will have to be resized. labels := make(map[string]string, 4) @@ -471,7 +584,7 @@ func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineR // checkAnnotationPropagation checks that annotations are correctly propagating from // Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. 
-func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { +func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1beta1.TaskRun) { annotations := make(map[string]string) // Check annotation propagation to PipelineRuns. @@ -508,7 +621,7 @@ func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipe assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations) } -func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod { +func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1beta1.TaskRun) *corev1.Pod { // The Pod name has a random suffix, so we filter by label to find the one we care about. pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ LabelSelector: pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name, @@ -538,11 +651,23 @@ func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations } } -func getPipelineWithFailingCondition(suffix int, namespace string) *v1alpha1.Pipeline { - return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( - tb.PipelineTask(task1Name, getName(taskName, suffix), tb.PipelineTaskCondition(cond1Name)), - tb.PipelineTask("task2", getName(taskName, suffix), tb.RunAfter(task1Name)), - )) +func getPipelineWithFailingCondition(suffix int, namespace string) *v1beta1.Pipeline { + return &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: getName(pipelineName, suffix), Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: task1Name, + TaskRef: &v1beta1.TaskRef{Name: getName(taskName, suffix)}, + Conditions: []v1beta1.PipelineTaskCondition{{ + ConditionRef: cond1Name, + }}, + }, { + Name: "task2", + TaskRef: &v1beta1.TaskRef{Name: getName(taskName, suffix)}, + RunAfter: 
[]string{task1Name}, + }}, + }, + } } func getFailingCondition(namespace string) *v1alpha1.Condition { @@ -550,9 +675,16 @@ func getFailingCondition(namespace string) *v1alpha1.Condition { tb.Command("/bin/bash"), tb.Args("exit 1")))) } -func getConditionalPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { - return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, - tb.PipelineRunLabel("hello-world-key", "hello-world-value"), - tb.PipelineRunSpec(getName(pipelineName, suffix)), - ) +func getConditionalPipelineRun(suffix int, namespace string) *v1beta1.PipelineRun { + return &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: getName(pipelineRunName, suffix), Namespace: namespace, + Labels: map[string]string{ + "hello-world-key": "hello-world-value", + }, + }, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: getName(pipelineName, suffix)}, + }, + } } diff --git a/test/retry_test.go b/test/retry_test.go index ab7add10491..cce0874b85b 100644 --- a/test/retry_test.go +++ b/test/retry_test.go @@ -22,7 +22,6 @@ import ( "testing" "time" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -42,18 +41,18 @@ func TestTaskRunRetry(t *testing.T) { // configured to retry 5 times. 
pipelineRunName := "retry-pipeline" numRetries := 5 - if _, err := c.PipelineRunClient.Create(&v1alpha1.PipelineRun{ + if _, err := c.PipelineRunClient.Create(&v1beta1.PipelineRun{ ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName}, - Spec: v1alpha1.PipelineRunSpec{ - PipelineSpec: &v1alpha1.PipelineSpec{ - Tasks: []v1alpha1.PipelineTask{{ + Spec: v1beta1.PipelineRunSpec{ + PipelineSpec: &v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ Name: "retry-me", - TaskSpec: &v1alpha1.TaskSpec{TaskSpec: v1beta1.TaskSpec{ - Steps: []v1alpha1.Step{{ + TaskSpec: &v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ Container: corev1.Container{Image: "busybox"}, Script: "exit 1", }}, - }}, + }, Retries: numRetries, }}, }, diff --git a/test/sidecar_test.go b/test/sidecar_test.go index 832961c8b45..9fc35467ef0 100644 --- a/test/sidecar_test.go +++ b/test/sidecar_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - tb "github.com/tektoncd/pipeline/test/builder" + v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -59,26 +59,25 @@ func TestSidecarTaskSupport(t *testing.T) { t.Run(test.desc, func(t *testing.T) { sidecarTaskName := fmt.Sprintf("%s-%d", sidecarTaskName, i) sidecarTaskRunName := fmt.Sprintf("%s-%d", sidecarTaskRunName, i) - task := tb.Task(sidecarTaskName, namespace, - tb.TaskSpec( - tb.Step( - "busybox:1.31.0-musl", - tb.StepName(primaryContainerName), - tb.StepCommand(test.stepCommand...), - ), - tb.Sidecar( - sidecarContainerName, - "busybox:1.31.0-musl", - tb.Command(test.sidecarCommand...), - ), - ), - ) - - taskRun := tb.TaskRun(sidecarTaskRunName, namespace, - tb.TaskRunSpec(tb.TaskRunTaskRef(sidecarTaskName), - tb.TaskRunTimeout(1*time.Minute), - ), - ) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: sidecarTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Name: primaryContainerName, 
Image: "busybox:1.31.0-musl", Command: test.stepCommand}, + }}, + Sidecars: []v1beta1.Sidecar{{ + Container: corev1.Container{Name: sidecarContainerName, Image: "busybox:1.31.0-musl", Command: test.sidecarCommand}, + }}, + }, + } + + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: sidecarTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: sidecarTaskName}, + Timeout: &metav1.Duration{Duration: 1 * time.Minute}, + }, + } t.Logf("Creating Task %q", sidecarTaskName) if _, err := clients.TaskClient.Create(task); err != nil { diff --git a/test/start_time_test.go b/test/start_time_test.go index 8dd8e742a70..9657c66eec3 100644 --- a/test/start_time_test.go +++ b/test/start_time_test.go @@ -18,7 +18,6 @@ import ( "testing" "time" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,14 +36,14 @@ func TestStartTime(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) t.Logf("Creating TaskRun in namespace %q", namespace) - tr, err := c.TaskRunClient.Create(&v1alpha1.TaskRun{ + tr, err := c.TaskRunClient.Create(&v1beta1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ GenerateName: "start-time-test-", Namespace: namespace, }, - Spec: v1alpha1.TaskRunSpec{ - TaskSpec: &v1alpha1.TaskSpec{TaskSpec: v1beta1.TaskSpec{ - Steps: []v1alpha1.Step{{ + Spec: v1beta1.TaskRunSpec{ + TaskSpec: &v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ Container: corev1.Container{Image: "ubuntu"}, Script: "sleep 10", }, { @@ -60,7 +59,7 @@ func TestStartTime(t *testing.T) { Container: corev1.Container{Image: "ubuntu"}, Script: "sleep 10", }}, - }}, + }, }, }) if err != nil { diff --git a/test/status_test.go b/test/status_test.go index 31cd8791c5a..8382d2c0bde 100644 --- a/test/status_test.go +++ b/test/status_test.go @@ -21,7 
+21,9 @@ package test import ( "testing" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" ) @@ -36,15 +38,25 @@ func TestTaskRunPipelineRunStatus(t *testing.T) { defer tearDown(t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) - task := tb.Task("banana", namespace, tb.TaskSpec( - tb.Step("busybox", tb.StepCommand("ls", "-la")), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "banana", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"ls", "-la"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - taskRun := tb.TaskRun("apple", namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef("banana"), tb.TaskRunServiceAccountName("inexistent"), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: "apple", Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "banana"}, + ServiceAccountName: "inexistent", + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } @@ -54,12 +66,22 @@ func TestTaskRunPipelineRunStatus(t *testing.T) { t.Errorf("Error waiting for TaskRun to finish: %s", err) } - pipeline := tb.Pipeline("tomatoes", namespace, - tb.PipelineSpec(tb.PipelineTask("foo", "banana")), - ) - pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec( - "tomatoes", tb.PipelineRunServiceAccountName("inexistent"), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "tomatoes", Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: "foo", + TaskRef: &v1beta1.TaskRef{Name: "banana"}, + }}, + }, + } + pipelineRun := 
&v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "pear", Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: "tomatoes"}, + ServiceAccountName: "inexistent", + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) } diff --git a/test/taskrun_test.go b/test/taskrun_test.go index 7ac7a7fbd50..afec2d1fd99 100644 --- a/test/taskrun_test.go +++ b/test/taskrun_test.go @@ -24,8 +24,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" @@ -41,23 +40,33 @@ func TestTaskRunFailure(t *testing.T) { taskRunName := "failing-taskrun" t.Logf("Creating Task and TaskRun in namespace %s", namespace) - task := tb.Task("failing-task", namespace, tb.TaskSpec( - tb.Step("busybox", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "echo hello"), - ), - tb.Step("busybox", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "exit 1"), - ), - tb.Step("busybox", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 30s"), - ), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "failing-task", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "echo hello"}, + }}, {Container: corev1.Container{ + Image: "busybox", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "exit 1"}, + }}, {Container: corev1.Container{ + Image: "busybox", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "sleep 30s"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } 
- taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef("failing-task"), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: taskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "failing-task"}, + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } @@ -72,7 +81,7 @@ func TestTaskRunFailure(t *testing.T) { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } - expectedStepState := []v1alpha1.StepState{{ + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ ExitCode: 0, @@ -101,7 +110,7 @@ func TestTaskRunFailure(t *testing.T) { ContainerName: "step-unnamed-2", }} ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") - ignoreStepFields := cmpopts.IgnoreFields(v1alpha1.StepState{}, "ImageID") + ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID") if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { t.Fatalf("-got, +want: %v", d) } @@ -118,18 +127,26 @@ func TestTaskRunStatus(t *testing.T) { fqImageName := "busybox@sha256:895ab622e92e18d6b461d671081757af7dbaa3b00e3e28e12505af7817f73649" t.Logf("Creating Task and TaskRun in namespace %s", namespace) - task := tb.Task("status-task", namespace, tb.TaskSpec( - // This was the digest of the latest tag as of 8/12/2019 - tb.Step("busybox@sha256:895ab622e92e18d6b461d671081757af7dbaa3b00e3e28e12505af7817f73649", - tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "echo hello"), - ), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "status-task", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + // This was the digest of the latest tag as of 8/12/2019 + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: 
"busybox@sha256:895ab622e92e18d6b461d671081757af7dbaa3b00e3e28e12505af7817f73649", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "echo hello"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef("status-task"), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: taskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "status-task"}, + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } @@ -144,7 +161,7 @@ func TestTaskRunStatus(t *testing.T) { t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) } - expectedStepState := []v1alpha1.StepState{{ + expectedStepState := []v1beta1.StepState{{ ContainerState: corev1.ContainerState{ Terminated: &corev1.ContainerStateTerminated{ ExitCode: 0, @@ -156,7 +173,7 @@ func TestTaskRunStatus(t *testing.T) { }} ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") - ignoreStepFields := cmpopts.IgnoreFields(v1alpha1.StepState{}, "ImageID") + ignoreStepFields := cmpopts.IgnoreFields(v1beta1.StepState{}, "ImageID") if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { t.Fatalf("-got, +want: %v", d) } diff --git a/test/timeout_test.go b/test/timeout_test.go index 0da8cb4c1dc..9ca531f0b1a 100644 --- a/test/timeout_test.go +++ b/test/timeout_test.go @@ -24,9 +24,8 @@ import ( "testing" "time" - "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources" - tb "github.com/tektoncd/pipeline/test/builder" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 
"knative.dev/pkg/apis" @@ -44,18 +43,36 @@ func TestPipelineRunTimeout(t *testing.T) { defer tearDown(t, c, namespace) t.Logf("Creating Task in namespace %s", namespace) - task := tb.Task("banana", namespace, tb.TaskSpec( - tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 10")))) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "banana", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "sleep 10"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "banana", err) } - pipeline := tb.Pipeline("tomatoes", namespace, - tb.PipelineSpec(tb.PipelineTask("foo", "banana")), - ) - pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name, - tb.PipelineRunTimeout(5*time.Second), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "tomatoes", Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: "foo", + TaskRef: &v1beta1.TaskRef{Name: "banana"}, + }}, + }, + } + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "pear", Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipeline.Name}, + Timeout: &metav1.Duration{Duration: 5 * time.Second}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) } @@ -119,9 +136,21 @@ func TestPipelineRunTimeout(t *testing.T) { // Verify that we can create a second Pipeline using the same Task without a Pipeline-level timeout that will not // time out - secondPipeline := tb.Pipeline("peppers", namespace, - tb.PipelineSpec(tb.PipelineTask("foo", "banana"))) - secondPipelineRun := tb.PipelineRun("kiwi", namespace, tb.PipelineRunSpec("peppers")) + secondPipeline := &v1beta1.Pipeline{ + 
ObjectMeta: metav1.ObjectMeta{Name: "peppers", Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: []v1beta1.PipelineTask{{ + Name: "foo", + TaskRef: &v1beta1.TaskRef{Name: "banana"}, + }}, + }, + } + secondPipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "kiwi", Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: "peppers"}, + }, + } if _, err := c.PipelineClient.Create(secondPipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", secondPipeline.Name, err) } @@ -144,14 +173,29 @@ func TestTaskRunTimeout(t *testing.T) { defer tearDown(t, c, namespace) t.Logf("Creating Task and TaskRun in namespace %s", namespace) - if _, err := c.TaskClient.Create(tb.Task("giraffe", namespace, - tb.TaskSpec(tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 3000"))))); err != nil { + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "giraffe", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"/bin/sh"}, + Args: []string{"-c", "sleep 3000"}, + }}}, + }, + } + if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task `%s`: %s", "giraffe", err) } - if _, err := c.TaskRunClient.Create(tb.TaskRun("run-giraffe", namespace, tb.TaskRunSpec(tb.TaskRunTaskRef("giraffe"), - // Do not reduce this timeout. Taskrun e2e test is also verifying - // if reconcile is triggered from timeout handler and not by pod informers - tb.TaskRunTimeout(30*time.Second)))); err != nil { + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: "run-giraffe", Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: "giraffe"}, + // Do not reduce this timeout. 
Taskrun e2e test is also verifying + // if reconcile is triggered from timeout handler and not by pod informers + Timeout: &metav1.Duration{Duration: 30 * time.Second}, + }, + } + if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun `%s`: %s", "run-giraffe", err) } @@ -169,11 +213,26 @@ func TestPipelineTaskTimeout(t *testing.T) { defer tearDown(t, c, namespace) t.Logf("Creating Tasks in namespace %s", namespace) - task1 := tb.Task("success", namespace, tb.TaskSpec( - tb.Step("busybox", tb.StepCommand("sleep"), tb.StepArgs("1s")))) - - task2 := tb.Task("timeout", namespace, tb.TaskSpec( - tb.Step("busybox", tb.StepCommand("sleep"), tb.StepArgs("10s")))) + task1 := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "success", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"sleep"}, + Args: []string{"1s"}, + }}}, + }, + } + task2 := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: "timeout", Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "busybox", + Command: []string{"sleep"}, + Args: []string{"10s"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task1); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task1.Name, err) @@ -182,14 +241,26 @@ func TestPipelineTaskTimeout(t *testing.T) { t.Fatalf("Failed to create Task `%s`: %s", task2.Name, err) } - pipeline := tb.Pipeline("pipelinetasktimeout", namespace, - tb.PipelineSpec( - tb.PipelineTask("pipelinetask1", task1.Name, tb.PipelineTaskTimeout(60*time.Second)), - tb.PipelineTask("pipelinetask2", task2.Name, tb.PipelineTaskTimeout(5*time.Second)), - ), - ) - - pipelineRun := tb.PipelineRun("prtasktimeout", namespace, tb.PipelineRunSpec(pipeline.Name)) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: "pipelinetasktimeout", Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Tasks: 
[]v1beta1.PipelineTask{{ + Name: "pipelinetask1", + TaskRef: &v1beta1.TaskRef{Name: task1.Name}, + Timeout: &metav1.Duration{Duration: 60 * time.Second}, + }, { + Name: "pipelinetask2", + TaskRef: &v1beta1.TaskRef{Name: task2.Name}, + Timeout: &metav1.Duration{Duration: 5 * time.Second}, + }}, + }, + } + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: "prtasktimeout", Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipeline.Name}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) @@ -238,7 +309,7 @@ func TestPipelineTaskTimeout(t *testing.T) { var wg sync.WaitGroup for _, taskrunItem := range taskrunList.Items { wg.Add(1) - go func(tr v1alpha1.TaskRun) { + go func(tr v1beta1.TaskRun) { defer wg.Done() name := tr.Name err := WaitForTaskRunState(c, name, func(ca apis.ConditionAccessor) (bool, error) { diff --git a/test/v1alpha1/adoc.go b/test/v1alpha1/adoc.go new file mode 100644 index 00000000000..3deae97650b --- /dev/null +++ b/test/v1alpha1/adoc.go @@ -0,0 +1,44 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package test holds the project's test helpers and end-to-end tests (e2e). + +Create Pipeline resources + +To create Tekton objects (e.g. 
Task, Pipeline, …), you +can use the builder (./builder) package to reduce noise: + + func MyTest(t *testing.T){ + // Pipeline + pipeline := tb.Pipeline("tomatoes", "namespace", + tb.PipelineSpec(tb.PipelineTask("foo", "banana")), + ) + // … and PipelineRun + pipelineRun := tb.PipelineRun("pear", "namespace", + tb.PipelineRunSpec("tomatoes", tb.PipelineRunServiceAccount("inexistent")), + ) + // And do something with them + // […] + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) + } + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) + } + } +*/ +package test diff --git a/test/v1alpha1/artifact_bucket_test.go b/test/v1alpha1/artifact_bucket_test.go new file mode 100644 index 00000000000..0fc1a3338b1 --- /dev/null +++ b/test/v1alpha1/artifact_bucket_test.go @@ -0,0 +1,266 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/artifacts" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +const ( + helloworldResourceName = "helloworldgit" + addFileTaskName = "add-file-to-resource-task" + runFileTaskName = "run-new-file-task" + bucketTestPipelineName = "bucket-test-pipeline" + bucketTestPipelineRunName = "bucket-test-pipeline-run" + systemNamespace = "tekton-pipelines" + bucketSecretName = "bucket-secret" + bucketSecretKey = "bucket-secret-key" +) + +// TestStorageBucketPipelineRun is an integration test that will verify a pipeline +// can use a bucket for temporary storage of artifacts shared between tasks +func TestStorageBucketPipelineRun(t *testing.T) { + configFilePath := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") + if configFilePath == "" { + t.Skip("GCP_SERVICE_ACCOUNT_KEY_PATH variable is not set.") + } + c, namespace := setup(t) + // Bucket tests can't run in parallel without causing issues with other tests. 
+ + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + bucketName := fmt.Sprintf("build-pipeline-test-%s-%d", namespace, time.Now().Unix()) + + t.Logf("Creating Secret %s", bucketSecretName) + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getBucketSecret(t, configFilePath, namespace)); err != nil { + t.Fatalf("Failed to create Secret %q: %v", bucketSecretName, err) + } + defer deleteBucketSecret(c, t, namespace) + + t.Logf("Creating GCS bucket %s", bucketName) + createbuckettask := tb.Task("createbuckettask", namespace, tb.TaskSpec( + tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + })), + tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil mb gs://%s", bucketName)), + tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), + tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), + ), + ), + ) + + t.Logf("Creating Task %s", "createbuckettask") + if _, err := c.TaskClient.Create(createbuckettask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", "createbuckettask", err) + } + + createbuckettaskrun := tb.TaskRun("createbuckettaskrun", namespace, + tb.TaskRunSpec(tb.TaskRunTaskRef("createbuckettask"))) + + t.Logf("Creating TaskRun %s", "createbuckettaskrun") + if _, err := c.TaskRunClient.Create(createbuckettaskrun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", "createbuckettaskrun", err) + } + + if err := WaitForTaskRunState(c, "createbuckettaskrun", TaskRunSucceed("createbuckettaskrun"), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", "createbuckettaskrun", err) + } + + 
defer runTaskToDeleteBucket(c, t, namespace, bucketName, bucketSecretName, bucketSecretKey) + + originalConfigMap, err := c.KubeClient.Kube.CoreV1().ConfigMaps(systemNamespace).Get(artifacts.GetBucketConfigName(), metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get ConfigMap `%s`: %s", artifacts.GetBucketConfigName(), err) + } + originalConfigMapData := originalConfigMap.Data + + t.Logf("Creating ConfigMap %s", artifacts.GetBucketConfigName()) + configMapData := map[string]string{ + artifacts.BucketLocationKey: fmt.Sprintf("gs://%s", bucketName), + artifacts.BucketServiceAccountSecretName: bucketSecretName, + artifacts.BucketServiceAccountSecretKey: bucketSecretKey, + } + if err := updateConfigMap(c.KubeClient, systemNamespace, artifacts.GetBucketConfigName(), configMapData); err != nil { + t.Fatal(err) + } + defer resetConfigMap(t, c, systemNamespace, artifacts.GetBucketConfigName(), originalConfigMapData) + + t.Logf("Creating Git PipelineResource %s", helloworldResourceName) + helloworldResource := tb.PipelineResource(helloworldResourceName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/pivotal-nader-ziada/gohelloworld"), + tb.PipelineResourceSpecParam("Revision", "master"), + ), + ) + if _, err := c.PipelineResourceClient.Create(helloworldResource); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", helloworldResourceName, err) + } + + t.Logf("Creating Task %s", addFileTaskName) + addFileTask := tb.Task(addFileTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.TaskOutputs(tb.OutputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.Step("ubuntu", tb.StepName("addfile"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "'#!/bin/bash\necho hello' > /workspace/helloworldgit/newfile"), + ), + tb.Step("ubuntu", tb.StepName("make-executable"), 
tb.StepCommand("chmod"), + tb.StepArgs("+x", "/workspace/helloworldgit/newfile")), + )) + if _, err := c.TaskClient.Create(addFileTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) + } + + t.Logf("Creating Task %s", runFileTaskName) + readFileTask := tb.Task(runFileTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.Step("ubuntu", tb.StepName("runfile"), tb.StepCommand("/workspace/helloworld/newfile")), + )) + if _, err := c.TaskClient.Create(readFileTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", runFileTaskName, err) + } + + t.Logf("Creating Pipeline %s", bucketTestPipelineName) + bucketTestPipeline := tb.Pipeline(bucketTestPipelineName, namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("source-repo", "git"), + tb.PipelineTask("addfile", addFileTaskName, + tb.PipelineTaskInputResource("helloworldgit", "source-repo"), + tb.PipelineTaskOutputResource("helloworldgit", "source-repo"), + ), + tb.PipelineTask("runfile", runFileTaskName, + tb.PipelineTaskInputResource("helloworldgit", "source-repo", tb.From("addfile")), + ), + )) + if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", bucketTestPipelineRunName) + bucketTestPipelineRun := tb.PipelineRun(bucketTestPipelineRunName, namespace, tb.PipelineRunSpec( + bucketTestPipelineName, + tb.PipelineRunResourceBinding("source-repo", tb.PipelineResourceBindingRef(helloworldResourceName)), + )) + if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) + } + + // Verify status of PipelineRun (wait for it) + if err := WaitForPipelineRunState(c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), 
"PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", bucketTestPipelineRunName, err) + t.Fatalf("PipelineRun execution failed") + } +} + +// updateConfigMap updates the config map for specified @name with values. We can't use the one from knativetest because +// it assumes that Data is already a non-nil map, and by default, it isn't! +func updateConfigMap(client *knativetest.KubeClient, name string, configName string, values map[string]string) error { + configMap, err := client.GetConfigMap(name).Get(configName, metav1.GetOptions{}) + if err != nil { + return err + } + + if configMap.Data == nil { + configMap.Data = make(map[string]string) + } + + for key, value := range values { + configMap.Data[key] = value + } + + _, err = client.GetConfigMap(name).Update(configMap) + return err +} + +func getBucketSecret(t *testing.T, configFilePath, namespace string) *corev1.Secret { + t.Helper() + f, err := ioutil.ReadFile(configFilePath) + if err != nil { + t.Fatalf("Failed to read json key file %s at path %s", err, configFilePath) + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: bucketSecretName, + }, + StringData: map[string]string{ + bucketSecretKey: string(f), + }, + } +} + +func deleteBucketSecret(c *clients, t *testing.T, namespace string) { + if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(bucketSecretName, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete Secret `%s`: %s", bucketSecretName, err) + } +} + +func resetConfigMap(t *testing.T, c *clients, namespace, configName string, values map[string]string) { + if err := updateConfigMap(c.KubeClient, namespace, configName, values); err != nil { + t.Log(err) + } +} + +func runTaskToDeleteBucket(c *clients, t *testing.T, namespace, bucketName, bucketSecretName, bucketSecretKey string) { + deletelbuckettask := tb.Task("deletelbuckettask", namespace, tb.TaskSpec( + 
tb.TaskVolume("bucket-secret-volume", tb.VolumeSource(corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: bucketSecretName, + }, + })), + tb.Step("google/cloud-sdk:alpine", tb.StepName("step1"), + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", fmt.Sprintf("gcloud auth activate-service-account --key-file /var/secret/bucket-secret/bucket-secret-key && gsutil rm -r gs://%s", bucketName)), + tb.StepVolumeMount("bucket-secret-volume", fmt.Sprintf("/var/secret/%s", bucketSecretName)), + tb.StepEnvVar("CREDENTIALS", fmt.Sprintf("/var/secret/%s/%s", bucketSecretName, bucketSecretKey)), + ), + ), + ) + + t.Logf("Creating Task %s", "deletelbuckettask") + if _, err := c.TaskClient.Create(deletelbuckettask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", "deletelbuckettask", err) + } + + deletelbuckettaskrun := tb.TaskRun("deletelbuckettaskrun", namespace, + tb.TaskRunSpec(tb.TaskRunTaskRef("deletelbuckettask"))) + + t.Logf("Creating TaskRun %s", "deletelbuckettaskrun") + if _, err := c.TaskRunClient.Create(deletelbuckettaskrun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", "deletelbuckettaskrun", err) + } + + if err := WaitForTaskRunState(c, "deletelbuckettaskrun", TaskRunSucceed("deletelbuckettaskrun"), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", "deletelbuckettaskrun", err) + } +} diff --git a/test/v1alpha1/build_logs.go b/test/v1alpha1/build_logs.go new file mode 100644 index 00000000000..9b7eac9b0b0 --- /dev/null +++ b/test/v1alpha1/build_logs.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "io/ioutil" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "knative.dev/pkg/test/logging" +) + +// CollectPodLogs will get the logs for all containers in a Pod +func CollectPodLogs(c *clients, podName, namespace string, logf logging.FormatLogger) { + logs, err := getContainersLogsFromPod(c.KubeClient.Kube, podName, namespace) + if err != nil { + logf("Could not get logs for pod %s: %s", podName, err) + } + logf("build logs %s", logs) +} + +func getContainersLogsFromPod(c kubernetes.Interface, pod, namespace string) (string, error) { + p, err := c.CoreV1().Pods(namespace).Get(pod, metav1.GetOptions{}) + if err != nil { + return "", err + } + + sb := strings.Builder{} + for _, container := range p.Spec.Containers { + sb.WriteString(fmt.Sprintf("\n>>> Container %s:\n", container.Name)) + logs, err := getContainerLogsFromPod(c, pod, container.Name, namespace) + if err != nil { + return "", err + } + sb.WriteString(logs) + } + return sb.String(), nil +} + +func getContainerLogsFromPod(c kubernetes.Interface, pod, container, namespace string) (string, error) { + sb := strings.Builder{} + req := c.CoreV1().Pods(namespace).GetLogs(pod, &corev1.PodLogOptions{Follow: true, Container: container}) + rc, err := req.Stream() + if err != nil { + return "", err + } + bs, err := ioutil.ReadAll(rc) + if err != nil { + return "", err + } + sb.Write(bs) + return sb.String(), nil +} diff --git a/test/v1alpha1/cancel_test.go b/test/v1alpha1/cancel_test.go new file mode 
100644 index 00000000000..0d029885d2e --- /dev/null +++ b/test/v1alpha1/cancel_test.go @@ -0,0 +1,155 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "sync" + "testing" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +// TestTaskRunPipelineRunCancel is an integration test that will +// verify that pipelinerun cancel leads to the correct TaskRun statuses +// and pod deletions.
+func TestTaskRunPipelineRunCancel(t *testing.T) { + type tests struct { + name string + retries bool + } + + tds := []tests{ + { + name: "With retries", + retries: true, + }, { + name: "No retries", + retries: false, + }, + } + + t.Parallel() + + for _, tdd := range tds { + t.Run(tdd.name, func(t *testing.T) { + tdd := tdd + var pipelineTask = tb.PipelineTask("foo", "banana") + if tdd.retries { + pipelineTask = tb.PipelineTask("foo", "banana", tb.Retries(1)) + } + + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Task in namespace %s", namespace) + task := tb.Task("banana", namespace, tb.TaskSpec( + tb.Step("ubuntu", tb.StepCommand("/bin/bash"), tb.StepArgs("-c", "sleep 5000")), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `banana`: %s", err) + } + + t.Logf("Creating Pipeline in namespace %s", namespace) + pipeline := tb.Pipeline("tomatoes", namespace, + tb.PipelineSpec(pipelineTask), + ) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) + } + + pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name)) + + t.Logf("Creating PipelineRun in namespace %s", namespace) + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) + } + + t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", "pear", namespace) + if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, Running("pear"), "PipelineRunRunning"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to be running: %s", "pear", err) + } + + taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: "tekton.dev/pipelineRun=pear"}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", "pear", err) + } 
+ + var wg sync.WaitGroup + var trName []string + t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", "pear", namespace) + for _, taskrunItem := range taskrunList.Items { + trName = append(trName, taskrunItem.Name) + wg.Add(1) + go func(name string) { + defer wg.Done() + err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to be running: %v", name, err) + } + }(taskrunItem.Name) + } + wg.Wait() + + pr, err := c.PipelineRunClient.Get("pear", metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get PipelineRun `%s`: %s", "pear", err) + } + + pr.Spec.Status = v1alpha1.PipelineRunSpecStatusCancelled + if _, err := c.PipelineRunClient.Update(pr); err != nil { + t.Fatalf("Failed to cancel PipelineRun `%s`: %s", "pear", err) + } + + t.Logf("Waiting for PipelineRun %s in namespace %s to be cancelled", "pear", namespace) + if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, FailedWithReason("PipelineRunCancelled", "pear"), "PipelineRunCancelled"); err != nil { + t.Errorf("Error waiting for PipelineRun `pear` to finished: %s", err) + } + + t.Logf("Waiting for TaskRuns in PipelineRun %s in namespace %s to be cancelled", "pear", namespace) + for _, taskrunItem := range taskrunList.Items { + wg.Add(1) + go func(name string) { + defer wg.Done() + err := WaitForTaskRunState(c, name, FailedWithReason("TaskRunCancelled", name), "TaskRunCancelled") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to be finished: %v", name, err) + } + }(taskrunItem.Name) + } + wg.Wait() + + matchKinds := map[string][]string{"PipelineRun": {"pear"}, "TaskRun": trName} + expectedNumberOfEvents := 1 + len(trName) + t.Logf("Making sure %d events were created from pipelinerun with kinds %v", expectedNumberOfEvents, matchKinds) + events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Failed") + if err != nil { + t.Fatalf("Failed to collect matching 
events: %q", err) + } + if len(events) != expectedNumberOfEvents { + t.Fatalf("Expected %d number of successful events from pipelinerun and taskrun but got %d; list of receieved events : %#v", expectedNumberOfEvents, len(events), events) + } + }) + } +} diff --git a/test/v1alpha1/clients.go b/test/v1alpha1/clients.go new file mode 100644 index 00000000000..3d4b9efafea --- /dev/null +++ b/test/v1alpha1/clients.go @@ -0,0 +1,96 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Get access to client objects + +To initialize client objects you can use the setup function. 
It returns a clients struct +that contains initialized clients for accessing: + + - Kubernetes objects + - Pipelines (https://github.com/tektoncd/pipeline#pipeline) + +For example, to create a Pipeline + + _, err = clients.PipelineClient.Pipelines.Create(test.Pipeline(namespaceName, pipelineName)) + +And you can use the client to clean up resources created by your test + + func tearDown(clients *test.Clients) { + if clients != nil { + clients.Delete([]string{routeName}, []string{configName}) + } + } + +*/ +package test + +import ( + "testing" + + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned" + "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + resourceversioned "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned" + resourcev1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/typed/resource/v1alpha1" + knativetest "knative.dev/pkg/test" +) + +// clients holds instances of interfaces for making requests to the Pipeline controllers. +type clients struct { + KubeClient *knativetest.KubeClient + + PipelineClient v1alpha1.PipelineInterface + TaskClient v1alpha1.TaskInterface + TaskRunClient v1alpha1.TaskRunInterface + PipelineRunClient v1alpha1.PipelineRunInterface + PipelineResourceClient resourcev1alpha1.PipelineResourceInterface + ConditionClient v1alpha1.ConditionInterface +} + +// newClients instantiates and returns several clientsets required for making requests to the +// Pipeline cluster specified by the combination of clusterName and configPath. Clients can +// make requests within namespace. 
+func newClients(t *testing.T, configPath, clusterName, namespace string) *clients { + t.Helper() + var err error + c := &clients{} + + c.KubeClient, err = knativetest.NewKubeClient(configPath, clusterName) + if err != nil { + t.Fatalf("failed to create kubeclient from config file at %s: %s", configPath, err) + } + + cfg, err := knativetest.BuildClientConfig(configPath, clusterName) + if err != nil { + t.Fatalf("failed to create configuration obj from %s for cluster %s: %s", configPath, clusterName, err) + } + + cs, err := versioned.NewForConfig(cfg) + if err != nil { + t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) + } + rcs, err := resourceversioned.NewForConfig(cfg) + if err != nil { + t.Fatalf("failed to create pipeline clientset from config file at %s: %s", configPath, err) + } + c.PipelineClient = cs.TektonV1alpha1().Pipelines(namespace) + c.TaskClient = cs.TektonV1alpha1().Tasks(namespace) + c.TaskRunClient = cs.TektonV1alpha1().TaskRuns(namespace) + c.PipelineRunClient = cs.TektonV1alpha1().PipelineRuns(namespace) + c.PipelineResourceClient = rcs.TektonV1alpha1().PipelineResources(namespace) + c.ConditionClient = cs.TektonV1alpha1().Conditions(namespace) + return c +} diff --git a/test/v1alpha1/cluster_resource_test.go b/test/v1alpha1/cluster_resource_test.go new file mode 100644 index 00000000000..a35e2306b51 --- /dev/null +++ b/test/v1alpha1/cluster_resource_test.go @@ -0,0 +1,159 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "testing" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +func TestClusterResource(t *testing.T) { + secretName := "hw-secret" + configName := "hw-config" + resourceName := "helloworld-cluster" + taskName := "helloworld-cluster-task" + taskRunName := "helloworld-cluster-taskrun" + + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating secret %s", secretName) + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getClusterResourceTaskSecret(namespace, secretName)); err != nil { + t.Fatalf("Failed to create Secret `%s`: %s", secretName, err) + } + + t.Logf("Creating configMap %s", configName) + if _, err := c.KubeClient.Kube.CoreV1().ConfigMaps(namespace).Create(getClusterConfigMap(namespace, configName)); err != nil { + t.Fatalf("Failed to create configMap `%s`: %s", configName, err) + } + + t.Logf("Creating cluster PipelineResource %s", resourceName) + if _, err := c.PipelineResourceClient.Create(getClusterResource(namespace, resourceName, secretName)); err != nil { + t.Fatalf("Failed to create cluster Pipeline Resource `%s`: %s", resourceName, err) + } + + t.Logf("Creating Task %s", taskName) + if _, err := c.TaskClient.Create(getClusterResourceTask(namespace, taskName, configName)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", taskName, err) + } + + t.Logf("Creating TaskRun %s", taskRunName) + if _, err := c.TaskRunClient.Create(getClusterResourceTaskRun(namespace, taskRunName, taskName, resourceName)); err != nil { + t.Fatalf("Failed to create Taskrun `%s`: %s", taskRunName, err) + } + + // 
Verify status of TaskRun (wait for it) + if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunCompleted"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", taskRunName, err) + } +} + +func getClusterResource(namespace, name, sname string) *v1alpha1.PipelineResource { + return tb.PipelineResource(name, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeCluster, + tb.PipelineResourceSpecParam("Name", "helloworld-cluster"), + tb.PipelineResourceSpecParam("Url", "https://1.1.1.1"), + tb.PipelineResourceSpecParam("username", "test-user"), + tb.PipelineResourceSpecParam("password", "test-password"), + tb.PipelineResourceSpecSecretParam("cadata", sname, "cadatakey"), + tb.PipelineResourceSpecSecretParam("token", sname, "tokenkey"), + )) +} + +func getClusterResourceTaskSecret(namespace, name string) *corev1.Secret { + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Data: map[string][]byte{ + "cadatakey": []byte("Y2EtY2VydAo="), //ca-cert + "tokenkey": []byte("dG9rZW4K"), //token + }, + } +} + +func getClusterResourceTask(namespace, name, configName string) *v1alpha1.Task { + return tb.Task(name, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("target-cluster", v1alpha1.PipelineResourceTypeCluster)), + tb.TaskVolume("config-vol", tb.VolumeSource(corev1.VolumeSource{ + ConfigMap: &corev1.ConfigMapVolumeSource{ + LocalObjectReference: corev1.LocalObjectReference{ + Name: configName, + }, + }, + })), + tb.Step("ubuntu", tb.StepName("check-file-existence"), + tb.StepCommand("cat"), tb.StepArgs("/workspace/helloworld-cluster/kubeconfig"), + ), + tb.Step("ubuntu", tb.StepName("check-config-data"), + tb.StepCommand("cat"), tb.StepArgs("/config/test.data"), + tb.StepVolumeMount("config-vol", "/config"), + ), + tb.Step("ubuntu", tb.StepName("check-contents"), + tb.StepCommand("bash"), tb.StepArgs("-c", "cmp -b /workspace/helloworld-cluster/kubeconfig 
/config/test.data"), + tb.StepVolumeMount("config-vol", "/config"), + ), + )) +} + +func getClusterResourceTaskRun(namespace, name, taskName, resName string) *v1alpha1.TaskRun { + return tb.TaskRun(name, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(taskName), + tb.TaskRunInputs(tb.TaskRunInputsResource("target-cluster", tb.TaskResourceBindingRef(resName))), + )) +} + +func getClusterConfigMap(namespace, name string) *corev1.ConfigMap { + return &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: name, + }, + Data: map[string]string{ + "test.data": `apiVersion: v1 +clusters: +- cluster: + certificate-authority-data: WTJFdFkyVnlkQW89 + server: https://1.1.1.1 + name: helloworld-cluster +contexts: +- context: + cluster: helloworld-cluster + user: test-user + name: helloworld-cluster +current-context: helloworld-cluster +kind: Config +preferences: {} +users: +- name: test-user + user: + token: dG9rZW4K +`, + }, + } +} diff --git a/test/v1alpha1/controller.go b/test/v1alpha1/controller.go new file mode 100644 index 00000000000..e02c848b049 --- /dev/null +++ b/test/v1alpha1/controller.go @@ -0,0 +1,178 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "context" + "testing" + + // Link in the fakes so they get injected into injection.Fake + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + fakepipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/fake" + informersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" + fakepipelineclient "github.com/tektoncd/pipeline/pkg/client/injection/client/fake" + fakeclustertaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/clustertask/fake" + fakeconditioninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/condition/fake" + fakepipelineinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipeline/fake" + fakepipelineruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/pipelinerun/fake" + faketaskinformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/task/fake" + faketaskruninformer "github.com/tektoncd/pipeline/pkg/client/injection/informers/pipeline/v1alpha1/taskrun/fake" + fakeresourceclientset "github.com/tektoncd/pipeline/pkg/client/resource/clientset/versioned/fake" + resourceinformersv1alpha1 "github.com/tektoncd/pipeline/pkg/client/resource/informers/externalversions/resource/v1alpha1" + fakeresourceclient "github.com/tektoncd/pipeline/pkg/client/resource/injection/client/fake" + fakeresourceinformer "github.com/tektoncd/pipeline/pkg/client/resource/injection/informers/resource/v1alpha1/pipelineresource/fake" + corev1 "k8s.io/api/core/v1" + coreinformers "k8s.io/client-go/informers/core/v1" + fakekubeclientset "k8s.io/client-go/kubernetes/fake" + fakekubeclient "knative.dev/pkg/client/injection/kube/client/fake" + fakepodinformer "knative.dev/pkg/client/injection/kube/informers/core/v1/pod/fake" + "knative.dev/pkg/controller" +) + +// 
// Data represents the desired state of the system (i.e. existing resources) to seed controllers
// with.
type Data struct {
	PipelineRuns      []*v1alpha1.PipelineRun
	Pipelines         []*v1alpha1.Pipeline
	TaskRuns          []*v1alpha1.TaskRun
	Tasks             []*v1alpha1.Task
	ClusterTasks      []*v1alpha1.ClusterTask
	PipelineResources []*v1alpha1.PipelineResource
	Conditions        []*v1alpha1.Condition
	Pods              []*corev1.Pod
	Namespaces        []*corev1.Namespace
}

// Clients holds references to clients which are useful for reconciler tests.
type Clients struct {
	Pipeline *fakepipelineclientset.Clientset
	Resource *fakeresourceclientset.Clientset
	Kube     *fakekubeclientset.Clientset
}

// Informers holds references to informers which are useful for reconciler tests.
type Informers struct {
	PipelineRun      informersv1alpha1.PipelineRunInformer
	Pipeline         informersv1alpha1.PipelineInformer
	TaskRun          informersv1alpha1.TaskRunInformer
	Task             informersv1alpha1.TaskInformer
	ClusterTask      informersv1alpha1.ClusterTaskInformer
	PipelineResource resourceinformersv1alpha1.PipelineResourceInformer
	Condition        informersv1alpha1.ConditionInformer
	Pod              coreinformers.PodInformer
}

// Assets holds references to the controller and the clients it was built with.
// NOTE(review): the historical comment also claimed "logs ... and informers",
// but this struct only carries the controller and the clients.
type Assets struct {
	Controller *controller.Impl
	Clients    Clients
}

// SeedTestData returns Clients and Informers populated with the
// given Data.
+// nolint: golint +func SeedTestData(t *testing.T, ctx context.Context, d Data) (Clients, Informers) { + c := Clients{ + Kube: fakekubeclient.Get(ctx), + Pipeline: fakepipelineclient.Get(ctx), + Resource: fakeresourceclient.Get(ctx), + } + + i := Informers{ + PipelineRun: fakepipelineruninformer.Get(ctx), + Pipeline: fakepipelineinformer.Get(ctx), + TaskRun: faketaskruninformer.Get(ctx), + Task: faketaskinformer.Get(ctx), + ClusterTask: fakeclustertaskinformer.Get(ctx), + PipelineResource: fakeresourceinformer.Get(ctx), + Condition: fakeconditioninformer.Get(ctx), + Pod: fakepodinformer.Get(ctx), + } + + for _, pr := range d.PipelineRuns { + if err := i.PipelineRun.Informer().GetIndexer().Add(pr); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().PipelineRuns(pr.Namespace).Create(pr); err != nil { + t.Fatal(err) + } + } + for _, p := range d.Pipelines { + if err := i.Pipeline.Informer().GetIndexer().Add(p); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Pipelines(p.Namespace).Create(p); err != nil { + t.Fatal(err) + } + } + for _, tr := range d.TaskRuns { + if err := i.TaskRun.Informer().GetIndexer().Add(tr); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().TaskRuns(tr.Namespace).Create(tr); err != nil { + t.Fatal(err) + } + } + for _, ta := range d.Tasks { + if err := i.Task.Informer().GetIndexer().Add(ta); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Tasks(ta.Namespace).Create(ta); err != nil { + t.Fatal(err) + } + } + for _, ct := range d.ClusterTasks { + if err := i.ClusterTask.Informer().GetIndexer().Add(ct); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().ClusterTasks().Create(ct); err != nil { + t.Fatal(err) + } + } + for _, r := range d.PipelineResources { + if err := i.PipelineResource.Informer().GetIndexer().Add(r); err != nil { + t.Fatal(err) + } + if _, err := 
c.Resource.TektonV1alpha1().PipelineResources(r.Namespace).Create(r); err != nil { + t.Fatal(err) + } + } + for _, cond := range d.Conditions { + if err := i.Condition.Informer().GetIndexer().Add(cond); err != nil { + t.Fatal(err) + } + if _, err := c.Pipeline.TektonV1alpha1().Conditions(cond.Namespace).Create(cond); err != nil { + t.Fatal(err) + } + } + for _, p := range d.Pods { + if err := i.Pod.Informer().GetIndexer().Add(p); err != nil { + t.Fatal(err) + } + if _, err := c.Kube.CoreV1().Pods(p.Namespace).Create(p); err != nil { + t.Fatal(err) + } + } + for _, n := range d.Namespaces { + if _, err := c.Kube.CoreV1().Namespaces().Create(n); err != nil { + t.Fatal(err) + } + } + c.Pipeline.ClearActions() + c.Kube.ClearActions() + return c, i +} diff --git a/test/v1alpha1/dag_test.go b/test/v1alpha1/dag_test.go new file mode 100644 index 00000000000..0e23e2d34af --- /dev/null +++ b/test/v1alpha1/dag_test.go @@ -0,0 +1,186 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "sort" + "strings" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + clientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +// TestDAGPipelineRun creates a graph of arbitrary Tasks, then looks at the corresponding +// TaskRun start times to ensure they were run in the order intended, which is: +// | +// pipeline-task-1 +// / \ +// pipeline-task-2-parallel-1 pipeline-task-2-parallel-2 +// \ / +// pipeline-task-3 +// | +// pipeline-task-4 +func TestDAGPipelineRun(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + // Create the Task that echoes text + echoTask := tb.Task("echo-task", namespace, tb.TaskSpec( + tb.TaskInputs( + tb.InputsResource("repo", v1alpha1.PipelineResourceTypeGit), + tb.InputsParamSpec("text", v1alpha1.ParamTypeString, tb.ParamSpecDescription("The text that should be echoed")), + ), + tb.TaskOutputs(tb.OutputsResource("repo", v1alpha1.PipelineResourceTypeGit)), + tb.Step("busybox", tb.StepCommand("echo"), tb.StepArgs("$(inputs.params.text)")), + tb.Step("busybox", tb.StepCommand("ln"), tb.StepArgs("-s", "$(inputs.resources.repo.path)", "$(outputs.resources.repo.path)")), + )) + if _, err := c.TaskClient.Create(echoTask); err != nil { + t.Fatalf("Failed to create echo Task: %s", err) + } + + // Create the repo PipelineResource (doesn't really matter which repo we use) + repoResource := tb.PipelineResource("repo", namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/githubtraining/example-basic"), + )) + if _, err := c.PipelineResourceClient.Create(repoResource); err != nil { + 
t.Fatalf("Failed to create simple repo PipelineResource: %s", err) + } + + // Intentionally declaring Tasks in a mixed up order to ensure the order + // of execution isn't at all dependent on the order they are declared in + pipeline := tb.Pipeline("dag-pipeline", namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("repo", "git"), + tb.PipelineTask("pipeline-task-3", "echo-task", + tb.PipelineTaskInputResource("repo", "repo", tb.From("pipeline-task-2-parallel-1", "pipeline-task-2-parallel-2")), + tb.PipelineTaskOutputResource("repo", "repo"), + tb.PipelineTaskParam("text", "wow"), + ), + tb.PipelineTask("pipeline-task-2-parallel-2", "echo-task", + tb.PipelineTaskInputResource("repo", "repo", tb.From("pipeline-task-1")), tb.PipelineTaskOutputResource("repo", "repo"), + tb.PipelineTaskOutputResource("repo", "repo"), + tb.PipelineTaskParam("text", "such parallel"), + ), + tb.PipelineTask("pipeline-task-4", "echo-task", + tb.RunAfter("pipeline-task-3"), + tb.PipelineTaskInputResource("repo", "repo"), + tb.PipelineTaskOutputResource("repo", "repo"), + tb.PipelineTaskParam("text", "very cloud native"), + ), + tb.PipelineTask("pipeline-task-2-parallel-1", "echo-task", + tb.PipelineTaskInputResource("repo", "repo", tb.From("pipeline-task-1")), + tb.PipelineTaskOutputResource("repo", "repo"), + tb.PipelineTaskParam("text", "much graph"), + ), + tb.PipelineTask("pipeline-task-1", "echo-task", + tb.PipelineTaskInputResource("repo", "repo"), + tb.PipelineTaskOutputResource("repo", "repo"), + tb.PipelineTaskParam("text", "how to ci/cd?"), + ), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create dag-pipeline: %s", err) + } + pipelineRun := tb.PipelineRun("dag-pipeline-run", namespace, tb.PipelineRunSpec("dag-pipeline", + tb.PipelineRunResourceBinding("repo", tb.PipelineResourceBindingRef("repo")), + )) + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create dag-pipeline-run PipelineRun: 
%s", err) + } + t.Logf("Waiting for DAG pipeline to complete") + if err := WaitForPipelineRunState(c, "dag-pipeline-run", pipelineRunTimeout, PipelineRunSucceed("dag-pipeline-run"), "PipelineRunSuccess"); err != nil { + t.Fatalf("Error waiting for PipelineRun to finish: %s", err) + } + + t.Logf("Verifying order of execution") + times := getTaskStartTimes(t, c.TaskRunClient) + vefifyExpectedOrder(t, times) +} + +type runTime struct { + name string + t time.Time +} + +type runTimes []runTime + +func (f runTimes) Len() int { + return len(f) +} + +func (f runTimes) Less(i, j int) bool { + return f[i].t.Before(f[j].t) +} + +func (f runTimes) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +func getTaskStartTimes(t *testing.T, c clientset.TaskRunInterface) runTimes { + taskRuns, err := c.List(metav1.ListOptions{}) + if err != nil { + t.Fatalf("Couldn't get TaskRuns (so that we could check when they executed): %v", err) + } + times := runTimes{} + for _, t := range taskRuns.Items { + times = append(times, runTime{ + name: t.Name, + t: t.Status.StartTime.Time, + }) + } + return times +} + +func vefifyExpectedOrder(t *testing.T, times runTimes) { + if len(times) != 5 { + t.Fatalf("Expected 5 Taskruns to have executed but only got start times for %d Taskruns", len(times)) + } + + sort.Sort(times) + + if !strings.HasPrefix(times[0].name, "dag-pipeline-run-pipeline-task-1") { + t.Errorf("Expected first task to execute first, but %q was first", times[0].name) + } + if !strings.HasPrefix(times[1].name, "dag-pipeline-run-pipeline-task-2") { + t.Errorf("Expected parallel tasks to run second & third, but %q was second", times[1].name) + } + if !strings.HasPrefix(times[2].name, "dag-pipeline-run-pipeline-task-2") { + t.Errorf("Expected parallel tasks to run second & third, but %q was third", times[2].name) + } + if !strings.HasPrefix(times[3].name, "dag-pipeline-run-pipeline-task-3") { + t.Errorf("Expected third task to execute third, but %q was third", times[3].name) + } + if 
!strings.HasPrefix(times[4].name, "dag-pipeline-run-pipeline-task-4") { + t.Errorf("Expected fourth task to execute fourth, but %q was fourth", times[4].name) + } + + // Check that the two tasks that can run in parallel did + parallelDiff := times[2].t.Sub(times[1].t) + if parallelDiff > (time.Second * 5) { + t.Errorf("Expected parallel tasks to execute more or less at the same time, but they were %v apart", parallelDiff) + } +} diff --git a/test/v1alpha1/duplicate_test.go b/test/v1alpha1/duplicate_test.go new file mode 100644 index 00000000000..083dbf5cba1 --- /dev/null +++ b/test/v1alpha1/duplicate_test.go @@ -0,0 +1,77 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "sync" + "testing" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + tb "github.com/tektoncd/pipeline/test/builder" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +// TestDuplicatePodTaskRun creates 10 builds and checks that each of them has only one build pod. 
+func TestDuplicatePodTaskRun(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + var wg sync.WaitGroup + for i := 0; i < 25; i++ { + wg.Add(1) + taskrunName := fmt.Sprintf("duplicate-pod-taskrun-%d", i) + t.Logf("Creating taskrun %q.", taskrunName) + + taskrun := tb.TaskRun(taskrunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskSpec(tb.Step("busybox", + tb.StepCommand("/bin/echo"), + tb.StepArgs("simple"), + )), + )) + if _, err := c.TaskRunClient.Create(taskrun); err != nil { + t.Fatalf("Error creating taskrun: %v", err) + } + go func(t *testing.T) { + defer wg.Done() + + if err := WaitForTaskRunState(c, taskrunName, TaskRunSucceed(taskrunName), "TaskRunDuplicatePodTaskRunFailed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + return + } + + pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s", pipeline.GroupName+pipeline.TaskRunLabelKey, taskrunName), + }) + if err != nil { + t.Errorf("Error getting TaskRun pod list: %v", err) + return + } + if n := len(pods.Items); n != 1 { + t.Errorf("Error matching the number of build pods: expecting 1 pod, got %d", n) + return + } + }(t) + } + wg.Wait() +} diff --git a/test/v1alpha1/embed_test.go b/test/v1alpha1/embed_test.go new file mode 100644 index 00000000000..acd3af16926 --- /dev/null +++ b/test/v1alpha1/embed_test.go @@ -0,0 +1,90 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "testing" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + knativetest "knative.dev/pkg/test" +) + +const ( + embedTaskName = "helloworld" + embedTaskRunName = "helloworld-run" + + // TODO(#127) Currently not reliable to retrieve this output + taskOutput = "do you want to build a snowman" +) + +// TestTaskRun_EmbeddedResource is an integration test that will verify a very simple "hello world" TaskRun can be +// executed with an embedded resource spec. +func TestTaskRun_EmbeddedResource(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + if _, err := c.TaskClient.Create(getEmbeddedTask(namespace, []string{"/bin/sh", "-c", fmt.Sprintf("echo %s", taskOutput)})); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", embedTaskName, err) + } + if _, err := c.TaskRunClient.Create(getEmbeddedTaskRun(namespace)); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", embedTaskRunName, err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to complete", embedTaskRunName, namespace) + if err := WaitForTaskRunState(c, embedTaskRunName, TaskRunSucceed(embedTaskRunName), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", embedTaskRunName, err) + } + + // TODO(#127) Currently we have no reliable access to logs from the TaskRun so we'll 
assume successful + // completion of the TaskRun means the TaskRun did what it was intended. +} + +func getEmbeddedTask(namespace string, args []string) *v1alpha1.Task { + return tb.Task(embedTaskName, namespace, + tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("docs", v1alpha1.PipelineResourceTypeGit)), + tb.Step("ubuntu", + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "cat /workspace/docs/LICENSE"), + ), + tb.Step("busybox", tb.StepCommand(args...)), + )) +} + +func getEmbeddedTaskRun(namespace string) *v1alpha1.TaskRun { + testSpec := &v1alpha1.PipelineResourceSpec{ + Type: v1alpha1.PipelineResourceTypeGit, + Params: []v1alpha1.ResourceParam{{ + Name: "URL", + Value: "https://github.com/knative/docs", + }}, + } + return tb.TaskRun(embedTaskRunName, namespace, + tb.TaskRunSpec( + tb.TaskRunInputs( + tb.TaskRunInputsResource("docs", tb.TaskResourceBindingResourceSpec(testSpec)), + ), + tb.TaskRunTaskRef(embedTaskName))) +} diff --git a/test/v1alpha1/entrypoint_test.go b/test/v1alpha1/entrypoint_test.go new file mode 100644 index 00000000000..9f21ebe8314 --- /dev/null +++ b/test/v1alpha1/entrypoint_test.go @@ -0,0 +1,64 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "testing" + + tb "github.com/tektoncd/pipeline/test/builder" + knativetest "knative.dev/pkg/test" +) + +const ( + epTaskName = "ep-task" + epTaskRunName = "ep-task-run" +) + +// TestEntrypointRunningStepsInOrder is an integration test that will +// verify attempt to the get the entrypoint of a container image +// that doesn't have a cmd defined. In addition to making sure the steps +// are executed in the order specified +func TestEntrypointRunningStepsInOrder(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := tb.Task(epTaskName, namespace, tb.TaskSpec( + tb.Step("ubuntu", tb.StepArgs("-c", "sleep 3 && touch foo")), + tb.Step("ubuntu", tb.StepArgs("-c", "ls", "foo")), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := tb.TaskRun(epTaskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(epTaskName), tb.TaskRunServiceAccountName("default"), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) + if err := WaitForTaskRunState(c, epTaskRunName, TaskRunSucceed(epTaskRunName), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) + } + +} diff --git a/test/v1alpha1/git_checkout_test.go b/test/v1alpha1/git_checkout_test.go new file mode 100644 index 00000000000..c90444f716b --- /dev/null +++ b/test/v1alpha1/git_checkout_test.go @@ -0,0 +1,284 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "strings" + "testing" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +const ( + gitSourceResourceName = "git-source-resource" + gitTestTaskName = "git-check-task" + gitTestPipelineName = "git-check-pipeline" + gitTestPipelineRunName = "git-check-pipeline-run" +) + +// TestGitPipelineRun is an integration test that will verify the source code is either fetched or pulled +// successfully under different revision inputs (branch, commitid, tag, ref) +func TestGitPipelineRun(t *testing.T) { + t.Parallel() + + revisions := []string{"master", "c15aced0e5aaee6456fbe6f7a7e95e0b5b3b2b2f", "c15aced", "release-0.1", "v0.1.0", "refs/pull/347/head"} + + for _, revision := range revisions { + + t.Run(revision, func(t *testing.T) { + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGitPipelineResource(namespace, revision, "true", "", "", "")); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) + } + + t.Logf("Creating Task %s", gitTestTaskName) + if _, err := c.TaskClient.Create(getGitCheckTask(namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", gitTestTaskName, err) + } + + t.Logf("Creating 
Pipeline %s", gitTestPipelineName) + if _, err := c.PipelineClient.Create(getGitCheckPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) + if _, err := c.PipelineRunClient.Create(getGitCheckPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineRunName, err) + } + + if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", gitTestPipelineRunName, err) + t.Fatalf("PipelineRun execution failed") + } + }) + } +} + +// TestGitPipelineRun_Disable_SSLVerify will verify the source code is retrieved even after disabling SSL certificates (sslVerify) +func TestGitPipelineRun_Disable_SSLVerify(t *testing.T) { + t.Parallel() + + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGitPipelineResource(namespace, "master", "false", "", "", "")); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) + } + + t.Logf("Creating Task %s", gitTestTaskName) + if _, err := c.TaskClient.Create(getGitCheckTask(namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", gitTestTaskName, err) + } + + t.Logf("Creating Pipeline %s", gitTestPipelineName) + if _, err := c.PipelineClient.Create(getGitCheckPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) + if _, err := c.PipelineRunClient.Create(getGitCheckPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", 
gitTestPipelineRunName, err) + } + + if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", gitTestPipelineRunName, err) + t.Fatalf("PipelineRun execution failed") + } +} + +// TestGitPipelineRunFail is a test to ensure that the code extraction from github fails as expected when +// an invalid revision is passed on the pipelineresource. +func TestGitPipelineRunFail(t *testing.T) { + t.Parallel() + + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGitPipelineResource(namespace, "Idontexistrabbitmonkeydonkey", "true", "", "", "")); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) + } + + t.Logf("Creating Task %s", gitTestTaskName) + if _, err := c.TaskClient.Create(getGitCheckTask(namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", gitTestTaskName, err) + } + + t.Logf("Creating Pipeline %s", gitTestPipelineName) + if _, err := c.PipelineClient.Create(getGitCheckPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) + if _, err := c.PipelineRunClient.Create(getGitCheckPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineRunName, err) + } + + if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + taskruns, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun list for PipelineRun %s %s", gitTestPipelineRunName, err) + } + for _, 
tr := range taskruns.Items { + if tr.Status.PodName != "" { + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + + for _, stat := range p.Status.ContainerStatuses { + if strings.HasPrefix(stat.Name, "step-git-source-"+gitSourceResourceName) { + if stat.State.Terminated != nil { + req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) + logContent, err := req.Do().Raw() + if err != nil { + t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) + } + // Check for failure messages from fetch and pull in the log file + if strings.Contains(strings.ToLower(string(logContent)), "couldn't find remote ref idontexistrabbitmonkeydonkey") && + strings.Contains(strings.ToLower(string(logContent)), "pathspec 'idontexistrabbitmonkeydonkey' did not match any file(s) known to git") { + t.Logf("Found exepected errors when retrieving non-existent git revision") + } else { + t.Logf("Container `%s` log File: %s", stat.Name, logContent) + t.Fatalf("The git code extraction did not fail as expected. Expected errors not found in log file.") + } + } + } + } + } + } + + } else { + t.Fatalf("PipelineRun succeeded when should have failed") + } +} + +// TestGitPipelineRunFail_HTTPS_PROXY is a test to ensure that the code extraction from github fails as expected when +// an invalid HTTPS_PROXY is passed on the pipelineresource. 
+func TestGitPipelineRunFail_HTTPS_PROXY(t *testing.T) { + t.Parallel() + + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", gitSourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGitPipelineResource(namespace, "master", "true", "", "invalid.https.proxy.com", "")); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", gitSourceResourceName, err) + } + + t.Logf("Creating Task %s", gitTestTaskName) + if _, err := c.TaskClient.Create(getGitCheckTask(namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", gitTestTaskName, err) + } + + t.Logf("Creating Pipeline %s", gitTestPipelineName) + if _, err := c.PipelineClient.Create(getGitCheckPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", gitTestPipelineRunName) + if _, err := c.PipelineRunClient.Create(getGitCheckPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", gitTestPipelineRunName, err) + } + + if err := WaitForPipelineRunState(c, gitTestPipelineRunName, timeout, PipelineRunSucceed(gitTestPipelineRunName), "PipelineRunCompleted"); err != nil { + taskruns, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun list for PipelineRun %s %s", gitTestPipelineRunName, err) + } + for _, tr := range taskruns.Items { + if tr.Status.PodName != "" { + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + + for _, stat := range p.Status.ContainerStatuses { + if strings.HasPrefix(stat.Name, "step-git-source-"+gitSourceResourceName) { + if stat.State.Terminated != nil { + req := 
c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) + logContent, err := req.Do().Raw() + if err != nil { + t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) + } + // Check for failure messages from fetch and pull in the log file + if strings.Contains(strings.ToLower(string(logContent)), "could not resolve proxy: invalid.https.proxy.com") && + strings.Contains(strings.ToLower(string(logContent)), "pathspec 'master' did not match any file(s) known to git") { + t.Logf("Found exepected errors when using non-existent https proxy") + } else { + t.Logf("Container `%s` log File: %s", stat.Name, logContent) + t.Fatalf("The git code extraction did not fail as expected. Expected errors not found in log file.") + } + } + } + } + } + } + + } else { + t.Fatalf("PipelineRun succeeded when should have failed") + } +} + +func getGitPipelineResource(namespace, revision, sslverify, httpproxy, httpsproxy, noproxy string) *v1alpha1.PipelineResource { + return tb.PipelineResource(gitSourceResourceName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/tektoncd/pipeline"), + tb.PipelineResourceSpecParam("Revision", revision), + tb.PipelineResourceSpecParam("sslVerify", sslverify), + tb.PipelineResourceSpecParam("httpProxy", httpproxy), + tb.PipelineResourceSpecParam("httpsProxy", httpsproxy), + tb.PipelineResourceSpecParam("noProxy", noproxy), + )) +} + +func getGitCheckTask(namespace string) *v1alpha1.Task { + return tb.Task(gitTestTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)), + tb.Step("alpine/git", tb.StepArgs("--git-dir=/workspace/gitsource/.git", "show")), + )) +} + +func getGitCheckPipeline(namespace string) *v1alpha1.Pipeline { + return tb.Pipeline(gitTestPipelineName, namespace, tb.PipelineSpec( + 
tb.PipelineDeclaredResource("git-repo", "git"), + tb.PipelineTask("git-check", gitTestTaskName, + tb.PipelineTaskInputResource("gitsource", "git-repo"), + ), + )) +} + +func getGitCheckPipelineRun(namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(gitTestPipelineRunName, namespace, tb.PipelineRunSpec( + gitTestPipelineName, + tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef(gitSourceResourceName)), + )) +} diff --git a/test/v1alpha1/helm_task_test.go b/test/v1alpha1/helm_task_test.go new file mode 100644 index 00000000000..740dc4dd132 --- /dev/null +++ b/test/v1alpha1/helm_task_test.go @@ -0,0 +1,359 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/names" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1beta1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + knativetest "knative.dev/pkg/test" +) + +const ( + sourceResourceName = "go-helloworld-git" + sourceImageName = "go-helloworld-image" + createImageTaskName = "create-image-task" + helmDeployTaskName = "helm-deploy-task" + helmDeployPipelineName = "helm-deploy-pipeline" + helmDeployPipelineRunName = "helm-deploy-pipeline-run" + helmDeployServiceName = "gohelloworld-chart" +) + +var ( + clusterRoleBindings [3]*rbacv1.ClusterRoleBinding + tillerServiceAccount *corev1.ServiceAccount +) + +// TestHelmDeployPipelineRun is an integration test that will verify a pipeline build an image +// and then using helm to deploy it +func TestHelmDeployPipelineRun(t *testing.T) { + repo := ensureDockerRepo(t) + c, namespace := setup(t) + setupClusterBindingForHelm(c, t, namespace) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", sourceResourceName) + if _, err := c.PipelineResourceClient.Create(getGoHelloworldGitResource(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", sourceResourceName, err) + } + + t.Logf("Creating Image PipelineResource %s", sourceImageName) + if _, err := c.PipelineResourceClient.Create(getHelmImageResource(namespace, repo)); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", sourceImageName, err) + } + + t.Logf("Creating Task %s", createImageTaskName) + if _, err := c.TaskClient.Create(getCreateImageTask(namespace)); err != nil { + t.Fatalf("Failed to create 
Task `%s`: %s", createImageTaskName, err) + } + + t.Logf("Creating Task %s", helmDeployTaskName) + if _, err := c.TaskClient.Create(getHelmDeployTask(namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", helmDeployTaskName, err) + } + + t.Logf("Creating Pipeline %s", helmDeployPipelineName) + if _, err := c.PipelineClient.Create(getHelmDeployPipeline(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineName, err) + } + + t.Logf("Creating PipelineRun %s", helmDeployPipelineRunName) + if _, err := c.PipelineRunClient.Create(getHelmDeployPipelineRun(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", helmDeployPipelineRunName, err) + } + + // Verify status of PipelineRun (wait for it) + if err := WaitForPipelineRunState(c, helmDeployPipelineRunName, timeout, PipelineRunSucceed(helmDeployPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", helmDeployPipelineRunName, err) + t.Fatalf("PipelineRun execution failed; helm may or may not have been installed :(") + } + + t.Log("Waiting for service to get external IP") + var serviceIP string + if err := WaitForServiceExternalIPState(c, namespace, helmDeployServiceName, func(svc *corev1.Service) (bool, error) { + ingress := svc.Status.LoadBalancer.Ingress + if ingress != nil { + if len(ingress) > 0 { + serviceIP = ingress[0].IP + return true, nil + } + } + return false, nil + }, "ServiceExternalIPisReady"); err != nil { + t.Errorf("Error waiting for Service %s to get an external IP: %s", helmDeployServiceName, err) + } + + // cleanup task to remove helm from cluster, will not fail the test if it fails, just log + knativetest.CleanupOnInterrupt(func() { helmCleanup(c, t, namespace) }, t.Logf) + defer helmCleanup(c, t, namespace) + + if serviceIP != "" { + t.Log("Polling service with external IP") + waitErr := wait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (bool, error) { 
+ resp, err := http.Get(fmt.Sprintf("http://%s:8080", serviceIP)) + if err != nil { + return false, nil + } + if resp != nil && resp.StatusCode != http.StatusOK { + return true, fmt.Errorf("expected 200 but received %d response code from service at http://%s:8080", resp.StatusCode, serviceIP) + } + return true, nil + }) + if waitErr != nil { + t.Errorf("Error from pinging service IP %s : %s", serviceIP, waitErr) + } + + } else { + t.Errorf("Service IP is empty.") + } +} + +func getGoHelloworldGitResource(namespace string) *v1alpha1.PipelineResource { + return tb.PipelineResource(sourceResourceName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("url", "https://github.com/tektoncd/pipeline"), + )) +} + +func getHelmImageResource(namespace, dockerRepo string) *v1alpha1.PipelineResource { + imageName := fmt.Sprintf("%s/%s", dockerRepo, names.SimpleNameGenerator.RestrictLengthWithRandomSuffix(sourceImageName)) + + return tb.PipelineResource(sourceImageName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeImage, + tb.PipelineResourceSpecParam("url", imageName), + )) +} + +func getCreateImageTask(namespace string) *v1alpha1.Task { + return tb.Task(createImageTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)), + tb.TaskOutputs(tb.OutputsResource("builtimage", v1alpha1.PipelineResourceTypeImage)), + tb.Step("gcr.io/kaniko-project/executor:v0.17.1", tb.StepName("kaniko"), tb.StepArgs( + "--dockerfile=/workspace/gitsource/test/gohelloworld/Dockerfile", + "--context=/workspace/gitsource/", + "--destination=$(outputs.resources.builtimage.url)", + )), + )) +} + +func getHelmDeployTask(namespace string) *v1alpha1.Task { + return tb.Task(helmDeployTaskName, namespace, tb.TaskSpec( + tb.TaskInputs( + tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit), + tb.InputsResource("image", v1alpha1.PipelineResourceTypeImage), + 
tb.InputsParamSpec("pathToHelmCharts", v1alpha1.ParamTypeString, tb.ParamSpecDescription("Path to the helm charts")), + tb.InputsParamSpec("chartname", v1alpha1.ParamTypeString, tb.ParamSpecDefault("")), + ), + tb.Step("alpine/helm:2.14.0", tb.StepArgs("init", "--wait")), + tb.Step("alpine/helm:2.14.0", tb.StepArgs( + "install", + "--debug", + "--name=$(inputs.params.chartname)", + "$(inputs.params.pathToHelmCharts)", + "--set", + "image.repository=$(inputs.resources.image.url)", + )), + )) +} + +func getHelmDeployPipeline(namespace string) *v1alpha1.Pipeline { + return tb.Pipeline(helmDeployPipelineName, namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("git-repo", "git"), + tb.PipelineDeclaredResource("the-image", "image"), + tb.PipelineParamSpec("chartname", v1alpha1.ParamTypeString), + tb.PipelineTask("push-image", createImageTaskName, + tb.PipelineTaskInputResource("gitsource", "git-repo"), + tb.PipelineTaskOutputResource("builtimage", "the-image"), + ), + tb.PipelineTask("helm-deploy", helmDeployTaskName, + tb.PipelineTaskInputResource("gitsource", "git-repo"), + tb.PipelineTaskInputResource("image", "the-image", tb.From("push-image")), + tb.PipelineTaskParam("pathToHelmCharts", "/workspace/gitsource/test/gohelloworld/gohelloworld-chart"), + tb.PipelineTaskParam("chartname", "$(params.chartname)"), + ), + )) +} + +func getHelmDeployPipelineRun(namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(helmDeployPipelineRunName, namespace, tb.PipelineRunSpec( + helmDeployPipelineName, + tb.PipelineRunParam("chartname", "v1alpha1-gohelloworld"), + tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef(sourceResourceName)), + tb.PipelineRunResourceBinding("the-image", tb.PipelineResourceBindingRef(sourceImageName)), + )) +} + +func setupClusterBindingForHelm(c *clients, t *testing.T, namespace string) { + tillerServiceAccount = &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Name: "tiller", + Namespace: "kube-system", + 
}, + } + + t.Logf("Creating tiller service account") + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts("kube-system").Create(tillerServiceAccount); err != nil { + if !errors.IsAlreadyExists(err) { + t.Fatalf("Failed to create default Service account for Helm %s", err) + } + } + + clusterRoleBindings[0] = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "tiller", + Namespace: "kube-system", + }}, + } + + clusterRoleBindings[1] = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("default-tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "default", + Namespace: namespace, + }}, + } + + clusterRoleBindings[2] = &rbacv1.ClusterRoleBinding{ + ObjectMeta: metav1.ObjectMeta{ + Name: names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("default-tiller"), + }, + RoleRef: rbacv1.RoleRef{ + APIGroup: "rbac.authorization.k8s.io", + Kind: "ClusterRole", + Name: "cluster-admin", + }, + Subjects: []rbacv1.Subject{{ + Kind: "ServiceAccount", + Name: "default", + Namespace: "kube-system", + }}, + } + + for _, crb := range clusterRoleBindings { + t.Logf("Creating Cluster Role binding %s for helm", crb.Name) + if _, err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Create(crb); err != nil { + t.Fatalf("Failed to create cluster role binding for Helm %s", err) + } + } +} + +func helmCleanup(c *clients, t *testing.T, namespace string) { + t.Logf("Cleaning up helm from cluster...") + + removeAllHelmReleases(c, t, namespace) + removeHelmFromCluster(c, t, namespace) + + 
t.Logf("Deleting tiller service account") + if err := c.KubeClient.Kube.CoreV1().ServiceAccounts("kube-system").Delete("tiller", &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete default Service account for Helm %s", err) + } + + for _, crb := range clusterRoleBindings { + t.Logf("Deleting Cluster Role binding %s for helm", crb.Name) + if err := c.KubeClient.Kube.RbacV1beta1().ClusterRoleBindings().Delete(crb.Name, &metav1.DeleteOptions{}); err != nil { + t.Fatalf("Failed to delete cluster role binding for Helm %s", err) + } + } +} + +func removeAllHelmReleases(c *clients, t *testing.T, namespace string) { + helmRemoveAllTaskName := "helm-remove-all-task" + helmRemoveAllTask := tb.Task(helmRemoveAllTaskName, namespace, tb.TaskSpec( + tb.Step("alpine/helm:2.14.0", tb.StepName("helm-remove-all"), tb.StepCommand("/bin/sh"), + tb.StepArgs("-c", "helm ls --short --all | xargs -n1 helm del --purge"), + ), + )) + + helmRemoveAllTaskRunName := "helm-remove-all-taskrun" + helmRemoveAllTaskRun := tb.TaskRun(helmRemoveAllTaskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(helmRemoveAllTaskName), + )) + + t.Logf("Creating Task %s", helmRemoveAllTaskName) + if _, err := c.TaskClient.Create(helmRemoveAllTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", helmRemoveAllTaskName, err) + } + + t.Logf("Creating TaskRun %s", helmRemoveAllTaskRunName) + if _, err := c.TaskRunClient.Create(helmRemoveAllTaskRun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", helmRemoveAllTaskRunName, err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to complete", helmRemoveAllTaskRunName, namespace) + if err := WaitForTaskRunState(c, helmRemoveAllTaskRunName, TaskRunSucceed(helmRemoveAllTaskRunName), "TaskRunSuccess"); err != nil { + t.Logf("TaskRun %s failed to finish: %s", helmRemoveAllTaskRunName, err) + } +} + +func removeHelmFromCluster(c *clients, t *testing.T, namespace string) { + helmResetTaskName := "helm-reset-task" + 
helmResetTask := tb.Task(helmResetTaskName, namespace, tb.TaskSpec( + tb.Step("alpine/helm:2.14.0", tb.StepArgs("reset", "--force")), + )) + + helmResetTaskRunName := "helm-reset-taskrun" + helmResetTaskRun := tb.TaskRun(helmResetTaskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(helmResetTaskName), + )) + + t.Logf("Creating Task %s", helmResetTaskName) + if _, err := c.TaskClient.Create(helmResetTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", helmResetTaskName, err) + } + + t.Logf("Creating TaskRun %s", helmResetTaskRunName) + if _, err := c.TaskRunClient.Create(helmResetTaskRun); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", helmResetTaskRunName, err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to complete", helmResetTaskRunName, namespace) + if err := WaitForTaskRunState(c, helmResetTaskRunName, TaskRunSucceed(helmResetTaskRunName), "TaskRunSuccess"); err != nil { + t.Logf("TaskRun %s failed to finish: %s", helmResetTaskRunName, err) + } +} diff --git a/test/v1alpha1/init_test.go b/test/v1alpha1/init_test.go new file mode 100644 index 00000000000..efac58ae8ee --- /dev/null +++ b/test/v1alpha1/init_test.go @@ -0,0 +1,213 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains initialization logic for the tests, such as special magical global state that needs to be initialized. 
+ +package test + +import ( + "flag" + "fmt" + "os" + "strings" + "sync" + "testing" + + "github.com/ghodss/yaml" + "github.com/tektoncd/pipeline/pkg/names" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + knativetest "knative.dev/pkg/test" + "knative.dev/pkg/test/logging" + + // Mysteriously by k8s libs, or they fail to create `KubeClient`s from config. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + // Mysteriously by k8s libs, or they fail to create `KubeClient`s when using oidc authentication. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/345 + _ "k8s.io/client-go/plugin/pkg/client/auth/oidc" +) + +var initMetrics sync.Once + +func setup(t *testing.T, fn ...func(*testing.T, *clients, string)) (*clients, string) { + t.Helper() + namespace := names.SimpleNameGenerator.RestrictLengthWithRandomSuffix("arendelle") + + initializeLogsAndMetrics(t) + + c := newClients(t, knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace) + createNamespace(t, namespace, c.KubeClient) + verifyServiceAccountExistence(t, namespace, c.KubeClient) + + for _, f := range fn { + f(t, c, namespace) + } + + return c, namespace +} + +func header(logf logging.FormatLogger, text string) { + left := "### " + right := " ###" + txt := left + text + right + bar := strings.Repeat("#", len(txt)) + logf(bar) + logf(txt) + logf(bar) +} + +func tearDown(t *testing.T, cs *clients, namespace string) { + t.Helper() + if cs.KubeClient == nil { + return + } + if t.Failed() { + header(t.Logf, fmt.Sprintf("Dumping objects from %s", namespace)) + bs, err := getCRDYaml(cs, namespace) + if err != nil { + t.Error(err) + } else { + t.Log(string(bs)) + } + header(t.Logf, fmt.Sprintf("Dumping logs from Pods in 
the %s", namespace)) + taskruns, err := cs.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun list %s", err) + } + for _, tr := range taskruns.Items { + if tr.Status.PodName != "" { + CollectPodLogs(cs, tr.Status.PodName, namespace, t.Logf) + } + } + } + + if os.Getenv("TEST_KEEP_NAMESPACES") == "" { + t.Logf("Deleting namespace %s", namespace) + if err := cs.KubeClient.Kube.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil { + t.Errorf("Failed to delete namespace %s: %s", namespace, err) + } + } +} + +func initializeLogsAndMetrics(t *testing.T) { + initMetrics.Do(func() { + flag.Parse() + flag.Set("alsologtostderr", "true") + logging.InitializeLogger() + + //if knativetest.Flags.EmitMetrics { + logging.InitializeMetricExporter(t.Name()) + //} + }) +} + +func createNamespace(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { + t.Logf("Create namespace %s to deploy to", namespace) + if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(&corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + }); err != nil { + t.Fatalf("Failed to create namespace %s for tests: %s", namespace, err) + } +} + +func verifyServiceAccountExistence(t *testing.T, namespace string, kubeClient *knativetest.KubeClient) { + defaultSA := "default" + t.Logf("Verify SA %q is created in namespace %q", defaultSA, namespace) + + if err := wait.PollImmediate(interval, timeout, func() (bool, error) { + _, err := kubeClient.Kube.CoreV1().ServiceAccounts(namespace).Get(defaultSA, metav1.GetOptions{}) + if err != nil && errors.IsNotFound(err) { + return false, nil + } + return true, err + }); err != nil { + t.Fatalf("Failed to get SA %q in namespace %q for tests: %s", defaultSA, namespace, err) + } +} + +// TestMain initializes anything global needed by the tests. 
Right now this is just log and metric +// setup since the log and metric libs we're using use global state :( +func TestMain(m *testing.M) { + c := m.Run() + fmt.Fprintf(os.Stderr, "Using kubeconfig at `%s` with cluster `%s`\n", knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster) + os.Exit(c) +} + +func getCRDYaml(cs *clients, ns string) ([]byte, error) { + var output []byte + printOrAdd := func(i interface{}) { + bs, err := yaml.Marshal(i) + if err != nil { + return + } + output = append(output, []byte("\n---\n")...) + output = append(output, bs...) + } + + ps, err := cs.PipelineClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pipeline: %w", err) + } + for _, i := range ps.Items { + printOrAdd(i) + } + + prs, err := cs.PipelineResourceClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pipelinerun resource: %w", err) + } + for _, i := range prs.Items { + printOrAdd(i) + } + + prrs, err := cs.PipelineRunClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pipelinerun: %w", err) + } + for _, i := range prrs.Items { + printOrAdd(i) + } + + ts, err := cs.TaskClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get tasks: %w", err) + } + for _, i := range ts.Items { + printOrAdd(i) + } + trs, err := cs.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get taskrun: %w", err) + } + for _, i := range trs.Items { + printOrAdd(i) + } + + pods, err := cs.KubeClient.Kube.CoreV1().Pods(ns).List(metav1.ListOptions{}) + if err != nil { + return nil, fmt.Errorf("could not get pods: %w", err) + } + for _, i := range pods.Items { + printOrAdd(i) + } + + return output, nil +} diff --git a/test/v1alpha1/kaniko_task_test.go b/test/v1alpha1/kaniko_task_test.go new file mode 100644 index 00000000000..cbe9a608ff4 --- /dev/null +++ b/test/v1alpha1/kaniko_task_test.go @@ -0,0 +1,209 @@ 
+// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +const ( + kanikoTaskName = "kanikotask" + kanikoTaskRunName = "kanikotask-run" + kanikoGitResourceName = "go-example-git" + kanikoImageResourceName = "go-example-image" + // This is a random revision chosen on 10/11/2019 + revision = "1c9d566ecd13535f93789595740f20932f655905" +) + +var ( + skipRootUserTests = "false" +) + +// TestTaskRun is an integration test that will verify a TaskRun using kaniko +func TestKanikoTaskRun(t *testing.T) { + if skipRootUserTests == "true" { + t.Skip("Skip test as skipRootUserTests set to true") + } + + c, namespace := setup(t, withRegistry) + t.Parallel() + + repo := fmt.Sprintf("registry.%s:5000/kanikotasktest", namespace) + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Git PipelineResource %s", kanikoGitResourceName) + if _, err := c.PipelineResourceClient.Create(getGitResource(namespace)); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) + } + + t.Logf("Creating Image 
PipelineResource %s", repo) + if _, err := c.PipelineResourceClient.Create(getImageResource(namespace, repo)); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) + } + + t.Logf("Creating Task %s", kanikoTaskName) + if _, err := c.TaskClient.Create(getTask(repo, namespace)); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", kanikoTaskName, err) + } + + t.Logf("Creating TaskRun %s", kanikoTaskRunName) + if _, err := c.TaskRunClient.Create(getTaskRun(namespace)); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", kanikoTaskRunName, err) + } + + // Verify status of TaskRun (wait for it) + + if err := WaitForTaskRunState(c, kanikoTaskRunName, Succeed(kanikoTaskRunName), "TaskRunCompleted"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", kanikoTaskRunName, err) + } + + tr, err := c.TaskRunClient.Get(kanikoTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + digest := "" + commit := "" + for _, rr := range tr.Status.ResourcesResult { + switch rr.Key { + case "digest": + digest = rr.Value + case "commit": + commit = rr.Value + } + } + if digest == "" { + t.Errorf("Digest not found in TaskRun.Status: %v", tr.Status) + } + if commit == "" { + t.Errorf("Commit not found in TaskRun.Status: %v", tr.Status) + } + + if revision != commit { + t.Fatalf("Expected remote commit to match local revision: %s, %s", commit, revision) + } + + // match the local digest, which is first capture group against the remote image + remoteDigest, err := getRemoteDigest(t, c, namespace, repo) + if err != nil { + t.Fatalf("Expected to get digest for remote image %s: %v", repo, err) + } + if d := cmp.Diff(digest, remoteDigest); d != "" { + t.Fatalf("Expected local digest %s to match remote digest %s: %s", digest, remoteDigest, d) + } +} + +func getGitResource(namespace string) *v1alpha1.PipelineResource { + return tb.PipelineResource(kanikoGitResourceName, 
namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/GoogleContainerTools/kaniko"), + tb.PipelineResourceSpecParam("Revision", revision), + )) +} + +func getImageResource(namespace, repo string) *v1alpha1.PipelineResource { + return tb.PipelineResource(kanikoImageResourceName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeImage, + tb.PipelineResourceSpecParam("url", repo), + )) +} + +func getTask(repo, namespace string) *v1alpha1.Task { + root := int64(0) + taskSpecOps := []tb.TaskSpecOp{ + tb.TaskInputs(tb.InputsResource("gitsource", v1alpha1.PipelineResourceTypeGit)), + tb.TaskOutputs(tb.OutputsResource("builtImage", v1alpha1.PipelineResourceTypeImage)), + } + stepOps := []tb.StepOp{ + tb.StepName("kaniko"), + tb.StepArgs( + "--dockerfile=/workspace/gitsource/integration/dockerfiles/Dockerfile_test_label", + fmt.Sprintf("--destination=%s", repo), + "--context=/workspace/gitsource", + "--oci-layout-path=/workspace/output/builtImage", + "--insecure", + "--insecure-pull", + "--insecure-registry=registry."+namespace+":5000/", + ), + tb.StepSecurityContext(&corev1.SecurityContext{RunAsUser: &root}), + } + step := tb.Step("gcr.io/kaniko-project/executor:v0.17.1", stepOps...) 
+ taskSpecOps = append(taskSpecOps, step) + sidecar := tb.Sidecar("registry", "registry") + taskSpecOps = append(taskSpecOps, sidecar) + + return tb.Task(kanikoTaskName, namespace, tb.TaskSpec(taskSpecOps...)) +} + +func getTaskRun(namespace string) *v1alpha1.TaskRun { + return tb.TaskRun(kanikoTaskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(kanikoTaskName), + tb.TaskRunTimeout(2*time.Minute), + tb.TaskRunInputs(tb.TaskRunInputsResource("gitsource", tb.TaskResourceBindingRef(kanikoGitResourceName))), + tb.TaskRunOutputs(tb.TaskRunOutputsResource("builtImage", tb.TaskResourceBindingRef(kanikoImageResourceName))), + )) +} + +// getRemoteDigest starts a pod to query the registry from the namespace itself, using skopeo (and jq). +// The reason we have to do that is because the image is pushed on a local registry that is not exposed +// to the "outside" of the test, this means it can be query by the test itself. It can only be query from +// a pod in the namespace. skopeo is able to do that query and we use jq to extract the digest from its +// output. The image used for this pod is build in the tektoncd/plumbing repository. 
+func getRemoteDigest(t *testing.T, c *clients, namespace, image string) (string, error) { + t.Helper() + podName := "skopeo-jq" + if _, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Create(&corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: podName, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "skopeo", + Image: "gcr.io/tekton-releases/dogfooding/skopeo:latest", + Command: []string{"/bin/sh", "-c"}, + Args: []string{"skopeo inspect --tls-verify=false docker://" + image + ":latest| jq '.Digest'"}, + }}, + RestartPolicy: corev1.RestartPolicyNever, + }, + }); err != nil { + t.Fatalf("Failed to create the skopeo-jq pod: %v", err) + } + if err := WaitForPodState(c, podName, namespace, func(pod *corev1.Pod) (bool, error) { + return pod.Status.Phase == "Succeeded" || pod.Status.Phase == "Failed", nil + }, "PodContainersTerminated"); err != nil { + t.Fatalf("Error waiting for Pod %q to terminate: %v", podName, err) + } + logs, err := getContainerLogsFromPod(c.KubeClient.Kube, podName, "skopeo", namespace) + if err != nil { + t.Fatalf("Could not get logs for pod %s: %s", podName, err) + } + return strings.TrimSpace(strings.ReplaceAll(logs, "\"", "")), nil +} diff --git a/test/v1alpha1/ko_test.go b/test/v1alpha1/ko_test.go new file mode 100644 index 00000000000..8fd393fd600 --- /dev/null +++ b/test/v1alpha1/ko_test.go @@ -0,0 +1,53 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "errors" + "fmt" + "os" + "testing" +) + +var ( + // Wether missing KO_DOCKER_REPO environment variable should be fatal or not + missingKoFatal = "true" +) + +func ensureDockerRepo(t *testing.T) string { + repo, err := getDockerRepo() + if err != nil { + if missingKoFatal == "false" { + t.Skip("KO_DOCKER_REPO env variable is required") + } + t.Fatal("KO_DOCKER_REPO env variable is required") + } + return repo +} + +func getDockerRepo() (string, error) { + // according to knative/test-infra readme (https://github.com/knative/test-infra/blob/13055d769cc5e1756e605fcb3bcc1c25376699f1/scripts/README.md) + // the KO_DOCKER_REPO will be set with according to the project where the cluster is created + // it is used here to dynamically get the docker registry to push the image to + dockerRepo := os.Getenv("KO_DOCKER_REPO") + if dockerRepo == "" { + return "", errors.New("KO_DOCKER_REPO env variable is required") + } + return fmt.Sprintf("%s/kanikotasktest", dockerRepo), nil +} diff --git a/test/v1alpha1/pipelinerun_test.go b/test/v1alpha1/pipelinerun_test.go new file mode 100644 index 00000000000..575c67996ae --- /dev/null +++ b/test/v1alpha1/pipelinerun_test.go @@ -0,0 +1,558 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "encoding/base64" + "fmt" + "strings" + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/artifacts" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/apis" + knativetest "knative.dev/pkg/test" +) + +var ( + pipelineName = "pipeline" + pipelineRunName = "pipelinerun" + secretName = "secret" + saName = "service-account" + taskName = "task" + task1Name = "task1" + cond1Name = "cond-1" + pipelineRunTimeout = 10 * time.Minute +) + +func TestPipelineRun(t *testing.T) { + t.Parallel() + type tests struct { + name string + testSetup func(t *testing.T, c *clients, namespace string, index int) + expectedTaskRuns []string + expectedNumberOfEvents int + pipelineRunFunc func(int, string) *v1alpha1.PipelineRun + } + + tds := []tests{{ + name: "fan-in and fan-out", + testSetup: func(t *testing.T, c *clients, namespace string, index int) { + t.Helper() + for _, task := range getFanInFanOutTasks(namespace) { + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) + } + } + + for _, res := range getFanInFanOutGitResources(namespace) { + if _, err := c.PipelineResourceClient.Create(res); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", kanikoGitResourceName, err) + } + } + + if _, err := c.PipelineClient.Create(getFanInFanOutPipeline(index, namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + } + }, + pipelineRunFunc: getFanInFanOutPipelineRun, + expectedTaskRuns: []string{"create-file-kritis", "create-fan-out-1", "create-fan-out-2", "check-fan-in"}, + // 1 from PipelineRun and 4 from Tasks defined in pipelinerun 
+ expectedNumberOfEvents: 5, + }, { + name: "service account propagation and pipeline param", + testSetup: func(t *testing.T, c *clients, namespace string, index int) { + t.Helper() + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getPipelineRunSecret(index, namespace)); err != nil { + t.Fatalf("Failed to create secret `%s`: %s", getName(secretName, index), err) + } + + if _, err := c.KubeClient.Kube.CoreV1().ServiceAccounts(namespace).Create(getPipelineRunServiceAccount(index, namespace)); err != nil { + t.Fatalf("Failed to create SA `%s`: %s", getName(saName, index), err) + } + + task := tb.Task(getName(taskName, index), namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsParamSpec("path", v1alpha1.ParamTypeString), + tb.InputsParamSpec("dest", v1alpha1.ParamTypeString)), + // Reference build: https://github.com/knative/build/tree/master/test/docker-basic + tb.Step("quay.io/rhpipeline/skopeo:alpine", tb.StepName("config-docker"), + tb.StepCommand("skopeo"), + tb.StepArgs("copy", "$(inputs.params.path)", "$(inputs.params.dest)"), + ), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + } + + if _, err := c.PipelineClient.Create(getHelloWorldPipelineWithSingularTask(index, namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + } + }, + expectedTaskRuns: []string{task1Name}, + // 1 from PipelineRun and 1 from Tasks defined in pipelinerun + expectedNumberOfEvents: 2, + pipelineRunFunc: getHelloWorldPipelineRun, + }, { + name: "pipeline succeeds when task skipped due to failed condition", + testSetup: func(t *testing.T, c *clients, namespace string, index int) { + t.Helper() + cond := getFailingCondition(namespace) + if _, err := c.ConditionClient.Create(cond); err != nil { + t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) + } + + task := tb.Task(getName(taskName, index), namespace, 
tb.TaskSpec( + tb.Step("ubuntu", + tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo hello, world"), + ), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + } + if _, err := c.PipelineClient.Create(getPipelineWithFailingCondition(index, namespace)); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + } + }, + expectedTaskRuns: []string{}, + // 1 from PipelineRun; 0 from taskrun since it should not be executed due to condition failing + expectedNumberOfEvents: 1, + pipelineRunFunc: getConditionalPipelineRun, + }} + + for i, td := range tds { + t.Run(td.name, func(t *testing.T) { + td := td + t.Parallel() + c, namespace := setup(t) + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) + td.testSetup(t, c, namespace, i) + + prName := fmt.Sprintf("%s%d", pipelineRunName, i) + pipelineRun, err := c.PipelineRunClient.Create(td.pipelineRunFunc(i, namespace)) + if err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) + } + + t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace) + if err := WaitForPipelineRunState(c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to finish: %s", prName, err) + } + + t.Logf("Making sure the expected TaskRuns %s were created", td.expectedTaskRuns) + actualTaskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", prName)}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", prName, err) + } + expectedTaskRunNames := []string{} + for _, runName := range td.expectedTaskRuns { + taskRunName := strings.Join([]string{prName, runName}, "-") + 
// check the actual task name starting with prName+runName with a random suffix + for _, actualTaskRunItem := range actualTaskrunList.Items { + if strings.HasPrefix(actualTaskRunItem.Name, taskRunName) { + taskRunName = actualTaskRunItem.Name + } + } + expectedTaskRunNames = append(expectedTaskRunNames, taskRunName) + r, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + if !r.Status.GetCondition(apis.ConditionSucceeded).IsTrue() { + t.Fatalf("Expected TaskRun %s to have succeeded but Status is %v", taskRunName, r.Status) + } + + t.Logf("Checking that labels were propagated correctly for TaskRun %s", r.Name) + checkLabelPropagation(t, c, namespace, prName, r) + t.Logf("Checking that annotations were propagated correctly for TaskRun %s", r.Name) + checkAnnotationPropagation(t, c, namespace, prName, r) + } + + matchKinds := map[string][]string{"PipelineRun": {prName}, "TaskRun": expectedTaskRunNames} + + t.Logf("Making sure %d events were created from taskrun and pipelinerun with kinds %v", td.expectedNumberOfEvents, matchKinds) + + events, err := collectMatchingEvents(c.KubeClient, namespace, matchKinds, "Succeeded") + if err != nil { + t.Fatalf("Failed to collect matching events: %q", err) + } + if len(events) != td.expectedNumberOfEvents { + t.Fatalf("Expected %d number of successful events from pipelinerun and taskrun but got %d; list of received events : %#v", td.expectedNumberOfEvents, len(events), events) + } + + // Wait for up to 10 minutes and retry every second to check if + // the PersistentVolumeClaims has the DeletionTimestamp + if err := wait.PollImmediate(interval, timeout, func() (bool, error) { + // Check to make sure the PipelineRun's artifact storage PVC has been "deleted" at the end of the run. 
+ pvc, errWait := c.KubeClient.Kube.CoreV1().PersistentVolumeClaims(namespace).Get(artifacts.GetPVCName(pipelineRun), metav1.GetOptions{}) + if errWait != nil && !errors.IsNotFound(errWait) { + return true, fmt.Errorf("error looking up PVC %s for PipelineRun %s: %s", artifacts.GetPVCName(pipelineRun), prName, errWait) + } + // If we are not found then we are okay since it got cleaned up + if errors.IsNotFound(errWait) { + return true, nil + } + return pvc.DeletionTimestamp != nil, nil + }); err != nil { + t.Fatalf("Error while waiting for the PVC to be set as deleted: %s: %s: %s", artifacts.GetPVCName(pipelineRun), err, prName) + } + t.Logf("Successfully finished test %q", td.name) + }) + } +} + +func getHelloWorldPipelineWithSingularTask(suffix int, namespace string) *v1alpha1.Pipeline { + return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( + tb.PipelineParamSpec("path", v1alpha1.ParamTypeString), + tb.PipelineParamSpec("dest", v1alpha1.ParamTypeString), + tb.PipelineTask(task1Name, getName(taskName, suffix), + tb.PipelineTaskParam("path", "$(params.path)"), + tb.PipelineTaskParam("dest", "$(params.dest)")), + )) +} + +func getFanInFanOutTasks(namespace string) []*v1alpha1.Task { + inWorkspaceResource := tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit) + outWorkspaceResource := tb.OutputsResource("workspace", v1alpha1.PipelineResourceTypeGit) + return []*v1alpha1.Task{ + tb.Task("create-file", namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, + tb.ResourceTargetPath("brandnewspace"), + )), + tb.TaskOutputs(outWorkspaceResource), + tb.Step("ubuntu", tb.StepName("write-data-task-0-step-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo stuff > $(outputs.resources.workspace.path)/stuff"), + ), + tb.Step("ubuntu", tb.StepName("write-data-task-0-step-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo other > $(outputs.resources.workspace.path)/other"), + 
), + )), + tb.Task("check-create-files-exists", namespace, tb.TaskSpec( + tb.TaskInputs(inWorkspaceResource), + tb.TaskOutputs(outWorkspaceResource), + tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ stuff == $(cat $(inputs.resources.workspace.path)/stuff) ]]"), + ), + tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo something > $(outputs.resources.workspace.path)/something"), + ), + )), + tb.Task("check-create-files-exists-2", namespace, tb.TaskSpec( + tb.TaskInputs(inWorkspaceResource), + tb.TaskOutputs(outWorkspaceResource), + tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ other == $(cat $(inputs.resources.workspace.path)/other) ]]"), + ), + tb.Step("ubuntu", tb.StepName("write-data-task-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "echo else > $(outputs.resources.workspace.path)/else"), + ), + )), + tb.Task("read-files", namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource("workspace", v1alpha1.PipelineResourceTypeGit, + tb.ResourceTargetPath("readingspace"), + )), + tb.Step("ubuntu", tb.StepName("read-from-task-0"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ something == $(cat $(inputs.resources.workspace.path)/something) ]]"), + ), + tb.Step("ubuntu", tb.StepName("read-from-task-1"), tb.StepCommand("/bin/bash"), + tb.StepArgs("-c", "[[ else == $(cat $(inputs.resources.workspace.path)/else) ]]"), + ), + )), + } +} + +func getFanInFanOutPipeline(suffix int, namespace string) *v1alpha1.Pipeline { + outGitResource := tb.PipelineTaskOutputResource("workspace", "git-repo") + + return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("git-repo", "git"), + tb.PipelineTask("create-file-kritis", "create-file", + tb.PipelineTaskInputResource("workspace", "git-repo"), + outGitResource, + ), + tb.PipelineTask("create-fan-out-1", 
"check-create-files-exists", + tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), + outGitResource, + ), + tb.PipelineTask("create-fan-out-2", "check-create-files-exists-2", + tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-file-kritis")), + outGitResource, + ), + tb.PipelineTask("check-fan-in", "read-files", + tb.PipelineTaskInputResource("workspace", "git-repo", tb.From("create-fan-out-2", "create-fan-out-1")), + ), + )) +} + +func getFanInFanOutGitResources(namespace string) []*v1alpha1.PipelineResource { + return []*v1alpha1.PipelineResource{ + tb.PipelineResource("kritis-resource-git", namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/grafeas/kritis"), + tb.PipelineResourceSpecParam("Revision", "master"), + )), + } +} + +func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceAccount { + return &corev1.ServiceAccount{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: getName(saName, suffix), + }, + Secrets: []corev1.ObjectReference{{ + Name: getName(secretName, suffix), + }}, + } +} +func getFanInFanOutPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, + tb.PipelineRunSpec(getName(pipelineName, suffix), + tb.PipelineRunResourceBinding("git-repo", tb.PipelineResourceBindingRef("kritis-resource-git")), + )) +} + +func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { + // Generated by: + // cat /tmp/key.json | base64 -w 0 + // This service account is JUST a storage reader on gcr.io/build-crd-testing + encoedDockercred := 
"ewogICJ0eXBlIjogInNlcnZpY2VfYWNjb3VudCIsCiAgInByb2plY3RfaWQiOiAiYnVpbGQtY3JkLXRlc3RpbmciLAogICJwcml2YXRlX2tleV9pZCI6ICIwNTAyYTQxYTgxMmZiNjRjZTU2YTY4ZWM1ODMyYWIwYmExMWMxMWU2IiwKICAicHJpdmF0ZV9rZXkiOiAiLS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tXG5NSUlFdlFJQkFEQU5CZ2txaGtpRzl3MEJBUUVGQUFTQ0JLY3dnZ1NqQWdFQUFvSUJBUUM5WDRFWU9BUmJ4UU04XG5EMnhYY2FaVGsrZ1k4ZWp1OTh0THFDUXFUckdNVzlSZVQyeE9ZNUF5Z2FsUFArcDd5WEVja3dCRC9IaE0wZ2xJXG43TVRMZGVlS1dyK3JBMUx3SFp5V0ZXN0gwT25mN3duWUhFSExXVW1jM0JDT1JFRHRIUlo3WnJQQmYxSFRBQS8zXG5Nblc1bFpIU045b2p6U1NGdzZBVnU2ajZheGJCSUlKNzU0THJnS2VBWXVyd2ZJUTJSTFR1MjAxazJJcUxZYmhiXG4zbVNWRzVSK3RiS3oxQ3ZNNTNuSENiN0NmdVZlV3NyQThrazd4SHJyTFFLTW1JOXYyc2dSdWd5TUF6d3ovNnpOXG5oNS9pTXh4Z2VxNVc4eGtWeDNKMm5ZOEpKZEhhZi9UNkFHc09ORW80M3B4ZWlRVmpuUmYvS24xMFRDYzJFc0lZXG5TNDlVc1o3QkFnTUJBQUVDZ2dFQUF1cGxkdWtDUVF1RDVVL2dhbUh0N0dnVzNBTVYxOGVxbkhuQ2EyamxhaCtTXG5BZVVHbmhnSmpOdkUrcE1GbFN2NXVmMnAySzRlZC9veEQ2K0NwOVpYRFJqZ3ZmdEl5cWpsemJ3dkZjZ3p3TnVEXG55Z1VrdXA3SGVjRHNEOFR0ZUFvYlQvVnB3cTZ6S01yQndDdk5rdnk2YlZsb0VqNXgzYlhzYXhlOTVETy95cHU2XG53MFc5N3p4d3dESlk2S1FjSVdNamhyR3h2d1g3bmlVQ2VNNGxlV0JEeUd0dzF6ZUpuNGhFYzZOM2FqUWFjWEtjXG4rNFFseGNpYW1ZcVFXYlBudHhXUWhoUXpjSFdMaTJsOWNGYlpENyt1SkxGNGlONnk4bVZOVTNLM0sxYlJZclNEXG5SVXAzYVVWQlhtRmcrWi8ycHVWTCttVTNqM0xMV1l5Qk9rZXZ1T21kZ1FLQmdRRGUzR0lRa3lXSVMxNFRkTU9TXG5CaUtCQ0R5OGg5NmVoTDBIa0RieU9rU3RQS2RGOXB1RXhaeGh5N29qSENJTTVGVnJwUk4yNXA0c0V6d0ZhYyt2XG5KSUZnRXZxN21YZm1YaVhJTmllUG9FUWFDbm54RHhXZ21yMEhVS0VtUzlvTWRnTGNHVStrQ1ZHTnN6N0FPdW0wXG5LcVkzczIyUTlsUTY3Rk95cWl1OFdGUTdRUUtCZ1FEWmlGaFRFWmtQRWNxWmpud0pwVEI1NlpXUDlLVHNsWlA3XG53VTRiemk2eSttZXlmM01KKzRMMlN5SGMzY3BTTWJqdE5PWkN0NDdiOTA4RlVtTFhVR05oY3d1WmpFUXhGZXkwXG5tNDFjUzVlNFA0OWI5bjZ5TEJqQnJCb3FzMldCYWwyZWdkaE5KU3NDV29pWlA4L1pUOGVnWHZoN2I5MWp6b0syXG5xMlBVbUE0RGdRS0JnQVdMMklqdkVJME95eDJTMTFjbi9lM1dKYVRQZ05QVEc5MDNVcGErcW56aE9JeCtNYXFoXG5QRjRXc3VBeTBBb2dHSndnTkpiTjhIdktVc0VUdkE1d3l5TjM5WE43dzBjaGFyRkwzN29zVStXT0F6RGpuamNzXG5BcTVPN0dQR21YdWI2RUJRQlBKaEpQMXd5NHYvSzFmSGcvRjQ3cTRmNDBMQUpPa2FZUkpENUh6QkFvR0JBTlVoXG5
uSUJQSnFxNElNdlE2Y0M5ZzhCKzF4WURlYTkvWWsxdytTbVBHdndyRVh5M0dLeDRLN2xLcGJQejdtNFgzM3N4XG5zRVUvK1kyVlFtd1JhMXhRbS81M3JLN1YybDVKZi9ENDAwalJtNlpmU0FPdmdEVHJ0Wm5VR0pNcno5RTd1Tnc3XG5sZ1VIM0pyaXZ5Ri9meE1JOHFzelFid1hQMCt4bnlxQXhFQWdkdUtCQW9HQUlNK1BTTllXQ1pYeERwU0hJMThkXG5qS2tvQWJ3Mk1veXdRSWxrZXVBbjFkWEZhZDF6c1hRR2RUcm1YeXY3TlBQKzhHWEJrbkJMaTNjdnhUaWxKSVN5XG51Y05yQ01pcU5BU24vZHE3Y1dERlVBQmdqWDE2SkgyRE5GWi9sL1VWRjNOREFKalhDczFYN3lJSnlYQjZveC96XG5hU2xxbElNVjM1REJEN3F4Unl1S3Nnaz1cbi0tLS0tRU5EIFBSSVZBVEUgS0VZLS0tLS1cbiIsCiAgImNsaWVudF9lbWFpbCI6ICJwdWxsLXNlY3JldC10ZXN0aW5nQGJ1aWxkLWNyZC10ZXN0aW5nLmlhbS5nc2VydmljZWFjY291bnQuY29tIiwKICAiY2xpZW50X2lkIjogIjEwNzkzNTg2MjAzMzAyNTI1MTM1MiIsCiAgImF1dGhfdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi9hdXRoIiwKICAidG9rZW5fdXJpIjogImh0dHBzOi8vYWNjb3VudHMuZ29vZ2xlLmNvbS9vL29hdXRoMi90b2tlbiIsCiAgImF1dGhfcHJvdmlkZXJfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9vYXV0aDIvdjEvY2VydHMiLAogICJjbGllbnRfeDUwOV9jZXJ0X3VybCI6ICJodHRwczovL3d3dy5nb29nbGVhcGlzLmNvbS9yb2JvdC92MS9tZXRhZGF0YS94NTA5L3B1bGwtc2VjcmV0LXRlc3RpbmclNDBidWlsZC1jcmQtdGVzdGluZy5pYW0uZ3NlcnZpY2VhY2NvdW50LmNvbSIKfQo=" + + decoded, err := base64.StdEncoding.DecodeString(encoedDockercred) + if err != nil { + return nil + } + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: getName(secretName, suffix), + Annotations: map[string]string{ + "tekton.dev/docker-0": "https://us.gcr.io", + "tekton.dev/docker-1": "https://eu.gcr.io", + "tekton.dev/docker-2": "https://asia.gcr.io", + "tekton.dev/docker-3": "https://gcr.io", + }, + }, + Type: "kubernetes.io/basic-auth", + Data: map[string][]byte{ + "username": []byte("_json_key"), + "password": decoded, + }, + } +} + +func getHelloWorldPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, + tb.PipelineRunLabel("hello-world-key", "hello-world-value"), + tb.PipelineRunSpec(getName(pipelineName, suffix), + 
tb.PipelineRunParam("path", "docker://gcr.io/build-crd-testing/secret-sauce"), + tb.PipelineRunParam("dest", "dir:///tmp/"), + tb.PipelineRunServiceAccountName(fmt.Sprintf("%s%d", saName, suffix)), + ), + ) +} + +func getName(namespace string, suffix int) string { + return fmt.Sprintf("%s%d", namespace, suffix) +} + +// collectMatchingEvents collects list of events under 5 seconds that match +// 1. matchKinds which is a map of Kind of Object with name of objects +// 2. reason which is the expected reason of event +func collectMatchingEvents(kubeClient *knativetest.KubeClient, namespace string, kinds map[string][]string, reason string) ([]*corev1.Event, error) { + var events []*corev1.Event + + watchEvents, err := kubeClient.Kube.CoreV1().Events(namespace).Watch(metav1.ListOptions{}) + // close watchEvents channel + defer watchEvents.Stop() + if err != nil { + return events, err + } + + // create timer to not wait for events longer than 5 seconds + timer := time.NewTimer(5 * time.Second) + + for { + select { + case wevent := <-watchEvents.ResultChan(): + event := wevent.Object.(*corev1.Event) + if val, ok := kinds[event.InvolvedObject.Kind]; ok { + for _, expectedName := range val { + if event.InvolvedObject.Name == expectedName && event.Reason == reason { + events = append(events, event) + } + } + } + case <-timer.C: + return events, nil + } + } +} + +// checkLabelPropagation checks that labels are correctly propagating from +// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. +func checkLabelPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { + // Our controllers add 4 labels automatically. If custom labels are set on + // the Pipeline, PipelineRun, or Task then the map will have to be resized. + labels := make(map[string]string, 4) + + // Check label propagation to PipelineRuns. 
+ pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) + } + p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) + } + for key, val := range p.ObjectMeta.Labels { + labels[key] = val + } + // This label is added to every PipelineRun by the PipelineRun controller + labels[pipeline.GroupName+pipeline.PipelineLabelKey] = p.Name + assertLabelsMatch(t, labels, pr.ObjectMeta.Labels) + + // Check label propagation to TaskRuns. + for key, val := range pr.ObjectMeta.Labels { + labels[key] = val + } + // This label is added to every TaskRun by the PipelineRun controller + labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] = pr.Name + if tr.Spec.TaskRef != nil { + task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) + } + for key, val := range task.ObjectMeta.Labels { + labels[key] = val + } + // This label is added to TaskRuns that reference a Task by the TaskRun controller + labels[pipeline.GroupName+pipeline.TaskLabelKey] = task.Name + } + assertLabelsMatch(t, labels, tr.ObjectMeta.Labels) + + // PodName is "" iff a retry happened and pod is deleted + // This label is added to every Pod by the TaskRun controller + if tr.Status.PodName != "" { + // Check label propagation to Pods. + pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + // This label is added to every Pod by the TaskRun controller + labels[pipeline.GroupName+pipeline.TaskRunLabelKey] = tr.Name + assertLabelsMatch(t, labels, pod.ObjectMeta.Labels) + } +} + +// checkAnnotationPropagation checks that annotations are correctly propagating from +// Pipelines, PipelineRuns, and Tasks to TaskRuns and Pods. 
+func checkAnnotationPropagation(t *testing.T, c *clients, namespace string, pipelineRunName string, tr *v1alpha1.TaskRun) { + annotations := make(map[string]string) + + // Check annotation propagation to PipelineRuns. + pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected PipelineRun for %s: %s", tr.Name, err) + } + p, err := c.PipelineClient.Get(pr.Spec.PipelineRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Pipeline for %s: %s", pr.Name, err) + } + for key, val := range p.ObjectMeta.Annotations { + annotations[key] = val + } + assertAnnotationsMatch(t, annotations, pr.ObjectMeta.Annotations) + + // Check annotation propagation to TaskRuns. + for key, val := range pr.ObjectMeta.Annotations { + annotations[key] = val + } + if tr.Spec.TaskRef != nil { + task, err := c.TaskClient.Get(tr.Spec.TaskRef.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected Task for %s: %s", tr.Name, err) + } + for key, val := range task.ObjectMeta.Annotations { + annotations[key] = val + } + } + assertAnnotationsMatch(t, annotations, tr.ObjectMeta.Annotations) + + // Check annotation propagation to Pods. + pod := getPodForTaskRun(t, c.KubeClient, namespace, tr) + assertAnnotationsMatch(t, annotations, pod.ObjectMeta.Annotations) +} + +func getPodForTaskRun(t *testing.T, kubeClient *knativetest.KubeClient, namespace string, tr *v1alpha1.TaskRun) *corev1.Pod { + // The Pod name has a random suffix, so we filter by label to find the one we care about. 
+ pods, err := kubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{ + LabelSelector: pipeline.GroupName + pipeline.TaskRunLabelKey + " = " + tr.Name, + }) + if err != nil { + t.Fatalf("Couldn't get expected Pod for %s: %s", tr.Name, err) + } + if numPods := len(pods.Items); numPods != 1 { + t.Fatalf("Expected 1 Pod for %s, but got %d Pods", tr.Name, numPods) + } + return &pods.Items[0] +} + +func assertLabelsMatch(t *testing.T, expectedLabels, actualLabels map[string]string) { + for key, expectedVal := range expectedLabels { + if actualVal := actualLabels[key]; actualVal != expectedVal { + t.Errorf("Expected labels containing %s=%s but labels were %v", key, expectedVal, actualLabels) + } + } +} + +func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations map[string]string) { + for key, expectedVal := range expectedAnnotations { + if actualVal := actualAnnotations[key]; actualVal != expectedVal { + t.Errorf("Expected annotations containing %s=%s but annotations were %v", key, expectedVal, actualAnnotations) + } + } +} + +func getPipelineWithFailingCondition(suffix int, namespace string) *v1alpha1.Pipeline { + return tb.Pipeline(getName(pipelineName, suffix), namespace, tb.PipelineSpec( + tb.PipelineTask(task1Name, getName(taskName, suffix), tb.PipelineTaskCondition(cond1Name)), + tb.PipelineTask("task2", getName(taskName, suffix), tb.RunAfter(task1Name)), + )) +} + +func getFailingCondition(namespace string) *v1alpha1.Condition { + return tb.Condition(cond1Name, namespace, tb.ConditionSpec(tb.ConditionSpecCheck("", "ubuntu", + tb.Command("/bin/bash"), tb.Args("exit 1")))) +} + +func getConditionalPipelineRun(suffix int, namespace string) *v1alpha1.PipelineRun { + return tb.PipelineRun(getName(pipelineRunName, suffix), namespace, + tb.PipelineRunLabel("hello-world-key", "hello-world-value"), + tb.PipelineRunSpec(getName(pipelineName, suffix)), + ) +} diff --git a/test/v1alpha1/registry_test.go b/test/v1alpha1/registry_test.go new 
file mode 100644 index 00000000000..32654b62c4f --- /dev/null +++ b/test/v1alpha1/registry_test.go @@ -0,0 +1,93 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package test + +import ( + "testing" + + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func withRegistry(t *testing.T, c *clients, namespace string) { + deployment := getRegistryDeployment(namespace) + if _, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Create(deployment); err != nil { + t.Fatalf("Failed to create the local registry deployment: %v", err) + } + if err := WaitForDeploymentState(c, deployment.Name, namespace, func(d *appsv1.Deployment) (bool, error) { + var replicas int32 = 1 + if d.Spec.Replicas != nil { + replicas = *d.Spec.Replicas + } + return d.Status.ReadyReplicas == replicas, nil + }, "DeploymentPodRunning"); err != nil { + t.Fatalf("Error waiting for Deployment %q to be ready: %v", deployment.Name, err) + } + + service := getRegistryService(namespace) + if _, err := c.KubeClient.Kube.CoreV1().Services(namespace).Create(service); err != nil { + t.Fatalf("Failed to create the local registry service: %v", err) + } +} + +func getRegistryDeployment(namespace string) *appsv1.Deployment { + return &appsv1.Deployment{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "registry", + }, + Spec: appsv1.DeploymentSpec{ + Selector: &metav1.LabelSelector{ + MatchLabels: 
map[string]string{ + "app": "registry", + }, + }, + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "app": "registry", + }, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{{ + Name: "registry", + Image: "registry", + }}, + }, + }, + }, + } +} + +func getRegistryService(namespace string) *corev1.Service { + return &corev1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: "registry", + }, + Spec: corev1.ServiceSpec{ + Ports: []corev1.ServicePort{{ + Port: 5000, + }}, + Selector: map[string]string{ + "app": "registry", + }, + }, + } +} diff --git a/test/v1alpha1/retry_test.go b/test/v1alpha1/retry_test.go new file mode 100644 index 00000000000..ab7add10491 --- /dev/null +++ b/test/v1alpha1/retry_test.go @@ -0,0 +1,141 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "knative.dev/pkg/apis" + duckv1beta1 "knative.dev/pkg/apis/duck/v1beta1" + knativetest "knative.dev/pkg/test" +) + +// TestTaskRunRetry tests that retries behave as expected, by creating multiple +// Pods for the same TaskRun each time it fails, up to the configured max. 
+func TestTaskRunRetry(t *testing.T) { + c, namespace := setup(t) + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + // Create a PipelineRun with a single TaskRun that can only fail, + // configured to retry 5 times. + pipelineRunName := "retry-pipeline" + numRetries := 5 + if _, err := c.PipelineRunClient.Create(&v1alpha1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName}, + Spec: v1alpha1.PipelineRunSpec{ + PipelineSpec: &v1alpha1.PipelineSpec{ + Tasks: []v1alpha1.PipelineTask{{ + Name: "retry-me", + TaskSpec: &v1alpha1.TaskSpec{TaskSpec: v1beta1.TaskSpec{ + Steps: []v1alpha1.Step{{ + Container: corev1.Container{Image: "busybox"}, + Script: "exit 1", + }}, + }}, + Retries: numRetries, + }}, + }, + }, + }); err != nil { + t.Fatalf("Failed to create PipelineRun %q: %v", pipelineRunName, err) + } + + // Wait for the PipelineRun to fail, when retries are exhausted. + if err := WaitForPipelineRunState(c, pipelineRunName, 5*time.Minute, PipelineRunFailed(pipelineRunName), "PipelineRunFailed"); err != nil { + t.Fatalf("Waiting for PipelineRun to fail: %v", err) + } + + // Get the status of the PipelineRun. + pr, err := c.PipelineRunClient.Get(pipelineRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Failed to get PipelineRun %q: %v", pipelineRunName, err) + } + + // PipelineRunStatus should have 1 TaskRun status, and it should be failed. + if len(pr.Status.TaskRuns) != 1 { + t.Errorf("Got %d TaskRun statuses, wanted 1", len(pr.Status.TaskRuns)) + } + for taskRunName, trs := range pr.Status.TaskRuns { + if !isFailed(t, taskRunName, trs.Status.Conditions) { + t.Errorf("TaskRun status %q is not failed", taskRunName) + } + } + + // There should only be one TaskRun created. 
+ trs, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Failed to list TaskRuns: %v", err) + } else if len(trs.Items) != 1 { + t.Errorf("Found %d TaskRuns, want 1", len(trs.Items)) + } + + // The TaskRun status should have N retriesStatuses, all failures. + tr := trs.Items[0] + podNames := map[string]struct{}{} + for idx, r := range tr.Status.RetriesStatus { + if !isFailed(t, tr.Name, r.Conditions) { + t.Errorf("TaskRun %q retry status %d is not failed", tr.Name, idx) + } + podNames[r.PodName] = struct{}{} + } + podNames[tr.Status.PodName] = struct{}{} + if len(tr.Status.RetriesStatus) != numRetries { + t.Errorf("TaskRun %q had %d retriesStatuses, want %d", tr.Name, len(tr.Status.RetriesStatus), numRetries) + } + + // There should be N Pods created, all failed, all owned by the TaskRun. + pods, err := c.KubeClient.Kube.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + // We expect N+1 Pods total, one for each failed and retried attempt, and one for the final attempt. + wantPods := numRetries + 1 + + if err != nil { + t.Fatalf("Failed to list Pods: %v", err) + } else if len(pods.Items) != wantPods { + t.Errorf("BUG: Found %d Pods, want %d", len(pods.Items), wantPods) + } + for _, p := range pods.Items { + if _, found := podNames[p.Name]; !found { + t.Errorf("BUG: TaskRunStatus.RetriesStatus did not report pod name %q", p.Name) + } + if p.Status.Phase != corev1.PodFailed { + t.Errorf("BUG: Pod %q is not failed: %v", p.Name, p.Status.Phase) + } + } +} + +// This method is necessary because PipelineRunTaskRunStatus and TaskRunStatus +// don't have an IsFailed method. 
+func isFailed(t *testing.T, taskRunName string, conds duckv1beta1.Conditions) bool { + for _, c := range conds { + if c.Type == apis.ConditionSucceeded { + if c.Status != corev1.ConditionFalse { + t.Errorf("TaskRun status %q is not failed, got %q", taskRunName, c.Status) + } + return true + } + } + t.Errorf("TaskRun status %q had no Succeeded condition", taskRunName) + return false +} diff --git a/test/v1alpha1/secret.go b/test/v1alpha1/secret.go new file mode 100644 index 00000000000..37ea00c080a --- /dev/null +++ b/test/v1alpha1/secret.go @@ -0,0 +1,63 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "io/ioutil" + "os" + "testing" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +// CreateGCPServiceAccountSecret will create a kube secret called secretName in namespace +// from the value in the GCP_SERVICE_ACCOUNT_KEY_PATH environment variable. If the env var +// doesn't exist, no secret will be created. Returns true if the secret was created, false +// otherwise. 
+func CreateGCPServiceAccountSecret(t *testing.T, c *knativetest.KubeClient, namespace string, secretName string) (bool, error) { + t.Helper() + file := os.Getenv("GCP_SERVICE_ACCOUNT_KEY_PATH") + if file == "" { + t.Logf("Not creating service account secret, relying on default credentials in namespace %s.", namespace) + return false, nil + } + + sec := &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: secretName, + Namespace: namespace, + }, + } + + bs, err := ioutil.ReadFile(file) + if err != nil { + return false, fmt.Errorf("couldn't read secret json from %s: %w", file, err) + } + + sec.Data = map[string][]byte{ + "config.json": bs, + } + _, err = c.Kube.CoreV1().Secrets(namespace).Create(sec) + + t.Log("Creating service account secret") + return true, err +} diff --git a/test/v1alpha1/sidecar_test.go b/test/v1alpha1/sidecar_test.go new file mode 100644 index 00000000000..832961c8b45 --- /dev/null +++ b/test/v1alpha1/sidecar_test.go @@ -0,0 +1,164 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "fmt" + "testing" + "time" + + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + sidecarTaskName = "sidecar-test-task" + sidecarTaskRunName = "sidecar-test-task-run" + sidecarContainerName = "sidecar-container" + primaryContainerName = "primary" +) + +// TestSidecarTaskSupport checks whether support for sidecars is working +// as expected by running a Task with a Sidecar defined and confirming +// that both the primary and sidecar containers terminate. +func TestSidecarTaskSupport(t *testing.T) { + tests := []struct { + desc string + stepCommand []string + sidecarCommand []string + }{{ + desc: "A sidecar that runs forever is terminated when Steps complete", + stepCommand: []string{"echo", "\"hello world\""}, + sidecarCommand: []string{"sh", "-c", "while [[ true ]] ; do echo \"hello from sidecar\" ; done"}, + }, { + desc: "A sidecar that terminates early does not cause problems running Steps", + stepCommand: []string{"echo", "\"hello world\""}, + sidecarCommand: []string{"echo", "\"hello from sidecar\""}, + }} + + clients, namespace := setup(t) + + for i, test := range tests { + t.Run(test.desc, func(t *testing.T) { + sidecarTaskName := fmt.Sprintf("%s-%d", sidecarTaskName, i) + sidecarTaskRunName := fmt.Sprintf("%s-%d", sidecarTaskRunName, i) + task := tb.Task(sidecarTaskName, namespace, + tb.TaskSpec( + tb.Step( + "busybox:1.31.0-musl", + tb.StepName(primaryContainerName), + tb.StepCommand(test.stepCommand...), + ), + tb.Sidecar( + sidecarContainerName, + "busybox:1.31.0-musl", + tb.Command(test.sidecarCommand...), + ), + ), + ) + + taskRun := tb.TaskRun(sidecarTaskRunName, namespace, + tb.TaskRunSpec(tb.TaskRunTaskRef(sidecarTaskName), + tb.TaskRunTimeout(1*time.Minute), + ), + ) + + t.Logf("Creating Task %q", sidecarTaskName) + if _, err := clients.TaskClient.Create(task); err != nil { + t.Errorf("Failed to create Task %q: %v", 
sidecarTaskName, err) + } + + t.Logf("Creating TaskRun %q", sidecarTaskRunName) + if _, err := clients.TaskRunClient.Create(taskRun); err != nil { + t.Errorf("Failed to create TaskRun %q: %v", sidecarTaskRunName, err) + } + + if err := WaitForTaskRunState(clients, sidecarTaskRunName, Succeed(sidecarTaskRunName), "TaskRunSucceed"); err != nil { + t.Errorf("Error waiting for TaskRun %q to finish: %v", sidecarTaskRunName, err) + } + + tr, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error getting Taskrun: %v", err) + } + podName := tr.Status.PodName + + if err := WaitForPodState(clients, podName, namespace, func(pod *corev1.Pod) (bool, error) { + terminatedCount := 0 + for _, c := range pod.Status.ContainerStatuses { + if c.State.Terminated != nil { + terminatedCount++ + } + } + return terminatedCount == 2, nil + }, "PodContainersTerminated"); err != nil { + t.Errorf("Error waiting for Pod %q to terminate both the primary and sidecar containers: %v", podName, err) + } + + pod, err := clients.KubeClient.Kube.CoreV1().Pods(namespace).Get(podName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun pod: %v", err) + } + + primaryTerminated := false + sidecarTerminated := false + + for _, c := range pod.Status.ContainerStatuses { + if c.Name == fmt.Sprintf("step-%s", primaryContainerName) { + if c.State.Terminated == nil || c.State.Terminated.Reason != "Completed" { + t.Errorf("Primary container has nil Terminated state or did not complete successfully. 
Actual Terminated state: %v", c.State.Terminated) + } else { + primaryTerminated = true + } + } + if c.Name == fmt.Sprintf("sidecar-%s", sidecarContainerName) { + if c.State.Terminated == nil { + t.Errorf("Sidecar container has a nil Terminated status but non-nil is expected.") + } else { + sidecarTerminated = true + } + } + } + + if !primaryTerminated || !sidecarTerminated { + t.Errorf("Either the primary or sidecar containers did not terminate") + } + + trCheckSidecarStatus, err := clients.TaskRunClient.Get(sidecarTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun: %v", err) + } + + sidecarFromStatus := trCheckSidecarStatus.Status.Sidecars[0] + + // Check if Sidecar ContainerName is present for SidecarStatus + if sidecarFromStatus.ContainerName != fmt.Sprintf("sidecar-%s", sidecarContainerName) { + t.Errorf("Sidecar ContainerName should be: %s", sidecarContainerName) + } + + // Check if Terminated status is present for SidecarStatus + if trCheckSidecarStatus.Name == "sidecar-test-task-run-1" && sidecarFromStatus.Terminated == nil { + t.Errorf("TaskRunStatus: Sidecar container has a nil Terminated status but non-nil is expected.") + } else if trCheckSidecarStatus.Name == "sidecar-test-task-run-1" && sidecarFromStatus.Terminated.Reason != "Completed" { + t.Errorf("TaskRunStatus: Sidecar container has a nil Terminated reason of %s but should be Completed", sidecarFromStatus.Terminated.Reason) + } + }) + } +} diff --git a/test/v1alpha1/start_time_test.go b/test/v1alpha1/start_time_test.go new file mode 100644 index 00000000000..8dd8e742a70 --- /dev/null +++ b/test/v1alpha1/start_time_test.go @@ -0,0 +1,93 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +package test + +import ( + "testing" + "time" + + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +// TestStartTime tests that step start times are reported accurately. +// +// It runs a TaskRun with 5 steps that each sleep 10 seconds, then checks that +// the reported step start times are 10+ seconds apart from each other. +// Scheduling and reporting specifics can result in start times being reported +// more than 10s apart, but they shouldn't be less than 10s apart. 
+func TestStartTime(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + t.Logf("Creating TaskRun in namespace %q", namespace) + tr, err := c.TaskRunClient.Create(&v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + GenerateName: "start-time-test-", + Namespace: namespace, + }, + Spec: v1alpha1.TaskRunSpec{ + TaskSpec: &v1alpha1.TaskSpec{TaskSpec: v1beta1.TaskSpec{ + Steps: []v1alpha1.Step{{ + Container: corev1.Container{Image: "ubuntu"}, + Script: "sleep 10", + }, { + Container: corev1.Container{Image: "ubuntu"}, + Script: "sleep 10", + }, { + Container: corev1.Container{Image: "ubuntu"}, + Script: "sleep 10", + }, { + Container: corev1.Container{Image: "ubuntu"}, + Script: "sleep 10", + }, { + Container: corev1.Container{Image: "ubuntu"}, + Script: "sleep 10", + }}, + }}, + }, + }) + if err != nil { + t.Fatalf("Error creating TaskRun: %v", err) + } + t.Logf("Created TaskRun %q in namespace %q", tr.Name, namespace) + // Wait for the TaskRun to complete. 
+ if err := WaitForTaskRunState(c, tr.Name, TaskRunSucceed(tr.Name), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun to succeed: %v", err) + } + tr, err = c.TaskRunClient.Get(tr.Name, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting TaskRun: %v", err) + } + if got, want := len(tr.Status.Steps), len(tr.Spec.TaskSpec.Steps); got != want { + t.Errorf("Got unexpected number of step states: got %d, want %d", got, want) + } + var lastStart metav1.Time + for idx, s := range tr.Status.Steps { + if s.Terminated == nil { + t.Errorf("Step state %d was not terminated", idx) + continue + } + diff := s.Terminated.StartedAt.Time.Sub(lastStart.Time) + if diff < 10*time.Second { + t.Errorf("Step %d start time was %s since last start, wanted >10s", idx, diff) + } + lastStart = s.Terminated.StartedAt + } +} diff --git a/test/v1alpha1/status_test.go b/test/v1alpha1/status_test.go new file mode 100644 index 00000000000..31cd8791c5a --- /dev/null +++ b/test/v1alpha1/status_test.go @@ -0,0 +1,74 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "testing" + + tb "github.com/tektoncd/pipeline/test/builder" + knativetest "knative.dev/pkg/test" +) + +// TestTaskRunPipelineRunStatus is an integration test that will +// verify a very simple "hello world" TaskRun and PipelineRun failure +// execution lead to the correct TaskRun status. 
+func TestTaskRunPipelineRunStatus(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := tb.Task("banana", namespace, tb.TaskSpec( + tb.Step("busybox", tb.StepCommand("ls", "-la")), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := tb.TaskRun("apple", namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef("banana"), tb.TaskRunServiceAccountName("inexistent"), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) + if err := WaitForTaskRunState(c, "apple", TaskRunFailed("apple"), "BuildValidationFailed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + + pipeline := tb.Pipeline("tomatoes", namespace, + tb.PipelineSpec(tb.PipelineTask("foo", "banana")), + ) + pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec( + "tomatoes", tb.PipelineRunServiceAccountName("inexistent"), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", "tomatoes", err) + } + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", "pear", err) + } + + t.Logf("Waiting for PipelineRun in namespace %s to fail", namespace) + if err := WaitForPipelineRunState(c, "pear", pipelineRunTimeout, PipelineRunFailed("pear"), "BuildValidationFailed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } +} diff --git a/test/v1alpha1/taskrun_test.go b/test/v1alpha1/taskrun_test.go new file mode 100644 index 00000000000..7ac7a7fbd50 --- /dev/null +++ b/test/v1alpha1/taskrun_test.go @@ -0,0 +1,167 @@ +// +build e2e + +/* +Copyright 2019 
The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +func TestTaskRunFailure(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + taskRunName := "failing-taskrun" + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := tb.Task("failing-task", namespace, tb.TaskSpec( + tb.Step("busybox", + tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "echo hello"), + ), + tb.Step("busybox", + tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "exit 1"), + ), + tb.Step("busybox", + tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 30s"), + ), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef("failing-task"), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) + if err := WaitForTaskRunState(c, taskRunName, 
TaskRunFailed(taskRunName), "TaskRunFailed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + + taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + + expectedStepState := []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + Reason: "Completed", + }, + }, + Name: "unnamed-0", + ContainerName: "step-unnamed-0", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + Name: "unnamed-1", + ContainerName: "step-unnamed-1", + }, { + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + Name: "unnamed-2", + ContainerName: "step-unnamed-2", + }} + ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") + ignoreStepFields := cmpopts.IgnoreFields(v1alpha1.StepState{}, "ImageID") + if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { + t.Fatalf("-got, +want: %v", d) + } +} + +func TestTaskRunStatus(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + taskRunName := "status-taskrun" + + fqImageName := "busybox@sha256:895ab622e92e18d6b461d671081757af7dbaa3b00e3e28e12505af7817f73649" + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + task := tb.Task("status-task", namespace, tb.TaskSpec( + // This was the digest of the latest tag as of 8/12/2019 + tb.Step("busybox@sha256:895ab622e92e18d6b461d671081757af7dbaa3b00e3e28e12505af7817f73649", + tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "echo hello"), + ), + )) + if _, err := 
c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef("status-task"), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to fail", namespace) + if err := WaitForTaskRunState(c, taskRunName, TaskRunSucceed(taskRunName), "TaskRunSucceed"); err != nil { + t.Errorf("Error waiting for TaskRun to finish: %s", err) + } + + taskrun, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Couldn't get expected TaskRun %s: %s", taskRunName, err) + } + + expectedStepState := []v1alpha1.StepState{{ + ContainerState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 0, + Reason: "Completed", + }, + }, + Name: "unnamed-0", + ContainerName: "step-unnamed-0", + }} + + ignoreTerminatedFields := cmpopts.IgnoreFields(corev1.ContainerStateTerminated{}, "StartedAt", "FinishedAt", "ContainerID") + ignoreStepFields := cmpopts.IgnoreFields(v1alpha1.StepState{}, "ImageID") + if d := cmp.Diff(taskrun.Status.Steps, expectedStepState, ignoreTerminatedFields, ignoreStepFields); d != "" { + t.Fatalf("-got, +want: %v", d) + } + // Note(chmouel): Sometime we have docker-pullable:// or docker.io/library as prefix, so let only compare the suffix + if !strings.HasSuffix(taskrun.Status.Steps[0].ImageID, fqImageName) { + t.Fatalf("`ImageID: %s` does not end with `%s`", taskrun.Status.Steps[0].ImageID, fqImageName) + } +} diff --git a/test/v1alpha1/timeout_test.go b/test/v1alpha1/timeout_test.go new file mode 100644 index 00000000000..0da8cb4c1dc --- /dev/null +++ b/test/v1alpha1/timeout_test.go @@ -0,0 +1,273 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package test
+
+import (
+	"fmt"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1alpha1"
+	"github.com/tektoncd/pipeline/pkg/reconciler/pipelinerun/resources"
+	tb "github.com/tektoncd/pipeline/test/builder"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"knative.dev/pkg/apis"
+	knativetest "knative.dev/pkg/test"
+)
+
+// TestPipelineRunTimeout is an integration test that will
+// verify that pipelinerun timeout works and leads to the correct TaskRun statuses
+// and pod deletions.
+func TestPipelineRunTimeout(t *testing.T) {
+	c, namespace := setup(t)
+	t.Parallel()
+
+	knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf)
+	defer tearDown(t, c, namespace)
+
+	t.Logf("Creating Task in namespace %s", namespace)
+	task := tb.Task("banana", namespace, tb.TaskSpec(
+		tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 10"))))
+	if _, err := c.TaskClient.Create(task); err != nil {
+		t.Fatalf("Failed to create Task `%s`: %s", "banana", err)
+	}
+
+	pipeline := tb.Pipeline("tomatoes", namespace,
+		tb.PipelineSpec(tb.PipelineTask("foo", "banana")),
+	)
+	pipelineRun := tb.PipelineRun("pear", namespace, tb.PipelineRunSpec(pipeline.Name,
+		tb.PipelineRunTimeout(5*time.Second),
+	))
+	if _, err := c.PipelineClient.Create(pipeline); err != nil {
+		t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err)
+	}
+	if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil {
+		t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name,
err) + } + + t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace) + if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err) + } + + taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", pipelineRun.Name, namespace) + errChan := make(chan error, len(taskrunList.Items)) + defer close(errChan) + + for _, taskrunItem := range taskrunList.Items { + go func(name string) { + err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + errChan <- err + }(taskrunItem.Name) + } + + for i := 1; i <= len(taskrunList.Items); i++ { + if <-errChan != nil { + t.Errorf("Error waiting for TaskRun %s to be running: %s", taskrunList.Items[i-1].Name, err) + } + } + + if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for PipelineRun %s in namespace %s to be timed out", pipelineRun.Name, namespace) + if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, FailedWithReason(resources.ReasonTimedOut, pipelineRun.Name), "PipelineRunTimedOut"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be cancelled", pipelineRun.Name, namespace) + var wg sync.WaitGroup + for _, taskrunItem := range taskrunList.Items { + wg.Add(1) + go func(name string) { + defer wg.Done() + err := WaitForTaskRunState(c, name, FailedWithReason("TaskRunTimeout", name), 
"TaskRunTimeout") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err) + } + }(taskrunItem.Name) + } + wg.Wait() + + if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) + } + + // Verify that we can create a second Pipeline using the same Task without a Pipeline-level timeout that will not + // time out + secondPipeline := tb.Pipeline("peppers", namespace, + tb.PipelineSpec(tb.PipelineTask("foo", "banana"))) + secondPipelineRun := tb.PipelineRun("kiwi", namespace, tb.PipelineRunSpec("peppers")) + if _, err := c.PipelineClient.Create(secondPipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", secondPipeline.Name, err) + } + if _, err := c.PipelineRunClient.Create(secondPipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", secondPipelineRun.Name, err) + } + + t.Logf("Waiting for PipelineRun %s in namespace %s to complete", secondPipelineRun.Name, namespace) + if err := WaitForPipelineRunState(c, secondPipelineRun.Name, timeout, PipelineRunSucceed(secondPipelineRun.Name), "PipelineRunSuccess"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to finish: %s", secondPipelineRun.Name, err) + } +} + +// TestTaskRunTimeout is an integration test that will verify a TaskRun can be timed out. 
+func TestTaskRunTimeout(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Task and TaskRun in namespace %s", namespace) + if _, err := c.TaskClient.Create(tb.Task("giraffe", namespace, + tb.TaskSpec(tb.Step("busybox", tb.StepCommand("/bin/sh"), tb.StepArgs("-c", "sleep 3000"))))); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", "giraffe", err) + } + if _, err := c.TaskRunClient.Create(tb.TaskRun("run-giraffe", namespace, tb.TaskRunSpec(tb.TaskRunTaskRef("giraffe"), + // Do not reduce this timeout. Taskrun e2e test is also verifying + // if reconcile is triggered from timeout handler and not by pod informers + tb.TaskRunTimeout(30*time.Second)))); err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", "run-giraffe", err) + } + + t.Logf("Waiting for TaskRun %s in namespace %s to complete", "run-giraffe", namespace) + if err := WaitForTaskRunState(c, "run-giraffe", FailedWithReason("TaskRunTimeout", "run-giraffe"), "TaskRunTimeout"); err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", "run-giraffe", err) + } +} + +func TestPipelineTaskTimeout(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + t.Logf("Creating Tasks in namespace %s", namespace) + task1 := tb.Task("success", namespace, tb.TaskSpec( + tb.Step("busybox", tb.StepCommand("sleep"), tb.StepArgs("1s")))) + + task2 := tb.Task("timeout", namespace, tb.TaskSpec( + tb.Step("busybox", tb.StepCommand("sleep"), tb.StepArgs("10s")))) + + if _, err := c.TaskClient.Create(task1); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", task1.Name, err) + } + if _, err := c.TaskClient.Create(task2); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", task2.Name, err) + } + + pipeline := 
tb.Pipeline("pipelinetasktimeout", namespace, + tb.PipelineSpec( + tb.PipelineTask("pipelinetask1", task1.Name, tb.PipelineTaskTimeout(60*time.Second)), + tb.PipelineTask("pipelinetask2", task2.Name, tb.PipelineTaskTimeout(5*time.Second)), + ), + ) + + pipelineRun := tb.PipelineRun("prtasktimeout", namespace, tb.PipelineRunSpec(pipeline.Name)) + + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", pipeline.Name, err) + } + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for Pipelinerun %s in namespace %s to be started", pipelineRun.Name, namespace) + if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, Running(pipelineRun.Name), "PipelineRunRunning"); err != nil { + t.Fatalf("Error waiting for PipelineRun %s to be running: %s", pipelineRun.Name, err) + } + + taskrunList, err := c.TaskRunClient.List(metav1.ListOptions{LabelSelector: fmt.Sprintf("tekton.dev/pipelineRun=%s", pipelineRun.Name)}) + if err != nil { + t.Fatalf("Error listing TaskRuns for PipelineRun %s: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for TaskRuns from PipelineRun %s in namespace %s to be running", pipelineRun.Name, namespace) + errChan := make(chan error, len(taskrunList.Items)) + defer close(errChan) + + for _, taskrunItem := range taskrunList.Items { + go func(name string) { + err := WaitForTaskRunState(c, name, Running(name), "TaskRunRunning") + errChan <- err + }(taskrunItem.Name) + } + + for i := 1; i <= len(taskrunList.Items); i++ { + if <-errChan != nil { + t.Errorf("Error waiting for TaskRun %s to be running: %s", taskrunList.Items[i-1].Name, err) + } + } + + if _, err := c.PipelineRunClient.Get(pipelineRun.Name, metav1.GetOptions{}); err != nil { + t.Fatalf("Failed to get PipelineRun `%s`: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for PipelineRun %s with PipelineTask timeout in 
namespace %s to fail", pipelineRun.Name, namespace) + if err := WaitForPipelineRunState(c, pipelineRun.Name, timeout, FailedWithReason(resources.ReasonFailed, pipelineRun.Name), "PipelineRunTimedOut"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", pipelineRun.Name, err) + } + + t.Logf("Waiting for TaskRun from PipelineRun %s in namespace %s to be timed out", pipelineRun.Name, namespace) + var wg sync.WaitGroup + for _, taskrunItem := range taskrunList.Items { + wg.Add(1) + go func(tr v1alpha1.TaskRun) { + defer wg.Done() + name := tr.Name + err := WaitForTaskRunState(c, name, func(ca apis.ConditionAccessor) (bool, error) { + cond := ca.GetCondition(apis.ConditionSucceeded) + if cond != nil { + if tr.Spec.TaskRef.Name == task1.Name && cond.Status == corev1.ConditionTrue { + if cond.Reason == "Succeeded" { + return true, nil + } + return true, fmt.Errorf("taskRun %q completed with the wrong reason: %s", task1.Name, cond.Reason) + } else if tr.Spec.TaskRef.Name == task1.Name && cond.Status == corev1.ConditionFalse { + return true, fmt.Errorf("taskRun %q failed, but should have been Succeeded", name) + } + + if tr.Spec.TaskRef.Name == task2.Name && cond.Status == corev1.ConditionFalse { + if cond.Reason == "TaskRunTimeout" { + return true, nil + } + return true, fmt.Errorf("taskRun %q completed with the wrong reason: %s", task2.Name, cond.Reason) + } else if tr.Spec.TaskRef.Name == task2.Name && cond.Status == corev1.ConditionTrue { + return true, fmt.Errorf("taskRun %q should have timed out", name) + } + } + return false, nil + }, "TaskRunTimeout") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to timeout: %s", name, err) + } + }(taskrunItem) + } + wg.Wait() +} diff --git a/test/v1alpha1/wait.go b/test/v1alpha1/wait.go new file mode 100644 index 00000000000..7a56c1b1d9b --- /dev/null +++ b/test/v1alpha1/wait.go @@ -0,0 +1,266 @@ +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the 
"License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Poll Pipeline resources + +After creating Pipeline resources or making changes to them, you will need to +wait for the system to realize those changes. You can use polling methods to +check the resources reach the desired state. + +The WaitFor* functions use the kubernetes +wait package (https://godoc.org/k8s.io/apimachinery/pkg/util/wait). To poll +they use +PollImmediate (https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate) +and the return values of the function you provide behave the same as +ConditionFunc (https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc): +a boolean to indicate if the function should stop or continue polling, and an +error to indicate if there has been an error. 
+ + +For example, you can poll a TaskRun object to wait for it to have a Status.Condition: + + err = WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + if len(tr.Status.Conditions) > 0 { + return true, nil + } + return false, nil + }, "TaskRunHasCondition") + +*/ +package test + +import ( + "context" + "fmt" + "strings" + "time" + + "go.opencensus.io/trace" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "knative.dev/pkg/apis" +) + +const ( + interval = 1 * time.Second + timeout = 10 * time.Minute +) + +// ConditionAccessorFn is a condition function used polling functions +type ConditionAccessorFn func(ca apis.ConditionAccessor) (bool, error) + +// WaitForTaskRunState polls the status of the TaskRun called name from client every +// interval until inState returns `true` indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForTaskRunState(c *clients, name string, inState ConditionAccessorFn, desc string) error { + metricName := fmt.Sprintf("WaitForTaskRunState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + r, err := c.TaskRunClient.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(&r.Status) + }) +} + +// WaitForDeploymentState polls the status of the Deployment called name +// from client every interval until inState returns `true` indicating it is done, +// returns an error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. 
+func WaitForDeploymentState(c *clients, name string, namespace string, inState func(d *appsv1.Deployment) (bool, error), desc string) error { + metricName := fmt.Sprintf("WaitForDeploymentState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + d, err := c.KubeClient.Kube.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(d) + }) +} + +// WaitForPodState polls the status of the Pod called name from client every +// interval until inState returns `true` indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForPodState(c *clients, name string, namespace string, inState func(r *corev1.Pod) (bool, error), desc string) error { + metricName := fmt.Sprintf("WaitForPodState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + r, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(r) + }) +} + +// WaitForPipelineRunState polls the status of the PipelineRun called name from client every +// interval until inState returns `true` indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. 
+func WaitForPipelineRunState(c *clients, name string, polltimeout time.Duration, inState ConditionAccessorFn, desc string) error { + metricName := fmt.Sprintf("WaitForPipelineRunState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, polltimeout, func() (bool, error) { + r, err := c.PipelineRunClient.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(&r.Status) + }) +} + +// WaitForServiceExternalIPState polls the status of a k8s Service called name from client every +// interval until an external ip is assigned indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForServiceExternalIPState(c *clients, namespace, name string, inState func(s *corev1.Service) (bool, error), desc string) error { + metricName := fmt.Sprintf("WaitForServiceExternalIPState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + r, err := c.KubeClient.Kube.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(r) + }) +} + +// Succeed provides a poll condition function that checks if the ConditionAccessor +// resource has successfully completed or not. +func Succeed(name string) ConditionAccessorFn { + return func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, nil + } else if c.Status == corev1.ConditionFalse { + return true, fmt.Errorf("%q failed", name) + } + } + return false, nil + } +} + +// Failed provides a poll condition function that checks if the ConditionAccessor +// resource has failed or not. 
+func Failed(name string) ConditionAccessorFn { + return func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, fmt.Errorf("%q succeeded", name) + } else if c.Status == corev1.ConditionFalse { + return true, nil + } + } + return false, nil + } +} + +// FailedWithReason provides a poll function that checks if the ConditionAccessor +// resource has failed with the given reason +func FailedWithReason(reason, name string) ConditionAccessorFn { + return func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionFalse { + if c.Reason == reason { + return true, nil + } + return true, fmt.Errorf("%q completed with the wrong reason: %s", name, c.Reason) + } else if c.Status == corev1.ConditionTrue { + return true, fmt.Errorf("%q completed successfully, should have been failed with reason %q", name, reason) + } + } + return false, nil + } +} + +// FailedWithMessage provides a poll function that checks if the ConditionAccessor +// resource has failed with the given message +func FailedWithMessage(message, name string) ConditionAccessorFn { + return func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionFalse { + if strings.Contains(c.Message, message) { + return true, nil + } + return true, fmt.Errorf("%q completed with the wrong message: %s", name, c.Message) + } else if c.Status == corev1.ConditionTrue { + return true, fmt.Errorf("%q completed successfully, should have been failed with message %q", name, message) + } + } + return false, nil + } +} + +// Running provides a poll condition function that checks if the ConditionAccessor +// resource is currently running. 
+func Running(name string) ConditionAccessorFn { + return func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue || c.Status == corev1.ConditionFalse { + return true, fmt.Errorf(`%q already finished`, name) + } else if c.Status == corev1.ConditionUnknown && (c.Reason == "Running" || c.Reason == "Pending") { + return true, nil + } + } + return false, nil + } +} + +// TaskRunSucceed provides a poll condition function that checks if the TaskRun +// has successfully completed. +func TaskRunSucceed(name string) ConditionAccessorFn { + return Succeed(name) +} + +// TaskRunFailed provides a poll condition function that checks if the TaskRun +// has failed. +func TaskRunFailed(name string) ConditionAccessorFn { + return Failed(name) +} + +// PipelineRunSucceed provides a poll condition function that checks if the PipelineRun +// has successfully completed. +func PipelineRunSucceed(name string) ConditionAccessorFn { + return Succeed(name) +} + +// PipelineRunFailed provides a poll condition function that checks if the PipelineRun +// has failed. +func PipelineRunFailed(name string) ConditionAccessorFn { + return Failed(name) +} diff --git a/test/v1alpha1/wait_example_test.go b/test/v1alpha1/wait_example_test.go new file mode 100644 index 00000000000..968626971c9 --- /dev/null +++ b/test/v1alpha1/wait_example_test.go @@ -0,0 +1,69 @@ +// +build examples + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "time" + + corev1 "k8s.io/api/core/v1" + "knative.dev/pkg/apis" +) + +var ( + // This is a "hack" to make the example "look" like tests. + // Golang Example functions do not take `t *testing.T` as argument, so we "fake" + // it so that examples still compiles (`go test` tries to compile those) and look + // nice in the go documentation. + t testingT + c *clients +) + +type testingT interface { + Errorf(string, ...interface{}) +} + +func ExampleWaitForTaskRunState() { + // […] setup the test, get clients + if err := WaitForTaskRunState(c, "taskRunName", func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, nil + } + } + return false, nil + }, "TaskRunHasCondition"); err != nil { + t.Errorf("Error waiting for TaskRun taskRunName to finish: %s", err) + } +} + +func ExampleWaitForPipelineRunState() { + // […] setup the test, get clients + if err := WaitForPipelineRunState(c, "pipelineRunName", 1*time.Minute, func(ca apis.ConditionAccessor) (bool, error) { + c := ca.GetCondition(apis.ConditionSucceeded) + if c != nil { + if c.Status == corev1.ConditionTrue { + return true, nil + } + } + return false, nil + }, "PipelineRunHasCondition"); err != nil { + t.Errorf("Error waiting for PipelineRun pipelineRunName to finish: %s", err) + } +} diff --git a/test/wait_test.go b/test/v1alpha1/wait_test.go similarity index 100% rename from test/wait_test.go rename to test/v1alpha1/wait_test.go diff --git a/test/v1alpha1/workingdir_test.go b/test/v1alpha1/workingdir_test.go new file mode 100644 index 00000000000..3c91beb42f0 --- /dev/null +++ b/test/v1alpha1/workingdir_test.go @@ -0,0 +1,132 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file 
except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "strings" + "testing" + + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +const ( + wdTaskName = "wd-task" + wdTaskRunName = "wd-task-run" +) + +func TestWorkingDirCreated(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(wdTaskName, namespace, tb.TaskSpec( + tb.Step("ubuntu", tb.StepWorkingDir("/workspace/HELLOMOTO"), tb.StepArgs("-c", "echo YES")), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + t.Logf("Creating TaskRun namespace %s", namespace) + taskRun := tb.TaskRun(wdTaskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(wdTaskName), tb.TaskRunServiceAccountName("default"), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) + if err := WaitForTaskRunState(c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) + } + + tr, err := c.TaskRunClient.Get(wdTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + if tr.Status.PodName == "" { + t.Fatal("Error getting a PodName 
(empty)") + } + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + for _, stat := range p.Status.ContainerStatuses { + if strings.HasPrefix(stat.Name, "working-dir-initializer") { + if stat.State.Terminated != nil { + req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: stat.Name}) + logContent, err := req.Do().Raw() + if err != nil { + t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) + } + if string(logContent) != "" { + t.Logf("Found some content in workingdir pod: %s, `%s` when it should be empty", tr.Status.PodName, logContent) + } + } + } + } +} + +func TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { + c, namespace := setup(t) + t.Parallel() + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(wdTaskName, namespace, tb.TaskSpec( + tb.Step("ubuntu", tb.StepWorkingDir("/HELLOMOTO"), tb.StepArgs("-c", "echo YES")), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + t.Logf("Creating TaskRun namespace %s", namespace) + taskRun := tb.TaskRun(wdTaskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(wdTaskName), tb.TaskRunServiceAccountName("default"), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to finish successfully", namespace) + if err := WaitForTaskRunState(c, wdTaskRunName, TaskRunSucceed(wdTaskRunName), "TaskRunSuccess"); err != nil { + t.Errorf("Error waiting for TaskRun to finish successfully: %s", err) + } + + tr, err := c.TaskRunClient.Get(wdTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error 
retrieving taskrun: %s", err) + } + + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + for _, stat := range p.Status.ContainerStatuses { + if strings.HasPrefix(stat.Name, "working-dir-initializer") { + t.Logf("Found a working dir container called `%s` in `%s` when it should have been excluded:", stat.Name, tr.Status.PodName) + } + } + +} diff --git a/test/v1alpha1/workspace_test.go b/test/v1alpha1/workspace_test.go new file mode 100644 index 00000000000..d029090dab9 --- /dev/null +++ b/test/v1alpha1/workspace_test.go @@ -0,0 +1,168 @@ +// +build e2e + +/* +Copyright 2019 The Tekton Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "strings" + "testing" + "time" + + tb "github.com/tektoncd/pipeline/test/builder" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + knativetest "knative.dev/pkg/test" +) + +func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { + c, namespace := setup(t) + + taskName := "write-disallowed" + taskRunName := "write-disallowed-tr" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("echo foo > /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( + tb.TaskRunTaskRef(taskName), tb.TaskRunServiceAccountName("default"), + tb.TaskRunWorkspaceEmptyDir("test", ""), + )) + if _, err := c.TaskRunClient.Create(taskRun); err != nil { + t.Fatalf("Failed to create TaskRun: %s", err) + } + + t.Logf("Waiting for TaskRun in namespace %s to finish", namespace) + if err := WaitForTaskRunState(c, taskRunName, TaskRunFailed(taskRunName), "error"); err != nil { + t.Errorf("Error waiting for TaskRun to finish with error: %s", err) + } + + tr, err := c.TaskRunClient.Get(taskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Error retrieving taskrun: %s", err) + } + if tr.Status.PodName == "" { + t.Fatal("Error getting a PodName (empty)") + } + p, err := c.KubeClient.Kube.CoreV1().Pods(namespace).Get(tr.Status.PodName, metav1.GetOptions{}) + + if err != nil { + t.Fatalf("Error getting pod `%s` in namespace `%s`", tr.Status.PodName, namespace) + } + for _, stat := range p.Status.ContainerStatuses { + if strings.Contains(stat.Name, "step-attempt-write") { + req := c.KubeClient.Kube.CoreV1().Pods(namespace).GetLogs(p.Name, &corev1.PodLogOptions{Container: 
stat.Name}) + logContent, err := req.Do().Raw() + if err != nil { + t.Fatalf("Error getting pod logs for pod `%s` and container `%s` in namespace `%s`", tr.Status.PodName, stat.Name, namespace) + } + if !strings.Contains(string(logContent), "Read-only file system") { + t.Fatalf("Expected read-only file system error but received %v", logContent) + } + } + } +} + +func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { + c, namespace := setup(t) + + taskName := "read-workspace" + pipelineName := "read-workspace-pipeline" + pipelineRunName := "read-workspace-pipelinerun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pipelineRun := tb.PipelineRun(pipelineRunName, namespace, + tb.PipelineRunSpec( + pipelineName, + // These are the duplicated workspace entries that are being tested. 
+ tb.PipelineRunWorkspaceBindingEmptyDir("foo"), + tb.PipelineRunWorkspaceBindingEmptyDir("foo"), + ), + ) + _, err := c.PipelineRunClient.Create(pipelineRun) + + if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { + t.Fatalf("Expected error when creating pipelinerun with duplicate workspace entries but received: %v", err) + } +} + +func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { + c, namespace := setup(t) + + taskName := "read-workspace" + pipelineName := "read-workspace-pipeline" + pipelineRunName := "read-workspace-pipelinerun" + + knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) + defer tearDown(t, c, namespace) + + task := tb.Task(taskName, namespace, tb.TaskSpec( + tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), + tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), + )) + if _, err := c.TaskClient.Create(task); err != nil { + t.Fatalf("Failed to create Task: %s", err) + } + + pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( + tb.PipelineWorkspaceDeclaration("foo"), + tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), + )) + if _, err := c.PipelineClient.Create(pipeline); err != nil { + t.Fatalf("Failed to create Pipeline: %s", err) + } + + pipelineRun := tb.PipelineRun(pipelineRunName, namespace, + tb.PipelineRunSpec( + pipelineName, + ), + ) + if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun: %s", err) + } + + if err := WaitForPipelineRunState(c, pipelineRunName, 10*time.Second, FailedWithMessage(`pipeline expects workspace with name "foo" be provided by pipelinerun`, pipelineRunName), "PipelineRunHasCondition"); err != nil { + t.Fatalf("Failed to wait for PipelineRun %q to finish: %s", pipelineRunName, err) + } + +} diff --git a/test/workingdir_test.go b/test/workingdir_test.go index 3c91beb42f0..f828f5794cc 100644 --- 
a/test/workingdir_test.go +++ b/test/workingdir_test.go @@ -22,7 +22,7 @@ import ( "strings" "testing" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" @@ -40,17 +40,28 @@ func TestWorkingDirCreated(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(wdTaskName, namespace, tb.TaskSpec( - tb.Step("ubuntu", tb.StepWorkingDir("/workspace/HELLOMOTO"), tb.StepArgs("-c", "echo YES")), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: wdTaskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + WorkingDir: "/workspace/HELLOMOTO", + Args: []string{"-c", "echo YES"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } t.Logf("Creating TaskRun namespace %s", namespace) - taskRun := tb.TaskRun(wdTaskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(wdTaskName), tb.TaskRunServiceAccountName("default"), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: wdTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: wdTaskName}, + ServiceAccountName: "default", + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } @@ -94,17 +105,28 @@ func TestWorkingDirIgnoredNonSlashWorkspace(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(wdTaskName, namespace, tb.TaskSpec( - tb.Step("ubuntu", tb.StepWorkingDir("/HELLOMOTO"), tb.StepArgs("-c", "echo YES")), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: wdTaskName, Namespace: namespace}, + 
Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{Container: corev1.Container{ + Image: "ubuntu", + WorkingDir: "/HELLOMOTO", + Args: []string{"-c", "echo YES"}, + }}}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } t.Logf("Creating TaskRun namespace %s", namespace) - taskRun := tb.TaskRun(wdTaskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(wdTaskName), tb.TaskRunServiceAccountName("default"), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: wdTaskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: wdTaskName}, + ServiceAccountName: "default", + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } diff --git a/test/workspace_test.go b/test/workspace_test.go index c0e5ef5307d..78e4573a43c 100644 --- a/test/workspace_test.go +++ b/test/workspace_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - tb "github.com/tektoncd/pipeline/test/builder" + "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" knativetest "knative.dev/pkg/test" @@ -38,18 +38,36 @@ func TestWorkspaceReadOnlyDisallowsWrite(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(taskName, namespace, tb.TaskSpec( - tb.Step("alpine", tb.StepScript("echo foo > /workspace/test/file")), - tb.TaskWorkspace("test", "test workspace", "/workspace/test", true), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Image: "alpine"}, + Script: "echo foo > /workspace/test/file", + }}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test", + ReadOnly: 
true, + }}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - taskRun := tb.TaskRun(taskRunName, namespace, tb.TaskRunSpec( - tb.TaskRunTaskRef(taskName), tb.TaskRunServiceAccountName("default"), - tb.TaskRunWorkspaceEmptyDir("test", ""), - )) + taskRun := &v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{Name: taskRunName, Namespace: namespace}, + Spec: v1beta1.TaskRunSpec{ + TaskRef: &v1beta1.TaskRef{Name: taskName}, + ServiceAccountName: "default", + Workspaces: []v1beta1.WorkspaceBinding{{ + Name: "test", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + } if _, err := c.TaskRunClient.Create(taskRun); err != nil { t.Fatalf("Failed to create TaskRun: %s", err) } @@ -95,30 +113,58 @@ func TestWorkspacePipelineRunDuplicateWorkspaceEntriesInvalid(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(taskName, namespace, tb.TaskSpec( - tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), - tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Image: "alpine"}, + Script: "cat /workspace/test/file", + }}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test/file", + ReadOnly: true, + }}, + }, + } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + 
Workspaces: []v1beta1.WorkspacePipelineDeclaration{{ + Name: "foo", + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "task1", + TaskRef: &v1beta1.TaskRef{Name: taskName}, + Workspaces: []v1beta1.WorkspacePipelineTaskBinding{{ + Name: "test", + Workspace: "foo", + }}, + }}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } - pipelineRun := tb.PipelineRun(pipelineRunName, namespace, - tb.PipelineRunSpec( - pipelineName, - // These are the duplicated workspace entries that are being tested. - tb.PipelineRunWorkspaceBindingEmptyDir("foo"), - tb.PipelineRunWorkspaceBindingEmptyDir("foo"), - ), - ) + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipelineName}, + Workspaces: []v1beta1.WorkspaceBinding{{ + Name: "foo", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, { + Name: "foo", + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }}, + }, + } _, err := c.PipelineRunClient.Create(pipelineRun) if err == nil || !strings.Contains(err.Error(), "provided by pipelinerun more than once") { @@ -136,27 +182,51 @@ func TestWorkspacePipelineRunMissingWorkspaceInvalid(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(t, c, namespace) }, t.Logf) defer tearDown(t, c, namespace) - task := tb.Task(taskName, namespace, tb.TaskSpec( - tb.Step("alpine", tb.StepScript("cat /workspace/test/file")), - tb.TaskWorkspace("test", "test workspace", "/workspace/test/file", true), - )) + task := &v1beta1.Task{ + ObjectMeta: metav1.ObjectMeta{Name: taskName, Namespace: namespace}, + Spec: v1beta1.TaskSpec{ + Steps: []v1beta1.Step{{ + Container: corev1.Container{Image: "alpine"}, + Script: "cat /workspace/test/file", + }}, + Workspaces: []v1beta1.WorkspaceDeclaration{{ + Name: "test", + Description: "test workspace", + MountPath: "/workspace/test/file", + ReadOnly: true, + }}, + }, 
+ } if _, err := c.TaskClient.Create(task); err != nil { t.Fatalf("Failed to create Task: %s", err) } - pipeline := tb.Pipeline(pipelineName, namespace, tb.PipelineSpec( - tb.PipelineWorkspaceDeclaration("foo"), - tb.PipelineTask("task1", taskName, tb.PipelineTaskWorkspaceBinding("test", "foo")), - )) + pipeline := &v1beta1.Pipeline{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineName, Namespace: namespace}, + Spec: v1beta1.PipelineSpec{ + Workspaces: []v1beta1.WorkspacePipelineDeclaration{{ + Name: "foo", + }}, + Tasks: []v1beta1.PipelineTask{{ + Name: "task1", + TaskRef: &v1beta1.TaskRef{Name: taskName}, + Workspaces: []v1beta1.WorkspacePipelineTaskBinding{{ + Name: "test", + Workspace: "foo", + }}, + }}, + }, + } if _, err := c.PipelineClient.Create(pipeline); err != nil { t.Fatalf("Failed to create Pipeline: %s", err) } - pipelineRun := tb.PipelineRun(pipelineRunName, namespace, - tb.PipelineRunSpec( - pipelineName, - ), - ) + pipelineRun := &v1beta1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{Name: pipelineRunName, Namespace: namespace}, + Spec: v1beta1.PipelineRunSpec{ + PipelineRef: &v1beta1.PipelineRef{Name: pipelineName}, + }, + } if _, err := c.PipelineRunClient.Create(pipelineRun); err != nil { t.Fatalf("Failed to create PipelineRun: %s", err) }