diff --git a/README.md b/README.md index c4a2f5f6c3c..6a9622e2fbf 100644 --- a/README.md +++ b/README.md @@ -27,7 +27,7 @@ Tekton Pipelines are **Typed**: ## Want to start using Pipelines - [Installing Tekton Pipelines](docs/install.md) -- Jump in with [the tutorial!](docs/tutorial.md) +- Jump in with [the "Getting started" tutorial!](https://tekton.dev/docs/getting-started/tasks/) - Take a look at our [roadmap](roadmap.md) ### Required Kubernetes Version @@ -42,6 +42,7 @@ Tekton Pipelines are **Typed**: | Version | Docs | Examples | |------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------| | [HEAD](DEVELOPMENT.md#install-pipeline) | [Docs @ HEAD](/docs/README.md) | [Examples @ HEAD](/examples) | +| [v0.35.0](https://github.com/tektoncd/pipeline/releases/tag/v0.35.0) | [Docs @ v0.35.0](https://github.com/tektoncd/pipeline/tree/v0.35.0/docs#tekton-pipelines) | [Examples @ v0.35.0](https://github.com/tektoncd/pipeline/tree/v0.35.0/examples#examples) | | [v0.34.1](https://github.com/tektoncd/pipeline/releases/tag/v0.34.1) | [Docs @ v0.34.1](https://github.com/tektoncd/pipeline/tree/v0.34.1/docs#tekton-pipelines) | [Examples @ v0.34.1](https://github.com/tektoncd/pipeline/tree/v0.34.1/examples#examples) | | [v0.34.0](https://github.com/tektoncd/pipeline/releases/tag/v0.34.0) | [Docs @ v0.34.0](https://github.com/tektoncd/pipeline/tree/v0.34.0/docs#tekton-pipelines) | [Examples @ v0.34.0](https://github.com/tektoncd/pipeline/tree/v0.34.0/examples#examples) | | [v0.33.4](https://github.com/tektoncd/pipeline/releases/tag/v0.33.4) | [Docs @ v0.33.4](https://github.com/tektoncd/pipeline/tree/v0.33.4/docs#tekton-pipelines) | [Examples @ v0.33.4](https://github.com/tektoncd/pipeline/tree/v0.33.4/examples#examples) | diff --git a/cmd/git-init/main.go b/cmd/git-init/main.go index b01d0380a1b..1295a5b651d 100644 --- a/cmd/git-init/main.go +++ b/cmd/git-init/main.go @@ -62,7 +62,7 @@ func main() { output := []v1beta1.PipelineResourceResult{ { Key: "commit", - Value: commit, + Value: *v1beta1.NewArrayOrString(commit), ResourceRef: &v1beta1.PipelineResourceRef{ Name: resourceName, }, @@ -70,7 +70,7 @@ func main() { }, { Key: "url", - Value: fetchSpec.URL, + Value: *v1beta1.NewArrayOrString(fetchSpec.URL), ResourceRef: &v1beta1.PipelineResourceRef{ Name: resourceName, }, diff --git a/cmd/imagedigestexporter/main.go b/cmd/imagedigestexporter/main.go index 53db04d9a6a..25c077acd31 100644 --- a/cmd/imagedigestexporter/main.go +++ b/cmd/imagedigestexporter/main.go @@ -65,7 +65,7 @@ func main() { } output = append(output, v1beta1.PipelineResourceResult{ Key: "digest", - Value: digest.String(), + Value: *v1beta1.NewArrayOrString(digest.String()), ResourceName: imageResource.Name, ResourceRef: &v1beta1.PipelineResourceRef{ Name: imageResource.Name, @@ -73,7 +73,7 @@ func main() { }) output = append(output, v1beta1.PipelineResourceResult{ Key: "url", - Value: imageResource.URL, + Value: *v1beta1.NewArrayOrString(imageResource.URL), ResourceName: imageResource.Name, ResourceRef: &v1beta1.PipelineResourceRef{ Name: imageResource.Name, diff --git a/docs/tutorial.md b/docs/tutorial.md index 2cb2e51cef7..7c4bf373212 100644 --- a/docs/tutorial.md +++ b/docs/tutorial.md @@ -1,578 +1,7 @@ # Tekton Pipelines Tutorial -This tutorial uses a simple `Hello World` example to 
show you how to: -- Create a `Task` -- Create a `Pipeline` containing your `Tasks` -- Use a `TaskRun` to instantiate and execute a `Task` outside of a `Pipeline` -- Use a `PipelineRun` to instantiate and run a `Pipeline` containing your `Tasks` - -This tutorial consists of the following sections: - -- [Creating and running a `Task`](#creating-and-running-a-task) -- [Creating and running a `Pipeline`](#creating-and-running-a-pipeline) - -**Note:** Items requiring configuration are marked with the `#configure` annotation. -This includes Docker registries, log output locations, and other configuration items -specific to a given cloud computing service. - -> :warning: **`PipelineResources` are [deprecated](deprecations.md#deprecation-table)**, but this tutorial uses them. -> -> Read more about the deprecation in [TEP-0074](https://github.com/tektoncd/community/blob/main/teps/0074-deprecate-pipelineresources.md). -> This tutorial will be updated to use the replacement features instead - https://github.com/tektoncd/pipeline/issues/3705. -> - -## Before you begin - -Before you begin this tutorial, make sure you have [installed and configured](https://github.com/tektoncd/pipeline/blob/main/docs/install.md) -the latest release of Tekton on your Kubernetes cluster, including the -[Tekton CLI](https://github.com/tektoncd/cli). - -If you would like to complete this tutorial on your local workstation, see [Running this tutorial locally](#running-this-tutorial-locally). To learn more about the Tekton entities involved in this tutorial, see [Further reading](#further-reading). - -## Creating and running a `Task` - -A [`Task`](tasks.md) defines a series of `steps` that run in a desired order and complete a set amount of build work. Every `Task` runs as a Pod on your Kubernetes cluster with each `step` as its own container. For example, the following `Task` outputs "Hello World": - -```yaml -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: echo-hello-world -spec: - steps: - - name: echo - image: ubuntu - command: - - echo - args: - - "Hello World" -``` - -Apply your `Task` YAML file as follows: - -```bash -kubectl apply -f -``` - -To see details about your created `Task`, use the following command: -```bash -tkn task describe echo-hello-world -``` - -The output will look similar to the following: - -``` -Name: echo-hello-world -Namespace: default - -📨 Input Resources - - No input resources - -📡 Output Resources - - No output resources - -⚓ Params - - No params - -🦶 Steps - - ∙ echo - -🗂 Taskruns - - No taskruns -``` - -To run this `Task`, instantiate it using a [`TaskRun`](taskruns.md): - -```yaml -apiVersion: tekton.dev/v1beta1 -kind: TaskRun -metadata: - name: echo-hello-world-task-run -spec: - taskRef: - name: echo-hello-world -``` - -Apply your `TaskRun` YAML file as follows: - -```bash -kubectl apply -f -``` - -To check whether running your `TaskRun` succeeded, use the following command: - -```bash -tkn taskrun describe echo-hello-world-task-run -``` - -The output will look similar to the following: - -``` -Name: echo-hello-world-task-run -Namespace: default -Task Ref: echo-hello-world - -Status -STARTED DURATION STATUS -4 minutes ago 9 seconds Succeeded - -Input Resources -No resources - -Output Resources -No resources - -Params -No params - -Steps -NAME -echo -``` - -The `Succeeded` status confirms that the `TaskRun` completed with no errors. 
- -To see more detail about the execution of your `TaskRun`, view its logs as follows: - -```bash -tkn taskrun logs echo-hello-world-task-run -``` - -The output will look similar to the following: - -``` -[echo] hello world -``` - -### Specifying `Task` inputs and outputs - -In more complex scenarios, a `Task` requires you to define inputs and outputs. For example, a -`Task` could fetch source code from a GitHub repository and build a Docker image from it. - -Use one or more [`PipelineResources`](resources.md) to define the artifacts you want to pass in -and out of your `Task`. The following are examples of the most commonly needed resources. - -The [`git` resource](resources.md#git-resource) specifies a git repository with -a specific revision from which the `Task` will pull the source code: - -```yaml -apiVersion: tekton.dev/v1alpha1 -kind: PipelineResource -metadata: - name: skaffold-git -spec: - type: git - params: - - name: revision - value: main - - name: url - value: https://github.com/GoogleContainerTools/skaffold #configure: change if you want to build something else, perhaps from your own local git repository. -``` - -The [`image` resource](resources.md#image-resource) specifies the repository to which the image built by the `Task` will be pushed: - -```yaml -apiVersion: tekton.dev/v1alpha1 -kind: PipelineResource -metadata: - name: skaffold-image-leeroy-web -spec: - type: image - params: - - name: url - value: gcr.io//leeroy-web #configure: replace with where the image should go: perhaps your local registry or Dockerhub with a secret and configured service account -``` - -In the following example, you can see a `Task` definition with the `git` input and `image` output -introduced earlier. The arguments of the `Task` command support variable substitution so that -the `Task` definition is constant and the value of parameters can change during runtime. 
- -```yaml -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: build-docker-image-from-git-source -spec: - params: - - name: pathToDockerFile - type: string - description: The path to the dockerfile to build - default: $(resources.inputs.docker-source.path)/Dockerfile - - name: pathToContext - type: string - description: | - The build context used by Kaniko - (https://github.com/GoogleContainerTools/kaniko#kaniko-build-contexts) - default: $(resources.inputs.docker-source.path) - resources: - inputs: - - name: docker-source - type: git - outputs: - - name: builtImage - type: image - steps: - - name: build-and-push - image: gcr.io/kaniko-project/executor:latest - # specifying DOCKER_CONFIG is required to allow kaniko to detect docker credential - env: - - name: "DOCKER_CONFIG" - value: "/tekton/home/.docker/" - command: - - /kaniko/executor - args: - - --dockerfile=$(params.pathToDockerFile) - - --destination=$(resources.outputs.builtImage.url) - - --context=$(params.pathToContext) - - --build-arg=BASE=alpine:3 -``` - -### Configuring `Task` execution credentials - -Before you can execute your `TaskRun`, you must create a `secret` to push your image -to your desired image registry: - -```bash -kubectl create secret docker-registry regcred \ - --docker-server= \ - --docker-username= \ - --docker-password= \ - --docker-email= -``` - -You must also specify a `ServiceAccount` that uses this `secret` to execute your `TaskRun`: - -```yaml -apiVersion: v1 -kind: ServiceAccount -metadata: - name: tutorial-service -secrets: - - name: regcred -``` - -Save the `ServiceAccount` definition above to a file and apply the YAML file to make the `ServiceAccount` available for your `TaskRun`: - -```bash -kubectl apply -f -``` - -### Running your `Task` - -You are now ready for your first `TaskRun`! - -A `TaskRun` binds the inputs and outputs to already defined `PipelineResources`, sets values -for variable substitution parameters, and executes the `Steps` in the `Task`. 
- -```yaml -apiVersion: tekton.dev/v1beta1 -kind: TaskRun -metadata: - name: build-docker-image-from-git-source-task-run -spec: - serviceAccountName: tutorial-service - taskRef: - name: build-docker-image-from-git-source - params: - - name: pathToDockerFile - value: Dockerfile - - name: pathToContext - value: $(resources.inputs.docker-source.path)/examples/microservices/leeroy-web #configure: may change according to your source - resources: - inputs: - - name: docker-source - resourceRef: - name: skaffold-git - outputs: - - name: builtImage - resourceRef: - name: skaffold-image-leeroy-web -``` - -Save the YAML files that contain your `Task`, `TaskRun`, and `PipelineResource` definitions and apply them using the following command: - -```bash -kubectl apply -f -``` - -To examine the resources you've created so far, use the following command: - -```bash -kubectl get tekton-pipelines -``` - -The output will look similar to the following: - -``` -NAME AGE -taskruns/build-docker-image-from-git-source-task-run 30s - -NAME AGE -pipelineresources/skaffold-git 6m -pipelineresources/skaffold-image-leeroy-web 7m - -NAME AGE -tasks/build-docker-image-from-git-source 7m -``` - -To see the result of executing your `TaskRun`, use the following command: - -```bash -tkn taskrun describe build-docker-image-from-git-source-task-run -``` - -The output will look similar to the following: - -``` -Name: build-docker-image-from-git-source-task-run -Namespace: default -Task Ref: build-docker-image-from-git-source - -Status -STARTED DURATION STATUS -2 hours ago 56 seconds Succeeded - -Input Resources -NAME RESOURCE REF -docker-source skaffold-git - -Output Resources -NAME RESOURCE REF -builtImage skaffold-image-leeroy-web - -Params -NAME VALUE -pathToDockerFile Dockerfile -pathToContext /workspace/docker-source/examples/microservices/leeroy-web - -Steps -NAME -build-and-push -create-dir-builtimage-wtjh9 -git-source-skaffold-git-tck6k -image-digest-exporter-hlbsq -``` - -The `Succeeded` status indicates the `Task` has completed with no errors. You -can also confirm that the output Docker image has been created in the location specified in the resource definition. - -To view detailed information about the execution of your `TaskRun`, view the logs: - -```bash -tkn taskrun logs build-docker-image-from-git-source-task-run -``` - -## Creating and running a `Pipeline` - -A [`Pipeline`](pipelines.md) defines an ordered series of `Tasks` that you want to execute -along with the corresponding inputs and outputs for each `Task`. You can specify whether the output of one -`Task` is used as an input for the next `Task` using the [`from`](pipelines.md#from) property. -`Pipelines` offer the same variable substitution as `Tasks`. 
- -Below is an example definition of a `Pipeline`: - -```yaml -apiVersion: tekton.dev/v1beta1 -kind: Pipeline -metadata: - name: tutorial-pipeline -spec: - resources: - - name: source-repo - type: git - - name: web-image - type: image - tasks: - - name: build-skaffold-web - taskRef: - name: build-docker-image-from-git-source - params: - - name: pathToDockerFile - value: Dockerfile - - name: pathToContext - value: /workspace/docker-source/examples/microservices/leeroy-web #configure: may change according to your source - resources: - inputs: - - name: docker-source - resource: source-repo - outputs: - - name: builtImage - resource: web-image - - name: deploy-web - taskRef: - name: deploy-using-kubectl - resources: - inputs: - - name: source - resource: source-repo - - name: image - resource: web-image - from: - - build-skaffold-web - params: - - name: path - value: /workspace/source/examples/microservices/leeroy-web/kubernetes/deployment.yaml #configure: may change according to your source - - name: yamlPathToImage - value: "spec.template.spec.containers[0].image" -``` - -The above `Pipeline` is referencing a `Task` called `deploy-using-kubectl` defined as follows: - -```yaml -apiVersion: tekton.dev/v1beta1 -kind: Task -metadata: - name: deploy-using-kubectl -spec: - params: - - name: path - type: string - description: Path to the manifest to apply - - name: yamlPathToImage - type: string - description: | - The path to the image to replace in the yaml manifest (arg to yq) - resources: - inputs: - - name: source - type: git - - name: image - type: image - steps: - - name: replace-image - image: mikefarah/yq:3.4.1 - command: ["yq"] - args: - - "w" - - "-i" - - "$(params.path)" - - "$(params.yamlPathToImage)" - - "$(resources.inputs.image.url)" - - name: run-kubectl - image: lachlanevenson/k8s-kubectl - command: ["kubectl"] - args: - - "apply" - - "-f" - - "$(params.path)" -``` - -### Configuring `Pipeline` execution credentials - -The `run-kubectl` step in the above example requires additional permissions. You must grant those -permissions to your `ServiceAccount`. - -First, create a new role called `tutorial-role`: - -```bash -kubectl create clusterrole tutorial-role \ - --verb=* \ - --resource=deployments,deployments.apps -``` - -Next, assign this new role to your `ServiceAccount`: - -```bash -kubectl create clusterrolebinding tutorial-binding \ - --clusterrole=tutorial-role \ - --serviceaccount=default:tutorial-service -``` - -To run your `Pipeline`, instantiate it with a [`PipelineRun`](pipelineruns.md) as follows: - -```yaml -apiVersion: tekton.dev/v1beta1 -kind: PipelineRun -metadata: - name: tutorial-pipeline-run-1 -spec: - serviceAccountName: tutorial-service - pipelineRef: - name: tutorial-pipeline - resources: - - name: source-repo - resourceRef: - name: skaffold-git - - name: web-image - resourceRef: - name: skaffold-image-leeroy-web -``` - -The `PipelineRun` automatically defines a corresponding `TaskRun` for each `Task` you have defined -in your `Pipeline` collects the results of executing each `TaskRun`. In our example, the -`TaskRun` order is as follows: - -1. `tutorial-pipeline-run-1-build-skaffold-web` runs `build-skaffold-web`, - since it has no [`from` or `runAfter` clauses](pipelines.md#ordering). -1. `tutorial-pipeline-run-1-deploy-web` runs `deploy-web` because - its [input](tasks.md#inputs) `web-image` comes [`from`](pipelines.md#from) - `build-skaffold-web`. Thus, `build-skaffold-web` must run before `deploy-web`. 
-Save the `Task`, `Pipeline`, and `PipelineRun` definitions above to as YAML files and apply them using the following command: - -```bash -kubectl apply -f -``` -**Note:** Also apply the `deploy-task` or the `PipelineRun` will not execute. - -You can monitor the execution of your `PipelineRun` in realtime as follows: - -```bash -tkn pipelinerun logs tutorial-pipeline-run-1 -f -``` - -To view detailed information about your `PipelineRun`, use the following command: - -```bash -tkn pipelinerun describe tutorial-pipeline-run-1 -``` - -The output will look similar to the following: - -```bash -Name: tutorial-pipeline-run-1 -Namespace: default -Pipeline Ref: tutorial-pipeline - -Status -STARTED DURATION STATUS -4 hours ago 1 minute Succeeded - -Resources -NAME RESOURCE REF -source-repo skaffold-git -web-image skaffold-image-leeroy-web - -Params -No params - -Taskruns -NAME TASK NAME STARTED DURATION STATUS -tutorial-pipeline-run-1-deploy-web deploy-web 4 hours ago 14 seconds Succeeded -tutorial-pipeline-run-1-build-skaffold-web build-skaffold-web 4 hours ago 1 minute Succeeded -``` - -The `Succeeded` status indicates that your `PipelineRun` completed without errors. -You can also see the statuses of the individual `TaskRuns`. - -## Running this tutorial locally - -To run this tutorial on your local workstation, see the docs for [setting up your local environment](developers/local-setup.md). - -## Further reading - -To learn more about the Tekton Pipelines entities involved in this tutorial, see the following topics: - -- [`Tasks`](tasks.md) -- [`TaskRuns`](taskruns.md) -- [`Pipelines`](pipelines.md) -- [`PipelineResources`](resources.md) -- [`PipelineRuns`](pipelineruns.md) +Check our ["Hello World" example on our documentation +website](https://tekton.dev/docs/getting-started/).
--- diff --git a/examples/v1beta1/pipelineruns/pipelinerun-with-final-tasks.yaml b/examples/v1beta1/pipelineruns/pipelinerun-with-final-tasks.yaml index 2ae4acef801..efc826fabb0 100644 --- a/examples/v1beta1/pipelineruns/pipelinerun-with-final-tasks.yaml +++ b/examples/v1beta1/pipelineruns/pipelinerun-with-final-tasks.yaml @@ -58,6 +58,8 @@ spec: steps: - name: clone image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:latest + securityContext: + runAsUser: 0 # This needs root, and git-init is nonroot by default script: | CHECKOUT_DIR="$(workspaces.output.path)/$(params.subdirectory)" diff --git a/examples/v1beta1/pipelineruns/pipelinerun.yaml b/examples/v1beta1/pipelineruns/pipelinerun.yaml index 210c27425bf..e650c220151 100644 --- a/examples/v1beta1/pipelineruns/pipelinerun.yaml +++ b/examples/v1beta1/pipelineruns/pipelinerun.yaml @@ -81,6 +81,8 @@ spec: steps: - name: clone image: gcr.io/tekton-releases/github.com/tektoncd/pipeline/cmd/git-init:latest + securityContext: + runAsUser: 0 # This needs root, and git-init is nonroot by default script: | CHECKOUT_DIR="$(workspaces.output.path)/$(params.subdirectory)" cleandir() { diff --git a/pkg/apis/pipeline/v1beta1/resource_types.go b/pkg/apis/pipeline/v1beta1/resource_types.go index 5b07109e857..1898acbc0ad 100644 --- a/pkg/apis/pipeline/v1beta1/resource_types.go +++ b/pkg/apis/pipeline/v1beta1/resource_types.go @@ -128,9 +128,9 @@ type PipelineResourceBinding struct { // PipelineResourceResult used to export the image name and digest as json type PipelineResourceResult struct { - Key string `json:"key"` - Value string `json:"value"` - ResourceName string `json:"resourceName,omitempty"` + Key string `json:"key"` + Value ArrayOrString `json:"value"` + ResourceName string `json:"resourceName,omitempty"` // The field ResourceRef should be deprecated and removed in the next API version. // See https://github.com/tektoncd/pipeline/issues/2694 for more information. 
ResourceRef *PipelineResourceRef `json:"resourceRef,omitempty"` diff --git a/pkg/apis/pipeline/v1beta1/resource_types_test.go b/pkg/apis/pipeline/v1beta1/resource_types_test.go index ef481daa87d..1e70ae41f2a 100644 --- a/pkg/apis/pipeline/v1beta1/resource_types_test.go +++ b/pkg/apis/pipeline/v1beta1/resource_types_test.go @@ -154,16 +154,20 @@ func TestPipelineResourceResult_UnmarshalJSON(t *testing.T) { }{{ name: "type defined as string - TaskRunResult", data: "{\"key\":\"resultName\",\"value\":\"resultValue\", \"type\": \"TaskRunResult\"}", - pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: "resultValue", ResultType: v1beta1.TaskRunResultType}, + pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: v1beta1.ArrayOrString{StringVal: "resultValue", Type: "string"}, ResultType: v1beta1.TaskRunResultType}, }, { name: "type defined as string - InternalTektonResult", data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": \"InternalTektonResult\"}", - pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: "", ResultType: v1beta1.InternalTektonResultType}, + pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: v1beta1.ArrayOrString{StringVal: "", Type: "string"}, ResultType: v1beta1.InternalTektonResultType}, + }, { + name: "type defined as array - InternalTektonResult", + data: "{\"key\":\"resultName\",\"value\":[\"cat\",\"dog\",\"squirrel\"], \"type\": \"InternalTektonResult\"}", + pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: v1beta1.ArrayOrString{ArrayVal: []string{"cat", "dog", "squirrel"}, Type: "array"}, ResultType: v1beta1.InternalTektonResultType}, }, { name: "type defined as int", data: "{\"key\":\"resultName\",\"value\":\"\", \"type\": 1}", - pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: "", ResultType: v1beta1.TaskRunResultType}, + pr: v1beta1.PipelineResourceResult{Key: "resultName", Value: v1beta1.ArrayOrString{StringVal: "", Type: "string"}, ResultType: v1beta1.TaskRunResultType}, }} for _, tc := range testcases { diff --git a/pkg/apis/pipeline/v1beta1/taskrun_types.go b/pkg/apis/pipeline/v1beta1/taskrun_types.go index abccc5df7a8..73f6af91a3f 100644 --- a/pkg/apis/pipeline/v1beta1/taskrun_types.go +++ b/pkg/apis/pipeline/v1beta1/taskrun_types.go @@ -241,7 +241,7 @@ type TaskRunResult struct { Name string `json:"name"` // Value the given value of the result - Value string `json:"value"` + Value ArrayOrString `json:"value"` } // TaskRunStepOverride is used to override the values of a Step in the corresponding Task. 
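The hunks above are the core API change of this diff: `PipelineResourceResult.Value` and `TaskRunResult.Value` switch from `string` to `ArrayOrString`, so a result value can be deserialized from either a JSON string or a JSON array (see the new `["cat","dog","squirrel"]` case in `resource_types_test.go`). A minimal, self-contained sketch of that string-or-array union pattern — illustrative names only, not Tekton's actual implementation — switches on the leading JSON token:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// valueUnion mirrors the idea behind v1beta1.ArrayOrString: a result value
// that is serialized either as a JSON string or as a JSON array of strings.
// (Type and field names here are illustrative, not Tekton's.)
type valueUnion struct {
	Type      string // "string" or "array"
	StringVal string
	ArrayVal  []string
}

// UnmarshalJSON picks the variant based on the leading JSON token.
func (v *valueUnion) UnmarshalJSON(data []byte) error {
	if len(data) > 0 && data[0] == '[' {
		v.Type = "array"
		return json.Unmarshal(data, &v.ArrayVal)
	}
	v.Type = "string"
	return json.Unmarshal(data, &v.StringVal)
}

// MarshalJSON writes back whichever variant is populated.
func (v valueUnion) MarshalJSON() ([]byte, error) {
	if v.Type == "array" {
		return json.Marshal(v.ArrayVal)
	}
	return json.Marshal(v.StringVal)
}

func main() {
	var s, a valueUnion
	_ = json.Unmarshal([]byte(`"resultValue"`), &s)
	_ = json.Unmarshal([]byte(`["cat","dog","squirrel"]`), &a)
	fmt.Println(s.Type, s.StringVal) // string resultValue
	fmt.Println(a.Type, a.ArrayVal)  // array [cat dog squirrel]
}
```

Tekton's real `v1beta1.ArrayOrString` additionally carries a `ParamType` and is normally built with the `NewArrayOrString(...)` constructor, which is why the remaining hunks wrap every former string value in that call.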
diff --git a/pkg/entrypoint/entrypointer.go b/pkg/entrypoint/entrypointer.go index d4c3877fe65..5cd8a35a19d 100644 --- a/pkg/entrypoint/entrypointer.go +++ b/pkg/entrypoint/entrypointer.go @@ -123,7 +123,7 @@ func (e Entrypointer) Go() error { } output = append(output, v1beta1.PipelineResourceResult{ Key: "StartedAt", - Value: time.Now().Format(timeFormat), + Value: *v1beta1.NewArrayOrString(time.Now().Format(timeFormat)), ResultType: v1beta1.InternalTektonResultType, }) return err @@ -132,7 +132,7 @@ func (e Entrypointer) Go() error { output = append(output, v1beta1.PipelineResourceResult{ Key: "StartedAt", - Value: time.Now().Format(timeFormat), + Value: *v1beta1.NewArrayOrString(time.Now().Format(timeFormat)), ResultType: v1beta1.InternalTektonResultType, }) @@ -152,7 +152,7 @@ func (e Entrypointer) Go() error { if err == context.DeadlineExceeded { output = append(output, v1beta1.PipelineResourceResult{ Key: "Reason", - Value: "TimeoutExceeded", + Value: *v1beta1.NewArrayOrString("TimeoutExceeded"), ResultType: v1beta1.InternalTektonResultType, }) } @@ -167,7 +167,7 @@ func (e Entrypointer) Go() error { exitCode := strconv.Itoa(ee.ExitCode()) output = append(output, v1beta1.PipelineResourceResult{ Key: "ExitCode", - Value: exitCode, + Value: *v1beta1.NewArrayOrString(exitCode), ResultType: v1beta1.InternalTektonResultType, }) e.WritePostFile(e.PostFile, nil) @@ -204,10 +204,17 @@ func (e Entrypointer) readResultsFromDisk() error { } else if err != nil { return err } + + // unmarshal the fileContents to string or array + aos := v1beta1.ArrayOrString{} + if err := aos.UnmarshalJSON(fileContents); err != nil { + return err + } + // if the file doesn't exist, ignore it output = append(output, v1beta1.PipelineResourceResult{ - Key: resultFile, - Value: string(fileContents), + Key: resultFile, + Value: aos, ResultType: v1beta1.TaskRunResultType, }) } diff --git a/pkg/pod/status.go b/pkg/pod/status.go index 082201797a0..123bc01ae25 100644 --- a/pkg/pod/status.go +++ b/pkg/pod/status.go @@ -147,7 +147,6 @@ func setTaskRunStatusBasedOnStepStatus(logger *zap.SugaredLogger, stepStatuses [ for _, s := range stepStatuses { if s.State.Terminated != nil && len(s.State.Terminated.Message) != 0 { msg := s.State.Terminated.Message - results, err := termination.ParseMessage(logger, msg) if err != nil { logger.Errorf("termination message could not be parsed as JSON: %v", err) @@ -228,6 +227,7 @@ func filterResultsAndResources(results []v1beta1.PipelineResourceResult) ([]v1be Name: r.Key, Value: r.Value, } + // TODO(yongxuanzhang): validate r.value against results.type schema taskResults = append(taskResults, taskRunResult) filteredResults = append(filteredResults, r) case v1beta1.InternalTektonResultType: @@ -266,7 +266,7 @@ func removeDuplicateResults(taskRunResult []v1beta1.TaskRunResult) []v1beta1.Tas func extractStartedAtTimeFromResults(results []v1beta1.PipelineResourceResult) (*metav1.Time, error) { for _, result := range results { if result.Key == "StartedAt" { - t, err := time.Parse(timeFormat, result.Value) + t, err := time.Parse(timeFormat, result.Value.StringVal) if err != nil { return nil, fmt.Errorf("could not parse time value %q in StartedAt field: %w", result.Value, err) } @@ -281,7 +281,7 @@ func extractExitCodeFromResults(results []v1beta1.PipelineResourceResult) (*int3 for _, result := range results { if result.Key == "ExitCode" { // We could just pass the string through but this provides extra validation - i, err := 
strconv.ParseUint(result.Value.StringVal, 10, 32) if err != nil { return nil, fmt.Errorf("could not parse int value %q in ExitCode field: %w", result.Value, err) } @@ -353,7 +353,7 @@ func getFailureMessage(logger *zap.SugaredLogger, pod *corev1.Pod) string { msg := status.State.Terminated.Message r, _ := termination.ParseMessage(logger, msg) for _, result := range r { - if result.ResultType == v1beta1.InternalTektonResultType && result.Key == "Reason" && result.Value == "TimeoutExceeded" { + if result.ResultType == v1beta1.InternalTektonResultType && result.Key == "Reason" && result.Value.StringVal == "TimeoutExceeded" { // Newline required at end to prevent yaml parser from breaking the log help text at 80 chars return fmt.Sprintf("%q exited because the step exceeded the specified timeout limit; for logs run: kubectl -n %s logs %s -c %s\n", status.Name, diff --git a/pkg/pod/status_test.go b/pkg/pod/status_test.go index 4de7deaae72..7492860cfae 100644 --- a/pkg/pod/status_test.go +++ b/pkg/pod/status_test.go @@ -17,6 +17,7 @@ limitations under the License. package pod import ( + "strings" "testing" "time" @@ -33,6 +34,61 @@ import ( var ignoreVolatileTime = cmp.Comparer(func(_, _ apis.VolatileTime) bool { return true }) +func TestSetTaskRunStatusBasedOnStepStatus(t *testing.T) { + for _, c := range []struct { + desc string + ContainerStatuses []corev1.ContainerStatus + }{{ + desc: "test result with large pipeline result", + ContainerStatuses: []corev1.ContainerStatus{{ + Name: "step-bar-0", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234","resourceRef":{"name":"source-image"}}]`, + }, + }, + }, + { + Name: "step-bar1", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234` + strings.Repeat("a", 3072) + `","resourceRef":{"name":"source-image"}}]`, + }, + }, + }, + { + Name: "step-bar2", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + Message: `[{"key":"resultName","value":"resultValue", "type":1}, {"key":"digest","value":"sha256:1234` + strings.Repeat("a", 3072) + `","resourceRef":{"name":"source-image"}}]`, + }, + }, + }}, + }} { + t.Run(c.desc, func(t *testing.T) { + startTime := time.Date(2010, 1, 1, 1, 1, 1, 1, time.UTC) + tr := v1beta1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-run", + Namespace: "foo", + }, + Status: v1beta1.TaskRunStatus{ + TaskRunStatusFields: v1beta1.TaskRunStatusFields{ + StartTime: &metav1.Time{Time: startTime}, + }, + }, + } + + logger, _ := logging.NewLogger("", "status") + merr := setTaskRunStatusBasedOnStepStatus(logger, c.ContainerStatuses, &tr) + if merr != nil { + t.Errorf("setTaskRunStatusBasedOnStepStatus: %s", merr) + } + + }) + } +} + func TestMakeTaskRunStatus(t *testing.T) { for _, c := range []struct { desc string @@ -534,7 +590,7 @@ func TestMakeTaskRunStatus(t *testing.T) { Sidecars: []v1beta1.SidecarState{}, ResourcesResult: []v1beta1.PipelineResourceResult{{ Key: "digest", - Value: "sha256:12345", + Value: *v1beta1.NewArrayOrString("sha256:12345"), ResourceRef: &v1beta1.PipelineResourceRef{Name: "source-image"}, }}, // We don't actually care about the time, just that it's not nil @@ -568,12 +624,12 @@ func TestMakeTaskRunStatus(t *testing.T) { Sidecars: []v1beta1.SidecarState{}, ResourcesResult: []v1beta1.PipelineResourceResult{{ Key: 
"digest", - Value: "sha256:1234", + Value: *v1beta1.NewArrayOrString("sha256:1234"), ResourceRef: &v1beta1.PipelineResourceRef{Name: "source-image"}, }}, TaskRunResults: []v1beta1.TaskRunResult{{ Name: "resultName", - Value: "resultValue", + Value: *v1beta1.NewArrayOrString("resultValue"), }}, // We don't actually care about the time, just that it's not nil CompletionTime: &metav1.Time{Time: time.Now()}, @@ -606,12 +662,12 @@ func TestMakeTaskRunStatus(t *testing.T) { Sidecars: []v1beta1.SidecarState{}, ResourcesResult: []v1beta1.PipelineResourceResult{{ Key: "digest", - Value: "sha256:1234", + Value: *v1beta1.NewArrayOrString("sha256:1234"), ResourceRef: &v1beta1.PipelineResourceRef{Name: "source-image"}, }}, TaskRunResults: []v1beta1.TaskRunResult{{ Name: "resultName", - Value: "resultValue", + Value: *v1beta1.NewArrayOrString("resultValue"), }}, // We don't actually care about the time, just that it's not nil CompletionTime: &metav1.Time{Time: time.Now()}, @@ -658,10 +714,10 @@ func TestMakeTaskRunStatus(t *testing.T) { Sidecars: []v1beta1.SidecarState{}, TaskRunResults: []v1beta1.TaskRunResult{{ Name: "resultNameOne", - Value: "resultValueThree", + Value: *v1beta1.NewArrayOrString("resultValueThree"), }, { Name: "resultNameTwo", - Value: "resultValueTwo", + Value: *v1beta1.NewArrayOrString("resultValueTwo"), }}, // We don't actually care about the time, just that it's not nil CompletionTime: &metav1.Time{Time: time.Now()}, @@ -745,12 +801,12 @@ func TestMakeTaskRunStatus(t *testing.T) { Sidecars: []v1beta1.SidecarState{}, ResourcesResult: []v1beta1.PipelineResourceResult{{ Key: "resultNameOne", - Value: "", + Value: *v1beta1.NewArrayOrString(""), ResultType: v1beta1.PipelineResourceResultType, }}, TaskRunResults: []v1beta1.TaskRunResult{{ Name: "resultNameThree", - Value: "", + Value: *v1beta1.NewArrayOrString(""), }}, // We don't actually care about the time, just that it's not nil CompletionTime: &metav1.Time{Time: time.Now()}, @@ -783,12 +839,12 @@ func TestMakeTaskRunStatus(t *testing.T) { Sidecars: []v1beta1.SidecarState{}, ResourcesResult: []v1beta1.PipelineResourceResult{{ Key: "resultNameOne", - Value: "", + Value: *v1beta1.NewArrayOrString(""), ResultType: v1beta1.PipelineResourceResultType, }}, TaskRunResults: []v1beta1.TaskRunResult{{ Name: "resultNameThree", - Value: "", + Value: *v1beta1.NewArrayOrString(""), }}, // We don't actually care about the time, just that it's not nil CompletionTime: &metav1.Time{Time: time.Now()}, diff --git a/pkg/reconciler/pipelinerun/pipelinerun_test.go b/pkg/reconciler/pipelinerun/pipelinerun_test.go index 646f92ccd4c..293036aa08f 100644 --- a/pkg/reconciler/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/pipelinerun/pipelinerun_test.go @@ -182,7 +182,9 @@ func initializePipelineRunControllerAssets(t *testing.T, d test.Data, opts pipel configMapWatcher := cminformer.NewInformedWatcher(c.Kube, system.Namespace()) ctl := NewController(&opts, testClock)(ctx, configMapWatcher) if la, ok := ctl.Reconciler.(reconciler.LeaderAware); ok { - la.Promote(reconciler.UniversalBucket(), func(reconciler.Bucket, types.NamespacedName) {}) + if err := la.Promote(reconciler.UniversalBucket(), func(reconciler.Bucket, types.NamespacedName) {}); err != nil { + t.Fatalf("error promoting reconciler leader: %v", err) + } } if err := configMapWatcher.Start(ctx.Done()); err != nil { t.Fatalf("error starting configmap watcher: %v", err) @@ -205,7 +207,7 @@ func getTaskRunCreations(t *testing.T, actions []ktesting.Action, minActionCount t.Fatalf("Expected client to 
have at least %d action implementation but it has %d", minActionCount, len(actions)) } - outputs := []*v1beta1.TaskRun{} + var outputs []*v1beta1.TaskRun for _, a := range actions { if action, ok := a.(ktesting.CreateAction); ok { if output, ok := action.GetObject().(*v1beta1.TaskRun); ok { @@ -226,7 +228,7 @@ func getTaskRunCreations(t *testing.T, actions []ktesting.Action, minActionCount // set of them. It will fatal the test if none are found. func getPipelineRunUpdates(t *testing.T, actions []ktesting.Action) []*v1beta1.PipelineRun { t.Helper() - outputs := []*v1beta1.PipelineRun{} + var outputs []*v1beta1.PipelineRun for _, a := range actions { if action, ok := a.(ktesting.UpdateAction); ok { if output, ok := action.GetObject().(*v1beta1.PipelineRun); ok { @@ -276,7 +278,6 @@ func runTestReconcileWithEmbeddedStatus(t *testing.T, embeddedStatus string) { // TestReconcile runs "Reconcile" on a PipelineRun with one Task that has not been started yet. // It verifies that the TaskRun is created, it checks the resulting API actions, status and events. names.TestingSeed() - const pipelineRunName = "test-pipeline-run-success" prs := []*v1beta1.PipelineRun{parse.MustParsePipelineRun(t, ` metadata: name: test-pipeline-run-success @@ -299,7 +300,6 @@ spec: type: image serviceAccountName: test-sa `)} - const pipelineName = "test-pipeline" ps := []*v1beta1.Pipeline{parse.MustParsePipeline(t, ` metadata: name: test-pipeline @@ -591,7 +591,6 @@ spec: func TestReconcile_CustomTask(t *testing.T) { names.TestingSeed() const pipelineRunName = "test-pipelinerun" - const pipelineTaskName = "custom-task" const namespace = "namespace" simpleCustomTaskPRYAML := `metadata: @@ -1458,21 +1457,21 @@ func newFeatureFlagsConfigMap() *corev1.ConfigMap { } func withEnabledAlphaAPIFields(cm *corev1.ConfigMap) *corev1.ConfigMap { - new := cm.DeepCopy() - new.Data[apiFieldsFeatureFlag] = config.AlphaAPIFields - return new + newCM := cm.DeepCopy() + newCM.Data[apiFieldsFeatureFlag] = config.AlphaAPIFields + return newCM } func withCustomTasks(cm *corev1.ConfigMap) *corev1.ConfigMap { - new := cm.DeepCopy() - new.Data[customTasksFeatureFlag] = "true" - return new + newCM := cm.DeepCopy() + newCM.Data[customTasksFeatureFlag] = "true" + return newCM } func withOCIBundles(cm *corev1.ConfigMap) *corev1.ConfigMap { - new := cm.DeepCopy() - new.Data[ociBundlesFeatureFlag] = "true" - return new + newCM := cm.DeepCopy() + newCM.Data[ociBundlesFeatureFlag] = "true" + return newCM } func withEmbeddedStatus(cm *corev1.ConfigMap, flagVal string) *corev1.ConfigMap { @@ -1638,7 +1637,7 @@ status: } // The patch operation to cancel the run must be executed. - got := []jsonpatch.Operation{} + var got []jsonpatch.Operation for _, a := range actions { if action, ok := a.(ktesting.PatchAction); ok { if a.(ktesting.PatchAction).Matches("patch", "runs") { @@ -1750,7 +1749,7 @@ status: } // The patch operation to cancel the run must be executed. 
- got := []jsonpatch.Operation{} + var got []jsonpatch.Operation for _, a := range actions { if action, ok := a.(ktesting.PatchAction); ok { if a.(ktesting.PatchAction).Matches("patch", "runs") { @@ -2414,8 +2413,8 @@ spec: status: PipelineRunPending `)} ps := []*v1beta1.Pipeline{simpleHelloWorldPipeline} - ts := []*v1beta1.Task{} - trs := []*v1beta1.TaskRun{} + var ts []*v1beta1.Task + var trs []*v1beta1.TaskRun d := test.Data{ PipelineRuns: prs, @@ -2426,7 +2425,7 @@ spec: prt := newPipelineRunTest(d, t) defer prt.Cancel() - wantEvents := []string{} + var wantEvents []string reconciledRun, _ := prt.reconcileRun("foo", "test-pipeline-run-pending", wantEvents, false) checkPipelineRunConditionStatusAndReason(t, reconciledRun, corev1.ConditionUnknown, v1beta1.PipelineRunReasonPending.String()) @@ -2654,7 +2653,7 @@ spec: "Warning InternalError 1 error occurred", } err = eventstest.CheckEventsOrdered(t, testAssets.Recorder.Events, prName, wantEvents) - if !(err == nil) { + if err != nil { t.Errorf(err.Error()) } @@ -6218,8 +6217,7 @@ spec: `(?s)dev.tekton.event.pipelinerun.running.v1.*test-pipelinerun`, } ceClient := clients.CloudEvents.(cloudevent.FakeClient) - err := eventstest.CheckEventsUnordered(t, ceClient.Events, "reconcile-cloud-events", wantCloudEvents) - if !(err == nil) { + if err := eventstest.CheckEventsUnordered(t, ceClient.Events, "reconcile-cloud-events", wantCloudEvents); err != nil { t.Errorf(err.Error()) } } @@ -7076,8 +7074,7 @@ spec: defer prt.Cancel() wantEvents := []string(nil) - permanentError := false - pipelinerun, _ := prt.reconcileRun(pr.Namespace, pr.Name, wantEvents, permanentError) + pipelinerun, _ := prt.reconcileRun(pr.Namespace, pr.Name, wantEvents, false) checkPipelineRunConditionStatusAndReason(t, pipelinerun, corev1.ConditionUnknown, ReasonResolvingPipelineRef) client := prt.TestAssets.Clients.ResolutionRequests.ResolutionV1alpha1().ResolutionRequests("default") diff --git a/pkg/reconciler/pipelinerun/resources/apply.go b/pkg/reconciler/pipelinerun/resources/apply.go index 4187b915a84..d5c95a5f5f4 100644 --- a/pkg/reconciler/pipelinerun/resources/apply.go +++ b/pkg/reconciler/pipelinerun/resources/apply.go @@ -221,7 +221,6 @@ func ApplyTaskResultsToPipelineResults( }) } } - return runResults } @@ -231,7 +230,7 @@ func ApplyTaskResultsToPipelineResults( func taskResultValue(taskName string, resultName string, taskResults map[string][]v1beta1.TaskRunResult) *string { for _, trResult := range taskResults[taskName] { if trResult.Name == resultName { - return &trResult.Value + return &trResult.Value.StringVal } } return nil diff --git a/pkg/reconciler/pipelinerun/resources/apply_test.go b/pkg/reconciler/pipelinerun/resources/apply_test.go index 8dd15896080..495f39b9483 100644 --- a/pkg/reconciler/pipelinerun/resources/apply_test.go +++ b/pkg/reconciler/pipelinerun/resources/apply_test.go @@ -791,7 +791,7 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { taskResults: map[string][]v1beta1.TaskRunResult{ "pt1": {{ Name: "foo", - Value: "bar", + Value: *v1beta1.NewArrayOrString("bar"), }}, }, expected: nil, @@ -804,7 +804,7 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { taskResults: map[string][]v1beta1.TaskRunResult{ "pt1": {{ Name: "foo", - Value: "bar", + Value: *v1beta1.NewArrayOrString("bar"), }}, }, expected: nil, @@ -827,7 +827,7 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { taskResults: map[string][]v1beta1.TaskRunResult{ "definitely-not-pt1": {{ Name: "foo", - Value: "bar", + Value: 
*v1beta1.NewArrayOrString("bar"), }}, }, expected: nil, @@ -840,7 +840,7 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { taskResults: map[string][]v1beta1.TaskRunResult{ "pt1": {{ Name: "definitely-not-foo", - Value: "bar", + Value: *v1beta1.NewArrayOrString("bar"), }}, }, expected: nil, @@ -864,7 +864,7 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { taskResults: map[string][]v1beta1.TaskRunResult{ "pt2": {{ Name: "bar", - Value: "rae", + Value: *v1beta1.NewArrayOrString("rae"), }}, }, expected: []v1beta1.PipelineRunResult{{ @@ -884,15 +884,15 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { "pt1": { { Name: "foo", - Value: "do", + Value: *v1beta1.NewArrayOrString("do"), }, { Name: "bar", - Value: "mi", + Value: *v1beta1.NewArrayOrString("mi"), }, }, "pt2": {{ Name: "baz", - Value: "rae", + Value: *v1beta1.NewArrayOrString("rae"), }}, }, expected: []v1beta1.PipelineRunResult{{ @@ -969,7 +969,7 @@ func TestApplyTaskResultsToPipelineResults(t *testing.T) { taskResults: map[string][]v1beta1.TaskRunResult{ "normaltask": {{ Name: "baz", - Value: "rae", + Value: *v1beta1.NewArrayOrString("rae"), }}, }, expected: []v1beta1.PipelineRunResult{{ diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go index ad4da12b550..d5a8d8a4f75 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunresolution_test.go @@ -3252,7 +3252,7 @@ func TestResolvedPipelineRunTask_IsFinallySkipped(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "commit", - Value: "SHA2", + Value: *v1beta1.NewArrayOrString("SHA2"), }}, }, }, @@ -3385,7 +3385,7 @@ func TestResolvedPipelineRunTask_IsFinalTask(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "commit", - Value: "SHA2", + Value: *v1beta1.NewArrayOrString("SHA2"), }}, }, }, diff --git a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go index ffc8ddff6c7..78c44e68685 100644 --- a/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go +++ b/pkg/reconciler/pipelinerun/resources/pipelinerunstate_test.go @@ -2242,10 +2242,10 @@ func TestPipelineRunState_GetResultsFuncs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "foo", - Value: "oof", + Value: *v1beta1.NewArrayOrString("oof"), }, { Name: "bar", - Value: "rab", + Value: *v1beta1.NewArrayOrString("rab"), }}, }, }, @@ -2278,7 +2278,7 @@ func TestPipelineRunState_GetResultsFuncs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "fail-foo", - Value: "fail-oof", + Value: *v1beta1.NewArrayOrString("fail-oof"), }}, }, }, @@ -2297,7 +2297,7 @@ func TestPipelineRunState_GetResultsFuncs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "unknown-foo", - Value: "unknown-oof", + Value: *v1beta1.NewArrayOrString("unknown-oof"), }}, }, }, @@ -2394,10 +2394,10 @@ func TestPipelineRunState_GetResultsFuncs(t *testing.T) { expectedTaskResults := map[string][]v1beta1.TaskRunResult{ "successful-task-with-results-1": {{ Name: "foo", - Value: "oof", + Value: *v1beta1.NewArrayOrString("oof"), }, { Name: "bar", - Value: "rab", + Value: 
*v1beta1.NewArrayOrString("rab"), }}, "successful-task-without-results-1": nil, } diff --git a/pkg/reconciler/pipelinerun/resources/resultrefresolution.go b/pkg/reconciler/pipelinerun/resources/resultrefresolution.go index 641355415b6..78fff372d26 100644 --- a/pkg/reconciler/pipelinerun/resources/resultrefresolution.go +++ b/pkg/reconciler/pipelinerun/resources/resultrefresolution.go @@ -176,7 +176,7 @@ func findTaskResultForParam(taskRun *v1beta1.TaskRun, reference *v1beta1.ResultR results := taskRun.Status.TaskRunStatusFields.TaskRunResults for _, result := range results { if result.Name == reference.Result { - return result.Value, nil + return result.Value.StringVal, nil } } return "", fmt.Errorf("Could not find result with name %s for task %s", reference.Result, reference.PipelineTask) diff --git a/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go b/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go index 810ab509cde..327585cfd7f 100644 --- a/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go +++ b/pkg/reconciler/pipelinerun/resources/resultrefresolution_test.go @@ -43,7 +43,7 @@ var pipelineRunState = PipelineRunState{{ TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "aResult", - Value: "aResultValue", + Value: *v1beta1.NewArrayOrString("aResultValue"), }}, }, }, @@ -159,7 +159,7 @@ func TestTaskParamResolver_ResolveResultRefs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "aResult", - Value: "aResultValue", + Value: *v1beta1.NewArrayOrString("aResultValue"), }}, }, }, @@ -195,7 +195,7 @@ func TestTaskParamResolver_ResolveResultRefs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "aResult", - Value: "aResultValue", + Value: *v1beta1.NewArrayOrString("aResultValue"), }}, }, }, @@ -215,7 +215,7 @@ func TestTaskParamResolver_ResolveResultRefs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "bResult", - Value: "bResultValue", + Value: *v1beta1.NewArrayOrString("bResultValue"), }}, }, }, @@ -258,7 +258,7 @@ func TestTaskParamResolver_ResolveResultRefs(t *testing.T) { TaskRunStatusFields: v1beta1.TaskRunStatusFields{ TaskRunResults: []v1beta1.TaskRunResult{{ Name: "aResult", - Value: "aResultValue", + Value: *v1beta1.NewArrayOrString("aResultValue"), }}, }, }, diff --git a/pkg/termination/parse.go b/pkg/termination/parse.go index 53a9eba9389..8792fff7ca8 100644 --- a/pkg/termination/parse.go +++ b/pkg/termination/parse.go @@ -21,6 +21,7 @@ import ( "fmt" "sort" + "github.com/google/go-cmp/cmp" v1beta1 "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1" "go.uber.org/zap" ) @@ -38,9 +39,8 @@ func ParseMessage(logger *zap.SugaredLogger, msg string) ([]v1beta1.PipelineReso if err := json.Unmarshal([]byte(msg), &r); err != nil { return nil, fmt.Errorf("parsing message json: %v", err) } - for i, rr := range r { - if rr == (v1beta1.PipelineResourceResult{}) { + if cmp.Equal(rr, v1beta1.PipelineResourceResult{}) { // Erase incorrect result r[i] = r[len(r)-1] r = r[:len(r)-1] diff --git a/pkg/termination/parse_test.go b/pkg/termination/parse_test.go index dae81ac510c..33af9359579 100644 --- a/pkg/termination/parse_test.go +++ b/pkg/termination/parse_test.go @@ -34,10 +34,10 @@ func TestParseMessage(t *testing.T) { msg: `[{"key": "digest","value":"hereisthedigest"},{"key":"foo","value":"bar"}]`, want: 
[]v1beta1.PipelineResourceResult{{ Key: "digest", - Value: "hereisthedigest", + Value: *v1beta1.NewArrayOrString("hereisthedigest"), }, { Key: "foo", - Value: "bar", + Value: *v1beta1.NewArrayOrString("bar"), }}, }, { desc: "empty message", @@ -51,7 +51,7 @@ func TestParseMessage(t *testing.T) { {"key":"foo","value":"last"}]`, want: []v1beta1.PipelineResourceResult{{ Key: "foo", - Value: "last", + Value: *v1beta1.NewArrayOrString("last"), }}, }, { desc: "sorted by key", @@ -61,13 +61,13 @@ func TestParseMessage(t *testing.T) { {"key":"aaa","value":"first"}]`, want: []v1beta1.PipelineResourceResult{{ Key: "aaa", - Value: "first", + Value: *v1beta1.NewArrayOrString("first"), }, { Key: "ddd", - Value: "middle", + Value: *v1beta1.NewArrayOrString("middle"), }, { Key: "zzz", - Value: "last", + Value: *v1beta1.NewArrayOrString("last"), }}, }} { t.Run(c.desc, func(t *testing.T) { diff --git a/pkg/termination/write_test.go b/pkg/termination/write_test.go index 3ba07468cce..87918353547 100644 --- a/pkg/termination/write_test.go +++ b/pkg/termination/write_test.go @@ -43,7 +43,7 @@ func TestExistingFile(t *testing.T) { }() output := []v1beta1.PipelineResourceResult{{ Key: "key1", - Value: "hello", + Value: *v1beta1.NewArrayOrString("hello"), }} if err := WriteMessage(tmpFile.Name(), output); err != nil { @@ -52,7 +52,7 @@ func TestExistingFile(t *testing.T) { output = []v1beta1.PipelineResourceResult{{ Key: "key2", - Value: "world", + Value: *v1beta1.NewArrayOrString("world"), }} if err := WriteMessage(tmpFile.Name(), output); err != nil { @@ -80,7 +80,7 @@ func TestMaxSizeFile(t *testing.T) { output := []v1beta1.PipelineResourceResult{{ Key: "key1", - Value: value, + Value: *v1beta1.NewArrayOrString(value), }} if err := WriteMessage(tmpFile.Name(), output); !errors.Is(err, aboveMax) { diff --git a/test/pipelinerun_test.go b/test/pipelinerun_test.go index 50d2248ada2..9fd0d6b6166 100644 --- a/test/pipelinerun_test.go +++ b/test/pipelinerun_test.go @@ -41,47 +41,50 @@ import ( "k8s.io/client-go/kubernetes" "knative.dev/pkg/apis" knativetest "knative.dev/pkg/test" + "knative.dev/pkg/test/helpers" ) var ( - pipelineName = "pipeline" - pipelineRunName = "pipelinerun" - secretName = "secret" - saName = "service-account" - taskName = "task" - task1Name = "task1" - cond1Name = "cond-1" + secretName = "secret" + saName = "service-account" + task1Name = "task1" + cond1Name = "cond-1" ) func TestPipelineRun(t *testing.T) { t.Parallel() type tests struct { name string - testSetup func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) + testSetup func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1beta1.Pipeline) expectedTaskRuns []string expectedNumberOfEvents int - pipelineRunFunc func(*testing.T, int, string) *v1beta1.PipelineRun + pipelineRunFunc func(*testing.T, int, string, string, map[string]*v1alpha1.PipelineResource) *v1beta1.PipelineRun } tds := []tests{{ name: "fan-in and fan-out", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, _ int) (map[string]*v1alpha1.PipelineResource, *v1beta1.Pipeline) { t.Helper() - for _, task := range getFanInFanOutTasks(t, namespace) { + tasks := getFanInFanOutTasks(t, namespace) + for _, task := range tasks { if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task.Name, 
err) } } - for _, res := range getFanInFanOutGitResources(t) { + resources := getFanInFanOutGitResources(t) + for _, res := range resources { if _, err := c.PipelineResourceClient.Create(ctx, res, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", res.Name, err) } } - if _, err := c.PipelineClient.Create(ctx, getFanInFanOutPipeline(t, index, namespace), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + p := getFanInFanOutPipeline(t, namespace, tasks) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return resources, p }, pipelineRunFunc: getFanInFanOutPipelineRun, expectedTaskRuns: []string{"create-file-kritis", "create-fan-out-1", "create-fan-out-2", "check-fan-in"}, @@ -89,7 +92,7 @@ func TestPipelineRun(t *testing.T) { expectedNumberOfEvents: 5, }, { name: "service account propagation and pipeline param", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1beta1.Pipeline) { t.Helper() t.Skip("build-crd-testing project got removed, the secret-sauce doesn't exist anymore, skipping") if _, err := c.KubeClient.CoreV1().Secrets(namespace).Create(ctx, getPipelineRunSecret(index, namespace), metav1.CreateOptions{}); err != nil { @@ -115,14 +118,17 @@ spec: image: gcr.io/tekton-releases/dogfooding/skopeo:latest command: ['skopeo'] args: ['copy', '$(params["the.path"])', '$(params["the.dest"])'] -`, getName(taskName, index), namespace)) +`, helpers.ObjectNameForTest(t), namespace)) if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } - if _, err := c.PipelineClient.Create(ctx, getHelloWorldPipelineWithSingularTask(t, index, namespace), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + p := getHelloWorldPipelineWithSingularTask(t, namespace, task.Name) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return nil, p }, expectedTaskRuns: []string{task1Name}, // 1 from PipelineRun and 1 from Tasks defined in pipelinerun @@ -130,11 +136,11 @@ spec: pipelineRunFunc: getHelloWorldPipelineRun, }, { name: "pipeline succeeds when task skipped due to failed condition", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, _ int) (map[string]*v1alpha1.PipelineResource, *v1beta1.Pipeline) { t.Helper() - cond := getFailingCondition() + cond := getFailingCondition(t) if _, err := c.ConditionClient.Create(ctx, cond, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) + t.Fatalf("Failed to create Condition `%s`: %s", cond.Name, err) } task := parse.MustParseTask(t, fmt.Sprintf(` @@ -146,13 +152,16 @@ spec: - image: ubuntu command: ['/bin/bash'] args: ['-c', 'echo hello, world'] -`, getName(taskName, index), namespace)) +`, helpers.ObjectNameForTest(t), namespace)) if _, err := c.TaskClient.Create(ctx, 
task, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } - if _, err := c.PipelineClient.Create(ctx, getPipelineWithFailingCondition(t, index, namespace), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + p := getPipelineWithFailingCondition(t, namespace, task.Name, cond.Name) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return nil, p }, expectedTaskRuns: []string{}, // 1 from PipelineRun; 0 from taskrun since it should not be executed due to condition failing @@ -160,7 +169,7 @@ spec: pipelineRunFunc: getConditionalPipelineRun, }, { name: "pipelinerun succeeds with LimitRange minimum in namespace", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1beta1.Pipeline) { t.Helper() t.Skip("build-crd-testing project got removed, the secret-sauce doesn't exist anymore, skipping") if _, err := c.KubeClient.CoreV1().LimitRanges(namespace).Create(ctx, getLimitRange("prlimitrange", namespace, "100m", "99Mi", "100m"), metav1.CreateOptions{}); err != nil { @@ -190,13 +199,17 @@ spec: image: gcr.io/tekton-releases/dogfooding/skopeo:latest command: ['skopeo'] args: ['copy', '$(params["the.path"])', '$(params["the.dest"])'] -`, getName(taskName, index), namespace)) +`, helpers.ObjectNameForTest(t), namespace)) if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", fmt.Sprint("task", index), err) } - if _, err := c.PipelineClient.Create(ctx, getHelloWorldPipelineWithSingularTask(t, index, namespace), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + + p := getHelloWorldPipelineWithSingularTask(t, namespace, task.Name) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return nil, p }, expectedTaskRuns: []string{task1Name}, // 1 from PipelineRun and 1 from Tasks defined in pipelinerun @@ -218,10 +231,11 @@ spec: defer tearDown(ctx, t, c, namespace) t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) - td.testSetup(ctx, t, c, namespace, i) + resources, p := td.testSetup(ctx, t, c, namespace, i) - prName := fmt.Sprintf("%s%d", pipelineRunName, i) - pipelineRun, err := c.PipelineRunClient.Create(ctx, td.pipelineRunFunc(t, i, namespace), metav1.CreateOptions{}) + pipelineRun := td.pipelineRunFunc(t, i, namespace, p.Name, resources) + prName := pipelineRun.Name + _, err := c.PipelineRunClient.Create(ctx, pipelineRun, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) } @@ -300,7 +314,7 @@ spec: } } -func getHelloWorldPipelineWithSingularTask(t *testing.T, suffix int, namespace string) *v1beta1.Pipeline { +func getHelloWorldPipelineWithSingularTask(t *testing.T, namespace string, taskName string) *v1beta1.Pipeline { return parse.MustParsePipeline(t, fmt.Sprintf(` metadata: name: %s @@ -320,7 +334,7 @@ spec: value: $(params.dest) taskRef: name: %s -`, getName(pipelineName, suffix), namespace, task1Name, 
getName(taskName, suffix))) +`, helpers.ObjectNameForTest(t), namespace, task1Name, taskName)) } // TestPipelineRunRefDeleted tests that a running PipelineRun doesn't fail when the Pipeline @@ -334,12 +348,13 @@ func TestPipelineRunRefDeleted(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - prName := "pipelinerun-referencing-deleted" + pipelineName := helpers.ObjectNameForTest(t) + prName := helpers.ObjectNameForTest(t) t.Logf("Creating Pipeline, and PipelineRun %s in namespace %s", prName, namespace) - pipeline := parse.MustParsePipeline(t, ` + pipeline := parse.MustParsePipeline(t, fmt.Sprintf(` metadata: - name: pipeline-to-be-deleted + name: %s spec: tasks: - name: step1 @@ -361,18 +376,18 @@ spec: #!/usr/bin/env bash # Sleep for another 10s sleep 10 -`) +`, pipelineName)) if _, err := c.PipelineClient.Create(ctx, pipeline, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Task `%s`: %s", prName, err) + t.Fatalf("Failed to create Pipeline `%s`: %s", pipelineName, err) } - pipelinerun := parse.MustParsePipelineRun(t, ` + pipelinerun := parse.MustParsePipelineRun(t, fmt.Sprintf(` metadata: - name: pipelinerun-referencing-deleted + name: %s spec: pipelineRef: - name: pipeline-to-be-deleted -`) + name: %s +`, prName, pipelineName)) _, err := c.PipelineRunClient.Create(ctx, pipelinerun, metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) @@ -407,7 +422,9 @@ func TestPipelineRunPending(t *testing.T) { knativetest.CleanupOnInterrupt(func() { tearDown(ctx, t, c, namespace) }, t.Logf) defer tearDown(ctx, t, c, namespace) - prName := "pending-pipelinerun-test" + taskName := helpers.ObjectNameForTest(t) + pipelineName := helpers.ObjectNameForTest(t) + prName := helpers.ObjectNameForTest(t) t.Logf("Creating Task, Pipeline, and Pending PipelineRun %s in namespace %s", prName, namespace) @@ -420,8 +437,8 @@ spec: - image: ubuntu command: ['/bin/bash'] args: ['-c', 'echo hello, world'] -`, prName, namespace)), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Task `%s`: %s", prName, err) +`, taskName, namespace)), metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", taskName, err) } if _, err := c.PipelineClient.Create(ctx, parse.MustParsePipeline(t, fmt.Sprintf(` @@ -433,8 +450,8 @@ spec: - name: task taskRef: name: %s -`, prName, namespace, prName)), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", prName, err) +`, pipelineName, namespace, taskName)), metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", pipelineName, err) } pipelineRun, err := c.PipelineRunClient.Create(ctx, parse.MustParsePipelineRun(t, fmt.Sprintf(` @@ -445,7 +462,7 @@ spec: pipelineRef: name: %s status: PipelineRunPending -`, prName, namespace, prName)), metav1.CreateOptions{}) +`, prName, namespace, pipelineName)), metav1.CreateOptions{}) if err != nil { t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) } @@ -478,10 +495,11 @@ spec: } } -func getFanInFanOutTasks(t *testing.T, namespace string) []*v1beta1.Task { - return []*v1beta1.Task{parse.MustParseTask(t, fmt.Sprintf(` +func getFanInFanOutTasks(t *testing.T, namespace string) map[string]*v1beta1.Task { + return map[string]*v1beta1.Task{ + "create-file": parse.MustParseTask(t, fmt.Sprintf(` metadata: - name: create-file + name: %s namespace: %s spec: resources: @@ -501,10 +519,10 @@ spec: 
command: ['/bin/bash'] image: ubuntu name: write-data-task-0-step-1 -`, namespace)), - parse.MustParseTask(t, fmt.Sprintf(` +`, helpers.ObjectNameForTest(t), namespace)), + "check-create-files-exists": parse.MustParseTask(t, fmt.Sprintf(` metadata: - name: check-create-files-exists + name: %s namespace: %s spec: resources: @@ -523,10 +541,10 @@ spec: command: ['/bin/bash'] image: ubuntu name: write-data-task-1 -`, namespace)), - parse.MustParseTask(t, fmt.Sprintf(` +`, helpers.ObjectNameForTest(t), namespace)), + "check-create-files-exists-2": parse.MustParseTask(t, fmt.Sprintf(` metadata: - name: check-create-files-exists-2 + name: %s namespace: %s spec: resources: @@ -545,10 +563,10 @@ spec: command: ['/bin/bash'] image: ubuntu name: write-data-task-1 -`, namespace)), - parse.MustParseTask(t, fmt.Sprintf(` +`, helpers.ObjectNameForTest(t), namespace)), + "read-files": parse.MustParseTask(t, fmt.Sprintf(` metadata: - name: read-files + name: %s namespace: %s spec: resources: @@ -565,11 +583,11 @@ spec: command: ['/bin/bash'] image: ubuntu name: read-from-task-1 -`, namespace)), +`, helpers.ObjectNameForTest(t), namespace)), } } -func getFanInFanOutPipeline(t *testing.T, suffix int, namespace string) *v1beta1.Pipeline { +func getFanInFanOutPipeline(t *testing.T, namespace string, tasks map[string]*v1beta1.Task) *v1beta1.Pipeline { return parse.MustParsePipeline(t, fmt.Sprintf(` metadata: name: %s @@ -588,7 +606,7 @@ spec: - name: workspace resource: git-repo taskRef: - name: create-file + name: %s - name: create-fan-out-1 resources: inputs: @@ -600,7 +618,7 @@ spec: - name: workspace resource: git-repo taskRef: - name: check-create-files-exists + name: %s - name: create-fan-out-2 resources: inputs: @@ -612,7 +630,7 @@ spec: - name: workspace resource: git-repo taskRef: - name: check-create-files-exists-2 + name: %s - name: check-fan-in resources: inputs: @@ -622,14 +640,16 @@ spec: name: workspace resource: git-repo taskRef: - name: read-files -`, getName(pipelineName, suffix), namespace)) + name: %s +`, helpers.ObjectNameForTest(t), namespace, tasks["create-file"].Name, tasks["check-create-files-exists"].Name, + tasks["check-create-files-exists-2"].Name, tasks["read-files"].Name)) } -func getFanInFanOutGitResources(t *testing.T) []*v1alpha1.PipelineResource { - return []*v1alpha1.PipelineResource{parse.MustParsePipelineResource(t, ` +func getFanInFanOutGitResources(t *testing.T) map[string]*v1alpha1.PipelineResource { + return map[string]*v1alpha1.PipelineResource{ + "kritis-resource-git": parse.MustParsePipelineResource(t, fmt.Sprintf(` metadata: - name: kritis-resource-git + name: %s spec: type: git params: @@ -637,7 +657,7 @@ spec: value: https://github.com/grafeas/kritis - name: Revision value: master -`)} +`, helpers.ObjectNameForTest(t)))} } func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceAccount { @@ -651,7 +671,7 @@ func getPipelineRunServiceAccount(suffix int, namespace string) *corev1.ServiceA }}, } } -func getFanInFanOutPipelineRun(t *testing.T, suffix int, namespace string) *v1beta1.PipelineRun { +func getFanInFanOutPipelineRun(t *testing.T, _ int, namespace string, pipelineName string, resources map[string]*v1alpha1.PipelineResource) *v1beta1.PipelineRun { return parse.MustParsePipelineRun(t, fmt.Sprintf(` metadata: name: %s @@ -662,8 +682,8 @@ spec: resources: - name: git-repo resourceRef: - name: kritis-resource-git -`, getName(pipelineRunName, suffix), namespace, getName(pipelineName, suffix))) + name: %s +`, helpers.ObjectNameForTest(t), 
namespace, pipelineName, resources["kritis-resource-git"].Name)) } func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { @@ -695,7 +715,7 @@ func getPipelineRunSecret(suffix int, namespace string) *corev1.Secret { } } -func getHelloWorldPipelineRun(t *testing.T, suffix int, namespace string) *v1beta1.PipelineRun { +func getHelloWorldPipelineRun(t *testing.T, suffix int, namespace string, pipelineName string, _ map[string]*v1alpha1.PipelineResource) *v1beta1.PipelineRun { return parse.MustParsePipelineRun(t, fmt.Sprintf(` metadata: labels: @@ -711,7 +731,7 @@ spec: pipelineRef: name: %s serviceAccountName: %s%d -`, getName(pipelineRunName, suffix), namespace, getName(pipelineName, suffix), saName, suffix)) +`, helpers.ObjectNameForTest(t), namespace, pipelineName, saName, suffix)) } func getName(namespace string, suffix int) string { @@ -873,7 +893,7 @@ func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations } } -func getPipelineWithFailingCondition(t *testing.T, suffix int, namespace string) *v1beta1.Pipeline { +func getPipelineWithFailingCondition(t *testing.T, namespace string, taskName, condName string) *v1beta1.Pipeline { return parse.MustParsePipeline(t, fmt.Sprintf(` metadata: name: %s @@ -889,27 +909,22 @@ spec: taskRef: name: %s runAfter: ['%s'] -`, getName(pipelineName, suffix), namespace, task1Name, getName(taskName, suffix), cond1Name, getName(taskName, suffix), task1Name)) +`, helpers.ObjectNameForTest(t), namespace, task1Name, taskName, condName, taskName, task1Name)) } -func getFailingCondition() *v1alpha1.Condition { - return &v1alpha1.Condition{ - ObjectMeta: metav1.ObjectMeta{ - Name: cond1Name, - }, - Spec: v1alpha1.ConditionSpec{ - Check: v1alpha1.Step{ - Container: corev1.Container{ - Image: "ubuntu", - Command: []string{"/bin/bash"}, - Args: []string{"exit 1"}, - }, - }, - }, - } +func getFailingCondition(t *testing.T) *v1alpha1.Condition { + return parse.MustParseCondition(t, fmt.Sprintf(` +metadata: + name: %s +spec: + check: + image: ubuntu + command: ['/bin/bash'] + args: ['exit 1'] +`, helpers.ObjectNameForTest(t))) } -func getConditionalPipelineRun(t *testing.T, suffix int, namespace string) *v1beta1.PipelineRun { +func getConditionalPipelineRun(t *testing.T, _ int, namespace string, pipelineName string, _ map[string]*v1alpha1.PipelineResource) *v1beta1.PipelineRun { return parse.MustParsePipelineRun(t, fmt.Sprintf(` metadata: name: %s @@ -919,7 +934,7 @@ metadata: spec: pipelineRef: name: %s -`, getName(pipelineRunName, suffix), namespace, getName(pipelineName, suffix))) +`, helpers.ObjectNameForTest(t), namespace, pipelineName)) } func getLimitRange(name, namespace, resourceCPU, resourceMemory, resourceEphemeralStorage string) *corev1.LimitRange { diff --git a/test/v1alpha1/pipelinerun_test.go b/test/v1alpha1/pipelinerun_test.go index 08ad27cfe71..dffe3a1964b 100644 --- a/test/v1alpha1/pipelinerun_test.go +++ b/test/v1alpha1/pipelinerun_test.go @@ -39,16 +39,13 @@ import ( "k8s.io/client-go/kubernetes" "knative.dev/pkg/apis" knativetest "knative.dev/pkg/test" + "knative.dev/pkg/test/helpers" ) var ( - pipelineName = "pipeline" - pipelineRunName = "pipelinerun" secretName = "secret" saName = "service-account" - taskName = "task" task1Name = "task1" - cond1Name = "cond-1" pipelineRunTimeout = 10 * time.Minute ) @@ -56,31 +53,35 @@ func TestPipelineRun(t *testing.T) { t.Parallel() type tests struct { name string - testSetup func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) + testSetup 
func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1alpha1.Pipeline) expectedTaskRuns []string expectedNumberOfEvents int - pipelineRunFunc func(*testing.T, int, string) *v1alpha1.PipelineRun + pipelineRunFunc func(*testing.T, int, string, string, map[string]*v1alpha1.PipelineResource) *v1alpha1.PipelineRun } tds := []tests{{ name: "fan-in and fan-out", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1alpha1.Pipeline) { t.Helper() - for _, task := range getFanInFanOutTasks(t) { + tasks := getFanInFanOutTasks(t) + for _, task := range tasks { if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } } - - for _, res := range getFanInFanOutGitResources(t) { + resources := getFanInFanOutGitResources(t) + for _, res := range resources { if _, err := c.PipelineResourceClient.Create(ctx, res, metav1.CreateOptions{}); err != nil { t.Fatalf("Failed to create Pipeline Resource `%s`: %s", res.Name, err) } } - if _, err := c.PipelineClient.Create(ctx, getFanInFanOutPipeline(t, index), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + p := getFanInFanOutPipeline(t, tasks) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return resources, p }, pipelineRunFunc: getFanInFanOutPipelineRun, expectedTaskRuns: []string{"create-file-kritis", "create-fan-out-1", "create-fan-out-2", "check-fan-in"}, @@ -88,7 +89,7 @@ func TestPipelineRun(t *testing.T) { expectedNumberOfEvents: 5, }, { name: "service account propagation and pipeline param", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1alpha1.Pipeline) { t.Helper() t.Skip("build-crd-testing project got removed, the secret-sauce doesn't exist anymore, skipping") if _, err := c.KubeClient.CoreV1().Secrets(namespace).Create(ctx, getPipelineRunSecret(index), metav1.CreateOptions{}); err != nil { @@ -113,14 +114,17 @@ spec: image: gcr.io/tekton-releases/dogfooding/skopeo:latest command: ['skopeo'] args: ['copy', '$(inputs.params.path)', '$(inputs.params.dest)'] -`, getName(taskName, index))) +`, helpers.ObjectNameForTest(t))) if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } - if _, err := c.PipelineClient.Create(ctx, getHelloWorldPipelineWithSingularTask(t, index), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + p := getHelloWorldPipelineWithSingularTask(t, task.Name) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return nil, p }, expectedTaskRuns: []string{task1Name}, // 1 from PipelineRun and 1 from Tasks defined in pipelinerun @@ -128,11 +132,11 @@ spec: pipelineRunFunc: getHelloWorldPipelineRun, }, { name: "pipeline 
succeeds when task skipped due to failed condition", - testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) { + testSetup: func(ctx context.Context, t *testing.T, c *clients, namespace string, index int) (map[string]*v1alpha1.PipelineResource, *v1alpha1.Pipeline) { t.Helper() - cond := getFailingCondition(namespace) + cond := getFailingCondition(t, namespace) if _, err := c.ConditionClient.Create(ctx, cond, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Condition `%s`: %s", cond1Name, err) + t.Fatalf("Failed to create Condition `%s`: %s", cond.Name, err) } task := parse.MustParseAlphaTask(t, fmt.Sprintf(` @@ -143,14 +147,17 @@ spec: - image: ubuntu command: ['/bin/bash'] args: ['-c', 'echo hello, world'] -`, getName(taskName, index))) +`, helpers.ObjectNameForTest(t))) if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Task `%s`: %s", getName(taskName, index), err) + t.Fatalf("Failed to create Task `%s`: %s", task.Name, err) } - if _, err := c.PipelineClient.Create(ctx, getPipelineWithFailingCondition(t, index), metav1.CreateOptions{}); err != nil { - t.Fatalf("Failed to create Pipeline `%s`: %s", getName(pipelineName, index), err) + p := getPipelineWithFailingCondition(t, task.Name, cond.Name) + if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err) } + + return nil, p }, expectedTaskRuns: []string{}, // 1 from PipelineRun; 0 from taskrun since it should not be executed due to condition failing @@ -172,13 +179,13 @@ spec: defer tearDown(ctx, t, c, namespace) t.Logf("Setting up test resources for %q test in namespace %s", td.name, namespace) - td.testSetup(ctx, t, c, namespace, i) + resources, p := td.testSetup(ctx, t, c, namespace, i) - prName := fmt.Sprintf("%s%d", pipelineRunName, i) - pipelineRun, err := c.PipelineRunClient.Create(ctx, td.pipelineRunFunc(t, i, namespace), metav1.CreateOptions{}) + pipelineRun, err := c.PipelineRunClient.Create(ctx, td.pipelineRunFunc(t, i, namespace, p.Name, resources), metav1.CreateOptions{}) if err != nil { - t.Fatalf("Failed to create PipelineRun `%s`: %s", prName, err) + t.Fatalf("Failed to create PipelineRun `%s`: %s", pipelineRun.Name, err) } + prName := pipelineRun.Name t.Logf("Waiting for PipelineRun %s in namespace %s to complete", prName, namespace) if err := WaitForPipelineRunState(ctx, c, prName, pipelineRunTimeout, PipelineRunSucceed(prName), "PipelineRunSuccess"); err != nil { @@ -254,7 +261,7 @@ spec: } } -func getHelloWorldPipelineWithSingularTask(t *testing.T, suffix int) *v1alpha1.Pipeline { +func getHelloWorldPipelineWithSingularTask(t *testing.T, taskName string) *v1alpha1.Pipeline { return parse.MustParseAlphaPipeline(t, fmt.Sprintf(` metadata: name: %s @@ -273,22 +280,21 @@ spec: value: $(params.dest) taskRef: name: %s -`, getName(pipelineName, suffix), task1Name, getName(taskName, suffix))) +`, helpers.ObjectNameForTest(t), task1Name, taskName)) } -func getFanInFanOutTasks(t *testing.T) []*v1alpha1.Task { - return []*v1alpha1.Task{ - parse.MustParseAlphaTask(t, ` +func getFanInFanOutTasks(t *testing.T) map[string]*v1alpha1.Task { + return map[string]*v1alpha1.Task{ + "create-file": parse.MustParseAlphaTask(t, fmt.Sprintf(` metadata: - name: create-file + name: %s spec: - inputs: - resources: + resources: + inputs: - name: workspace targetPath: brandnewspace type: git - outputs: - resources: + outputs: - name: workspace type: 
git steps: @@ -300,17 +306,16 @@ spec: command: ['/bin/bash'] image: ubuntu name: write-data-task-0-step-1 -`), - parse.MustParseAlphaTask(t, ` +`, helpers.ObjectNameForTest(t))), + "check-create-files-exists": parse.MustParseAlphaTask(t, fmt.Sprintf(` metadata: - name: check-create-files-exists + name: %s spec: - inputs: - resources: + resources: + inputs: - name: workspace type: git - outputs: - resources: + outputs: - name: workspace type: git steps: @@ -322,17 +327,16 @@ spec: command: ['/bin/bash'] image: ubuntu name: write-data-task-1 -`), - parse.MustParseAlphaTask(t, ` +`, helpers.ObjectNameForTest(t))), + "check-create-files-exists-2": parse.MustParseAlphaTask(t, fmt.Sprintf(` metadata: - name: check-create-files-exists-2 + name: %s spec: - inputs: - resources: + resources: + inputs: - name: workspace type: git - outputs: - resources: + outputs: - name: workspace type: git steps: @@ -344,13 +348,13 @@ spec: command: ['/bin/bash'] image: ubuntu name: write-data-task-1 -`), - parse.MustParseAlphaTask(t, ` +`, helpers.ObjectNameForTest(t))), + "read-files": parse.MustParseAlphaTask(t, fmt.Sprintf(` metadata: - name: read-files + name: %s spec: - inputs: - resources: + resources: + inputs: - name: workspace type: git targetPath: readingspace @@ -363,11 +367,11 @@ spec: command: ['/bin/bash'] image: ubuntu name: read-from-task-1 -`), +`, helpers.ObjectNameForTest(t))), } } -func getFanInFanOutPipeline(t *testing.T, suffix int) *v1alpha1.Pipeline { +func getFanInFanOutPipeline(t *testing.T, tasks map[string]*v1alpha1.Task) *v1alpha1.Pipeline { return parse.MustParseAlphaPipeline(t, fmt.Sprintf(` metadata: name: %s @@ -385,7 +389,7 @@ spec: - name: workspace resource: git-repo taskRef: - name: create-file + name: %s - name: create-fan-out-1 resources: inputs: @@ -397,7 +401,7 @@ spec: - name: workspace resource: git-repo taskRef: - name: check-create-files-exists + name: %s - name: create-fan-out-2 resources: inputs: @@ -409,7 +413,7 @@ spec: - name: workspace resource: git-repo taskRef: - name: check-create-files-exists-2 + name: %s - name: check-fan-in resources: inputs: @@ -419,14 +423,16 @@ spec: name: workspace resource: git-repo taskRef: - name: read-files -`, getName(pipelineName, suffix))) + name: %s +`, helpers.ObjectNameForTest(t), tasks["create-file"].Name, tasks["check-create-files-exists"].Name, + tasks["check-create-files-exists-2"].Name, tasks["read-files"].Name)) } -func getFanInFanOutGitResources(t *testing.T) []*v1alpha1.PipelineResource { - return []*v1alpha1.PipelineResource{parse.MustParsePipelineResource(t, ` +func getFanInFanOutGitResources(t *testing.T) map[string]*v1alpha1.PipelineResource { + return map[string]*v1alpha1.PipelineResource{ + "kritis-resource-git": parse.MustParsePipelineResource(t, fmt.Sprintf(` metadata: - name: kritis-resource-git + name: %s spec: type: git params: @@ -434,7 +440,7 @@ spec: value: https://github.com/grafeas/kritis - name: Revision value: master -`)} +`, helpers.ObjectNameForTest(t)))} } func getPipelineRunServiceAccount(suffix int) *corev1.ServiceAccount { @@ -447,7 +453,7 @@ func getPipelineRunServiceAccount(suffix int) *corev1.ServiceAccount { }}, } } -func getFanInFanOutPipelineRun(t *testing.T, suffix int, namespace string) *v1alpha1.PipelineRun { +func getFanInFanOutPipelineRun(t *testing.T, _ int, namespace string, pipelineName string, resources map[string]*v1alpha1.PipelineResource) *v1alpha1.PipelineRun { return parse.MustParseAlphaPipelineRun(t, fmt.Sprintf(` metadata: name: %s @@ -458,8 +464,8 @@ spec: resources: - 
name: git-repo resourceRef: - name: kritis-resource-git -`, getName(pipelineRunName, suffix), namespace, getName(pipelineName, suffix))) + name: %s +`, helpers.ObjectNameForTest(t), namespace, pipelineName, resources["kritis-resource-git"].Name)) } func getPipelineRunSecret(suffix int) *corev1.Secret { @@ -490,7 +496,7 @@ func getPipelineRunSecret(suffix int) *corev1.Secret { } } -func getHelloWorldPipelineRun(t *testing.T, suffix int, namespace string) *v1alpha1.PipelineRun { +func getHelloWorldPipelineRun(t *testing.T, suffix int, namespace string, pipelineName string, _ map[string]*v1alpha1.PipelineResource) *v1alpha1.PipelineRun { return parse.MustParseAlphaPipelineRun(t, fmt.Sprintf(` metadata: labels: @@ -506,7 +512,7 @@ spec: pipelineRef: name: %s serviceAccountName: %s%d -`, getName(pipelineRunName, suffix), namespace, getName(pipelineName, suffix), saName, suffix)) +`, helpers.ObjectNameForTest(t), namespace, pipelineName, saName, suffix)) } func getName(namespace string, suffix int) string { @@ -668,7 +674,7 @@ func assertAnnotationsMatch(t *testing.T, expectedAnnotations, actualAnnotations } } -func getPipelineWithFailingCondition(t *testing.T, suffix int) *v1alpha1.Pipeline { +func getPipelineWithFailingCondition(t *testing.T, taskName, condName string) *v1alpha1.Pipeline { return parse.MustParseAlphaPipeline(t, fmt.Sprintf(` metadata: name: %s @@ -683,28 +689,23 @@ spec: taskRef: name: %s runAfter: ['%s'] -`, getName(pipelineName, suffix), task1Name, getName(taskName, suffix), cond1Name, getName(taskName, suffix), task1Name)) +`, helpers.ObjectNameForTest(t), task1Name, taskName, condName, taskName, task1Name)) } -func getFailingCondition(namespace string) *v1alpha1.Condition { - return &v1alpha1.Condition{ - ObjectMeta: metav1.ObjectMeta{ - Name: cond1Name, - Namespace: namespace, - }, - Spec: v1alpha1.ConditionSpec{ - Check: v1alpha1.Step{ - Container: corev1.Container{ - Image: "ubuntu", - Command: []string{"/bin/bash"}, - Args: []string{"exit 1"}, - }, - }, - }, - } +func getFailingCondition(t *testing.T, namespace string) *v1alpha1.Condition { + return parse.MustParseCondition(t, fmt.Sprintf(` +metadata: + name: %s + namespace: %s +spec: + check: + image: ubuntu + command: ['/bin/bash'] + args: ['exit 1'] +`, helpers.ObjectNameForTest(t), namespace)) } -func getConditionalPipelineRun(t *testing.T, suffix int, namespace string) *v1alpha1.PipelineRun { +func getConditionalPipelineRun(t *testing.T, _ int, namespace string, pipelineName string, _ map[string]*v1alpha1.PipelineResource) *v1alpha1.PipelineRun { return parse.MustParseAlphaPipelineRun(t, fmt.Sprintf(` metadata: name: %s @@ -714,5 +715,5 @@ metadata: spec: pipelineRef: name: %s -`, getName(pipelineRunName, suffix), namespace, getName(pipelineName, suffix))) +`, helpers.ObjectNameForTest(t), namespace, pipelineName)) }
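
The hunks above all apply the same refactoring pattern: object names come from `helpers.ObjectNameForTest(t)` instead of package-level constants plus an index, and each `testSetup` now returns the objects it created so the `pipelineRunFunc` can reference `p.Name` and the generated resource names directly. Below is a minimal sketch of that pattern; it is illustrative rather than part of the patch, it reuses the repo's internal test `clients` struct and `test/parse` helpers seen throughout these hunks, and the `setupSingleTaskPipeline` helper name is invented for the example.

```go
package test

import (
	"context"
	"fmt"
	"testing"

	"github.com/tektoncd/pipeline/pkg/apis/pipeline/v1beta1"
	"github.com/tektoncd/pipeline/test/parse"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"knative.dev/pkg/test/helpers"
)

// setupSingleTaskPipeline (hypothetical) creates one Task and one Pipeline,
// both named via helpers.ObjectNameForTest so they cannot collide across
// parallel test cases, and returns the Pipeline so the caller can build a
// PipelineRun that references p.Name instead of reconstructing the name
// from a shared constant and a suffix.
func setupSingleTaskPipeline(ctx context.Context, t *testing.T, c *clients, namespace string) *v1beta1.Pipeline {
	t.Helper()

	// Create a Task with a unique, per-test name.
	task := parse.MustParseTask(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
spec:
  steps:
  - image: ubuntu
    command: ['/bin/bash']
    args: ['-c', 'echo hello, world']
`, helpers.ObjectNameForTest(t), namespace))
	if _, err := c.TaskClient.Create(ctx, task, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create Task `%s`: %s", task.Name, err)
	}

	// Create a Pipeline that references the Task by its generated name.
	p := parse.MustParsePipeline(t, fmt.Sprintf(`
metadata:
  name: %s
  namespace: %s
spec:
  tasks:
  - name: task1
    taskRef:
      name: %s
`, helpers.ObjectNameForTest(t), namespace, task.Name))
	if _, err := c.PipelineClient.Create(ctx, p, metav1.CreateOptions{}); err != nil {
		t.Fatalf("Failed to create Pipeline `%s`: %s", p.Name, err)
	}
	return p
}
```

Deriving names from the running test, rather than from shared constants and an index, avoids collisions between parallel cases and makes any leftover object easy to trace back to the test that created it, which is why the created objects are threaded through the test table instead of being re-derived later.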