From 0918ba8031f6435f23ba64649357e129f216c0df Mon Sep 17 00:00:00 2001 From: Nader Ziada Date: Tue, 22 Jan 2019 23:03:50 -0500 Subject: [PATCH] sharing artifacts between pipeline tasks using bucket - using gcp buckets only for now - bucket information configured using a config map - refactor of pvc implementation for the same feature to use same interface - artifact bucket and artifact pvc return the container spec to execute the upload and download steps - issue https://github.com/knative/build-pipeline/issues/384 --- config/config-artifact-bucket.yaml | 30 +++ docs/developers/README.md | 9 + docs/using.md | 16 +- pkg/apis/pipeline/v1alpha1/artifact_bucket.go | 147 ++++++++++++ .../pipeline/v1alpha1/artifact_bucket_test.go | 97 ++++++++ pkg/apis/pipeline/v1alpha1/artifact_pvc.go | 95 ++++++++ .../pipeline/v1alpha1/artifact_pvc_test.go | 64 +++++ .../pipeline/v1alpha1/pipelinerun_types.go | 31 --- .../v1alpha1/pipelinerun_types_test.go | 21 -- pkg/apis/pipeline/v1alpha1/taskrun_types.go | 11 + .../v1alpha1/zz_generated.deepcopy.go | 37 +++ pkg/artifacts/artifact_storage_test.go | 141 +++++++++++ pkg/artifacts/artifacts_storage.go | 143 +++++++++++ .../v1alpha1/pipelinerun/config/store.go | 79 ++++++ .../v1alpha1/pipelinerun/config/store_test.go | 55 +++++ .../testdata/config-artifact-bucket.yaml | 21 ++ .../v1alpha1/pipelinerun/pipelinerun.go | 43 ++-- .../v1alpha1/pipelinerun/pipelinerun_test.go | 4 + .../resources/input_output_steps.go | 22 +- .../resources/input_output_steps_test.go | 8 +- .../taskrun/resources/input_resource_test.go | 179 +++++++++++++- .../taskrun/resources/input_resources.go | 48 ++-- .../taskrun/resources/output_resource.go | 49 ++-- .../taskrun/resources/output_resource_test.go | 184 +++++++++++++- pkg/reconciler/v1alpha1/taskrun/taskrun.go | 4 +- test/README.md | 2 + test/artifact_bucket_test.go | 224 ++++++++++++++++++ 27 files changed, 1614 insertions(+), 150 deletions(-) create mode 100644 config/config-artifact-bucket.yaml create 
mode 100644 pkg/apis/pipeline/v1alpha1/artifact_bucket.go create mode 100644 pkg/apis/pipeline/v1alpha1/artifact_bucket_test.go create mode 100644 pkg/apis/pipeline/v1alpha1/artifact_pvc.go create mode 100644 pkg/apis/pipeline/v1alpha1/artifact_pvc_test.go create mode 100644 pkg/artifacts/artifact_storage_test.go create mode 100644 pkg/artifacts/artifacts_storage.go create mode 100644 pkg/reconciler/v1alpha1/pipelinerun/config/store.go create mode 100644 pkg/reconciler/v1alpha1/pipelinerun/config/store_test.go create mode 100644 pkg/reconciler/v1alpha1/pipelinerun/config/testdata/config-artifact-bucket.yaml create mode 100644 test/artifact_bucket_test.go diff --git a/config/config-artifact-bucket.yaml b/config/config-artifact-bucket.yaml new file mode 100644 index 00000000000..dd2ec54a4a7 --- /dev/null +++ b/config/config-artifact-bucket.yaml @@ -0,0 +1,30 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-artifact-bucket + namespace: knative-build-pipeline +data: + # location of the gcs bucket to be used for artifact storage + # location: "gs://bucket-name" + + # name of the secret that will contain the credentials for the service account + # with access to the bucket + # bucket.service.account.secret.name: + + # The key in the secret with the required service account json + # bucket.service.account.secret.key: + diff --git a/docs/developers/README.md b/docs/developers/README.md index 3407ae84fa8..3d93949104c 100644 --- a/docs/developers/README.md +++ b/docs/developers/README.md @@ -16,6 +16,15 @@ on path `/pvc` by PipelineRun. controller adds a step to copy from PVC to directory path `/pvc/previous_task/resource_name`. +Another alternatives is to use a GCS storage bucket to share the artifacts. This can +be configured using a ConfigMap with the name `config-artifact-bucket` with the following attributes: + +- location: the address of the bucket (for example gs://mybucket) +- bucket.service.account.secret.name: the name of the secret that will contain the credentials for the service account + with access to the bucket +- bucket.service.account.secret.key: the key in the secret with the required service account json + The bucket is configured with a retention policy of 24 hours after which files will be deleted + ### How are inputs handled? Input resources, like source code (git) or artifacts, are dumped at path diff --git a/docs/using.md b/docs/using.md index 6599062736c..6cfa65c2a03 100644 --- a/docs/using.md +++ b/docs/using.md @@ -184,9 +184,19 @@ configure that by edit the `image`'s value in a configmap named ### Resource sharing between tasks Pipeline `Tasks` are allowed to pass resources from previous `Tasks` via the -[`from`](#from) field. 
This feature is implemented using -Persistent Volume Claims under the hood but however has an implication -that tasks cannot have any volume mounted under path `/pvc`. +[`from`](#from) field. This feature is implemented using the two +following alternatives: + +- Persistent Volume Claims under the hood but however has an implication + that tasks cannot have any volume mounted under path `/pvc`. + +- [GCS storage bucket](https://cloud.google.com/storage/docs/json_api/v1/buckets) + A storage bucket can be configured using a ConfigMap with the name `config-artifact-bucket` with the following attributes: +- location: the address of the bucket (for example gs://mybucket) +- bucket.service.account.secret.name: the name of the secret that will contain the credentials for the service account + with access to the bucket +- bucket.service.account.secret.key: the key in the secret with the required service account json +The bucket is configured with a retention policy of 24 hours after which files will be deleted ### Outputs diff --git a/pkg/apis/pipeline/v1alpha1/artifact_bucket.go b/pkg/apis/pipeline/v1alpha1/artifact_bucket.go new file mode 100644 index 00000000000..fc190e4c356 --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/artifact_bucket.go @@ -0,0 +1,147 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "fmt" + "path/filepath" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +const ( + // BucketConfigName is the name of the configmap containing all + // customizations for the storage bucket. + BucketConfigName = "config-artifact-bucket" + + // BucketLocationKey is the name of the configmap entry that specifies + // location of the bucket. + BucketLocationKey = "location" + + // BucketServiceAccountSecretName is the name of the configmap entry that specifies + // the name of the secret that will provide the service account with bucket access. + // This secret must have a key called serviceaccount that will have a value with + // the service account with access to the bucket + BucketServiceAccountSecretName = "bucket.service.account.secret.name" + + // BucketServiceAccountSecretKey is the name of the configmap entry that specifies + // the secret key that will have a value with the service account json with access + // to the bucket + BucketServiceAccountSecretKey = "bucket.service.account.secret.key" +) + +var ( + secretVolumeMountPath = "/var/bucketsecret" +) + +// ArtifactBucket contains the Storage bucket configuration defined in the +// Bucket config map.
+type ArtifactBucket struct { + Name string + Location string + Secrets []SecretParam +} + +// IsPVC indicates if the temporary storage used for artifacts in a pipelinerun is a PVC +func (b *ArtifactBucket) IsPVC() bool { + return false +} + +// StorageBasePath returns the path to be used to store artifacts in a pipelinerun temporary storage +func (b *ArtifactBucket) StorageBasePath(pr *PipelineRun) string { + return fmt.Sprintf("%s-%s-bucket", pr.Name, pr.Namespace) +} + +// GetCopyFromContainerSpec returns a container used to download artifacts from temporary storage +func (b *ArtifactBucket) GetCopyFromContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { + args := []string{"-args", fmt.Sprintf("cp -r %s %s", fmt.Sprintf("%s/%s/**", b.Location, sourcePath), destinationPath)} + + envVars, secretVolumeMount := getBucketSecretEnvVarsAndVolumeMounts(b.Secrets) + + return []corev1.Container{{ + Name: fmt.Sprintf("artifact-dest-mkdir-%s", name), + Image: *bashNoopImage, + Args: []string{ + "-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " "), + }, + }, { + Name: fmt.Sprintf("artifact-copy-from-%s", name), + Image: *gsutilImage, + Args: args, + Env: envVars, + VolumeMounts: secretVolumeMount, + }} +} + +// GetCopyToContainerSpec returns a container used to upload artifacts for temporary storage +func (b *ArtifactBucket) GetCopyToContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { + args := []string{"-args", fmt.Sprintf("cp -r %s %s", sourcePath, fmt.Sprintf("%s/%s", b.Location, destinationPath))} + + envVars, secretVolumeMount := getBucketSecretEnvVarsAndVolumeMounts(b.Secrets) + + return []corev1.Container{{ + Name: fmt.Sprintf("artifact-copy-to-%s", name), + Image: *gsutilImage, + Args: args, + Env: envVars, + VolumeMounts: secretVolumeMount, + }} +} + +// GetSecretsVolumes retunrs the list of volumes for secrets to be mounted +// on pod +func (b *ArtifactBucket) GetSecretsVolumes() []corev1.Volume { + 
volumes := []corev1.Volume{} + for _, sec := range b.Secrets { + v := corev1.Volume{ + Name: fmt.Sprintf("bucket-secret-volume-%s", sec.SecretName), + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: sec.SecretName, + }, + }, + } + volumes = append(volumes, v) + } + return volumes +} + +func getBucketSecretEnvVarsAndVolumeMounts(secrets []SecretParam) ([]corev1.EnvVar, []corev1.VolumeMount) { + mountPaths := make(map[string]struct{}) + var ( + envVars []corev1.EnvVar + secretVolumeMount []corev1.VolumeMount + ) + for _, sec := range secrets { + if sec.FieldName != "" { + mountPath := filepath.Join(secretVolumeMountPath, sec.SecretName) + envVars = append(envVars, corev1.EnvVar{ + Name: strings.ToUpper(sec.FieldName), + Value: filepath.Join(mountPath, sec.SecretKey), + }) + if _, ok := mountPaths[mountPath]; !ok { + secretVolumeMount = append(secretVolumeMount, corev1.VolumeMount{ + Name: fmt.Sprintf("bucket-secret-volume-%s", sec.SecretName), + MountPath: mountPath, + }) + mountPaths[mountPath] = struct{}{} + } + } + } + return envVars, secretVolumeMount +} diff --git a/pkg/apis/pipeline/v1alpha1/artifact_bucket_test.go b/pkg/apis/pipeline/v1alpha1/artifact_bucket_test.go new file mode 100644 index 00000000000..5bb5335035e --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/artifact_bucket_test.go @@ -0,0 +1,97 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" +) + +func TestBucketGetCopyFromContainerSpec(t *testing.T) { + bucket := ArtifactBucket{ + Location: "gs://fake-bucket", + Secrets: []SecretParam{{ + FieldName: "GOOGLE_APPLICATION_CREDENTIALS", + SecretName: "secret1", + SecretKey: "serviceaccount", + }}, + } + want := []corev1.Container{{ + Name: "artifact-dest-mkdir-workspace", + Image: "override-with-bash-noop:latest", + Args: []string{"-args", "mkdir -p /workspace/destination"}, + }, { + Name: "artifact-copy-from-workspace", + Image: "override-with-gsutil-image:latest", + Args: []string{"-args", "cp -r gs://fake-bucket/src-path/** /workspace/destination"}, + Env: []corev1.EnvVar{{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/bucketsecret/secret1/serviceaccount"}}, + VolumeMounts: []corev1.VolumeMount{{Name: "bucket-secret-volume-secret1", MountPath: "/var/bucketsecret/secret1"}}, + }} + + got := bucket.GetCopyFromContainerSpec("workspace", "src-path", "/workspace/destination") + if d := cmp.Diff(got, want); d != "" { + t.Errorf("Diff:\n%s", d) + } +} + +func TestBucketGetCopyToContainerSpec(t *testing.T) { + bucket := ArtifactBucket{ + Location: "gs://fake-bucket", + Secrets: []SecretParam{{ + FieldName: "GOOGLE_APPLICATION_CREDENTIALS", + SecretName: "secret1", + SecretKey: "serviceaccount", + }}, + } + want := []corev1.Container{{ + Name: "artifact-copy-to-workspace", + Image: "override-with-gsutil-image:latest", + Args: []string{"-args", "cp -r src-path gs://fake-bucket/workspace/destination"}, + Env: []corev1.EnvVar{{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: "/var/bucketsecret/secret1/serviceaccount"}}, + VolumeMounts: []corev1.VolumeMount{{Name: "bucket-secret-volume-secret1", MountPath: "/var/bucketsecret/secret1"}}, + }} + + got := bucket.GetCopyToContainerSpec("workspace", "src-path", "workspace/destination") + if d := cmp.Diff(got, want); d != "" { + t.Errorf("Diff:\n%s", d) 
+ } +} + +func TestGetSecretsVolumes(t *testing.T) { + bucket := ArtifactBucket{ + Location: "gs://fake-bucket", + Secrets: []SecretParam{{ + FieldName: "GOOGLE_APPLICATION_CREDENTIALS", + SecretName: "secret1", + SecretKey: "serviceaccount", + }}, + } + want := []corev1.Volume{{ + Name: "bucket-secret-volume-secret1", + VolumeSource: corev1.VolumeSource{ + Secret: &corev1.SecretVolumeSource{ + SecretName: "secret1", + }, + }, + }} + got := bucket.GetSecretsVolumes() + if d := cmp.Diff(got, want); d != "" { + t.Errorf("Diff:\n%s", d) + } +} diff --git a/pkg/apis/pipeline/v1alpha1/artifact_pvc.go b/pkg/apis/pipeline/v1alpha1/artifact_pvc.go new file mode 100644 index 00000000000..52500a31923 --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/artifact_pvc.go @@ -0,0 +1,95 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + "flag" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +var ( + pvcDir = "/pvc" + bashNoopImage = flag.String("bash-noop-image", "override-with-bash-noop:latest", "The container image containing bash shell") +) + +// ArtifactPVC represents the pvc created by the pipelinerun +// for artifacts temporary storage +type ArtifactPVC struct { + Name string +} + +// IsPVC indicates if the temporary storage used for artifacts in a pipelinerun is a PVC +func (p *ArtifactPVC) IsPVC() bool { + return true +} + +// StorageBasePath returns the path to be used to store artifacts in a pipelinerun temporary storage +func (p *ArtifactPVC) StorageBasePath(pr *PipelineRun) string { + return pvcDir +} + +// GetCopyFromContainerSpec returns a container used to download artifacts from temporary storage +func (p *ArtifactPVC) GetCopyFromContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { + return []corev1.Container{{ + Name: fmt.Sprintf("source-copy-%s", name), + Image: *bashNoopImage, + Args: []string{"-args", strings.Join([]string{"cp", "-r", fmt.Sprintf("%s/.", sourcePath), destinationPath}, " ")}, + }} +} + +// GetCopyToContainerSpec returns a container used to upload artifacts for temporary storage +func (p *ArtifactPVC) GetCopyToContainerSpec(name, sourcePath, destinationPath string) []corev1.Container { + return []corev1.Container{{ + Name: fmt.Sprintf("source-mkdir-%s", name), + Image: *bashNoopImage, + Args: []string{ + "-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " "), + }, + VolumeMounts: []corev1.VolumeMount{getPvcMount(p.Name)}, + }, { + Name: fmt.Sprintf("source-copy-%s", name), + Image: *bashNoopImage, + Args: []string{ + "-args", strings.Join([]string{"cp", "-r", fmt.Sprintf("%s/.", sourcePath), destinationPath}, " "), + }, + VolumeMounts: []corev1.VolumeMount{getPvcMount(p.Name)}, + }} +} + +func getPvcMount(name string) corev1.VolumeMount { + return corev1.VolumeMount{ + Name: 
name, // taskrun pvc name + MountPath: pvcDir, // nothing should be mounted here + } +} + +func CreateDirContainer(name, destinationPath string) corev1.Container { + return corev1.Container{ + Name: fmt.Sprintf("create-dir-%s", name), + Image: *bashNoopImage, + Args: []string{"-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " ")}, + } +} + +// GetSecretsVolumes returns the list of volumes for secrets to be mounted +// on pod +func (p *ArtifactPVC) GetSecretsVolumes() []corev1.Volume { + return []corev1.Volume{} +} diff --git a/pkg/apis/pipeline/v1alpha1/artifact_pvc_test.go b/pkg/apis/pipeline/v1alpha1/artifact_pvc_test.go new file mode 100644 index 00000000000..91f18be527b --- /dev/null +++ b/pkg/apis/pipeline/v1alpha1/artifact_pvc_test.go @@ -0,0 +1,64 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + corev1 "k8s.io/api/core/v1" +) + +func TestPVCGetCopyFromContainerSpec(t *testing.T) { + + pvc := ArtifactPVC{ + Name: "pipelinerun-pvc", + } + want := []corev1.Container{{ + Name: "source-copy-workspace", + Image: "override-with-bash-noop:latest", + Args: []string{"-args", "cp -r src-path/.
/workspace/destination"}, + }} + + got := pvc.GetCopyFromContainerSpec("workspace", "src-path", "/workspace/destination") + if d := cmp.Diff(got, want); d != "" { + t.Errorf("Diff:\n%s", d) + } +} + +func TestPVCGetCopyToContainerSpec(t *testing.T) { + + pvc := ArtifactPVC{ + Name: "pipelinerun-pvc", + } + want := []corev1.Container{{ + Name: "source-mkdir-workspace", + Image: "override-with-bash-noop:latest", + Args: []string{"-args", "mkdir -p /workspace/destination"}, + VolumeMounts: []corev1.VolumeMount{{MountPath: "/pvc", Name: "pipelinerun-pvc"}}, + }, { + Name: "source-copy-workspace", + Image: "override-with-bash-noop:latest", + Args: []string{"-args", "cp -r src-path/. /workspace/destination"}, + VolumeMounts: []corev1.VolumeMount{{MountPath: "/pvc", Name: "pipelinerun-pvc"}}, + }} + + got := pvc.GetCopyToContainerSpec("workspace", "src-path", "/workspace/destination") + if d := cmp.Diff(got, want); d != "" { + t.Errorf("Diff:\n%s", d) + } +} diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go index d877b8d29ee..6884ef3243d 100644 --- a/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go +++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_types.go @@ -17,12 +17,9 @@ limitations under the License. 
package v1alpha1 import ( - "fmt" - duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" "github.com/knative/pkg/webhook" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" ) @@ -181,37 +178,9 @@ func (pr *PipelineRun) GetTaskRunRef() corev1.ObjectReference { // SetDefaults for pipelinerun func (pr *PipelineRun) SetDefaults() {} -// GetPVC gets PVC for -func (pr *PipelineRun) GetPVC() *corev1.PersistentVolumeClaim { - var pvcSizeBytes int64 - // TODO(shashwathi): make this value configurable - pvcSizeBytes = 5 * 1024 * 1024 * 1024 // 5 GBs - return &corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: pr.Namespace, - Name: pr.GetPVCName(), - OwnerReferences: pr.GetOwnerReference(), - }, - Spec: corev1.PersistentVolumeClaimSpec{ - // Multiple tasks should be allowed to read - AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, - Resources: corev1.ResourceRequirements{ - Requests: map[corev1.ResourceName]resource.Quantity{ - corev1.ResourceStorage: *resource.NewQuantity(pvcSizeBytes, resource.BinarySI), - }, - }, - }, - } -} - // GetOwnerReference gets the pipeline run as owner reference for any related objects func (pr *PipelineRun) GetOwnerReference() []metav1.OwnerReference { return []metav1.OwnerReference{ *metav1.NewControllerRef(pr, groupVersionKind), } } - -// GetPVCName provides name of PVC for corresponding PR -func (pr *PipelineRun) GetPVCName() string { - return fmt.Sprintf("%s-pvc", pr.Name) -} diff --git a/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go b/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go index fa8192d2d21..d61db17cc80 100644 --- a/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go +++ b/pkg/apis/pipeline/v1alpha1/pipelinerun_types_test.go @@ -83,24 +83,3 @@ func TestInitializeConditions(t *testing.T) { t.Fatalf("PipelineRun status getting reset") } } - -func Test_GetPVC(t 
*testing.T) { - p := &PipelineRun{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-name", - Namespace: "test-ns", - }, - } - expectedPVC := corev1.PersistentVolumeClaim{ - ObjectMeta: metav1.ObjectMeta{ - Namespace: "test-ns", - Name: "test-name-pvc", // prname-pvc - OwnerReferences: []metav1.OwnerReference{ - *metav1.NewControllerRef(p, groupVersionKind), - }, - }, - } - if d := cmp.Diff(p.GetPVC().ObjectMeta, expectedPVC.ObjectMeta); d != "" { - t.Fatalf("GetPVC mismatch; want %v got %v; diff %s", expectedPVC.ObjectMeta, p.GetPVC().ObjectMeta, d) - } -} diff --git a/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/pkg/apis/pipeline/v1alpha1/taskrun_types.go index c546aa95eac..edb32035ddc 100644 --- a/pkg/apis/pipeline/v1alpha1/taskrun_types.go +++ b/pkg/apis/pipeline/v1alpha1/taskrun_types.go @@ -203,3 +203,14 @@ func (tr *TaskRun) GetPipelineRunPVCName() string { } return "" } + +// HasPipelineRunOwnerReference returns true if TaskRun has +// owner reference of type PipelineRun +func (tr *TaskRun) HasPipelineRunOwnerReference() bool { + for _, ref := range tr.GetOwnerReferences() { + if ref.Kind == pipelineRunControllerName { + return true + } + } + return false +} diff --git a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index 1640710074e..ab8dd6a0f5e 100644 --- a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -27,6 +27,43 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactBucket) DeepCopyInto(out *ArtifactBucket) { + *out = *in + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]SecretParam, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactBucket.
+func (in *ArtifactBucket) DeepCopy() *ArtifactBucket { + if in == nil { + return nil + } + out := new(ArtifactBucket) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ArtifactPVC) DeepCopyInto(out *ArtifactPVC) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ArtifactPVC. +func (in *ArtifactPVC) DeepCopy() *ArtifactPVC { + if in == nil { + return nil + } + out := new(ArtifactPVC) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ClusterResource) DeepCopyInto(out *ClusterResource) { *out = *in diff --git a/pkg/artifacts/artifact_storage_test.go b/pkg/artifacts/artifact_storage_test.go new file mode 100644 index 00000000000..df81b04365e --- /dev/null +++ b/pkg/artifacts/artifact_storage_test.go @@ -0,0 +1,141 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package artifacts + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/knative/build-pipeline/pkg/system" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakek8s "k8s.io/client-go/kubernetes/fake" +) + +func TestInitializeArtifactStorage_WithConfigMap(t *testing.T) { + fakekubeclient := fakek8s.NewSimpleClientset( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace, + Name: v1alpha1.BucketConfigName, + }, + Data: map[string]string{ + v1alpha1.BucketLocationKey: "gs://fake-bucket", + v1alpha1.BucketServiceAccountSecretName: "secret1", + v1alpha1.BucketServiceAccountSecretKey: "sakey", + }, + }, + ) + pipelinerun := &v1alpha1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "pipelineruntest", + }, + } + + bucket, err := InitializeArtifactStorage(pipelinerun, fakekubeclient) + if err != nil { + t.Fatalf("Somehow had error initializing artifact storage run out of fake client: %s", err) + } + + expectedArtifactBucket := &v1alpha1.ArtifactBucket{ + Location: "gs://fake-bucket", + Secrets: []v1alpha1.SecretParam{{ + FieldName: "GOOGLE_APPLICATION_CREDENTIALS", + SecretKey: "sakey", + SecretName: "secret1", + }}, + } + + if diff := cmp.Diff(bucket, expectedArtifactBucket); diff != "" { + t.Fatalf("want %v, but got %v", expectedArtifactBucket, bucket) + } +} + +func TestInitializeArtifactStorage_WithoutConfigMap(t *testing.T) { + fakekubeclient := fakek8s.NewSimpleClientset() + pipelinerun := &v1alpha1.PipelineRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "foo", + Name: "pipelineruntest", + }, + } + + pvc, err := InitializeArtifactStorage(pipelinerun, fakekubeclient) + if err != nil { + t.Fatalf("Somehow had error initializing artifact storage run out of fake client: %s", err) + } + + expectedArtifactPVC := &v1alpha1.ArtifactPVC{ + Name: "pipelineruntest", + } + + if diff := 
cmp.Diff(pvc, expectedArtifactPVC); diff != "" { + t.Fatalf("want %v, but got %v", expectedArtifactPVC, pvc) + } +} + +func TestGetArtifactStorage_WithConfigMap(t *testing.T) { + fakekubeclient := fakek8s.NewSimpleClientset( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: system.Namespace, + Name: v1alpha1.BucketConfigName, + }, + Data: map[string]string{ + v1alpha1.BucketLocationKey: "gs://fake-bucket", + v1alpha1.BucketServiceAccountSecretName: "secret1", + v1alpha1.BucketServiceAccountSecretKey: "sakey", + }, + }, + ) + + bucket, err := GetArtifactStorage("pipelineruntest", fakekubeclient) + if err != nil { + t.Fatalf("Somehow had error initializing artifact storage run out of fake client: %s", err) + } + + expectedArtifactBucket := &v1alpha1.ArtifactBucket{ + Location: "gs://fake-bucket", + Secrets: []v1alpha1.SecretParam{{ + FieldName: "GOOGLE_APPLICATION_CREDENTIALS", + SecretKey: "sakey", + SecretName: "secret1", + }}, + } + + if diff := cmp.Diff(bucket, expectedArtifactBucket); diff != "" { + t.Fatalf("want %v, but got %v", expectedArtifactBucket, bucket) + } +} + +func TestGetArtifactStorage_WithoutConfigMap(t *testing.T) { + fakekubeclient := fakek8s.NewSimpleClientset() + pvc, err := GetArtifactStorage("pipelineruntest", fakekubeclient) + if err != nil { + t.Fatalf("Somehow had error initializing artifact storage run out of fake client: %s", err) + } + + expectedArtifactPVC := &v1alpha1.ArtifactPVC{ + Name: "pipelineruntest", + } + + if diff := cmp.Diff(pvc, expectedArtifactPVC); diff != "" { + t.Fatalf("want %v, but got %v", expectedArtifactPVC, pvc) + } +} diff --git a/pkg/artifacts/artifacts_storage.go b/pkg/artifacts/artifacts_storage.go new file mode 100644 index 00000000000..90c366874e4 --- /dev/null +++ b/pkg/artifacts/artifacts_storage.go @@ -0,0 +1,143 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package artifacts + +import ( + "fmt" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/knative/build-pipeline/pkg/system" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" +) + +// ArtifactStorageInterface is an interface to define the steps to copy +// a pipeline artifact to/from temporary storage +type ArtifactStorageInterface interface { + GetCopyToContainerSpec(name, sourcePath, destinationPath string) []corev1.Container + GetCopyFromContainerSpec(name, sourcePath, destinationPath string) []corev1.Container + GetSecretsVolumes() []corev1.Volume + IsPVC() bool + StorageBasePath(pr *v1alpha1.PipelineRun) string +} + +// InitializeArtifactStorage will check if there is a +// bucket configured or create a PVC +func InitializeArtifactStorage(pr *v1alpha1.PipelineRun, c kubernetes.Interface) (ArtifactStorageInterface, error) { + needPVC := false + configMap, err := c.CoreV1().ConfigMaps(system.Namespace).Get(v1alpha1.BucketConfigName, metav1.GetOptions{}) + if err != nil { + needPVC = true + } + if configMap != nil && configMap.Data != nil { + if location, ok := configMap.Data[v1alpha1.BucketLocationKey]; !ok { + if location == "" { + needPVC = true + } + } + } + if needPVC { + err = createPVC(pr, c) + if err != nil { + return nil, err + } + return &v1alpha1.ArtifactPVC{Name: pr.Name}, nil + } + + return NewArtifactBucketConfigFromConfigMap(configMap) +} + +// GetArtifactStorage
returns the storage interface to enable +// consumer code to get a container step for copy to/from storage +func GetArtifactStorage(prName string, c kubernetes.Interface) (ArtifactStorageInterface, error) { + configMap, err := c.CoreV1().ConfigMaps(system.Namespace).Get(v1alpha1.BucketConfigName, metav1.GetOptions{}) + if err != nil { + return &v1alpha1.ArtifactPVC{Name: prName}, nil + } + return NewArtifactBucketConfigFromConfigMap(configMap) +} + +// NewArtifactBucketConfigFromConfigMap creates a Bucket from the supplied ConfigMap +func NewArtifactBucketConfigFromConfigMap(configMap *corev1.ConfigMap) (*v1alpha1.ArtifactBucket, error) { + c := &v1alpha1.ArtifactBucket{} + + if configMap.Data == nil { + return c, nil + } + if location, ok := configMap.Data[v1alpha1.BucketLocationKey]; !ok { + c.Location = "" + } else { + c.Location = location + } + sp := v1alpha1.SecretParam{} + if secretName, ok := configMap.Data[v1alpha1.BucketServiceAccountSecretName]; !ok { + c.Secrets = nil + } else { + if secretKey, ok := configMap.Data[v1alpha1.BucketServiceAccountSecretKey]; !ok { + c.Secrets = nil + } else { + sp.FieldName = "GOOGLE_APPLICATION_CREDENTIALS" + sp.SecretName = secretName + sp.SecretKey = secretKey + c.Secrets = append(c.Secrets, sp) + } + } + return c, nil +} + +func createPVC(pr *v1alpha1.PipelineRun, c kubernetes.Interface) error { + if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(getPVCName(pr), metav1.GetOptions{}); err != nil { + if errors.IsNotFound(err) { + pvc := getPVCSpec(pr) + if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvc); err != nil { + return fmt.Errorf("failed to claim Persistent Volume %q due to error: %s", pr.Name, err) + } + return nil + } + return fmt.Errorf("failed to get claim Persistent Volume %q due to error: %s", pr.Name, err) + } + return nil +} + +func getPVCSpec(pr *v1alpha1.PipelineRun) *corev1.PersistentVolumeClaim { + var pvcSizeBytes int64 + // TODO(shashwathi): make this value
configurable + pvcSizeBytes = 5 * 1024 * 1024 * 1024 // 5 GBs + return &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: pr.Namespace, + Name: getPVCName(pr), + OwnerReferences: pr.GetOwnerReference(), + }, + Spec: corev1.PersistentVolumeClaimSpec{ + // Multiple tasks should be allowed to read + AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce}, + Resources: corev1.ResourceRequirements{ + Requests: map[corev1.ResourceName]resource.Quantity{ + corev1.ResourceStorage: *resource.NewQuantity(pvcSizeBytes, resource.BinarySI), + }, + }, + }, + } +} + +func getPVCName(pr *v1alpha1.PipelineRun) string { + return fmt.Sprintf("%s-pvc", pr.Name) +} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/config/store.go b/pkg/reconciler/v1alpha1/pipelinerun/config/store.go new file mode 100644 index 00000000000..4fc479d9b48 --- /dev/null +++ b/pkg/reconciler/v1alpha1/pipelinerun/config/store.go @@ -0,0 +1,79 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package config + +import ( + "context" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + "github.com/knative/build-pipeline/pkg/artifacts" + "github.com/knative/pkg/configmap" +) + +type cfgKey struct{} + +// +k8s:deepcopy-gen=false +type Config struct { + ArtifactBucket *v1alpha1.ArtifactBucket +} + +func FromContext(ctx context.Context) *Config { + return ctx.Value(cfgKey{}).(*Config) +} + +func ToContext(ctx context.Context, c *Config) context.Context { + return context.WithValue(ctx, cfgKey{}, c) +} + +// +k8s:deepcopy-gen=false +type Store struct { + *configmap.UntypedStore +} + +func NewStore(logger configmap.Logger) *Store { + store := &Store{ + UntypedStore: configmap.NewUntypedStore( + "pipelinerun", + logger, + configmap.Constructors{ + v1alpha1.BucketConfigName: artifacts.NewArtifactBucketConfigFromConfigMap, + }, + ), + } + + return store +} + +func (s *Store) ToContext(ctx context.Context) context.Context { + return ToContext(ctx, s.Load()) +} + +func (s *Store) Load() *Config { + ep := s.UntypedLoad(v1alpha1.BucketConfigName) + if ep == nil { + return &Config{ + ArtifactBucket: &v1alpha1.ArtifactBucket{ + Location: "", + }, + } + } + + return &Config{ + ArtifactBucket: ep.(*v1alpha1.ArtifactBucket).DeepCopy(), + } + +} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/config/store_test.go b/pkg/reconciler/v1alpha1/pipelinerun/config/store_test.go new file mode 100644 index 00000000000..2e6090b53ae --- /dev/null +++ b/pkg/reconciler/v1alpha1/pipelinerun/config/store_test.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package config + +import ( + "context" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/knative/build-pipeline/pkg/artifacts" + logtesting "github.com/knative/pkg/logging/testing" + + test "github.com/knative/build-pipeline/pkg/reconciler/testing" +) + +func TestStoreLoadWithContext(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + bucketConfig := test.ConfigMapFromTestFile(t, "config-artifact-bucket") + store.OnConfigChanged(bucketConfig) + + config := FromContext(store.ToContext(context.Background())) + + expected, _ := artifacts.NewArtifactBucketConfigFromConfigMap(bucketConfig) + if diff := cmp.Diff(expected, config.ArtifactBucket); diff != "" { + t.Errorf("Unexpected controller config (-want, +got): %v", diff) + } +} +func TestStoreImmutableConfig(t *testing.T) { + store := NewStore(logtesting.TestLogger(t)) + store.OnConfigChanged(test.ConfigMapFromTestFile(t, "config-artifact-bucket")) + + config := store.Load() + + config.ArtifactBucket.Location = "mutated" + + newConfig := store.Load() + + if newConfig.ArtifactBucket.Location == "mutated" { + t.Error("Controller config is not immutable") + } +} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/config/testdata/config-artifact-bucket.yaml b/pkg/reconciler/v1alpha1/pipelinerun/config/testdata/config-artifact-bucket.yaml new file mode 100644 index 00000000000..97c09533d66 --- /dev/null +++ b/pkg/reconciler/v1alpha1/pipelinerun/config/testdata/config-artifact-bucket.yaml @@ -0,0 +1,21 @@ +# Copyright 2018 The Knative Authors +# +# Licensed under the Apache License, Version 
2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: ConfigMap +metadata: + name: config-artifact-bucket + namespace: knative-build-pipeline +data: + location: "gs://build-pipeline-fake-bucket" diff --git a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go index a6d1f8b255f..6a61babaa50 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun.go @@ -24,12 +24,15 @@ import ( "github.com/knative/build-pipeline/pkg/apis/pipeline" "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + artifacts "github.com/knative/build-pipeline/pkg/artifacts" informers "github.com/knative/build-pipeline/pkg/client/informers/externalversions/pipeline/v1alpha1" listers "github.com/knative/build-pipeline/pkg/client/listers/pipeline/v1alpha1" "github.com/knative/build-pipeline/pkg/reconciler" + "github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/pipelinerun/config" "github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" "github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/taskrun" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/configmap" "github.com/knative/pkg/controller" "github.com/knative/pkg/tracker" "go.uber.org/zap" @@ -37,7 +40,6 @@ import ( "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - 
"k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" ) @@ -67,6 +69,11 @@ const ( eventReasonSucceeded = "PipelineRunSucceeded" ) +type configStore interface { + ToContext(ctx context.Context) context.Context + WatchConfigs(w configmap.Watcher) +} + // Reconciler implements controller.Reconciler for Configuration resources. type Reconciler struct { *reconciler.Base @@ -79,6 +86,7 @@ type Reconciler struct { clusterTaskLister listers.ClusterTaskLister resourceLister listers.PipelineResourceLister tracker tracker.Interface + configStore configStore } // Check that our Reconciler implements controller.Reconciler @@ -118,6 +126,10 @@ func NewController( taskRunInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ UpdateFunc: controller.PassNew(r.tracker.OnChanged), }) + + r.Logger.Info("Setting up ConfigMap receivers") + r.configStore = config.NewStore(r.Logger.Named("config-store")) + r.configStore.WatchConfigs(opt.ConfigMapWatcher) return impl } @@ -132,6 +144,8 @@ func (c *Reconciler) Reconcile(ctx context.Context, key string) error { return nil } + ctx = c.configStore.ToContext(ctx) + // Get the Pipeline Run resource with this namespace/name original, err := c.pipelineRunLister.PipelineRuns(namespace).Get(name) if errors.IsNotFound(err) { @@ -287,14 +301,15 @@ func (c *Reconciler) reconcile(ctx context.Context, pr *v1alpha1.PipelineRun) er serviceAccount := pr.Spec.ServiceAccount rprt := resources.GetNextTask(pr.Name, pipelineState, c.Logger) - if err := getOrCreatePVC(pr, c.KubeClientSet); err != nil { - c.Logger.Infof("PipelineRun failed to create/get volume %s", pr.Name) - return fmt.Errorf("Failed to create/get persistent volume claim %s for task %q: %v", pr.Name, err, pr.Name) + var as artifacts.ArtifactStorageInterface + if as, err = artifacts.InitializeArtifactStorage(pr, c.KubeClientSet); err != nil { + c.Logger.Infof("PipelineRun failed to initialize artifact storage %s", pr.Name) + return err } if rprt != nil { 
c.Logger.Infof("Creating a new TaskRun object %s", rprt.TaskRunName) - rprt.TaskRun, err = c.createTaskRun(c.Logger, rprt, pr, serviceAccount) + rprt.TaskRun, err = c.createTaskRun(c.Logger, rprt, pr, serviceAccount, as.StorageBasePath(pr)) if err != nil { c.Recorder.Eventf(pr, corev1.EventTypeWarning, "TaskRunCreationFailed", "Failed to create TaskRun %q: %v", rprt.TaskRunName, err) return fmt.Errorf("error creating TaskRun called %s for PipelineTask %s from PipelineRun %s: %s", rprt.TaskRunName, rprt.PipelineTask.Name, pr.Name, err) @@ -321,7 +336,7 @@ func updateTaskRunsStatus(pr *v1alpha1.PipelineRun, pipelineState []*resources.R } } -func (c *Reconciler) createTaskRun(logger *zap.SugaredLogger, rprt *resources.ResolvedPipelineRunTask, pr *v1alpha1.PipelineRun, sa string) (*v1alpha1.TaskRun, error) { +func (c *Reconciler) createTaskRun(logger *zap.SugaredLogger, rprt *resources.ResolvedPipelineRunTask, pr *v1alpha1.PipelineRun, sa, storageBasePath string) (*v1alpha1.TaskRun, error) { tr := &v1alpha1.TaskRun{ ObjectMeta: metav1.ObjectMeta{ Name: rprt.TaskRunName, @@ -342,7 +357,7 @@ func (c *Reconciler) createTaskRun(logger *zap.SugaredLogger, rprt *resources.Re ServiceAccount: sa, }, } - resources.WrapSteps(&tr.Spec, rprt.PipelineTask, rprt.ResolvedTaskResources.Inputs, rprt.ResolvedTaskResources.Outputs) + resources.WrapSteps(&tr.Spec, rprt.PipelineTask, rprt.ResolvedTaskResources.Inputs, rprt.ResolvedTaskResources.Outputs, storageBasePath) return c.PipelineClientSet.PipelineV1alpha1().TaskRuns(pr.Namespace).Create(tr) } @@ -359,20 +374,6 @@ func (c *Reconciler) updateStatus(pr *v1alpha1.PipelineRun) (*v1alpha1.PipelineR return newPr, nil } -func getOrCreatePVC(pr *v1alpha1.PipelineRun, c kubernetes.Interface) error { - if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Get(pr.GetPVCName(), metav1.GetOptions{}); err != nil { - if errors.IsNotFound(err) { - pvc := pr.GetPVC() - if _, err := c.CoreV1().PersistentVolumeClaims(pr.Namespace).Create(pvc); 
err != nil { - return fmt.Errorf("failed to claim Persistent Volume %q due to error: %s", pr.Name, err) - } - return nil - } - return fmt.Errorf("failed to get claim Persistent Volume %q due to error: %s", pr.Name, err) - } - return nil -} - // isDone returns true if the PipelineRun's status indicates the build is done. func isDone(status *v1alpha1.PipelineRunStatus) bool { return !status.GetCondition(duckv1alpha1.ConditionSucceeded).IsUnknown() diff --git a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go index e75b0276e61..2bfdb3147ca 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/pipelinerun_test.go @@ -26,9 +26,11 @@ import ( "github.com/knative/build-pipeline/pkg/reconciler" "github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/pipelinerun/resources" taskrunresources "github.com/knative/build-pipeline/pkg/reconciler/v1alpha1/taskrun/resources" + "github.com/knative/build-pipeline/pkg/system" "github.com/knative/build-pipeline/test" tb "github.com/knative/build-pipeline/test/builder" duckv1alpha1 "github.com/knative/pkg/apis/duck/v1alpha1" + "github.com/knative/pkg/configmap" "go.uber.org/zap" "go.uber.org/zap/zaptest/observer" corev1 "k8s.io/api/core/v1" @@ -47,6 +49,7 @@ func getRunName(pr *v1alpha1.PipelineRun) string { func getPipelineRunController(d test.Data, recorder record.EventRecorder) test.TestAssets { c, i := test.SeedTestData(d) observer, logs := observer.New(zap.InfoLevel) + configMapWatcher := configmap.NewInformedWatcher(c.Kube, system.Namespace) return test.TestAssets{ Controller: NewController( reconciler.Options{ @@ -54,6 +57,7 @@ func getPipelineRunController(d test.Data, recorder record.EventRecorder) test.T KubeClientSet: c.Kube, PipelineClientSet: c.Pipeline, Recorder: recorder, + ConfigMapWatcher: configMapWatcher, }, i.PipelineRun, i.Pipeline, diff --git 
a/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps.go index 6458bcf9ccd..16273124f7a 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps.go @@ -22,12 +22,8 @@ import ( "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" ) -var ( - pvcDir = "/pvc" -) - // GetOutputSteps will add the correct `path` to the input resources for pt -func GetOutputSteps(outputs map[string]*v1alpha1.PipelineResource, taskName string) []v1alpha1.TaskResourceBinding { +func GetOutputSteps(outputs map[string]*v1alpha1.PipelineResource, taskName, storageBasePath string) []v1alpha1.TaskResourceBinding { var taskOutputResources []v1alpha1.TaskResourceBinding for name, outputResource := range outputs { @@ -37,15 +33,15 @@ func GetOutputSteps(outputs map[string]*v1alpha1.PipelineResource, taskName stri Name: outputResource.Name, APIVersion: outputResource.APIVersion, }, - Paths: []string{filepath.Join(pvcDir, taskName, name)}, + Paths: []string{filepath.Join(storageBasePath, taskName, name)}, }) } return taskOutputResources } -// GetInputSteps will add the correct `path` to the input resources for pt. If the resources are from -// a previous task, the correct `path` will be used so that the resource from that task will be used. -func GetInputSteps(inputs map[string]*v1alpha1.PipelineResource, pt *v1alpha1.PipelineTask) []v1alpha1.TaskResourceBinding { +// GetInputSteps will add the correct `path` to the input resources for pt. If the resources are provided by +// a previous task, the correct `path` will be used so that the resource provided by that task will be used. 
+func GetInputSteps(inputs map[string]*v1alpha1.PipelineResource, pt *v1alpha1.PipelineTask, storageBasePath string) []v1alpha1.TaskResourceBinding { var taskInputResources []v1alpha1.TaskResourceBinding for name, inputResource := range inputs { @@ -62,7 +58,7 @@ func GetInputSteps(inputs map[string]*v1alpha1.PipelineResource, pt *v1alpha1.Pi for _, pipelineTaskInput := range pt.Resources.Inputs { if pipelineTaskInput.Name == name { for _, constr := range pipelineTaskInput.From { - stepSourceNames = append(stepSourceNames, filepath.Join(pvcDir, constr, name)) + stepSourceNames = append(stepSourceNames, filepath.Join(storageBasePath, constr, name)) } } } @@ -76,12 +72,12 @@ func GetInputSteps(inputs map[string]*v1alpha1.PipelineResource, pt *v1alpha1.Pi } // WrapSteps will add the correct `paths` to all of the inputs and outputs for pt -func WrapSteps(tr *v1alpha1.TaskRunSpec, pt *v1alpha1.PipelineTask, inputs, outputs map[string]*v1alpha1.PipelineResource) { +func WrapSteps(tr *v1alpha1.TaskRunSpec, pt *v1alpha1.PipelineTask, inputs, outputs map[string]*v1alpha1.PipelineResource, storageBasePath string) { if pt == nil { return } // Add presteps to setup updated input - tr.Inputs.Resources = append(tr.Inputs.Resources, GetInputSteps(inputs, pt)...) + tr.Inputs.Resources = append(tr.Inputs.Resources, GetInputSteps(inputs, pt, storageBasePath)...) // Add poststeps to setup outputs - tr.Outputs.Resources = append(tr.Outputs.Resources, GetOutputSteps(outputs, pt.Name)...) + tr.Outputs.Resources = append(tr.Outputs.Resources, GetOutputSteps(outputs, pt.Name, storageBasePath)...) 
} diff --git a/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps_test.go b/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps_test.go index a53dc534d11..e38fd4de0bd 100644 --- a/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps_test.go +++ b/pkg/reconciler/v1alpha1/pipelinerun/resources/input_output_steps_test.go @@ -26,6 +26,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var pvcDir = "/pvc" + func TestGetOutputSteps(t *testing.T) { r1 := &v1alpha1.PipelineResource{ ObjectMeta: metav1.ObjectMeta{ @@ -70,7 +72,7 @@ func TestGetOutputSteps(t *testing.T) { }} for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - postTasks := resources.GetOutputSteps(tc.outputs, tc.pipelineTaskName) + postTasks := resources.GetOutputSteps(tc.outputs, tc.pipelineTaskName, pvcDir) sort.SliceStable(postTasks, func(i, j int) bool { return postTasks[i].Name < postTasks[j].Name }) if d := cmp.Diff(postTasks, tc.expectedtaskOuputResources); d != "" { t.Errorf("error comparing post steps: %s", d) @@ -137,7 +139,7 @@ func TestGetInputSteps(t *testing.T) { } for _, tc := range tcs { t.Run(tc.name, func(t *testing.T) { - taskInputResources := resources.GetInputSteps(tc.inputs, tc.pipelineTask) + taskInputResources := resources.GetInputSteps(tc.inputs, tc.pipelineTask, pvcDir) sort.SliceStable(taskInputResources, func(i, j int) bool { return taskInputResources[i].Name < taskInputResources[j].Name }) if d := cmp.Diff(tc.expectedtaskInputResources, taskInputResources); d != "" { t.Errorf("error comparing task resource inputs: %s", d) @@ -172,7 +174,7 @@ func TestWrapSteps(t *testing.T) { } taskRunSpec := &v1alpha1.TaskRunSpec{} - resources.WrapSteps(taskRunSpec, pt, inputs, outputs) + resources.WrapSteps(taskRunSpec, pt, inputs, outputs, pvcDir) expectedtaskInputResources := []v1alpha1.TaskResourceBinding{{ ResourceRef: v1alpha1.PipelineResourceRef{Name: "resource1"}, diff --git 
a/pkg/reconciler/v1alpha1/taskrun/resources/input_resource_test.go b/pkg/reconciler/v1alpha1/taskrun/resources/input_resource_test.go index 9fda08ec863..0a1bcc8b64b 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/input_resource_test.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/input_resource_test.go @@ -26,6 +26,7 @@ import ( "go.uber.org/zap" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakek8s "k8s.io/client-go/kubernetes/fake" ) var ( @@ -868,7 +869,8 @@ func TestAddResourceToBuild(t *testing.T) { }} { t.Run(c.desc, func(t *testing.T) { setUp() - got, err := AddInputResource(c.build, c.task.Name, &c.task.Spec, c.taskRun, pipelineResourceLister, logger) + fakekubeclient := fakek8s.NewSimpleClientset() + got, err := AddInputResource(fakekubeclient, c.build, c.task.Name, &c.task.Spec, c.taskRun, pipelineResourceLister, logger) if (err != nil) != c.wantErr { t.Errorf("Test: %q; AddInputResource() error = %v, WantErr %v", c.desc, err, c.wantErr) } @@ -985,7 +987,180 @@ func Test_StorageInputResource(t *testing.T) { }} { t.Run(c.desc, func(t *testing.T) { setUp() - got, err := AddInputResource(c.build, c.task.Name, &c.task.Spec, c.taskRun, pipelineResourceLister, logger) + fakekubeclient := fakek8s.NewSimpleClientset() + got, err := AddInputResource(fakekubeclient, c.build, c.task.Name, &c.task.Spec, c.taskRun, pipelineResourceLister, logger) + if (err != nil) != c.wantErr { + t.Errorf("Test: %q; AddInputResource() error = %v, WantErr %v", c.desc, err, c.wantErr) + } + if d := cmp.Diff(got, c.want); d != "" { + t.Errorf("Diff:\n%s", d) + } + }) + } +} + +func TestAddStepsToBuild_WithBucketFromConfigMap(t *testing.T) { + boolTrue := true + task := &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "build-from-repo", + Namespace: "marshmallow", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{{ + Name: "gitspace", + Type: "git", + }}, + }, + }, + } + taskWithTargetPath := 
&v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task-with-targetpath", + Namespace: "marshmallow", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{{ + Name: "workspace", + Type: "gcs", + TargetPath: "gcs-dir", + }}, + }, + }, + } + + for _, c := range []struct { + desc string + task *v1alpha1.Task + taskRun *v1alpha1.TaskRun + build *buildv1alpha1.Build + wantErr bool + want *buildv1alpha1.Build + }{{ + desc: "git resource as input from previous task - copy to bucket", + task: task, + taskRun: &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "get-from-git", + Namespace: "marshmallow", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "PipelineRun", + Name: "pipelinerun", + }}, + }, + Spec: v1alpha1.TaskRunSpec{ + Inputs: v1alpha1.TaskRunInputs{ + Resources: []v1alpha1.TaskResourceBinding{{ + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "the-git", + }, + Name: "gitspace", + Paths: []string{"prev-task-path"}, + }}, + }, + }, + }, + build: build(), + wantErr: false, + want: &buildv1alpha1.Build{ + TypeMeta: metav1.TypeMeta{ + Kind: "Build", + APIVersion: "build.knative.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "build-from-repo", + Namespace: "marshmallow", + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "pipeline.knative.dev/v1alpha1", + Kind: "TaskRun", + Name: "build-from-repo-run", + Controller: &boolTrue, + BlockOwnerDeletion: &boolTrue, + }}, + }, + Spec: buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "artifact-dest-mkdir-gitspace-0", + Image: "override-with-bash-noop:latest", + Args: []string{"-args", "mkdir -p /workspace/gitspace"}, + }, + { + Name: "artifact-copy-from-gitspace-0", + Image: "override-with-gsutil-image:latest", + Args: []string{"-args", "cp -r gs://fake-bucket/prev-task-path/** /workspace/gitspace"}, + }}, + }, + }, + }, { + desc: "storage resource as input from previous task - copy from bucket", + task: taskWithTargetPath, + 
taskRun: &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "get-from-gcs", + Namespace: "marshmallow", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "PipelineRun", + Name: "pipelinerun", + }}, + }, + Spec: v1alpha1.TaskRunSpec{ + Inputs: v1alpha1.TaskRunInputs{ + Resources: []v1alpha1.TaskResourceBinding{{ + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "storage1", + }, + Name: "workspace", + Paths: []string{"prev-task-path"}, + }}, + }, + }, + }, + build: build(), + wantErr: false, + want: &buildv1alpha1.Build{ + TypeMeta: metav1.TypeMeta{ + Kind: "Build", + APIVersion: "build.knative.dev/v1alpha1"}, + ObjectMeta: metav1.ObjectMeta{ + Name: "build-from-repo", + Namespace: "marshmallow", + OwnerReferences: []metav1.OwnerReference{{ + APIVersion: "pipeline.knative.dev/v1alpha1", + Kind: "TaskRun", + Name: "build-from-repo-run", + Controller: &boolTrue, + BlockOwnerDeletion: &boolTrue, + }}, + }, + Spec: buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{{ + Name: "artifact-dest-mkdir-workspace-0", + Image: "override-with-bash-noop:latest", + Args: []string{"-args", "mkdir -p /workspace/gcs-dir"}, + }, + { + Name: "artifact-copy-from-workspace-0", + Image: "override-with-gsutil-image:latest", + Args: []string{"-args", "cp -r gs://fake-bucket/prev-task-path/** /workspace/gcs-dir"}, + }}, + }, + }, + }} { + t.Run(c.desc, func(t *testing.T) { + setUp() + fakekubeclient := fakek8s.NewSimpleClientset( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "knative-build-pipeline", + Name: v1alpha1.BucketConfigName, + }, + Data: map[string]string{ + v1alpha1.BucketLocationKey: "gs://fake-bucket", + }, + }, + ) + got, err := AddInputResource(fakekubeclient, c.build, c.task.Name, &c.task.Spec, c.taskRun, pipelineResourceLister, logger) if (err != nil) != c.wantErr { t.Errorf("Test: %q; AddInputResource() error = %v, WantErr %v", c.desc, err, c.wantErr) } diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go 
b/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go index 556337bd84f..e7702846f64 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/input_resources.go @@ -22,16 +22,18 @@ import ( "path/filepath" "strings" + "github.com/knative/build-pipeline/pkg/apis/pipeline" "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + artifacts "github.com/knative/build-pipeline/pkg/artifacts" listers "github.com/knative/build-pipeline/pkg/client/listers/pipeline/v1alpha1" buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes" ) var ( kubeconfigWriterImage = flag.String("kubeconfig-writer-image", "override-with-kubeconfig-writer:latest", "The container image containing our kubeconfig writer binary.") - bashNoopImage = flag.String("bash-noop-image", "override-with-bash-noop:latest", "The container image containing bash shell") ) func getBoundResource(resourceName string, boundResources []v1alpha1.TaskResourceBinding) (*v1alpha1.TaskResourceBinding, error) { @@ -50,6 +52,7 @@ func getBoundResource(resourceName string, boundResources []v1alpha1.TaskResourc // from previous task // 3. 
If resource has paths declared then fresh copy of resource is not fetched func AddInputResource( + kubeclient kubernetes.Interface, build *buildv1alpha1.Build, taskName string, taskSpec *v1alpha1.TaskSpec, @@ -64,6 +67,15 @@ func AddInputResource( pvcName := taskRun.GetPipelineRunPVCName() mountPVC := false + prNameFromLabel := taskRun.Labels[pipeline.GroupName+pipeline.PipelineRunLabelKey] + if prNameFromLabel == "" { + prNameFromLabel = pvcName + } + as, err := artifacts.GetArtifactStorage(prNameFromLabel, kubeclient) + if err != nil { + return nil, err + } + for _, input := range taskSpec.Inputs.Resources { boundResource, err := getBoundResource(input.Name, taskRun.Spec.Inputs.Resources) if err != nil { @@ -86,15 +98,23 @@ func AddInputResource( } else { dPath = filepath.Join(workspaceDir, input.TargetPath) } - cpContainer := copyContainer(fmt.Sprintf("%s-%d", boundResource.Name, i), path, dPath) - cpContainer.VolumeMounts = []corev1.VolumeMount{getPvcMount(pvcName)} - copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, cpContainer) - mountPVC = true + + cpContainers := as.GetCopyFromContainerSpec(fmt.Sprintf("%s-%d", boundResource.Name, i), path, dPath) + if as.IsPVC() { + mountPVC = true + for _, ct := range cpContainers { + ct.VolumeMounts = []corev1.VolumeMount{getPvcMount(pvcName)} + copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, ct) + } + } else { + copyStepsFromPrevTasks = append(copyStepsFromPrevTasks, cpContainers...) + } } // source is copied from previous task so skip fetching cluster , storage types if len(copyStepsFromPrevTasks) > 0 { build.Spec.Steps = append(copyStepsFromPrevTasks, build.Spec.Steps...) + build.Spec.Volumes = append(build.Spec.Volumes, as.GetSecretsVolumes()...) 
} else { switch resource.Spec.Type { case v1alpha1.PipelineResourceTypeGit: @@ -179,7 +199,7 @@ func addStorageFetchStep(build *buildv1alpha1.Build, storageResource v1alpha1.Pi } storageResource.SetDestinationDirectory(destDirectory) - gcsCreateDirContainer := createDirContainer(storageResource.GetName(), destDirectory) + gcsCreateDirContainer := v1alpha1.CreateDirContainer(storageResource.GetName(), destDirectory) gcsDownloadContainers, err := storageResource.GetDownloadContainerSpec() if err != nil { return err @@ -218,19 +238,3 @@ func addStorageFetchStep(build *buildv1alpha1.Build, storageResource v1alpha1.Pi build.Spec.Steps = append(buildSteps, build.Spec.Steps...) return nil } - -func createDirContainer(name, destinationPath string) corev1.Container { - return corev1.Container{ - Name: fmt.Sprintf("create-dir-%s", name), - Image: *bashNoopImage, - Args: []string{"-args", strings.Join([]string{"mkdir", "-p", destinationPath}, " ")}, - } -} - -func copyContainer(name, sourcePath, destinationPath string) corev1.Container { - return corev1.Container{ - Name: fmt.Sprintf("source-copy-%s", name), - Image: *bashNoopImage, - Args: []string{"-args", strings.Join([]string{"cp", "-r", fmt.Sprintf("%s/.", sourcePath), destinationPath}, " ")}, - } -} diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go b/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go index fcfb3ba6f4f..6c775b0abbb 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/output_resource.go @@ -19,13 +19,14 @@ package resources import ( "fmt" "path/filepath" - "strings" "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + artifacts "github.com/knative/build-pipeline/pkg/artifacts" listers "github.com/knative/build-pipeline/pkg/client/listers/pipeline/v1alpha1" buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" "go.uber.org/zap" corev1 "k8s.io/api/core/v1" + 
"k8s.io/client-go/kubernetes" ) var ( @@ -52,6 +53,7 @@ var ( // 1. If resource is declared in inputs then target path from input resource is used to identify source path // 2. If resource is declared in outputs only then the default is /output/resource_name func AddOutputResources( + kubeclient kubernetes.Interface, b *buildv1alpha1.Build, taskName string, taskSpec *v1alpha1.TaskSpec, @@ -64,7 +66,11 @@ func AddOutputResources( return nil } - pipelineRunpvcName := taskRun.GetPipelineRunPVCName() + pvcName := taskRun.GetPipelineRunPVCName() + as, err := artifacts.GetArtifactStorage(pvcName, kubeclient) + if err != nil { + return err + } // track resources that are present in input of task cuz these resources will be copied onto PVC inputResourceMap := map[string]string{} @@ -116,41 +122,30 @@ func AddOutputResources( // outputs, or until we have metadata on the resource that declares whether // the output should be copied to the PVC, only copy git and storage output // resources. - // copy to pvc if pvc is present - if allowedOutputResources[resource.Spec.Type] && pipelineRunpvcName != "" { + if allowedOutputResources[resource.Spec.Type] && taskRun.HasPipelineRunOwnerReference() { var newSteps []corev1.Container for _, dPath := range boundResource.Paths { - newSteps = append(newSteps, []corev1.Container{{ - Name: fmt.Sprintf("source-mkdir-%s", resource.GetName()), - Image: *bashNoopImage, - Args: []string{ - "-args", strings.Join([]string{"mkdir", "-p", dPath}, " "), - }, - VolumeMounts: []corev1.VolumeMount{getPvcMount(pipelineRunpvcName)}, - }, { - Name: fmt.Sprintf("source-copy-%s", resource.GetName()), - Image: *bashNoopImage, - Args: []string{ - "-args", strings.Join([]string{"cp", "-r", fmt.Sprintf("%s/.", sourcePath), dPath}, " "), - }, - VolumeMounts: []corev1.VolumeMount{getPvcMount(pipelineRunpvcName)}, - }}...) + containers := as.GetCopyToContainerSpec(resource.GetName(), sourcePath, dPath) + newSteps = append(newSteps, containers...) 
} b.Spec.Steps = append(b.Spec.Steps, newSteps...) + b.Spec.Volumes = append(b.Spec.Volumes, as.GetSecretsVolumes()...) } } - if pipelineRunpvcName == "" { - return nil - } - - // attach pvc volume only if it is not already attached - for _, buildVol := range b.Spec.Volumes { - if buildVol.Name == pipelineRunpvcName { + if as.IsPVC() { + if pvcName == "" { return nil } + + // attach pvc volume only if it is not already attached + for _, buildVol := range b.Spec.Volumes { + if buildVol.Name == pvcName { + return nil + } + } + b.Spec.Volumes = append(b.Spec.Volumes, GetPVCVolume(pvcName)) } - b.Spec.Volumes = append(b.Spec.Volumes, GetPVCVolume(pipelineRunpvcName)) return nil } diff --git a/pkg/reconciler/v1alpha1/taskrun/resources/output_resource_test.go b/pkg/reconciler/v1alpha1/taskrun/resources/output_resource_test.go index ec4ba10757c..bcf92571a76 100644 --- a/pkg/reconciler/v1alpha1/taskrun/resources/output_resource_test.go +++ b/pkg/reconciler/v1alpha1/taskrun/resources/output_resource_test.go @@ -27,13 +27,14 @@ import ( buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakek8s "k8s.io/client-go/kubernetes/fake" ) var ( outputpipelineResourceLister listers.PipelineResourceLister ) -func outputResourcesetUp() { +func outputResourceSetup() { fakeClient := fakeclientset.NewSimpleClientset() sharedInfomer := informers.NewSharedInformerFactory(fakeClient, 0) pipelineResourceInformer := sharedInfomer.Pipeline().V1alpha1().PipelineResources() @@ -598,8 +599,9 @@ func Test_Valid_OutputResources(t *testing.T) { build: build(), }} { t.Run(c.name, func(t *testing.T) { - outputResourcesetUp() - err := AddOutputResources(c.build, c.task.Name, &c.task.Spec, c.taskRun, outputpipelineResourceLister, logger) + outputResourceSetup() + fakekubeclient := fakek8s.NewSimpleClientset() + err := AddOutputResources(fakekubeclient, c.build, c.task.Name, &c.task.Spec, c.taskRun, 
outputpipelineResourceLister, logger) if err != nil { t.Fatalf("Failed to declare output resources for test name %q ; test description %q: error %v", c.name, c.desc, err) } @@ -628,6 +630,177 @@ func Test_Valid_OutputResources(t *testing.T) { } } +func Test_Valid_OutputResources_WithBucketStorage(t *testing.T) { + for _, c := range []struct { + name string + desc string + task *v1alpha1.Task + build *buildv1alpha1.Build + taskRun *v1alpha1.TaskRun + wantSteps []corev1.Container + wantVolumes []corev1.Volume + }{{ + name: "git resource in input and output with bucket storage", + desc: "git resource declared as both input and output with pipelinerun owner reference", + taskRun: &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun-run-output-steps", + Namespace: "marshmallow", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "PipelineRun", + Name: "pipelinerun", + }}, + }, + Spec: v1alpha1.TaskRunSpec{ + Inputs: v1alpha1.TaskRunInputs{ + Resources: []v1alpha1.TaskResourceBinding{{ + Name: "source-workspace", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "source-git", + }, + }}, + }, + Outputs: v1alpha1.TaskRunOutputs{ + Resources: []v1alpha1.TaskResourceBinding{{ + Name: "source-workspace", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "source-git", + }, + Paths: []string{"pipeline-task-name"}, + }}, + }, + }, + }, + task: &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task1", + Namespace: "marshmallow", + }, + Spec: v1alpha1.TaskSpec{ + Inputs: &v1alpha1.Inputs{ + Resources: []v1alpha1.TaskResource{{ + Name: "source-workspace", + Type: "git", + }}, + }, + Outputs: &v1alpha1.Outputs{ + Resources: []v1alpha1.TaskResource{{ + Name: "source-workspace", + Type: "git", + }}, + }, + }, + }, + wantSteps: []corev1.Container{{ + Name: "artifact-copy-to-source-git", + Image: "override-with-gsutil-image:latest", + Args: []string{"-args", "cp -r /workspace/source-workspace gs://fake-bucket/pipeline-task-name"}, + }}, + build: 
build(), + }, { + name: "git resource in output only with bucket storage", + desc: "git resource declared as output with pipelinerun owner reference", + taskRun: &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun-run-output-steps", + Namespace: "marshmallow", + OwnerReferences: []metav1.OwnerReference{{ + Kind: "PipelineRun", + Name: "pipelinerun", + }}, + }, + Spec: v1alpha1.TaskRunSpec{ + Outputs: v1alpha1.TaskRunOutputs{ + Resources: []v1alpha1.TaskResourceBinding{{ + Name: "source-workspace", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "source-git", + }, + Paths: []string{"pipeline-task-name"}, + }}, + }, + }, + }, + task: &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task1", + Namespace: "marshmallow", + }, + Spec: v1alpha1.TaskSpec{ + Outputs: &v1alpha1.Outputs{ + Resources: []v1alpha1.TaskResource{{ + Name: "source-workspace", + Type: "git", + }}, + }, + }, + }, + wantSteps: []corev1.Container{{ + Name: "artifact-copy-to-source-git", + Image: "override-with-gsutil-image:latest", + Args: []string{"-args", "cp -r /workspace/output/source-workspace gs://fake-bucket/pipeline-task-name"}, + }}, + build: build(), + }, { + name: "git resource in output", + desc: "git resource declared in output without pipelinerun owner reference", + taskRun: &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-taskrun-run-output-steps", + Namespace: "marshmallow", + }, + Spec: v1alpha1.TaskRunSpec{ + Outputs: v1alpha1.TaskRunOutputs{ + Resources: []v1alpha1.TaskResourceBinding{{ + Name: "source-workspace", + ResourceRef: v1alpha1.PipelineResourceRef{ + Name: "source-git", + }, + }}, + }, + }, + }, + task: &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Name: "task1", + Namespace: "marshmallow", + }, + Spec: v1alpha1.TaskSpec{ + Outputs: &v1alpha1.Outputs{ + Resources: []v1alpha1.TaskResource{{ + Name: "source-workspace", + Type: "git", + }}, + }, + }, + }, + build: build(), + }} { + t.Run(c.name, func(t *testing.T) { + 
outputResourceSetup() + fakekubeclient := fakek8s.NewSimpleClientset( + &corev1.ConfigMap{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "knative-build-pipeline", + Name: v1alpha1.BucketConfigName, + }, + Data: map[string]string{ + v1alpha1.BucketLocationKey: "gs://fake-bucket", + }, + }, + ) + err := AddOutputResources(fakekubeclient, c.build, c.task.Name, &c.task.Spec, c.taskRun, outputpipelineResourceLister, logger) + if err != nil { + t.Fatalf("Failed to declare output resources for test name %q ; test description %q: error %v", c.name, c.desc, err) + } + + if d := cmp.Diff(c.build.Spec.Steps, c.wantSteps); d != "" { + t.Fatalf("post build steps mismatch: %s", d) + } + }) + } +} + func Test_InValid_OutputResources(t *testing.T) { for _, c := range []struct { desc string @@ -755,8 +928,9 @@ func Test_InValid_OutputResources(t *testing.T) { wantErr: true, }} { t.Run(c.desc, func(t *testing.T) { - outputResourcesetUp() - err := AddOutputResources(build(), c.task.Name, &c.task.Spec, c.taskRun, outputpipelineResourceLister, logger) + outputResourceSetup() + fakekubeclient := fakek8s.NewSimpleClientset() + err := AddOutputResources(fakekubeclient, build(), c.task.Name, &c.task.Spec, c.taskRun, outputpipelineResourceLister, logger) if (err != nil) != c.wantErr { t.Fatalf("Test AddOutputResourceSteps error %v ", c.desc) } diff --git a/pkg/reconciler/v1alpha1/taskrun/taskrun.go b/pkg/reconciler/v1alpha1/taskrun/taskrun.go index aef0bc77061..99bf88d209d 100644 --- a/pkg/reconciler/v1alpha1/taskrun/taskrun.go +++ b/pkg/reconciler/v1alpha1/taskrun/taskrun.go @@ -425,13 +425,13 @@ func (c *Reconciler) createBuildPod(ctx context.Context, tr *v1alpha1.TaskRun, t return nil, fmt.Errorf("couldn't create redirected Build: %v", err) } - build, err = resources.AddInputResource(build, taskName, ts, tr, c.resourceLister, c.Logger) + build, err = resources.AddInputResource(c.KubeClientSet, build, taskName, ts, tr, c.resourceLister, c.Logger) if err != nil { c.Logger.Errorf("Failed to 
create a build for taskrun: %s due to input resource error %v", tr.Name, err) return nil, err } - err = resources.AddOutputResources(build, taskName, ts, tr, c.resourceLister, c.Logger) + err = resources.AddOutputResources(c.KubeClientSet, build, taskName, ts, tr, c.resourceLister, c.Logger) if err != nil { c.Logger.Errorf("Failed to create a build for taskrun: %s due to output resource error %v", tr.Name, err) return nil, err diff --git a/test/README.md b/test/README.md index c9793c72a9f..e500bc84663 100644 --- a/test/README.md +++ b/test/README.md @@ -115,6 +115,8 @@ permissions inside the TaskRun to run the Kaniko e2e test and GCS taskrun test. `KANIKO_SECRET_CONFIG_FILE` is used to generate Kubernetes secret to access GCS bucket. This e2e test requires valid service account configuration json but it does not require any role binding. +- In Storage artifact bucket, GCP service account JSON key file at path + `KANIKO_SECRET_CONFIG_FILE` is used to create/delete a bucket. To reduce e2e test setup developers can use the same environment variable for both Kaniko e2e test and GCS taskrun test. To create a service account usable in diff --git a/test/artifact_bucket_test.go b/test/artifact_bucket_test.go new file mode 100644 index 00000000000..bebd10c388a --- /dev/null +++ b/test/artifact_bucket_test.go @@ -0,0 +1,224 @@ +// +build e2e + +/* +Copyright 2018 Knative Authors LLC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package test + +import ( + "context" + "fmt" + "io/ioutil" + "os" + "testing" + "time" + + "cloud.google.com/go/storage" + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + tb "github.com/knative/build-pipeline/test/builder" + knativetest "github.com/knative/pkg/test" + "github.com/knative/pkg/test/logging" + "google.golang.org/api/iterator" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +const ( + helloworldResourceName = "helloworldgit" + addFileTaskName = "add-file-to-resource-task" + readFileTaskName = "read-new-file-task" + bucketTestPipelineName = "bucket-test-pipeline" + bucketTestPipelineRunName = "bucket-test-pipeline-run" + systemNamespace = "knative-build-pipeline" + bucketSecretName = "bucket-secret" + bucketSecretKey = "bucket-secret-key" +) + +// TestStorageBucketPipelineRun is an integration test that will verify a pipeline +// can use a bucket for temporary storage of artifacts shared between tasks +func TestStorageBucketPipelineRun(t *testing.T) { + configFilePath := os.Getenv("KANIKO_SECRET_CONFIG_FILE") + if configFilePath == "" { + t.Skip("KANIKO_SECRET_CONFIG_FILE variable is not set.") + } + logger := logging.GetContextLogger(t.Name()) + c, namespace := setup(t, logger) + + knativetest.CleanupOnInterrupt(func() { tearDown(t, logger, c, namespace) }, logger) + defer tearDown(t, logger, c, namespace) + + bucketName := fmt.Sprintf("build-pipeline-test-%s-%d", namespace, time.Now().Unix()) + ctx := context.Background() + logger.Infof("Creating GCS bucket %s", bucketName) + storageClient, err := storage.NewClient(ctx) + if err != nil { + t.Fatalf("Failed to create bucket: `%s`: %s", bucketName, err) + } + if err := storageClient.Bucket(bucketName).Create(ctx, os.Getenv("GCP_PROJECT"), nil); err != nil { + t.Fatalf("Failed to create bucket: `%s`: %s", bucketName, err) + } + defer deleteBucket(t, logger, storageClient, bucketName, namespace) + + logger.Infof("Creating Secret %s", 
bucketSecretName) + if _, err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Create(getBucketSecret(t, logger, configFilePath, namespace)); err != nil { + t.Fatalf("Failed to create Secret `%s`: %s", bucketSecretName, err) + } + defer deleteBucketSecret(c, t, logger, namespace) + + logger.Infof("Creating ConfigMap %s", v1alpha1.BucketConfigName) + configMapData := map[string]string{ + v1alpha1.BucketLocationKey: fmt.Sprintf("gs://%s", bucketName), + v1alpha1.BucketServiceAccountSecretName: bucketSecretName, + v1alpha1.BucketServiceAccountSecretKey: bucketSecretKey, + } + c.KubeClient.UpdateConfigMap(systemNamespace, v1alpha1.BucketConfigName, configMapData) + + logger.Infof("Creating Git PipelineResource %s", helloworldResourceName) + helloworldResource := tb.PipelineResource(helloworldResourceName, namespace, tb.PipelineResourceSpec( + v1alpha1.PipelineResourceTypeGit, + tb.PipelineResourceSpecParam("Url", "https://github.com/pivotal-nader-ziada/gohelloworld"), + tb.PipelineResourceSpecParam("Revision", "master"), + ), + ) + if _, err := c.PipelineResourceClient.Create(helloworldResource); err != nil { + t.Fatalf("Failed to create Pipeline Resource `%s`: %s", helloworldResourceName, err) + } + + logger.Infof("Creating Task %s", addFileTaskName) + addFileTask := tb.Task(addFileTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.TaskOutputs(tb.OutputsResource(helloworldResourceName, v1alpha1.PipelineResourceTypeGit)), + tb.Step("addfile", "ubuntu", tb.Command("/bin/bash"), + tb.Args("-c", "echo stuff > /workspace/helloworldgit/newfile"), + ), + )) + if _, err := c.TaskClient.Create(addFileTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", addFileTaskName, err) + } + + logger.Infof("Creating Task %s", readFileTaskName) + readFileTask := tb.Task(readFileTaskName, namespace, tb.TaskSpec( + tb.TaskInputs(tb.InputsResource(helloworldResourceName, 
v1alpha1.PipelineResourceTypeGit)), + tb.Step("readfile", "ubuntu", tb.Command("/bin/bash"), + tb.Args("-c", "cat /workspace/helloworldgit/newfile"), + ), + )) + if _, err := c.TaskClient.Create(readFileTask); err != nil { + t.Fatalf("Failed to create Task `%s`: %s", readFileTaskName, err) + } + + logger.Infof("Creating Pipeline %s", bucketTestPipelineName) + bucketTestPipeline := tb.Pipeline(bucketTestPipelineName, namespace, tb.PipelineSpec( + tb.PipelineDeclaredResource("source-repo", "git"), + tb.PipelineTask("addfile", addFileTaskName, + tb.PipelineTaskInputResource("helloworldgit", "source-repo"), + tb.PipelineTaskOutputResource("helloworldgit", "source-repo"), + ), + tb.PipelineTask("readfile", readFileTaskName, + tb.PipelineTaskInputResource("helloworldgit", "source-repo", tb.From("addfile")), + ), + )) + if _, err := c.PipelineClient.Create(bucketTestPipeline); err != nil { + t.Fatalf("Failed to create Pipeline `%s`: %s", bucketTestPipelineName, err) + } + + logger.Infof("Creating PipelineRun %s", bucketTestPipelineRunName) + bucketTestPipelineRun := tb.PipelineRun(bucketTestPipelineRunName, namespace, tb.PipelineRunSpec( + bucketTestPipelineName, + tb.PipelineRunResourceBinding("source-repo", tb.PipelineResourceBindingRef(helloworldResourceName)), + )) + if _, err := c.PipelineRunClient.Create(bucketTestPipelineRun); err != nil { + t.Fatalf("Failed to create PipelineRun `%s`: %s", bucketTestPipelineRunName, err) + } + + // Verify status of PipelineRun (wait for it) + if err := WaitForPipelineRunState(c, bucketTestPipelineRunName, timeout, PipelineRunSucceed(bucketTestPipelineRunName), "PipelineRunCompleted"); err != nil { + t.Errorf("Error waiting for PipelineRun %s to finish: %s", bucketTestPipelineRunName, err) + taskruns, err := c.TaskRunClient.List(metav1.ListOptions{}) + if err != nil { + t.Errorf("Error getting TaskRun list for PipelineRun %s %s", bucketTestPipelineRunName, err) + } + for _, tr := range taskruns.Items { + if tr.Status.PodName != "" 
{
+				CollectBuildLogs(c, tr.Status.PodName, namespace, logger)
+			}
+		}
+		t.Fatalf("PipelineRun execution failed")
+	}
+}
+
+func createBucket(client *storage.Client, projectID, bucketName, configFilePath string) error {
+	f, err := ioutil.ReadFile(configFilePath)
+	if err != nil {
+		return fmt.Errorf("failed to read json key file at path %s: %v", configFilePath, err)
+	}
+	os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", string(f))
+	ctx := context.Background()
+	// [START create_bucket]
+	if err := client.Bucket(bucketName).Create(ctx, projectID, nil); err != nil {
+		return err
+	}
+	// [END create_bucket]
+	return nil
+}
+
+func deleteBucket(t *testing.T, logger *logging.BaseLogger, storageClient *storage.Client, bucketName, namespace string) {
+	t.Helper()
+	ctx := context.Background()
+	// clear bucket
+	it := storageClient.Bucket(bucketName).Objects(ctx, &storage.Query{})
+	for {
+		attrs, err := it.Next()
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			logger.Errorf("Failed to iterate over objects in bucket %s: %s", bucketName, err)
+			break
+		}
+		// [START delete_file]
+		o := storageClient.Bucket(bucketName).Object(attrs.Name)
+		if err := o.Delete(ctx); err != nil {
+			logger.Errorf("Failed to delete object %s in bucket %s: %s", attrs.Name, bucketName, err)
+		}
+		// [END delete_file]
+	}
+	// [START delete_bucket]
+	if err := storageClient.Bucket(bucketName).Delete(ctx); err != nil {
+		logger.Errorf("Failed to delete bucket %s: %s", bucketName, err)
+	}
+	// [END delete_bucket]
+}
+
+func getBucketSecret(t *testing.T, logger *logging.BaseLogger, configFilePath, namespace string) *corev1.Secret {
+	t.Helper()
+	f, err := ioutil.ReadFile(configFilePath)
+	if err != nil {
+		t.Fatalf("Failed to read json key file at path %s: %s", configFilePath, err)
+	}
+	return &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      bucketSecretName,
+		},
+		StringData: map[string]string{
+			bucketSecretKey: string(f),
+		},
+	}
+}
+
+func deleteBucketSecret(c *clients, t *testing.T, logger 
*logging.BaseLogger, namespace string) {
+	if err := c.KubeClient.Kube.CoreV1().Secrets(namespace).Delete(bucketSecretName, &metav1.DeleteOptions{}); err != nil {
+		logger.Errorf("Failed to delete Secret `%s`: %s", bucketSecretName, err)
+	}
+}