From b1c2915333ace2322c496308c2ad396629af96f5 Mon Sep 17 00:00:00 2001 From: Christie Wilson Date: Thu, 4 Oct 2018 17:16:06 -0700 Subject: [PATCH] Add initial helloworld integration test This is a WIP of adding an integration test to cover #59, which is a simple TaskRun which has no inputs or outputs but just runs a container that echoes helloworld. At this point we have the logic to: - Create the Task and TaskRun - Wait for the TaskRun to have a condition (TODO: actually check the conditions, waiting to rebase onto #86) - Get the Build associated (not yet created, will be via #86) - Get the Pod for the build - TODO: get the logs for that Pod (we've been looking at https://github.com/knative/build/blob/e8c2cb6eb5cb09d9737ca9e6da4a1c68af3247b2/pkg/logs/logs.go#L36:6 for inspiration) This also changes namespaces to be test specific, so each test can create a namespace and tear it down, without needing to keep track of resources created and delete them individually. Also had to change some optional resources to be pointers, because json marshalling will still require optional fields to be present if they are not nullable (https://stackoverflow.com/questions/18088294/how-to-not-marshal-an-empty-struct-into-json-with-go) --- pkg/apis/pipeline/v1alpha1/task_types.go | 18 +-- pkg/apis/pipeline/v1alpha1/taskrun_types.go | 12 +- .../v1alpha1/zz_generated.deepcopy.go | 66 ++++---- test/README.md | 41 +++++ test/clients.go | 33 ++-- test/crd_checks.go | 52 +++++++ test/init_test.go | 48 +++--- test/pipeline_test.go | 20 +-- test/taskrun_test.go | 146 ++++++++++++++++++ 9 files changed, 336 insertions(+), 100 deletions(-) create mode 100644 test/crd_checks.go create mode 100644 test/taskrun_test.go diff --git a/pkg/apis/pipeline/v1alpha1/task_types.go b/pkg/apis/pipeline/v1alpha1/task_types.go index 0302773c664..c1a038437f3 100644 --- a/pkg/apis/pipeline/v1alpha1/task_types.go +++ b/pkg/apis/pipeline/v1alpha1/task_types.go @@ -18,7 +18,6 @@ package v1alpha1 import ( buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -27,8 +26,8 @@ type TaskSpec struct { // +optional Inputs *Inputs `json:"inputs,omitempty"` // +optional - Outputs *Outputs `json:"outputs,omitempty"` - BuildSpec BuildSpec `json:"buildSpec"` + Outputs *Outputs `json:"outputs,omitempty"` + BuildSpec *buildv1alpha1.BuildSpec `json:"buildSpec"` } // TaskStatus defines the observed state of Task @@ -70,6 +69,9 @@ type Inputs struct { // used as the name of the volume containing this context which will be mounted // into the container executed by the Build/Task, e.g. a Source with the // name "workspace" would be mounted into "/workspace". +// +// TODO(#62): Something is wrong here, this should be a reference to a resource, +// could just be that the names and comments are out of date. type Source struct { // name of the source should match the name of the SourceBinding in the pipeline Name string `json:"name"` @@ -101,16 +103,6 @@ type TestResult struct { Path string `json:"path"` } -// BuildSpec describes how to create a Build for this Task. -// A BuildSpec will contain either a Template or a series of Steps. 
-type BuildSpec struct { - // Trying to emulate https://github.com/knative/build/blob/master/pkg/apis/build/v1alpha1/build_types.go - // +optional - Steps []corev1.Container `json:"steps,omitempty"` - // +optional - Template buildv1alpha1.TemplateInstantiationSpec `json:"template,omitempty"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TaskList contains a list of Task diff --git a/pkg/apis/pipeline/v1alpha1/taskrun_types.go b/pkg/apis/pipeline/v1alpha1/taskrun_types.go index 0d7792b714f..cd756b6bfe4 100644 --- a/pkg/apis/pipeline/v1alpha1/taskrun_types.go +++ b/pkg/apis/pipeline/v1alpha1/taskrun_types.go @@ -19,8 +19,6 @@ package v1alpha1 import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" ) // TaskRunSpec defines the desired state of TaskRun @@ -28,7 +26,7 @@ type TaskRunSpec struct { TaskRef TaskRef `json:"taskRef"` Trigger TaskTrigger `json:"trigger"` // +optional - Inputs TaskRunInputs `json:"inputs,omitempty"` + Inputs *TaskRunInputs `json:"inputs,omitempty"` // +optional Outputs Outputs `json:"outputs,omitempty"` Results Results `json:"results"` @@ -36,7 +34,8 @@ type TaskRunSpec struct { // TaskRunInputs holds the input values that this task was invoked with. type TaskRunInputs struct { - Resources []PipelineResourceVersion `json:"resourcesVersion"` + // +optional + Resources []PipelineResourceVersion `json:"resourcesVersion,omitempty"` // +optional Params []Param `json:"params,omitempty"` } @@ -125,10 +124,9 @@ type TaskRun struct { metav1.ObjectMeta `json:"metadata,omitempty"` // +optional - Spec TaskRunSpec `json:"spec,omitempty"` + Spec *TaskRunSpec `json:"spec,omitempty"` // +optional - //TODO(aaron-prindle) change back to TaskRunStatus - Status buildv1alpha1.BuildStatus `json:"status,omitempty"` + Status *TaskRunStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go index e60dac21d58..f810da8f173 100644 --- a/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go +++ b/pkg/apis/pipeline/v1alpha1/zz_generated.deepcopy.go @@ -21,34 +21,10 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" + build_v1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BuildSpec) DeepCopyInto(out *BuildSpec) { - *out = *in - if in.Steps != nil { - in, out := &in.Steps, &out.Steps - *out = make([]v1.Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BuildSpec. -func (in *BuildSpec) DeepCopy() *BuildSpec { - if in == nil { - return nil - } - out := new(BuildSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Cluster) DeepCopyInto(out *Cluster) { *out = *in @@ -941,8 +917,24 @@ func (in *TaskRun) DeepCopyInto(out *TaskRun) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + if in.Spec != nil { + in, out := &in.Spec, &out.Spec + if *in == nil { + *out = nil + } else { + *out = new(TaskRunSpec) + (*in).DeepCopyInto(*out) + } + } + if in.Status != nil { + in, out := &in.Status, &out.Status + if *in == nil { + *out = nil + } else { + *out = new(TaskRunStatus) + (*in).DeepCopyInto(*out) + } + } return } @@ -1045,7 +1037,15 @@ func (in *TaskRunSpec) DeepCopyInto(out *TaskRunSpec) { *out = *in out.TaskRef = in.TaskRef out.Trigger = in.Trigger - in.Inputs.DeepCopyInto(&out.Inputs) + if in.Inputs != nil { + in, out := &in.Inputs, &out.Inputs + if *in == nil { + *out = nil + } else { + *out = new(TaskRunInputs) + (*in).DeepCopyInto(*out) + } + } in.Outputs.DeepCopyInto(&out.Outputs) in.Results.DeepCopyInto(&out.Results) return @@ -1110,7 +1110,15 @@ func (in *TaskSpec) DeepCopyInto(out *TaskSpec) { (*in).DeepCopyInto(*out) } } - in.BuildSpec.DeepCopyInto(&out.BuildSpec) + if in.BuildSpec != nil { + in, out := &in.BuildSpec, &out.BuildSpec + if *in == nil { + *out = nil + } else { + *out = new(build_v1alpha1.BuildSpec) + (*in).DeepCopyInto(*out) + } + } return } diff --git a/test/README.md b/test/README.md index 71d9cfe6f85..c2de70a806a 100644 --- a/test/README.md +++ b/test/README.md @@ -166,6 +166,47 @@ namespace := test.AppendRandomString('arendelle') _See [randstring.go](./randstring.go)._ +#### Check Pipeline resources + +After creating Pipeline resources or making changes to them, you will need to wait for the system +to realize those changes. You can use polling methods to check the resources reach the desired state. + +The `WaitFor*` functions use the kubernetes [`wait` package](https://godoc.org/k8s.io/apimachinery/pkg/util/wait). +To poll they use [`PollImmediate`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#PollImmediate) +and the return values of the function you provide behave the same as +[`ConditionFunc`](https://godoc.org/k8s.io/apimachinery/pkg/util/wait#ConditionFunc): +a `bool` to indicate if the function should stop or continue polling, and an `error` to indicate if +there has been an error. 
+ +For example, you can poll a `TaskRun` object to wait for it to have a `Status.Condition`: + +```go +// Verify status of TaskRun (wait for it) +err = WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + if tr.Status != nil && len(tr.Status.Conditions) > 0 { + return true, nil + } + return false, nil +}, "TaskRunHasCondition") +``` + +_[Metrics will be emitted](#emit-metrics) for these `Wait` method tracking how long test poll for._ + +We also have `Check*` variants of many of these methods with identical signatures, same example: + +```go +var revisionName string +err := test.CheckConfigurationState(clients.ServingClient, configName, func(c *v1alpha1.Configuration) (bool, error) { + if c.Status.LatestCreatedRevisionName != "" { + revisionName = c.Status.LatestCreatedRevisionName + return true, nil + } + return false, nil +}) +``` + +_See [crd_checks.go](./crd_checks.go) and [kube_checks.go](./kube_checks.go)._ + ## Presubmit tests [`presubmit-tests.sh`](./presubmit-tests.sh) is the entry point for all tests diff --git a/test/clients.go b/test/clients.go index 8d311085efc..5505e3a4402 100644 --- a/test/clients.go +++ b/test/clients.go @@ -20,23 +20,30 @@ import ( "github.com/knative/build-pipeline/pkg/client/clientset/versioned" "github.com/knative/build-pipeline/pkg/client/clientset/versioned/typed/pipeline/v1alpha1" + buildversioned "github.com/knative/build/pkg/client/clientset/versioned" + buildv1alpha1 "github.com/knative/build/pkg/client/clientset/versioned/typed/build/v1alpha1" knativetest "github.com/knative/pkg/test" ) -// Clients holds instances of interfaces for making requests to the Pipeline controllers. -type Clients struct { - KubeClient *knativetest.KubeClient +// clients holds instances of interfaces for making requests to the Pipeline controllers. +type clients struct { + KubeClient *knativetest.KubeClient + PipelineClient v1alpha1.PipelineInterface + TaskClient v1alpha1.TaskInterface + TaskRunClient v1alpha1.TaskRunInterface + + BuildClient buildv1alpha1.BuildInterface } -// NewClients instantiates and returns several clientsets required for making requests to the +// newClients instantiates and returns several clientsets required for making requests to the // Pipeline cluster specified by the combination of clusterName and configPath. Clients can // make requests within namespace. 
-func NewClients(configPath, clusterName, namespace string) (*Clients, error) { +func newClients(configPath, clusterName, namespace string) (*clients, error) { var err error - clients := &Clients{} + c := &clients{} - clients.KubeClient, err = knativetest.NewKubeClient(configPath, clusterName) + c.KubeClient, err = knativetest.NewKubeClient(configPath, clusterName) if err != nil { return nil, fmt.Errorf("failed to create kubeclient from config file at %s: %s", configPath, err) } @@ -50,7 +57,15 @@ func NewClients(configPath, clusterName, namespace string) (*Clients, error) { if err != nil { return nil, fmt.Errorf("failed to create pipeline clientset from config file at %s: %s", configPath, err) } - clients.PipelineClient = cs.PipelineV1alpha1().Pipelines(namespace) + c.PipelineClient = cs.PipelineV1alpha1().Pipelines(namespace) + c.TaskClient = cs.PipelineV1alpha1().Tasks(namespace) + c.TaskRunClient = cs.PipelineV1alpha1().TaskRuns(namespace) + + bcs, err := buildversioned.NewForConfig(cfg) + if err != nil { + return nil, fmt.Errorf("failed to create build clientset from config file at %s: %s", configPath, err) + } + c.BuildClient = bcs.BuildV1alpha1().Builds(namespace) - return clients, nil + return c, nil } diff --git a/test/crd_checks.go b/test/crd_checks.go new file mode 100644 index 00000000000..2de72f3bcf1 --- /dev/null +++ b/test/crd_checks.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Knative Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "context" + "fmt" + "time" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + "go.opencensus.io/trace" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" +) + +const ( + interval = 1 * time.Second + timeout = 10 * time.Second + //timeout = 6 * time.Minute +) + +// WaitForTaskRunState polls the status of the TaskRun called name from client every +// interval until inState returns `true` indicating it is done, returns an +// error or timeout. desc will be used to name the metric that is emitted to +// track how long it took for name to get into the state checked by inState. +func WaitForTaskRunState(c *clients, name string, inState func(r *v1alpha1.TaskRun) (bool, error), desc string) error { + metricName := fmt.Sprintf("WaitForTaskRunState/%s/%s", name, desc) + _, span := trace.StartSpan(context.Background(), metricName) + defer span.End() + + return wait.PollImmediate(interval, timeout, func() (bool, error) { + r, err := c.TaskRunClient.Get(name, metav1.GetOptions{}) + if err != nil { + return true, err + } + return inState(r) + }) +} diff --git a/test/init_test.go b/test/init_test.go index 2f382a3d2f7..f6317a5778b 100644 --- a/test/init_test.go +++ b/test/init_test.go @@ -28,10 +28,27 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// namespace is the namespace that will be created before all tests run and will be torn down once -// the tests complete. It will be generated randomly so that tests can run back to back without -// interfering with each other. 
-var namespace string +func setup(t *testing.T, logger *logging.BaseLogger) (*clients, string) { + namespace := AppendRandomString("arendelle") + + c, err := newClients(knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace) + if err != nil { + t.Fatalf("Couldn't initialize clients: %v", err) + } + + createNamespace(namespace, logger, c.KubeClient) + + return c, namespace +} + +func tearDown(logger *logging.BaseLogger, kubeClient *knativetest.KubeClient, namespace string) { + if kubeClient != nil { + logger.Infof("Deleting namespace %s", namespace) + if err := kubeClient.Kube.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil { + logger.Errorf("Failed to delete namespace %s: %s", namespace, err) + } + } +} func initializeLogsAndMetrics() { flag.Parse() @@ -43,11 +60,7 @@ func initializeLogsAndMetrics() { } } -func createNamespace(namespace string, logger *logging.BaseLogger) *knativetest.KubeClient { - kubeClient, err := knativetest.NewKubeClient(knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster) - if err != nil { - logger.Fatalf("failed to create kubeclient from config file at %s: %s", knativetest.Flags.Kubeconfig, err) - } +func createNamespace(namespace string, logger *logging.BaseLogger, kubeClient *knativetest.KubeClient) { logger.Infof("Create namespace %s to deploy to", namespace) if _, err := kubeClient.Kube.CoreV1().Namespaces().Create(&corev1.Namespace{ ObjectMeta: metav1.ObjectMeta{ @@ -56,16 +69,6 @@ func createNamespace(namespace string, logger *logging.BaseLogger) *knativetest. }); err != nil { logger.Fatalf("Failed to create namespace %s for tests: %s", namespace, err) } - return kubeClient -} - -func tearDownMain(kubeClient *knativetest.KubeClient, logger *logging.BaseLogger) { - if kubeClient != nil { - logger.Infof("Deleting namespace %s", namespace) - if err := kubeClient.Kube.CoreV1().Namespaces().Delete(namespace, &metav1.DeleteOptions{}); err != nil { - logger.Errorf("Failed to delete namespace %s: %s", namespace, err) - } - } } // TestMain initializes anything global needed by the tests. Right now this is just log and metric @@ -74,13 +77,6 @@ func TestMain(m *testing.M) { initializeLogsAndMetrics() logger := logging.GetContextLogger("TestMain") logger.Infof("Using kubeconfig at `%s` with cluster `%s`", knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster) - - namespace = AppendRandomString("arendelle") - kubeClient := createNamespace(namespace, logger) - knativetest.CleanupOnInterrupt(func() { tearDownMain(kubeClient, logger) }, logger) - c := m.Run() - - tearDownMain(kubeClient, logger) os.Exit(c) } diff --git a/test/pipeline_test.go b/test/pipeline_test.go index 9957b2221c7..57525b793c0 100644 --- a/test/pipeline_test.go +++ b/test/pipeline_test.go @@ -26,28 +26,16 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" ) -func setup(t *testing.T) *Clients { - clients, err := NewClients(knativetest.Flags.Kubeconfig, knativetest.Flags.Cluster, namespace) - if err != nil { - t.Fatalf("Couldn't initialize clients: %v", err) - } - return clients -} - -func tearDown(logger *logging.BaseLogger) { - logger.Infof("TODO: implement teardown of any resources created once this test is implemented") -} - // TestPipeline is just a dummy test right now to make sure the whole integration test // setup and execution is working. 
func TestPipeline(t *testing.T) { - clients := setup(t) logger := logging.GetContextLogger(t.Name()) + c, namespace := setup(t, logger) - knativetest.CleanupOnInterrupt(func() { tearDown(logger) }, logger) - defer tearDown(logger) + knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger) + defer tearDown(logger, c.KubeClient, namespace) - p, err := clients.PipelineClient.List(metav1.ListOptions{}) + p, err := c.PipelineClient.List(metav1.ListOptions{}) if err != nil { t.Fatalf("Couldn't list Pipelines in the cluster (did you deploy the CRDs to %s?): %s", knativetest.Flags.Cluster, err) } diff --git a/test/taskrun_test.go b/test/taskrun_test.go new file mode 100644 index 00000000000..690543e700f --- /dev/null +++ b/test/taskrun_test.go @@ -0,0 +1,146 @@ +// +build e2e + +/* +Copyright 2018 Knative Authors LLC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "fmt" + "testing" + + buildv1alpha1 "github.com/knative/build/pkg/apis/build/v1alpha1" + knativetest "github.com/knative/pkg/test" + "github.com/knative/pkg/test/logging" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/knative/build-pipeline/pkg/apis/pipeline/v1alpha1" + + // Mysteriously by k8s libs, or they fail to create `KubeClient`s from config. Apparently just importing it is enough. @_@ side effects @_@. https://github.com/kubernetes/client-go/issues/242 + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" +) + +const ( + hwTaskName = "helloworld" + hwTaskRunName = "helloworld-run" +) + +func getHelloWorldTask(namespace string) *v1alpha1.Task { + return &v1alpha1.Task{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: hwTaskName, + }, + Spec: v1alpha1.TaskSpec{ + BuildSpec: &buildv1alpha1.BuildSpec{ + Steps: []corev1.Container{ + corev1.Container{ + Name: "helloworld-busybox", + Image: "busybox", + Args: []string{ + "echo", "hello world", + }, + }, + }, + }, + }, + } +} + +func getHelloWorldTaskRun(namespace string) *v1alpha1.TaskRun { + return &v1alpha1.TaskRun{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: namespace, + Name: hwTaskRunName, + }, + Spec: &v1alpha1.TaskRunSpec{ + TaskRef: v1alpha1.TaskRef{ + Name: hwTaskName, + }, + Trigger: v1alpha1.TaskTrigger{ + TriggerRef: v1alpha1.TaskTriggerRef{ + Type: v1alpha1.TaskTriggerTypeManual, + }, + }, + }, + } +} + +// TestTaskRun is an integration test that will verify a very simple "hello world" TaskRun can be +// executed. 
+func TestTaskRun(t *testing.T) { + t.Skip("Will fail until #59 is completed :D") + // TODO: when committing, actually skip this test since it won't pass yet + + logger := logging.GetContextLogger(t.Name()) + c, namespace := setup(t, logger) + + knativetest.CleanupOnInterrupt(func() { tearDown(logger, c.KubeClient, namespace) }, logger) + defer tearDown(logger, c.KubeClient, namespace) + + // Create task + _, err := c.TaskClient.Create(getHelloWorldTask(namespace)) + if err != nil { + t.Fatalf("Failed to create Task `%s`: %s", hwTaskName, err) + } + + // Create TaskRun + _, err = c.TaskRunClient.Create(getHelloWorldTaskRun(namespace)) + if err != nil { + t.Fatalf("Failed to create TaskRun `%s`: %s", hwTaskRunName, err) + } + + // Verify status of TaskRun (wait for it) + err = WaitForTaskRunState(c, hwTaskRunName, func(tr *v1alpha1.TaskRun) (bool, error) { + if tr.Status != nil && len(tr.Status.Conditions) > 0 { + // TODO: use actual conditions + return true, nil + } + return false, nil + }, "TaskRunCompleted") + if err != nil { + t.Errorf("Error waiting for TaskRun %s to finish: %s", hwTaskRunName, err) + } + + // Verify logs + // make sure it output hello world + // get pod from taskrun + // taskrun -> build + b, err := c.BuildClient.Get(hwTaskRunName, metav1.GetOptions{}) + if err != nil { + t.Errorf("Expected there to be a Build with the same name as TaskRun %s but got error: %s", hwTaskRunName, err) + } + // build -> pod + cluster := b.Status.Cluster + if cluster == nil || cluster.PodName == "" { + t.Fatalf("Expected build status to have a podname but it didn't!") + } + // pod -> logs + podName := cluster.PodName + + pods := c.KubeClient.Kube.CoreV1().Pods(namespace) + + fmt.Printf("I GOT PODS %s %s\n", podName, pods) + /* TODO + rc, err := pods.GetLogs(podName, &corev1.PodLogOptions{ + Container: containerName, + }).Stream() + if err != nil { + return err + } + defer rc.Close() + + // get logs from the pod + */ +}
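
For reference, a minimal sketch of how the log-check TODO at the end of `TestTaskRun` might be completed with client-go's `GetLogs`, picking up the `pods` and `podName` values already in scope. The step container name is an assumption here (knative/build typically names step containers `build-step-<step name>`, which would make it `build-step-helloworld-busybox`; this patch does not establish that), and `io/ioutil` and `strings` would need to be added to the imports:

```go
// Sketch only: stream the logs of the build's pod and check for the expected output.
// containerName is hypothetical; it assumes knative/build's "build-step-<step name>"
// naming convention for step containers.
containerName := "build-step-helloworld-busybox"
rc, err := pods.GetLogs(podName, &corev1.PodLogOptions{
	Container: containerName,
}).Stream()
if err != nil {
	t.Fatalf("Failed to stream logs from pod %s: %s", podName, err)
}
defer rc.Close()

logs, err := ioutil.ReadAll(rc)
if err != nil {
	t.Fatalf("Failed to read logs from pod %s: %s", podName, err)
}
if !strings.Contains(string(logs), "hello world") {
	t.Errorf("Expected logs of pod %s to contain %q, got:\n%s", podName, "hello world", string(logs))
}
```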