diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000000..5aa085d288 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,9 @@ +integration/**/* +scripts/**/* +hack/**/* +examples/**/* +docs/**/* +.github/**/* +logo/**/* +out/**/* + diff --git a/DEVELOPMENT.md b/DEVELOPMENT.md index c446791e74..2d28171651 100644 --- a/DEVELOPMENT.md +++ b/DEVELOPMENT.md @@ -86,6 +86,8 @@ find . -name "*.go" | grep -v vendor/ | xargs gofmt -l -s -w Currently the integration tests that live in [`integration`](./integration) can be run against your own gcloud space or a local registry. +These tests will be kicked off by [reviewers](#reviews) for submitted PRs using GitHub Actions. + In either case, you will need the following tools: * [`container-diff`](https://github.com/GoogleContainerTools/container-diff#installation) @@ -134,33 +136,25 @@ go test ./integration -v --bucket $GCS_BUCKET --repo $IMAGE_REPO -run TestLayers These tests will be kicked off by [reviewers](#reviews) for submitted PRs by the kokoro task. -#### Local repository +#### Local integration tests -To run integration tests locally against a local registry, install a local docker registry +To run integration tests locally against a local registry and gcs bucket, set the LOCAL environment variable ```shell -docker run --rm -d -p 5000:5000 --name registry registry:2 +LOCAL=1 make integration-test ``` -Then export the `IMAGE_REPO` variable with the `localhost:5000`value - -```shell -export IMAGE_REPO=localhost:5000 -``` +#### Running integration tests for a specific dockerfile -And run the integration tests +In order to test only specific dockerfiles during local integration testing, you can specify a pattern to match against inside the integration/dockerfiles directory. 
```shell -make integration-test +DOCKERFILE_PATTERN="Dockerfile_test_add*" make integration-test-run ``` -You can also run tests with `go test`, for example to run tests individually: +This will only run dockerfiles that match the pattern `Dockerfile_test_add*` -```shell -go test ./integration -v --repo localhost:5000 -run TestLayers/test_layer_Dockerfile_test_copy_bucket -``` -These tests will be kicked off by [reviewers](#reviews) for submitted PRs using GitHub Actions. ### Benchmarking diff --git a/go.mod b/go.mod index 11167f595c..2af4ffe519 100644 --- a/go.mod +++ b/go.mod @@ -143,7 +143,7 @@ require ( golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect - google.golang.org/api v0.74.0 // indirect + google.golang.org/api v0.74.0 google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20220405205423-9d709892a2bf // indirect google.golang.org/grpc v1.45.0 // indirect diff --git a/integration/benchmark_test.go b/integration/benchmark_test.go index 92efc4f426..e5d9c4e100 100644 --- a/integration/benchmark_test.go +++ b/integration/benchmark_test.go @@ -60,7 +60,7 @@ func TestSnapshotBenchmark(t *testing.T) { buildArgs := []string{"--build-arg", fmt.Sprintf("NUM=%d", num)} var benchmarkDir string benchmarkDir, *err = buildKanikoImage(t.Logf, "", dockerfile, - buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, + buildArgs, []string{}, kanikoImage, contextDir, config.gcsBucket, config.gcsClient, config.serviceAccount, false) if *err != nil { return diff --git a/integration/config.go b/integration/config.go index 2a6df72a21..7bd0675320 100644 --- a/integration/config.go +++ b/integration/config.go @@ -16,7 +16,11 @@ limitations under the License. 
package integration -import "strings" +import ( + "strings" + + "cloud.google.com/go/storage" +) type integrationTestConfig struct { gcsBucket string @@ -25,6 +29,8 @@ type integrationTestConfig struct { hardlinkBaseImage string serviceAccount string dockerMajorVersion int + gcsClient *storage.Client + dockerfilesPattern string } const gcrRepoPrefix string = "gcr.io/" diff --git a/integration/gcs.go b/integration/gcs.go deleted file mode 100644 index 5680f1e7d0..0000000000 --- a/integration/gcs.go +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package integration - -import ( - "fmt" - "io/ioutil" - "log" - "os" - "os/exec" - "time" -) - -// CreateIntegrationTarball will take the contents of the integration directory and write -// them to a tarball in a temmporary dir. It will return a path to the tarball. 
-func CreateIntegrationTarball() (string, error) { - log.Println("Creating tarball of integration test files to use as build context") - dir, err := os.Getwd() - if err != nil { - return "", fmt.Errorf("Failed find path to integration dir: %w", err) - } - tempDir, err := ioutil.TempDir("", "") - if err != nil { - return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err) - } - contextFile := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano()) - cmd := exec.Command("tar", "-C", dir, "-zcvf", contextFile, ".") - _, err = RunCommandWithoutTest(cmd) - if err != nil { - return "", fmt.Errorf("Failed to create build context tarball from integration dir: %w", err) - } - return contextFile, err -} - -// UploadFileToBucket will upload the at filePath to gcsBucket. It will return the path -// of the file in gcsBucket. -func UploadFileToBucket(gcsBucket string, filePath string, gcsPath string) (string, error) { - dst := fmt.Sprintf("%s/%s", gcsBucket, gcsPath) - log.Printf("Uploading file at %s to GCS bucket at %s\n", filePath, dst) - - cmd := exec.Command("gsutil", "cp", filePath, dst) - out, err := RunCommandWithoutTest(cmd) - if err != nil { - log.Printf("Error uploading file %s to GCS at %s: %s", filePath, dst, err) - log.Println(string(out)) - return "", fmt.Errorf("Failed to copy tarball to GCS bucket %s: %w", gcsBucket, err) - } - - return dst, nil -} - -// DeleteFromBucket will remove the content at path. path should be the full path -// to a file in GCS. 
-func DeleteFromBucket(path string) error { - cmd := exec.Command("gsutil", "rm", path) - _, err := RunCommandWithoutTest(cmd) - if err != nil { - return fmt.Errorf("Failed to delete file %s from GCS: %w", path, err) - } - return err -} diff --git a/integration/images.go b/integration/images.go index cece599df0..5b68310aa7 100644 --- a/integration/images.go +++ b/integration/images.go @@ -18,6 +18,7 @@ package integration import ( "bytes" + "context" "fmt" "io/ioutil" "os" @@ -30,8 +31,10 @@ import ( "testing" "time" + "cloud.google.com/go/storage" "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/GoogleContainerTools/kaniko/pkg/util/bucket" ) const ( @@ -157,13 +160,16 @@ func GetVersionedKanikoImage(imageRepo, dockerfile string, version int) string { return strings.ToLower(imageRepo + kanikoPrefix + dockerfile + strconv.Itoa(version)) } -// FindDockerFiles will look for test docker files in the directory dockerfilesPath. -// These files must start with `Dockerfile_test`. If the file is one we are intentionally +// FindDockerFiles will look for test docker files in the directory dir +// and match the files against dockerfilesPattern. +// If the file is one we are intentionally // skipping, it will not be included in the returned list. 
-func FindDockerFiles(dockerfilesPath string) ([]string, error) { - allDockerfiles, err := filepath.Glob(path.Join(dockerfilesPath, "Dockerfile_test*")) +func FindDockerFiles(dir, dockerfilesPattern string) ([]string, error) { + pattern := filepath.Join(dir, dockerfilesPattern) + fmt.Printf("finding docker images with pattern %v\n", pattern) + allDockerfiles, err := filepath.Glob(pattern) if err != nil { - return []string{}, fmt.Errorf("Failed to find docker files at %s: %w", dockerfilesPath, err) + return []string{}, fmt.Errorf("Failed to find docker files with pattern %s: %w", dockerfilesPattern, err) } var dockerfiles []string @@ -285,7 +291,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(t *testing.T, config *integrat if _, present := d.filesBuilt[dockerfile]; present { return nil } - gcsBucket, serviceAccount, imageRepo := config.gcsBucket, config.serviceAccount, config.imageRepo + gcsBucket, gcsClient, serviceAccount, imageRepo := config.gcsBucket, config.gcsClient, config.serviceAccount, config.imageRepo var buildArgs []string buildArgFlag := "--build-arg" @@ -318,7 +324,7 @@ func (d *DockerFileBuilder) BuildImageWithContext(t *testing.T, config *integrat kanikoImage := GetKanikoImage(imageRepo, dockerfile) timer = timing.Start(dockerfile + "_kaniko") if _, err := buildKanikoImage(t.Logf, dockerfilesPath, dockerfile, buildArgs, additionalKanikoFlags, kanikoImage, - contextDir, gcsBucket, serviceAccount, true); err != nil { + contextDir, gcsBucket, gcsClient, serviceAccount, true); err != nil { return err } timing.DefaultRun.Stop(timer) @@ -443,6 +449,7 @@ func buildKanikoImage( kanikoImage string, contextDir string, gcsBucket string, + gcsClient *storage.Client, serviceAccount string, shdUpload bool, ) (string, error) { @@ -457,7 +464,11 @@ func buildKanikoImage( benchmarkFile := path.Join(benchmarkDir, dockerfile) fileName := fmt.Sprintf("run_%s_%s", time.Now().Format("2006-01-02-15:04"), dockerfile) dst := path.Join("benchmarks", fileName) - defer 
UploadFileToBucket(gcsBucket, benchmarkFile, dst) + file, err := os.Open(benchmarkFile) + if err != nil { + return "", err + } + defer bucket.Upload(context.Background(), gcsBucket, dst, file, gcsClient) } } diff --git a/integration/integration_test.go b/integration/integration_test.go index 2ace5184e9..0f4ef28dc8 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -17,6 +17,7 @@ limitations under the License. package integration import ( + "context" "encoding/json" "flag" "fmt" @@ -33,9 +34,11 @@ import ( "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/v1/daemon" "github.com/pkg/errors" + "google.golang.org/api/option" "github.com/GoogleContainerTools/kaniko/pkg/timing" "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/GoogleContainerTools/kaniko/pkg/util/bucket" "github.com/GoogleContainerTools/kaniko/testutil" ) @@ -86,22 +89,33 @@ func getDockerMajorVersion() int { func launchTests(m *testing.M) (int, error) { if config.isGcrRepository() { - contextFile, err := CreateIntegrationTarball() + contextFilePath, err := CreateIntegrationTarball() if err != nil { return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context") } - fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile) + bucketName, item, err := bucket.GetNameAndFilepathFromURI(config.gcsBucket) + if err != nil { + return 1, errors.Wrap(err, "failed to get bucket name from uri") + } + contextFile, err := os.Open(contextFilePath) + if err != nil { + return 1, fmt.Errorf("failed to read file at path %v: %w", contextFilePath, err) + } + err = bucket.Upload(context.Background(), bucketName, item, contextFile, config.gcsClient) if err != nil { return 1, errors.Wrap(err, "Failed to upload build context") } - if err = os.Remove(contextFile); err != nil { - return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", 
contextFile)) + if err = os.Remove(contextFilePath); err != nil { + return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFilePath)) } - RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) }) - defer DeleteFromBucket(fileInBucket) + deleteFunc := func() { + bucket.Delete(context.Background(), bucketName, item, config.gcsClient) + } + RunOnInterrupt(deleteFunc) + defer deleteFunc() } if err := buildRequiredImages(); err != nil { return 1, errors.Wrap(err, "Error while building images") @@ -119,18 +133,18 @@ func TestMain(m *testing.M) { os.Exit(1) } - if allDockerfiles, err = FindDockerFiles(dockerfilesPath); err != nil { + config = initIntegrationTestConfig() + if allDockerfiles, err = FindDockerFiles(dockerfilesPath, config.dockerfilesPattern); err != nil { fmt.Println("Coudn't create map of dockerfiles", err) os.Exit(1) - } else { - config = initIntegrationTestConfig() - exitCode, err := launchTests(m) - if err != nil { - fmt.Println(err) - } - os.Exit(exitCode) } + exitCode, err := launchTests(m) + if err != nil { + fmt.Println(err) + } + os.Exit(exitCode) + } func buildRequiredImages() error { @@ -859,9 +873,16 @@ func (i imageDetails) String() string { func initIntegrationTestConfig() *integrationTestConfig { var c integrationTestConfig + + var gcsEndpoint string + var disableGcsAuth bool flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.") flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.") flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.") + flag.StringVar(&gcsEndpoint, "gcs-endpoint", "", "Custom endpoint for GCS. 
Used for local integration tests") + flag.BoolVar(&disableGcsAuth, "disable-gcs-auth", false, "Disable GCS Authentication. Used for local integration tests") + // adds the possibility to run a single dockerfile. This is useful since running all images can exhaust the dockerhub pull limit + flag.StringVar(&c.dockerfilesPattern, "dockerfiles-pattern", "Dockerfile_test*", "The pattern to match dockerfiles with") flag.Parse() if len(c.serviceAccount) > 0 { @@ -886,6 +907,23 @@ func initIntegrationTestConfig() *integrationTestConfig { if !strings.HasSuffix(c.imageRepo, "/") { c.imageRepo = c.imageRepo + "/" } + + if c.gcsBucket != "" { + var opts []option.ClientOption + if gcsEndpoint != "" { + opts = append(opts, option.WithEndpoint(gcsEndpoint)) + } + if disableGcsAuth { + opts = append(opts, option.WithoutAuthentication()) + } + + gcsClient, err := bucket.NewClient(context.Background(), opts...) + if err != nil { + log.Fatalf("Could not create a new Google Storage Client: %s", err) + } + c.gcsClient = gcsClient + } + c.dockerMajorVersion = getDockerMajorVersion() c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest" c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest" @@ -893,7 +931,7 @@ func initIntegrationTestConfig() *integrationTestConfig { } func meetsRequirements() bool { - requiredTools := []string{"container-diff", "gsutil"} + requiredTools := []string{"container-diff"} hasRequirements := true for _, tool := range requiredTools { _, err := exec.LookPath(tool) diff --git a/integration/tar.go b/integration/tar.go new file mode 100644 index 0000000000..3bb51cb936 --- /dev/null +++ b/integration/tar.go @@ -0,0 +1,58 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "compress/gzip" + "fmt" + "io/ioutil" + "log" + "os" + "time" + + "github.com/GoogleContainerTools/kaniko/pkg/util" +) + +// CreateIntegrationTarball will take the contents of the integration directory and write +// them to a tarball in a temporary dir. It will return the path to the tarball. +func CreateIntegrationTarball() (string, error) { + log.Println("Creating tarball of integration test files to use as build context") + dir, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Failed find path to integration dir: %w", err) + } + tempDir, err := ioutil.TempDir("", "") + if err != nil { + return "", fmt.Errorf("Failed to create temporary directory to hold tarball: %w", err) + } + contextFilePath := fmt.Sprintf("%s/context_%d.tar.gz", tempDir, time.Now().UnixNano()) + + file, err := os.OpenFile(contextFilePath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) + if err != nil { + return "", err + } + + gzipWriter := gzip.NewWriter(file) + defer gzipWriter.Close() + + err = util.CreateTarballOfDirectory(dir, gzipWriter) + if err != nil { + return "", fmt.Errorf("creating tarball of integration dir: %w", err) + } + + return contextFilePath, nil +} diff --git a/pkg/buildcontext/gcs.go b/pkg/buildcontext/gcs.go index 87fb251efd..6af45d5c65 100644 --- a/pkg/buildcontext/gcs.go +++ b/pkg/buildcontext/gcs.go @@ -17,15 +17,15 @@ limitations under the License.
package buildcontext import ( + "fmt" "io" "os" "path/filepath" - "strings" - "cloud.google.com/go/storage" kConfig "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/GoogleContainerTools/kaniko/pkg/util/bucket" "github.com/sirupsen/logrus" "golang.org/x/net/context" ) @@ -36,27 +36,24 @@ type GCS struct { } func (g *GCS) UnpackTarFromBuildContext() (string, error) { - bucket, item := util.GetBucketAndItem(g.context) - return kConfig.BuildContextDir, unpackTarFromGCSBucket(bucket, item, kConfig.BuildContextDir) + bucketName, filepath, err := bucket.GetNameAndFilepathFromURI(g.context) + if err != nil { + return "", fmt.Errorf("getting bucketname and filepath from context: %w", err) + } + return kConfig.BuildContextDir, unpackTarFromGCSBucket(bucketName, filepath, kConfig.BuildContextDir) } func UploadToBucket(r io.Reader, dest string) error { ctx := context.Background() - context := strings.SplitAfter(dest, "://")[1] - bucketName, item := util.GetBucketAndItem(context) - client, err := storage.NewClient(ctx) + bucketName, filepath, err := bucket.GetNameAndFilepathFromURI(dest) if err != nil { - return err + return fmt.Errorf("getting bucketname and filepath from dest: %w", err) } - bucket := client.Bucket(bucketName) - w := bucket.Object(item).NewWriter(ctx) - if _, err := io.Copy(w, r); err != nil { - return err - } - if err := w.Close(); err != nil { + client, err := bucket.NewClient(ctx) + if err != nil { return err } - return nil + return bucket.Upload(ctx, bucketName, filepath, r, client) } // unpackTarFromGCSBucket unpacks the context.tar.gz file in the given bucket to the given directory @@ -77,15 +74,14 @@ func unpackTarFromGCSBucket(bucketName, item, directory string) error { // getTarFromBucket gets context.tar.gz from the GCS bucket and saves it to the filesystem // It returns the path to the tar file -func 
getTarFromBucket(bucketName, item, directory string) (string, error) { +func getTarFromBucket(bucketName, filepathInBucket, directory string) (string, error) { ctx := context.Background() - client, err := storage.NewClient(ctx) + client, err := bucket.NewClient(ctx) if err != nil { return "", err } - bucket := client.Bucket(bucketName) // Get the tarfile context.tar.gz from the GCS bucket, and save it to a tar object - reader, err := bucket.Object(item).NewReader(ctx) + reader, err := bucket.ReadCloser(ctx, bucketName, filepathInBucket, client) if err != nil { return "", err } diff --git a/pkg/buildcontext/s3.go b/pkg/buildcontext/s3.go index ff7285f895..b0880636fe 100644 --- a/pkg/buildcontext/s3.go +++ b/pkg/buildcontext/s3.go @@ -17,6 +17,7 @@ limitations under the License. package buildcontext import ( + "fmt" "os" "path/filepath" "strings" @@ -24,6 +25,7 @@ import ( kConfig "github.com/GoogleContainerTools/kaniko/pkg/config" "github.com/GoogleContainerTools/kaniko/pkg/constants" "github.com/GoogleContainerTools/kaniko/pkg/util" + "github.com/GoogleContainerTools/kaniko/pkg/util/bucket" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" @@ -37,7 +39,11 @@ type S3 struct { // UnpackTarFromBuildContext download and untar a file from s3 func (s *S3) UnpackTarFromBuildContext() (string, error) { - bucket, item := util.GetBucketAndItem(s.context) + bucket, item, err := bucket.GetNameAndFilepathFromURI(s.context) + if err != nil { + return "", fmt.Errorf("getting bucketname and filepath from context: %w", err) + } + option := session.Options{ SharedConfigState: session.SharedConfigEnable, } diff --git a/pkg/util/bucket/bucket_util.go b/pkg/util/bucket/bucket_util.go new file mode 100644 index 0000000000..9013d0dbaa --- /dev/null +++ b/pkg/util/bucket/bucket_util.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may 
not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bucket + +import ( + "context" + "fmt" + "io" + "net/url" + "strings" + + "cloud.google.com/go/storage" + "github.com/GoogleContainerTools/kaniko/pkg/constants" + "google.golang.org/api/option" +) + +// Upload uploads everything from Reader to the bucket under path +func Upload(ctx context.Context, bucketName string, path string, r io.Reader, client *storage.Client) error { + bucket := client.Bucket(bucketName) + w := bucket.Object(path).NewWriter(ctx) + if _, err := io.Copy(w, r); err != nil { + return err + } + if err := w.Close(); err != nil { + return err + } + return nil +} + +// Delete will remove the content at path. path should be the full path +// to a file in GCS. +func Delete(ctx context.Context, bucketName string, path string, client *storage.Client) error { + err := client.Bucket(bucketName).Object(path).Delete(ctx) + if err != nil { + return fmt.Errorf("failed to delete file at %s in gcs bucket %v: %w", path, bucketName, err) + } + return err +} + +// ReadCloser will create io.ReadCloser for the specified bucket and path +func ReadCloser(ctx context.Context, bucketName string, path string, client *storage.Client) (io.ReadCloser, error) { + bucket := client.Bucket(bucketName) + r, err := bucket.Object(path).NewReader(ctx) + if err != nil { + return nil, err + } + return r, nil +} + +// NewClient returns a new google storage client +func NewClient(ctx context.Context, opts ...option.ClientOption) (*storage.Client, error) { + client, err := storage.NewClient(ctx, opts...) 
+ if err != nil { + return nil, err + } + return client, err +} + +// GetNameAndFilepathFromURI returns the bucketname and the path to the item inside. +// Will error if provided URI is not a valid URL. +// If the filepath is empty, returns the contextTar filename +func GetNameAndFilepathFromURI(bucketURI string) (bucketName string, path string, err error) { + url, err := url.Parse(bucketURI) + if err != nil { + return "", "", err + } + bucketName = url.Host + // remove leading slash + filePath := strings.TrimPrefix(url.Path, "/") + if filePath == "" { + filePath = constants.ContextTar + } + return bucketName, filePath, nil +} diff --git a/pkg/util/bucket_util_test.go b/pkg/util/bucket/bucket_util_test.go similarity index 82% rename from pkg/util/bucket_util_test.go rename to pkg/util/bucket/bucket_util_test.go index dd0e7626d0..a9abb4cfb0 100644 --- a/pkg/util/bucket_util_test.go +++ b/pkg/util/bucket/bucket_util_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -package util +package bucket import ( "testing" @@ -29,28 +29,29 @@ func Test_GetBucketAndItem(t *testing.T) { context string expectedBucket string expectedItem string + expectedErr bool }{ { name: "three slashes", - context: "test1/test2/test3", + context: "gs://test1/test2/test3", expectedBucket: "test1", expectedItem: "test2/test3", }, { name: "two slashes", - context: "test1/test2", + context: "gs://test1/test2", expectedBucket: "test1", expectedItem: "test2", }, { name: "one slash", - context: "test1/", + context: "gs://test1/", expectedBucket: "test1", expectedItem: constants.ContextTar, }, { name: "zero slash", - context: "test1", + context: "gs://test1", expectedBucket: "test1", expectedItem: constants.ContextTar, }, @@ -58,7 +59,8 @@ func Test_GetBucketAndItem(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - gotBucket, gotItem := GetBucketAndItem(test.context) + gotBucket, gotItem, err := GetNameAndFilepathFromURI(test.context) + testutil.CheckError(t, test.expectedErr, err) testutil.CheckDeepEqual(t, test.expectedBucket, gotBucket) testutil.CheckDeepEqual(t, test.expectedItem, gotItem) }) diff --git a/pkg/util/bucket_util.go b/pkg/util/bucket_util.go deleted file mode 100644 index 6627339af9..0000000000 --- a/pkg/util/bucket_util.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2018 Google LLC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package util - -import ( - "strings" - - "github.com/GoogleContainerTools/kaniko/pkg/constants" -) - -func GetBucketAndItem(context string) (string, string) { - split := strings.SplitN(context, "/", 2) - if len(split) == 2 && split[1] != "" { - return split[0], split[1] - } - return split[0], constants.ContextTar -} diff --git a/pkg/util/tar_util.go b/pkg/util/tar_util.go index 963babc9ff..47436b9ae6 100644 --- a/pkg/util/tar_util.go +++ b/pkg/util/tar_util.go @@ -22,6 +22,7 @@ import ( "compress/gzip" "fmt" "io" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -50,6 +51,26 @@ func NewTar(f io.Writer) Tar { } } +func CreateTarballOfDirectory(pathToDir string, f io.Writer) error { + if !filepath.IsAbs(pathToDir) { + return errors.New("pathToDir is not absolute") + } + tarWriter := NewTar(f) + defer tarWriter.Close() + + walkFn := func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !filepath.IsAbs(path) { + return fmt.Errorf("path %v is not absolute, cant read file", path) + } + return tarWriter.AddFileToTar(path) + } + + return filepath.WalkDir(pathToDir, walkFn) +} + // Close will close any open streams used by Tar. 
func (t *Tar) Close() { t.w.Close() diff --git a/pkg/util/tar_util_test.go b/pkg/util/tar_util_test.go index a6681247a4..565b2881de 100644 --- a/pkg/util/tar_util_test.go +++ b/pkg/util/tar_util_test.go @@ -20,6 +20,7 @@ import ( "archive/tar" "bytes" "compress/gzip" + "fmt" "io" "io/ioutil" "os" @@ -129,3 +130,46 @@ func createTar(testdir string, writer io.Writer) error { } return nil } + +func Test_CreateTarballOfDirectory(t *testing.T) { + tmpDir := t.TempDir() + wantErr := false + createFilesInTempDir(t, tmpDir) + f := &bytes.Buffer{} + err := CreateTarballOfDirectory(tmpDir, f) + testutil.CheckError(t, wantErr, err) + + extracedFilesDir := filepath.Join(tmpDir, "extracted") + err = os.Mkdir(extracedFilesDir, 0755) + if err != nil { + t.Error(err) + return + } + files, err := UnTar(f, extracedFilesDir) + testutil.CheckError(t, wantErr, err) + for _, filePath := range files { + fileInfo, err := os.Lstat(filePath) + testutil.CheckError(t, wantErr, err) + if fileInfo.IsDir() { + // skip directory + continue + } + file, err := os.Open(filePath) + testutil.CheckError(t, wantErr, err) + body, err := io.ReadAll(file) + testutil.CheckError(t, wantErr, err) + index := filepath.Base(filePath) + testutil.CheckDeepEqual(t, string(body), fmt.Sprintf("hello from %s\n", index)) + } +} + +func createFilesInTempDir(t *testing.T, tmpDir string) { + for i := 0; i < 2; i++ { + fName := filepath.Join(tmpDir, fmt.Sprint(i)) + content := fmt.Sprintf("hello from %d\n", i) + if err := os.WriteFile(fName, []byte(content), 0666); err != nil { + t.Error(err) + return + } + } +} diff --git a/scripts/integration-test.sh b/scripts/integration-test.sh index 0cbcd2277e..f576518377 100755 --- a/scripts/integration-test.sh +++ b/scripts/integration-test.sh @@ -13,9 +13,19 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-set -ex +set -e + +function start_local_registry { + docker start registry || docker run --name registry -d -p 5000:5000 registry:2 +} + +# TODO: to get this working, we need a way to override the gcs endpoint of kaniko at runtime +# If this is done, integration test main includes flags --gcs-endpoint and --disable-gcs-auth +# to mock the gcs endpoints and upload files to the fake-gcs-server +function start_fake_gcs_server { + docker start fake-gcs-server || docker run -d -p 4443:4443 --name fake-gcs-server fsouza/fake-gcs-server -scheme http +} -GCS_BUCKET="${GCS_BUCKET:-gs://kaniko-test-bucket}" IMAGE_REPO="${IMAGE_REPO:-gcr.io/kaniko-test}" docker version @@ -23,4 +33,26 @@ docker version echo "Running integration tests..." make out/executor make out/warmer -go test ./integration/... --bucket "${GCS_BUCKET}" --repo "${IMAGE_REPO}" --timeout 50m "$@" + +FLAGS=( + "--timeout=50m" +) + +if [[ -n $DOCKERFILE_PATTERN ]]; then + FLAGS+=("--dockerfiles-pattern=$DOCKERFILE_PATTERN") +fi + +if [[ -n $LOCAL ]]; then + echo "running in local mode, mocking registry and gcs bucket..." + start_local_registry + + IMAGE_REPO="localhost:5000/kaniko-test" + GCS_BUCKET="" +fi + +FLAGS+=( + "--bucket=${GCS_BUCKET}" + "--repo=${IMAGE_REPO}" +) + +go test ./integration/... "${FLAGS[@]}" "$@"